author		Lennert Buytenhek <buytenh@wantstofly.org>	2008-08-23 22:33:36 -0400
committer	Lennert Buytenhek <buytenh@marvell.com>	2008-09-05 00:33:58 -0400
commit		2257e05c170561bb1168ee50205ae920008c37fb (patch)
tree		90982c8a2a60aaa246d0008581a9966f4b972034 /drivers/net/mv643xx_eth.c
parent		78fff83b0347d72e996cef883f09c3e5b8544f73 (diff)
mv643xx_eth: get rid of receive-side locking
By having the receive out-of-memory handling timer schedule the napi poll handler, and by doing OOM processing from the napi poll handler itself, all code that touches receive state moves into napi context. This lets us get rid of all explicit locking in the receive paths: the only mutual exclusion we still need there is protection against reentering ourselves, and that is already provided by napi synchronisation.

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
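In outline, the change replaces the per-queue refill timer with a single per-port timer whose only job is to schedule the napi poll handler; refill, and the decision to re-arm the timer when allocation fails, then happens in the poll handler itself. A condensed sketch of that flow, abbreviated from the diff below (tx reclaim and the interrupt plumbing are omitted):

	/* The OOM timer no longer touches receive state itself. */
	static inline void oom_timer_wrapper(unsigned long data)
	{
		struct mv643xx_eth_private *mp = (void *)data;

		napi_schedule(&mp->napi);
	}

	static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
	{
		struct mv643xx_eth_private *mp =
			container_of(napi, struct mv643xx_eth_private, napi);
		int work_done = 0;
		int oom = 0;
		int i;

		/* Process and refill every enabled rx queue from napi context. */
		for (i = 7; work_done < budget && i >= 0; i--) {
			if (mp->rxq_mask & (1 << i)) {
				struct rx_queue *rxq = mp->rxq + i;

				work_done += rxq_process(rxq, budget - work_done);
				work_done += rxq_refill(rxq, budget - work_done, &oom);
			}
		}

		if (work_done < budget) {
			/* Retry the refill later if skb allocation failed. */
			if (oom)
				mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
			netif_rx_complete(mp->dev, napi);
			wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
		}

		return work_done;
	}

Since the poll handler is now the only code that touches receive state, and napi guarantees it never runs concurrently with itself for a given port, rxq_process() and rxq_refill() no longer need to take mp->lock.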
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--	drivers/net/mv643xx_eth.c	132
1 file changed, 68 insertions(+), 64 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 2b7e76d9ac0..3831a8bffbd 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -329,8 +329,6 @@ struct rx_queue {
 	dma_addr_t rx_desc_dma;
 	int rx_desc_area_size;
 	struct sk_buff **rx_skb;
-
-	struct timer_list rx_oom;
 };
 
 struct tx_queue {
@@ -372,6 +370,7 @@ struct mv643xx_eth_private {
 	u8 rxq_mask;
 	int rxq_primary;
 	struct napi_struct napi;
+	struct timer_list rx_oom;
 	struct rx_queue rxq[8];
 
 	/*
@@ -473,44 +472,43 @@ static void __txq_maybe_wake(struct tx_queue *txq)
 /* rx ***********************************************************************/
 static void txq_reclaim(struct tx_queue *txq, int force);
 
-static void rxq_refill(struct rx_queue *rxq)
+static int rxq_refill(struct rx_queue *rxq, int budget, int *oom)
 {
-	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
-	unsigned long flags;
+	int skb_size;
+	int refilled;
 
-	spin_lock_irqsave(&mp->lock, flags);
+	/*
+	 * Reserve 2+14 bytes for an ethernet header (the hardware
+	 * automatically prepends 2 bytes of dummy data to each
+	 * received packet), 16 bytes for up to four VLAN tags, and
+	 * 4 bytes for the trailing FCS -- 36 bytes total.
+	 */
+	skb_size = rxq_to_mp(rxq)->dev->mtu + 36;
+
+	/*
+	 * Make sure that the skb size is a multiple of 8 bytes, as
+	 * the lower three bits of the receive descriptor's buffer
+	 * size field are ignored by the hardware.
+	 */
+	skb_size = (skb_size + 7) & ~7;
 
-	while (rxq->rx_desc_count < rxq->rx_ring_size) {
-		int skb_size;
+	refilled = 0;
+	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
 		struct sk_buff *skb;
 		int unaligned;
 		int rx;
 
-		/*
-		 * Reserve 2+14 bytes for an ethernet header (the
-		 * hardware automatically prepends 2 bytes of dummy
-		 * data to each received packet), 16 bytes for up to
-		 * four VLAN tags, and 4 bytes for the trailing FCS
-		 * -- 36 bytes total.
-		 */
-		skb_size = mp->dev->mtu + 36;
-
-		/*
-		 * Make sure that the skb size is a multiple of 8
-		 * bytes, as the lower three bits of the receive
-		 * descriptor's buffer size field are ignored by
-		 * the hardware.
-		 */
-		skb_size = (skb_size + 7) & ~7;
-
 		skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
-		if (skb == NULL)
+		if (skb == NULL) {
+			*oom = 1;
 			break;
+		}
 
 		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
 		if (unaligned)
 			skb_reserve(skb, dma_get_cache_alignment() - unaligned);
 
+		refilled++;
 		rxq->rx_desc_count++;
 
 		rx = rxq->rx_used_desc++;
@@ -534,15 +532,7 @@ static void rxq_refill(struct rx_queue *rxq)
 		skb_reserve(skb, 2);
 	}
 
-	if (rxq->rx_desc_count != rxq->rx_ring_size)
-		mod_timer(&rxq->rx_oom, jiffies + (HZ / 10));
-
-	spin_unlock_irqrestore(&mp->lock, flags);
-}
-
-static inline void rxq_refill_timer_wrapper(unsigned long data)
-{
-	rxq_refill((struct rx_queue *)data);
+	return refilled;
 }
 
 static int rxq_process(struct rx_queue *rxq, int budget)
@@ -556,17 +546,12 @@ static int rxq_process(struct rx_queue *rxq, int budget)
 		struct rx_desc *rx_desc;
 		unsigned int cmd_sts;
 		struct sk_buff *skb;
-		unsigned long flags;
-
-		spin_lock_irqsave(&mp->lock, flags);
 
 		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];
 
 		cmd_sts = rx_desc->cmd_sts;
-		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
-			spin_unlock_irqrestore(&mp->lock, flags);
+		if (cmd_sts & BUFFER_OWNED_BY_DMA)
 			break;
-		}
 		rmb();
 
 		skb = rxq->rx_skb[rxq->rx_curr_desc];
@@ -576,8 +561,6 @@ static int rxq_process(struct rx_queue *rxq, int budget)
 		if (rxq->rx_curr_desc == rxq->rx_ring_size)
 			rxq->rx_curr_desc = 0;
 
-		spin_unlock_irqrestore(&mp->lock, flags);
-
 		dma_unmap_single(NULL, rx_desc->buf_ptr,
 				 rx_desc->buf_size, DMA_FROM_DEVICE);
 		rxq->rx_desc_count--;
@@ -635,15 +618,14 @@ static int rxq_process(struct rx_queue *rxq, int budget)
 		mp->dev->last_rx = jiffies;
 	}
 
-	rxq_refill(rxq);
-
 	return rx;
 }
 
 static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 {
 	struct mv643xx_eth_private *mp;
-	int rx;
+	int work_done;
+	int oom;
 	int i;
 
 	mp = container_of(napi, struct mv643xx_eth_private, napi);
@@ -663,17 +645,32 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 	}
 #endif
 
-	rx = 0;
-	for (i = 7; rx < budget && i >= 0; i--)
-		if (mp->rxq_mask & (1 << i))
-			rx += rxq_process(mp->rxq + i, budget - rx);
+	work_done = 0;
+	oom = 0;
+	for (i = 7; work_done < budget && i >= 0; i--) {
+		if (mp->rxq_mask & (1 << i)) {
+			struct rx_queue *rxq = mp->rxq + i;
 
-	if (rx < budget) {
+			work_done += rxq_process(rxq, budget - work_done);
+			work_done += rxq_refill(rxq, budget - work_done, &oom);
+		}
+	}
+
+	if (work_done < budget) {
+		if (oom)
+			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
 		netif_rx_complete(mp->dev, napi);
 		wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
 	}
 
-	return rx;
+	return work_done;
+}
+
+static inline void oom_timer_wrapper(unsigned long data)
+{
+	struct mv643xx_eth_private *mp = (void *)data;
+
+	napi_schedule(&mp->napi);
 }
 
 
@@ -1565,10 +1562,6 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
 					nexti * sizeof(struct rx_desc);
 	}
 
-	init_timer(&rxq->rx_oom);
-	rxq->rx_oom.data = (unsigned long)rxq;
-	rxq->rx_oom.function = rxq_refill_timer_wrapper;
-
 	return 0;
 
 
@@ -1591,8 +1584,6 @@ static void rxq_deinit(struct rx_queue *rxq)
 
 	rxq_disable(rxq);
 
-	del_timer_sync(&rxq->rx_oom);
-
 	for (i = 0; i < rxq->rx_ring_size; i++) {
 		if (rxq->rx_skb[i]) {
 			dev_kfree_skb(rxq->rx_skb[i]);
@@ -1854,7 +1845,7 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 		wrl(mp, INT_MASK(mp->port_num), 0x00000000);
 		rdl(mp, INT_MASK(mp->port_num));
 
-		netif_rx_schedule(dev, &mp->napi);
+		napi_schedule(&mp->napi);
 	}
 
 	/*
@@ -2041,6 +2032,7 @@ static int mv643xx_eth_open(struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	int err;
+	int oom;
 	int i;
 
 	wrl(mp, INT_CAUSE(mp->port_num), 0);
@@ -2056,6 +2048,9 @@ static int mv643xx_eth_open(struct net_device *dev)
 
 	init_mac_tables(mp);
 
+	napi_enable(&mp->napi);
+
+	oom = 0;
 	for (i = 0; i < 8; i++) {
 		if ((mp->rxq_mask & (1 << i)) == 0)
 			continue;
@@ -2068,7 +2063,12 @@ static int mv643xx_eth_open(struct net_device *dev)
 			goto out;
 		}
 
-		rxq_refill(mp->rxq + i);
+		rxq_refill(mp->rxq + i, INT_MAX, &oom);
+	}
+
+	if (oom) {
+		mp->rx_oom.expires = jiffies + (HZ / 10);
+		add_timer(&mp->rx_oom);
 	}
 
 	for (i = 0; i < 8; i++) {
@@ -2084,8 +2084,6 @@ static int mv643xx_eth_open(struct net_device *dev)
 		}
 	}
 
-	napi_enable(&mp->napi);
-
 	netif_carrier_off(dev);
 	netif_stop_queue(dev);
 
@@ -2150,6 +2148,8 @@ static int mv643xx_eth_stop(struct net_device *dev)
 
 	napi_disable(&mp->napi);
 
+	del_timer_sync(&mp->rx_oom);
+
 	netif_carrier_off(dev);
 	netif_stop_queue(dev);
 
@@ -2613,8 +2613,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 
 	mp->dev = dev;
 
-	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 64);
-
 	set_params(mp, pd);
 
 	spin_lock_init(&mp->lock);
@@ -2633,6 +2631,12 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 	}
 	init_pscr(mp, pd->speed, pd->duplex);
 
+	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128);
+
+	init_timer(&mp->rx_oom);
+	mp->rx_oom.data = (unsigned long)mp;
+	mp->rx_oom.function = oom_timer_wrapper;
+
 
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	BUG_ON(!res);