author	Florian Fainelli <f.fainelli@gmail.com>	2015-05-28 18:24:43 -0400
committer	David S. Miller <davem@davemloft.net>	2015-05-31 02:51:17 -0400
commit	c73b01837eeeba91b9c22c67c6372a6b73f08473 (patch)
tree	b868339e4d147662449e6451d2ad9a85b2dd13b7 /drivers/net
parent	baf387a8edaa4a55afeaf4f498d3891ddcb03fb7 (diff)
net: systemport: rewrite bcm_sysport_rx_refill
Currently, bcm_sysport_desc_rx() calls bcm_sysport_rx_refill() at the end of
the Rx packet processing loop, after the current Rx packet has already been
passed to napi_gro_receive(). However, bcm_sysport_rx_refill() might fail to
allocate a new Rx skb, thus leaving a hole on the Rx queue where no valid Rx
buffer exists. To eliminate this situation:

1. Rewrite bcm_sysport_rx_refill() to retain the current Rx skb on the Rx
   queue if a new replacement Rx skb can't be allocated and DMA-mapped. In
   this case, the data on the current Rx skb is effectively dropped.

2. Modify bcm_sysport_desc_rx() to call bcm_sysport_rx_refill() at the top
   of the Rx packet processing loop, so that the new replacement Rx skb is
   already in place before the current Rx skb is processed.

This is loosely inspired from d6707bec5986 ("net: bcmgenet: rewrite
bcmgenet_rx_refill()").

Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
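The pattern generalizes beyond this driver: never take a buffer off the ring
until its replacement is already allocated, so an allocation failure costs one
packet rather than a ring slot. Below is a minimal, self-contained C sketch of
that swap-on-refill idea; plain malloc()/free() stand in for skb allocation,
DMA mapping, and napi_gro_receive(), and every name in it is illustrative
rather than taken from the driver.

#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 4
#define BUF_LEN   64

static char *ring[RING_SIZE];	/* one Rx buffer per ring slot */

/* Swap a fresh buffer into slot i and return the old one. On allocation
 * failure, return NULL and leave the slot untouched: the ring never has
 * a hole, and the caller simply drops that one packet.
 */
static char *ring_refill(unsigned int i)
{
	char *new_buf = malloc(BUF_LEN);
	char *old_buf;

	if (!new_buf)
		return NULL;	/* current buffer stays on the ring */

	old_buf = ring[i];
	ring[i] = new_buf;	/* replacement is in place first */
	return old_buf;		/* now safe to hand off the old data */
}

int main(void)
{
	unsigned int i, read_ptr = 0;

	for (i = 0; i < RING_SIZE; i++)
		ring[i] = malloc(BUF_LEN);

	/* Refill first, then process: mirrors the reworked Rx loop */
	for (i = 0; i < 8; i++) {
		char *pkt = ring_refill(read_ptr);

		if (!pkt) {
			printf("slot %u: packet dropped\n", read_ptr);
		} else {
			printf("slot %u: packet processed\n", read_ptr);
			free(pkt);	/* stands in for passing the skb up */
		}

		if (++read_ptr == RING_SIZE)
			read_ptr = 0;
	}

	for (i = 0; i < RING_SIZE; i++)
		free(ring[i]);

	return 0;
}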
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/ethernet/broadcom/bcmsysport.c	81
1 file changed, 41 insertions(+), 40 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 267330ccd595..d777b0db9e63 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -524,62 +524,70 @@ static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
 	dma_unmap_addr_set(cb, dma_addr, 0);
 }
 
-static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
-				 struct bcm_sysport_cb *cb)
+static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
+					     struct bcm_sysport_cb *cb)
 {
 	struct device *kdev = &priv->pdev->dev;
 	struct net_device *ndev = priv->netdev;
+	struct sk_buff *skb, *rx_skb;
 	dma_addr_t mapping;
-	int ret;
 
-	cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
-	if (!cb->skb) {
+	/* Allocate a new SKB for a new packet */
+	skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
+	if (!skb) {
+		priv->mib.alloc_rx_buff_failed++;
 		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
-		return -ENOMEM;
+		return NULL;
 	}
 
-	mapping = dma_map_single(kdev, cb->skb->data,
+	mapping = dma_map_single(kdev, skb->data,
 				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
-	ret = dma_mapping_error(kdev, mapping);
-	if (ret) {
+	if (dma_mapping_error(kdev, mapping)) {
 		priv->mib.rx_dma_failed++;
-		bcm_sysport_free_cb(cb);
+		dev_kfree_skb_any(skb);
 		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
-		return ret;
+		return NULL;
 	}
 
+	/* Grab the current SKB on the ring */
+	rx_skb = cb->skb;
+	if (likely(rx_skb))
+		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
+
+	/* Put the new SKB on the ring */
+	cb->skb = skb;
 	dma_unmap_addr_set(cb, dma_addr, mapping);
 	dma_desc_set_addr(priv, cb->bd_addr, mapping);
 
 	netif_dbg(priv, rx_status, ndev, "RX refill\n");
 
-	return 0;
+	/* Return the current SKB to the caller */
+	return rx_skb;
 }
 
 static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
 {
 	struct bcm_sysport_cb *cb;
-	int ret = 0;
+	struct sk_buff *skb;
 	unsigned int i;
 
 	for (i = 0; i < priv->num_rx_bds; i++) {
 		cb = &priv->rx_cbs[i];
-		if (cb->skb)
-			continue;
-
-		ret = bcm_sysport_rx_refill(priv, cb);
-		if (ret)
-			break;
+		skb = bcm_sysport_rx_refill(priv, cb);
+		if (skb)
+			dev_kfree_skb(skb);
+		if (!cb->skb)
+			return -ENOMEM;
 	}
 
-	return ret;
+	return 0;
 }
 
 /* Poll the hardware for up to budget packets to process */
 static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 					unsigned int budget)
 {
-	struct device *kdev = &priv->pdev->dev;
 	struct net_device *ndev = priv->netdev;
 	unsigned int processed = 0, to_process;
 	struct bcm_sysport_cb *cb;
@@ -587,7 +595,6 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 	unsigned int p_index;
 	u16 len, status;
 	struct bcm_rsb *rsb;
-	int ret;
 
 	/* Determine how much we should process since last call */
 	p_index = rdma_readl(priv, RDMA_PROD_INDEX);
@@ -605,13 +612,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 
 	while ((processed < to_process) && (processed < budget)) {
 		cb = &priv->rx_cbs[priv->rx_read_ptr];
-		skb = cb->skb;
-
-		processed++;
-		priv->rx_read_ptr++;
+		skb = bcm_sysport_rx_refill(priv, cb);
 
-		if (priv->rx_read_ptr == priv->num_rx_bds)
-			priv->rx_read_ptr = 0;
 
 		/* We do not have a backing SKB, so we do not a corresponding
 		 * DMA mapping for this incoming packet since
@@ -622,12 +624,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 			netif_err(priv, rx_err, ndev, "out of memory!\n");
 			ndev->stats.rx_dropped++;
 			ndev->stats.rx_errors++;
-			goto refill;
+			goto next;
 		}
 
-		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
-				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
-
 		/* Extract the Receive Status Block prepended */
 		rsb = (struct bcm_rsb *)skb->data;
 		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
@@ -643,8 +642,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
 			ndev->stats.rx_dropped++;
 			ndev->stats.rx_errors++;
-			bcm_sysport_free_cb(cb);
-			goto refill;
+			dev_kfree_skb_any(skb);
+			goto next;
 		}
 
 		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
@@ -653,8 +652,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 			ndev->stats.rx_over_errors++;
 			ndev->stats.rx_dropped++;
 			ndev->stats.rx_errors++;
-			bcm_sysport_free_cb(cb);
-			goto refill;
+			dev_kfree_skb_any(skb);
+			goto next;
 		}
 
 		skb_put(skb, len);
@@ -681,10 +680,12 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 		ndev->stats.rx_bytes += len;
 
 		napi_gro_receive(&priv->napi, skb);
-refill:
-		ret = bcm_sysport_rx_refill(priv, cb);
-		if (ret)
-			priv->mib.alloc_rx_buff_failed++;
+next:
+		processed++;
+		priv->rx_read_ptr++;
+
+		if (priv->rx_read_ptr == priv->num_rx_bds)
+			priv->rx_read_ptr = 0;
 	}
 
 	return processed;