author     Soren Brinkmann <soren.brinkmann@xilinx.com>  2014-05-04 18:43:02 -0400
committer  David S. Miller <davem@davemloft.net>         2014-05-05 17:11:18 -0400
commit     c8ea5a22bd3b27d68ec2f95483ce8bfe7f114933 (patch)
tree       28ab536723891f977119ac51d2b1dbc00416a2ae /drivers/net/ethernet/cadence
parent     504ad98df3a6b027ce997ca8f620e949cafb151f (diff)
net: macb: Fix race between HW and driver
Under "heavy" RX load, the driver cannot handle the descriptors fast enough. In detail: when a descriptor is consumed, its used flag is cleared, and once the RX budget is consumed, all descriptors with a cleared used flag are prepared to receive more data. Under load, though, the HW may constantly receive more data and use those descriptors with a cleared used flag before they are actually prepared for their next use.

The head and tail pointers into the RX ring are always valid, so the clearing and checking of the used flag can be omitted.

Signed-off-by: Soren Brinkmann <soren.brinkmann@xilinx.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
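To illustrate the bookkeeping this change relies on, here is a minimal, self-contained user-space sketch (not the driver code itself): free RX-ring slots are counted purely from the software head/tail indices, CIRC_SPACE-style, so the refill path never needs to inspect a per-descriptor used flag that the hardware may already have claimed again. RING_SIZE and ring_space() are illustrative stand-ins for the driver's RX_RING_SIZE and the kernel's CIRC_SPACE() macro.

#include <stdio.h>

#define RING_SIZE 8   /* power of two, like the driver's RX ring */

/* Same arithmetic as the kernel's CIRC_SPACE(): slots still free to prepare. */
static unsigned int ring_space(unsigned int head, unsigned int tail)
{
        return (tail - head - 1) & (RING_SIZE - 1);
}

int main(void)
{
        unsigned int prepared_head = 0; /* next slot to prepare (refill path)  */
        unsigned int tail = 0;          /* next slot to consume (RX handler)   */

        /* Initial fill: the index arithmetic leaves exactly one slot unused. */
        while (ring_space(prepared_head, tail) > 0)
                prepared_head++;
        printf("initially prepared %u slots\n", prepared_head);

        /* Pretend the RX path consumed five descriptors under load ... */
        tail += 5;

        /* ... and refill strictly from the indices -- no used-flag check. */
        while (ring_space(prepared_head, tail) > 0) {
                printf("re-preparing slot %u\n", prepared_head % RING_SIZE);
                prepared_head++;
        }
        return 0;
}

Because a slot can only be re-prepared once the consumer index has moved past it, the invariant that the removed used-flag check tried to establish now holds by construction, even while the hardware keeps filling descriptors at full rate.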
Diffstat (limited to 'drivers/net/ethernet/cadence')
-rw-r--r--  drivers/net/ethernet/cadence/macb.c  10
1 file changed, 0 insertions, 10 deletions
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 3e13aa31548a..e9daa072ebb4 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -599,25 +599,16 @@ static void gem_rx_refill(struct macb *bp)
 {
         unsigned int entry;
         struct sk_buff *skb;
-        struct macb_dma_desc *desc;
         dma_addr_t paddr;
 
         while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
-                u32 addr, ctrl;
-
                 entry = macb_rx_ring_wrap(bp->rx_prepared_head);
-                desc = &bp->rx_ring[entry];
 
                 /* Make hw descriptor updates visible to CPU */
                 rmb();
 
-                addr = desc->addr;
-                ctrl = desc->ctrl;
                 bp->rx_prepared_head++;
 
-                if ((addr & MACB_BIT(RX_USED)))
-                        continue;
-
                 if (bp->rx_skbuff[entry] == NULL) {
                         /* allocate sk_buff for this free entry in ring */
                         skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
@@ -698,7 +689,6 @@ static int gem_rx(struct macb *bp, int budget)
                 if (!(addr & MACB_BIT(RX_USED)))
                         break;
 
-                desc->addr &= ~MACB_BIT(RX_USED);
                 bp->rx_tail++;
                 count++;
 