author     Neil Horman <nhorman@tuxdriver.com>    2018-01-03 13:09:23 -0500
committer  David S. Miller <davem@davemloft.net>  2018-01-03 13:44:14 -0500
commit     ee4aa8df70fa6d76bd776c025dc0d8d746c18317
tree       294265ef55cbeafb5189f3bd6fed9cd8ec395f2d
parent     74c88af59fa31bc1bbb3f795b3bf2636e8b2fe2e
3c59x: fix missing dma_mapping_error check and bad ring refill logic
A few spots in 3c59x missed calls to dma_mapping_error checks, causing
WARN_ONs to trigger. Clean those up. While we're at it, refactor the
refill code a bit so that if skb allocation or dma mapping fails, we
recycle the existing buffer. This prevents holes in the rx ring, and
makes for much simpler logic.

Note: This is compile-tested only. Ted, if you could run this and
confirm that it continues to work properly, I would appreciate it, as I
currently don't have access to this hardware.
Signed-off-by: Neil Horman <nhorman@redhat.com>
CC: Steffen Klassert <klassert@mathematik.tu-chemnitz.de>
CC: "David S. Miller" <davem@davemloft.net>
Reported-by: tedheadster@gmail.com
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--   drivers/net/ethernet/3com/3c59x.c | 90
1 file changed, 38 insertions(+), 52 deletions(-)
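For readers following the refactor: the heart of the new refill logic is "allocate and map the replacement buffer first, and recycle the old buffer on any failure," so the rx ring never develops a hole and the rx_oom_timer machinery becomes unnecessary. The following is a minimal sketch of that pattern, not the driver code itself; rx_slot, alloc_rx_buf(), map_rx_buf() and pass_up() are hypothetical stand-ins for the ring entry, netdev_alloc_skb_ip_align(), the pci_map_single()/dma_mapping_error() pair, and netif_rx() as used in the hunks below.

/* Hypothetical sketch of recycle-on-failure refill.  The real patch
 * does this inline in boomerang_rx() against vp->rx_ring[entry] and
 * vp->rx_skbuff[entry].
 */
static void rx_refill_one(struct rx_slot *slot)
{
        struct sk_buff *newskb = alloc_rx_buf();        /* may fail */
        dma_addr_t newdma;

        if (!newskb)
                goto recycle;                   /* keep the old buffer */
        if (map_rx_buf(newskb, &newdma)) {      /* mapping failed */
                consume_skb(newskb);
                goto recycle;
        }
        pass_up(slot->skb);     /* hand the filled buffer to the stack */
        slot->skb = newskb;     /* install the replacement */
        slot->dma = newdma;
recycle:
        slot->status = 0;       /* re-arm the descriptor either way */
}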
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index f4e13a7014bd..36c8950dbd2d 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -602,7 +602,7 @@ struct vortex_private {
 	struct sk_buff* rx_skbuff[RX_RING_SIZE];
 	struct sk_buff* tx_skbuff[TX_RING_SIZE];
 	unsigned int cur_rx, cur_tx;		/* The next free ring entry */
-	unsigned int dirty_rx, dirty_tx;	/* The ring entries to be free()ed. */
+	unsigned int dirty_tx;			/* The ring entries to be free()ed. */
 	struct vortex_extra_stats xstats;	/* NIC-specific extra stats */
 	struct sk_buff *tx_skb;			/* Packet being eaten by bus master ctrl. */
 	dma_addr_t tx_skb_dma;			/* Allocated DMA address for bus master ctrl DMA. */
@@ -618,7 +618,6 @@ struct vortex_private {
 
 	/* The remainder are related to chip state, mostly media selection. */
 	struct timer_list timer;		/* Media selection timer. */
-	struct timer_list rx_oom_timer;		/* Rx skb allocation retry timer */
 	int options;				/* User-settable misc. driver options. */
 	unsigned int media_override:4,		/* Passed-in media type. */
 		default_media:4,		/* Read from the EEPROM/Wn3_Config. */
@@ -760,7 +759,6 @@ static void mdio_sync(struct vortex_private *vp, int bits);
 static int mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
 static void vortex_timer(struct timer_list *t);
-static void rx_oom_timer(struct timer_list *t);
 static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
 				     struct net_device *dev);
 static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
@@ -1601,7 +1599,6 @@ vortex_up(struct net_device *dev)
 
 	timer_setup(&vp->timer, vortex_timer, 0);
 	mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait));
-	timer_setup(&vp->rx_oom_timer, rx_oom_timer, 0);
 
 	if (vortex_debug > 1)
 		pr_debug("%s: Initial media type %s.\n",
@@ -1676,7 +1673,7 @@ vortex_up(struct net_device *dev)
 		window_write16(vp, 0x0040, 4, Wn4_NetDiag);
 
 	if (vp->full_bus_master_rx) { /* Boomerang bus master. */
-		vp->cur_rx = vp->dirty_rx = 0;
+		vp->cur_rx = 0;
 		/* Initialize the RxEarly register as recommended. */
 		iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
 		iowrite32(0x0020, ioaddr + PktStatus);
@@ -1729,6 +1726,7 @@ vortex_open(struct net_device *dev)
 	struct vortex_private *vp = netdev_priv(dev);
 	int i;
 	int retval;
+	dma_addr_t dma;
 
 	/* Use the now-standard shared IRQ implementation. */
 	if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
@@ -1753,7 +1751,11 @@ vortex_open(struct net_device *dev)
 				break;			/* Bad news!  */
 
 			skb_reserve(skb, NET_IP_ALIGN);	/* Align IP on 16 byte boundaries */
-			vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
+			dma = pci_map_single(VORTEX_PCI(vp), skb->data,
+					     PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+			if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
+				break;
+			vp->rx_ring[i].addr = cpu_to_le32(dma);
 		}
 		if (i != RX_RING_SIZE) {
 			pr_emerg("%s: no memory for rx ring\n", dev->name);
@@ -2067,6 +2069,12 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		int len = (skb->len + 3) & ~3;
 		vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
 						PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
+			dev_kfree_skb_any(skb);
+			dev->stats.tx_dropped++;
+			return NETDEV_TX_OK;
+		}
+
 		spin_lock_irq(&vp->window_lock);
 		window_set(vp, 7);
 		iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
@@ -2593,7 +2601,7 @@ boomerang_rx(struct net_device *dev)
 	int entry = vp->cur_rx % RX_RING_SIZE;
 	void __iomem *ioaddr = vp->ioaddr;
 	int rx_status;
-	int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
+	int rx_work_limit = RX_RING_SIZE;
 
 	if (vortex_debug > 5)
 		pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
@@ -2614,7 +2622,8 @@ boomerang_rx(struct net_device *dev)
 		} else {
 			/* The packet length: up to 4.5K!. */
 			int pkt_len = rx_status & 0x1fff;
-			struct sk_buff *skb;
+			struct sk_buff *skb, *newskb;
+			dma_addr_t newdma;
 			dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
 
 			if (vortex_debug > 4)
@@ -2633,9 +2642,27 @@ boomerang_rx(struct net_device *dev)
 				pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
 				vp->rx_copy++;
 			} else {
+				/* Pre-allocate the replacement skb.  If it or its
+				 * mapping fails then recycle the buffer that's already
+				 * in place.
+				 */
+				newskb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
+				if (!newskb) {
+					dev->stats.rx_dropped++;
+					goto clear_complete;
+				}
+				newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
+							PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+				if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
+					dev->stats.rx_dropped++;
+					consume_skb(newskb);
+					goto clear_complete;
+				}
+
 				/* Pass up the skbuff already on the Rx ring. */
 				skb = vp->rx_skbuff[entry];
-				vp->rx_skbuff[entry] = NULL;
+				vp->rx_skbuff[entry] = newskb;
+				vp->rx_ring[entry].addr = cpu_to_le32(newdma);
 				skb_put(skb, pkt_len);
 				pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
 				vp->rx_nocopy++;
@@ -2653,55 +2680,15 @@ boomerang_rx(struct net_device *dev)
 			netif_rx(skb);
 			dev->stats.rx_packets++;
 		}
-		entry = (++vp->cur_rx) % RX_RING_SIZE;
-	}
-	/* Refill the Rx ring buffers. */
-	for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
-		struct sk_buff *skb;
-		entry = vp->dirty_rx % RX_RING_SIZE;
-		if (vp->rx_skbuff[entry] == NULL) {
-			skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
-			if (skb == NULL) {
-				static unsigned long last_jif;
-				if (time_after(jiffies, last_jif + 10 * HZ)) {
-					pr_warn("%s: memory shortage\n",
-						dev->name);
-					last_jif = jiffies;
-				}
-				if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
-					mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
-				break;			/* Bad news!  */
-			}
 
-			vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
-			vp->rx_skbuff[entry] = skb;
-		}
+clear_complete:
 		vp->rx_ring[entry].status = 0;	/* Clear complete bit. */
 		iowrite16(UpUnstall, ioaddr + EL3_CMD);
+		entry = (++vp->cur_rx) % RX_RING_SIZE;
 	}
 	return 0;
 }
 
-/*
- * If we've hit a total OOM refilling the Rx ring we poll once a second
- * for some memory.  Otherwise there is no way to restart the rx process.
- */
-static void
-rx_oom_timer(struct timer_list *t)
-{
-	struct vortex_private *vp = from_timer(vp, t, rx_oom_timer);
-	struct net_device *dev = vp->mii.dev;
-
-	spin_lock_irq(&vp->lock);
-	if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)	/* This test is redundant, but makes me feel good */
-		boomerang_rx(dev);
-	if (vortex_debug > 1) {
-		pr_debug("%s: rx_oom_timer %s\n", dev->name,
-			((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
-	}
-	spin_unlock_irq(&vp->lock);
-}
-
 static void
 vortex_down(struct net_device *dev, int final_down)
 {
@@ -2711,7 +2698,6 @@ vortex_down(struct net_device *dev, int final_down)
 	netdev_reset_queue(dev);
 	netif_stop_queue(dev);
 
-	del_timer_sync(&vp->rx_oom_timer);
 	del_timer_sync(&vp->timer);
 
 	/* Turn off statistics ASAP.  We update dev->stats below. */
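All of the added checks follow the same DMA API rule: a streaming mapping returned by pci_map_single() must be validated with dma_mapping_error() before the address reaches the hardware, which is exactly what the WARN_ONs were flagging. A minimal sketch of the pattern, assuming only a generic struct pci_dev *pdev supplied by the caller (the patch performs the error check against &VORTEX_PCI(vp)->dev):

/* Map a buffer for device access and verify the mapping.  pdev, buf
 * and len are assumed to come from the caller; this is a sketch of the
 * pattern, not code from the patch.
 */
static int map_and_check(struct pci_dev *pdev, void *buf, size_t len,
			 dma_addr_t *dma)
{
	*dma = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(&pdev->dev, *dma))
		return -ENOMEM;	/* never hand an unchecked address to hw */
	return 0;
}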