-rw-r--r--   drivers/net/r8169.c   34
1 files changed, 17 insertions, 17 deletions
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index ee1c2743eab4..951c56ed56e8 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -392,7 +392,6 @@ struct rtl8169_private {
 	struct pci_dev *pci_dev;	/* Index of PCI device */
 	struct net_device *dev;
 	struct napi_struct napi;
-	struct net_device_stats stats;	/* statistics of net device */
 	spinlock_t lock;		/* spin lock flag */
 	u32 msg_enable;
 	int chipset;
@@ -2305,7 +2304,7 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
 				dev_kfree_skb(skb);
 				tx_skb->skb = NULL;
 			}
-			tp->stats.tx_dropped++;
+			tp->dev->stats.tx_dropped++;
 		}
 	}
 	tp->cur_tx = tp->dirty_tx = 0;
@@ -2386,6 +2385,7 @@ static void rtl8169_reset_task(struct work_struct *work)
 		rtl8169_init_ring_indexes(tp);
 		rtl_hw_start(dev);
 		netif_wake_queue(dev);
+		rtl8169_check_link_status(dev, tp, tp->mmio_addr);
 	} else {
 		if (net_ratelimit() && netif_msg_intr(tp)) {
 			printk(KERN_EMERG PFX "%s: Rx buffers shortage\n",
@@ -2542,7 +2542,7 @@ err_stop:
 	netif_stop_queue(dev);
 	ret = NETDEV_TX_BUSY;
 err_update_stats:
-	tp->stats.tx_dropped++;
+	dev->stats.tx_dropped++;
 	goto out;
 }
 
@@ -2617,8 +2617,8 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
 		if (status & DescOwn)
 			break;
 
-		tp->stats.tx_bytes += len;
-		tp->stats.tx_packets++;
+		dev->stats.tx_bytes += len;
+		dev->stats.tx_packets++;
 
 		rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry);
 
@@ -2718,14 +2718,14 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
2718 "%s: Rx ERROR. status = %08x\n", 2718 "%s: Rx ERROR. status = %08x\n",
2719 dev->name, status); 2719 dev->name, status);
2720 } 2720 }
2721 tp->stats.rx_errors++; 2721 dev->stats.rx_errors++;
2722 if (status & (RxRWT | RxRUNT)) 2722 if (status & (RxRWT | RxRUNT))
2723 tp->stats.rx_length_errors++; 2723 dev->stats.rx_length_errors++;
2724 if (status & RxCRC) 2724 if (status & RxCRC)
2725 tp->stats.rx_crc_errors++; 2725 dev->stats.rx_crc_errors++;
2726 if (status & RxFOVF) { 2726 if (status & RxFOVF) {
2727 rtl8169_schedule_work(dev, rtl8169_reset_task); 2727 rtl8169_schedule_work(dev, rtl8169_reset_task);
2728 tp->stats.rx_fifo_errors++; 2728 dev->stats.rx_fifo_errors++;
2729 } 2729 }
2730 rtl8169_mark_to_asic(desc, tp->rx_buf_sz); 2730 rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
2731 } else { 2731 } else {
@@ -2740,8 +2740,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
 			 * sized frames.
 			 */
 			if (unlikely(rtl8169_fragmented_frame(status))) {
-				tp->stats.rx_dropped++;
-				tp->stats.rx_length_errors++;
+				dev->stats.rx_dropped++;
+				dev->stats.rx_length_errors++;
 				rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
 				continue;
 			}
@@ -2765,8 +2765,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
 			rtl8169_rx_skb(skb);
 
 			dev->last_rx = jiffies;
-			tp->stats.rx_bytes += pkt_size;
-			tp->stats.rx_packets++;
+			dev->stats.rx_bytes += pkt_size;
+			dev->stats.rx_packets++;
 		}
 
 		/* Work around for AMD plateform. */
@@ -2927,7 +2927,7 @@ core_down:
 	rtl8169_asic_down(ioaddr);
 
 	/* Update the error counts. */
-	tp->stats.rx_missed_errors += RTL_R32(RxMissed);
+	dev->stats.rx_missed_errors += RTL_R32(RxMissed);
 	RTL_W32(RxMissed, 0);
 
 	spin_unlock_irq(&tp->lock);
@@ -3057,12 +3057,12 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
 
 	if (netif_running(dev)) {
 		spin_lock_irqsave(&tp->lock, flags);
-		tp->stats.rx_missed_errors += RTL_R32(RxMissed);
+		dev->stats.rx_missed_errors += RTL_R32(RxMissed);
 		RTL_W32(RxMissed, 0);
 		spin_unlock_irqrestore(&tp->lock, flags);
 	}
 
-	return &tp->stats;
+	return &dev->stats;
 }
 
 #ifdef CONFIG_PM
@@ -3083,7 +3083,7 @@ static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
 
 	rtl8169_asic_down(ioaddr);
 
-	tp->stats.rx_missed_errors += RTL_R32(RxMissed);
+	dev->stats.rx_missed_errors += RTL_R32(RxMissed);
 	RTL_W32(RxMissed, 0);
 
 	spin_unlock_irq(&tp->lock);
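
Note on the pattern: the sketch below is a small, stand-alone user-space model of the conversion this patch performs, not kernel code. The structures are heavily simplified stand-ins for the real definitions in <linux/netdevice.h>; only the shape of the change is meant to carry over. The private rtl8169_private structure no longer carries its own net_device_stats copy, counters are bumped through the stats block embedded in the device (via tp->dev->stats where only tp is in scope), and get_stats() hands back a pointer to that embedded block.

/* Illustrative user-space model of the tp->stats -> dev->stats conversion.
 * Types are simplified stand-ins; the real ones live in <linux/netdevice.h>. */
#include <stdio.h>

struct net_device_stats {
	unsigned long rx_packets, tx_packets;
	unsigned long rx_dropped, tx_dropped;
	unsigned long rx_missed_errors;
};

struct net_device {
	const char *name;
	struct net_device_stats stats;	/* the counters now live here */
};

struct rtl8169_private {
	struct net_device *dev;
	/* no private net_device_stats copy anymore */
};

/* Counters are updated through the device, as in rtl8169_rx_interrupt(). */
static void model_rx_ok(struct net_device *dev)
{
	dev->stats.rx_packets++;
}

/* In rtl8169_tx_clear() only tp is at hand, hence tp->dev->stats. */
static void model_tx_clear(struct rtl8169_private *tp)
{
	tp->dev->stats.tx_dropped++;
}

/* rtl8169_get_stats() now simply returns the embedded block. */
static struct net_device_stats *model_get_stats(struct net_device *dev)
{
	return &dev->stats;
}

int main(void)
{
	struct net_device dev = { .name = "eth0" };
	struct rtl8169_private tp = { .dev = &dev };

	model_rx_ok(&dev);
	model_tx_clear(&tp);

	printf("%s: rx %lu, tx dropped %lu\n", dev.name,
	       model_get_stats(&dev)->rx_packets,
	       model_get_stats(&dev)->tx_dropped);
	return 0;
}

Keeping a single copy of the counters inside struct net_device means the values returned by rtl8169_get_stats() cannot drift from the ones updated in the tx/rx paths, and the driver drops one redundant per-device field.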