author		Kulikov Vasiliy <segooon@gmail.com>	2010-07-04 22:14:17 -0400
committer	David S. Miller <davem@davemloft.net>	2010-07-05 22:41:17 -0400
commit		2321e80ac84cc616edda926aa1932b344409dccc (patch)
tree		3de80b338f06e46632c958025859d37bb9d77034 /drivers
parent		897dd41d3b3235fe7e973dc5ca04781acf0dd8b2 (diff)
natsemi: Use the instance of net_device_stats from net_device.
Since net_device already has an instance of net_device_stats, we can remove the private copy from the adapter structure.

Signed-off-by: Kulikov Vasiliy <segooon@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
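For context, struct net_device embeds its own struct net_device_stats, reachable as dev->stats, so a driver does not need a second copy in its private data. Below is a minimal sketch of the pattern this patch applies; my_priv, my_tx_complete and my_get_stats are hypothetical illustrative names, not code from natsemi.c.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Illustrative private struct: the stats member the patch removes would
 * have lived here, duplicating what net_device already provides. */
struct my_priv {
	spinlock_t lock;
	/* struct net_device_stats stats;   -- redundant, use dev->stats */
};

/* Counters are bumped directly on the instance embedded in net_device. */
static void my_tx_complete(struct net_device *dev, struct sk_buff *skb, bool ok)
{
	if (ok) {
		dev->stats.tx_packets++;	/* was np->stats.tx_packets++ */
		dev->stats.tx_bytes += skb->len;
	} else {
		dev->stats.tx_errors++;
	}
	dev_kfree_skb_any(skb);
}

/* ndo_get_stats can simply hand back the embedded structure. */
static struct net_device_stats *my_get_stats(struct net_device *dev)
{
	return &dev->stats;		/* was: return &np->stats; */
}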
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/natsemi.c	56
1 file changed, 27 insertions, 29 deletions
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 2a17b503feaa..a6033d48b5cc 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -548,7 +548,6 @@ struct netdev_private {
 	dma_addr_t tx_dma[TX_RING_SIZE];
 	struct net_device *dev;
 	struct napi_struct napi;
-	struct net_device_stats stats;
 	/* Media monitoring timer */
 	struct timer_list timer;
 	/* Frequently used values: keep some adjacent for cache effect */
@@ -1906,7 +1905,7 @@ static void ns_tx_timeout(struct net_device *dev)
 	enable_irq(dev->irq);
 
 	dev->trans_start = jiffies; /* prevent tx timeout */
-	np->stats.tx_errors++;
+	dev->stats.tx_errors++;
 	netif_wake_queue(dev);
 }
 
@@ -2009,7 +2008,7 @@ static void drain_tx(struct net_device *dev)
 				np->tx_dma[i], np->tx_skbuff[i]->len,
 				PCI_DMA_TODEVICE);
 			dev_kfree_skb(np->tx_skbuff[i]);
-			np->stats.tx_dropped++;
+			dev->stats.tx_dropped++;
 		}
 		np->tx_skbuff[i] = NULL;
 	}
@@ -2115,7 +2114,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 		writel(TxOn, ioaddr + ChipCmd);
 	} else {
 		dev_kfree_skb_irq(skb);
-		np->stats.tx_dropped++;
+		dev->stats.tx_dropped++;
 	}
 	spin_unlock_irqrestore(&np->lock, flags);
 
@@ -2140,20 +2139,20 @@ static void netdev_tx_done(struct net_device *dev)
 				dev->name, np->dirty_tx,
 				le32_to_cpu(np->tx_ring[entry].cmd_status));
 		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
-			np->stats.tx_packets++;
-			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+			dev->stats.tx_packets++;
+			dev->stats.tx_bytes += np->tx_skbuff[entry]->len;
 		} else { /* Various Tx errors */
 			int tx_status =
 				le32_to_cpu(np->tx_ring[entry].cmd_status);
 			if (tx_status & (DescTxAbort|DescTxExcColl))
-				np->stats.tx_aborted_errors++;
+				dev->stats.tx_aborted_errors++;
 			if (tx_status & DescTxFIFO)
-				np->stats.tx_fifo_errors++;
+				dev->stats.tx_fifo_errors++;
 			if (tx_status & DescTxCarrier)
-				np->stats.tx_carrier_errors++;
+				dev->stats.tx_carrier_errors++;
 			if (tx_status & DescTxOOWCol)
-				np->stats.tx_window_errors++;
-			np->stats.tx_errors++;
+				dev->stats.tx_window_errors++;
+			dev->stats.tx_errors++;
 		}
 		pci_unmap_single(np->pci_dev,np->tx_dma[entry],
 				np->tx_skbuff[entry]->len,
@@ -2301,7 +2300,7 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
2301 "buffers, entry %#08x " 2300 "buffers, entry %#08x "
2302 "status %#08x.\n", dev->name, 2301 "status %#08x.\n", dev->name,
2303 np->cur_rx, desc_status); 2302 np->cur_rx, desc_status);
2304 np->stats.rx_length_errors++; 2303 dev->stats.rx_length_errors++;
2305 2304
2306 /* The RX state machine has probably 2305 /* The RX state machine has probably
2307 * locked up beneath us. Follow the 2306 * locked up beneath us. Follow the
@@ -2321,15 +2320,15 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
 
 			} else {
 				/* There was an error. */
-				np->stats.rx_errors++;
+				dev->stats.rx_errors++;
 				if (desc_status & (DescRxAbort|DescRxOver))
-					np->stats.rx_over_errors++;
+					dev->stats.rx_over_errors++;
 				if (desc_status & (DescRxLong|DescRxRunt))
-					np->stats.rx_length_errors++;
+					dev->stats.rx_length_errors++;
 				if (desc_status & (DescRxInvalid|DescRxAlign))
-					np->stats.rx_frame_errors++;
+					dev->stats.rx_frame_errors++;
 				if (desc_status & DescRxCRC)
-					np->stats.rx_crc_errors++;
+					dev->stats.rx_crc_errors++;
 			}
 		} else if (pkt_len > np->rx_buf_sz) {
 			/* if this is the tail of a double buffer
@@ -2364,8 +2363,8 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
 			}
 			skb->protocol = eth_type_trans(skb, dev);
 			netif_receive_skb(skb);
-			np->stats.rx_packets++;
-			np->stats.rx_bytes += pkt_len;
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += pkt_len;
 		}
 		entry = (++np->cur_rx) % RX_RING_SIZE;
 		np->rx_head_desc = &np->rx_ring[entry];
@@ -2428,17 +2427,17 @@ static void netdev_error(struct net_device *dev, int intr_status)
 			printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
 				dev->name);
 		}
-		np->stats.rx_fifo_errors++;
-		np->stats.rx_errors++;
+		dev->stats.rx_fifo_errors++;
+		dev->stats.rx_errors++;
 	}
 	/* Hmmmmm, it's not clear how to recover from PCI faults. */
 	if (intr_status & IntrPCIErr) {
 		printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
 			intr_status & IntrPCIErr);
-		np->stats.tx_fifo_errors++;
-		np->stats.tx_errors++;
-		np->stats.rx_fifo_errors++;
-		np->stats.rx_errors++;
+		dev->stats.tx_fifo_errors++;
+		dev->stats.tx_errors++;
+		dev->stats.rx_fifo_errors++;
+		dev->stats.rx_errors++;
 	}
 	spin_unlock(&np->lock);
 }
@@ -2446,11 +2445,10 @@ static void netdev_error(struct net_device *dev, int intr_status)
 static void __get_stats(struct net_device *dev)
 {
 	void __iomem * ioaddr = ns_ioaddr(dev);
-	struct netdev_private *np = netdev_priv(dev);
 
 	/* The chip only need report frame silently dropped. */
-	np->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
-	np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
+	dev->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
+	dev->stats.rx_missed_errors += readl(ioaddr + RxMissed);
 }
 
 static struct net_device_stats *get_stats(struct net_device *dev)
@@ -2463,7 +2461,7 @@ static struct net_device_stats *get_stats(struct net_device *dev)
 	__get_stats(dev);
 	spin_unlock_irq(&np->lock);
 
-	return &np->stats;
+	return &dev->stats;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER