aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKulikov Vasiliy <segooon@gmail.com>2010-07-04 22:13:31 -0400
committerDavid S. Miller <davem@davemloft.net>2010-07-05 22:41:13 -0400
commit57616ee4405b82c3ba4d20111697a4416f3967a6 (patch)
treee89d2d00514740d70c15e76971292134d63b2aff
parentd117b6665847084cfe8a44b870f771153e18991d (diff)
ethoc: Use the instance of net_device_stats from net_device.
Since struct net_device already contains an instance of struct net_device_stats, the duplicate instance in the adapter's private structure can be removed. Signed-off-by: Kulikov Vasiliy <segooon@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethoc.c47
1 file changed, 22 insertions, 25 deletions
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index 37ce8aca2cc6..db519a81e53a 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -183,7 +183,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
183 * @vma: pointer to array of virtual memory addresses for buffers 183 * @vma: pointer to array of virtual memory addresses for buffers
184 * @netdev: pointer to network device structure 184 * @netdev: pointer to network device structure
185 * @napi: NAPI structure 185 * @napi: NAPI structure
186 * @stats: network device statistics
187 * @msg_enable: device state flags 186 * @msg_enable: device state flags
188 * @rx_lock: receive lock 187 * @rx_lock: receive lock
189 * @lock: device lock 188 * @lock: device lock
@@ -208,7 +207,6 @@ struct ethoc {
208 207
209 struct net_device *netdev; 208 struct net_device *netdev;
210 struct napi_struct napi; 209 struct napi_struct napi;
211 struct net_device_stats stats;
212 u32 msg_enable; 210 u32 msg_enable;
213 211
214 spinlock_t rx_lock; 212 spinlock_t rx_lock;
@@ -367,39 +365,39 @@ static unsigned int ethoc_update_rx_stats(struct ethoc *dev,
367 365
368 if (bd->stat & RX_BD_TL) { 366 if (bd->stat & RX_BD_TL) {
369 dev_err(&netdev->dev, "RX: frame too long\n"); 367 dev_err(&netdev->dev, "RX: frame too long\n");
370 dev->stats.rx_length_errors++; 368 netdev->stats.rx_length_errors++;
371 ret++; 369 ret++;
372 } 370 }
373 371
374 if (bd->stat & RX_BD_SF) { 372 if (bd->stat & RX_BD_SF) {
375 dev_err(&netdev->dev, "RX: frame too short\n"); 373 dev_err(&netdev->dev, "RX: frame too short\n");
376 dev->stats.rx_length_errors++; 374 netdev->stats.rx_length_errors++;
377 ret++; 375 ret++;
378 } 376 }
379 377
380 if (bd->stat & RX_BD_DN) { 378 if (bd->stat & RX_BD_DN) {
381 dev_err(&netdev->dev, "RX: dribble nibble\n"); 379 dev_err(&netdev->dev, "RX: dribble nibble\n");
382 dev->stats.rx_frame_errors++; 380 netdev->stats.rx_frame_errors++;
383 } 381 }
384 382
385 if (bd->stat & RX_BD_CRC) { 383 if (bd->stat & RX_BD_CRC) {
386 dev_err(&netdev->dev, "RX: wrong CRC\n"); 384 dev_err(&netdev->dev, "RX: wrong CRC\n");
387 dev->stats.rx_crc_errors++; 385 netdev->stats.rx_crc_errors++;
388 ret++; 386 ret++;
389 } 387 }
390 388
391 if (bd->stat & RX_BD_OR) { 389 if (bd->stat & RX_BD_OR) {
392 dev_err(&netdev->dev, "RX: overrun\n"); 390 dev_err(&netdev->dev, "RX: overrun\n");
393 dev->stats.rx_over_errors++; 391 netdev->stats.rx_over_errors++;
394 ret++; 392 ret++;
395 } 393 }
396 394
397 if (bd->stat & RX_BD_MISS) 395 if (bd->stat & RX_BD_MISS)
398 dev->stats.rx_missed_errors++; 396 netdev->stats.rx_missed_errors++;
399 397
400 if (bd->stat & RX_BD_LC) { 398 if (bd->stat & RX_BD_LC) {
401 dev_err(&netdev->dev, "RX: late collision\n"); 399 dev_err(&netdev->dev, "RX: late collision\n");
402 dev->stats.collisions++; 400 netdev->stats.collisions++;
403 ret++; 401 ret++;
404 } 402 }
405 403
@@ -431,15 +429,15 @@ static int ethoc_rx(struct net_device *dev, int limit)
431 void *src = priv->vma[entry]; 429 void *src = priv->vma[entry];
432 memcpy_fromio(skb_put(skb, size), src, size); 430 memcpy_fromio(skb_put(skb, size), src, size);
433 skb->protocol = eth_type_trans(skb, dev); 431 skb->protocol = eth_type_trans(skb, dev);
434 priv->stats.rx_packets++; 432 dev->stats.rx_packets++;
435 priv->stats.rx_bytes += size; 433 dev->stats.rx_bytes += size;
436 netif_receive_skb(skb); 434 netif_receive_skb(skb);
437 } else { 435 } else {
438 if (net_ratelimit()) 436 if (net_ratelimit())
439 dev_warn(&dev->dev, "low on memory - " 437 dev_warn(&dev->dev, "low on memory - "
440 "packet dropped\n"); 438 "packet dropped\n");
441 439
442 priv->stats.rx_dropped++; 440 dev->stats.rx_dropped++;
443 break; 441 break;
444 } 442 }
445 } 443 }
@@ -460,30 +458,30 @@ static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
460 458
461 if (bd->stat & TX_BD_LC) { 459 if (bd->stat & TX_BD_LC) {
462 dev_err(&netdev->dev, "TX: late collision\n"); 460 dev_err(&netdev->dev, "TX: late collision\n");
463 dev->stats.tx_window_errors++; 461 netdev->stats.tx_window_errors++;
464 } 462 }
465 463
466 if (bd->stat & TX_BD_RL) { 464 if (bd->stat & TX_BD_RL) {
467 dev_err(&netdev->dev, "TX: retransmit limit\n"); 465 dev_err(&netdev->dev, "TX: retransmit limit\n");
468 dev->stats.tx_aborted_errors++; 466 netdev->stats.tx_aborted_errors++;
469 } 467 }
470 468
471 if (bd->stat & TX_BD_UR) { 469 if (bd->stat & TX_BD_UR) {
472 dev_err(&netdev->dev, "TX: underrun\n"); 470 dev_err(&netdev->dev, "TX: underrun\n");
473 dev->stats.tx_fifo_errors++; 471 netdev->stats.tx_fifo_errors++;
474 } 472 }
475 473
476 if (bd->stat & TX_BD_CS) { 474 if (bd->stat & TX_BD_CS) {
477 dev_err(&netdev->dev, "TX: carrier sense lost\n"); 475 dev_err(&netdev->dev, "TX: carrier sense lost\n");
478 dev->stats.tx_carrier_errors++; 476 netdev->stats.tx_carrier_errors++;
479 } 477 }
480 478
481 if (bd->stat & TX_BD_STATS) 479 if (bd->stat & TX_BD_STATS)
482 dev->stats.tx_errors++; 480 netdev->stats.tx_errors++;
483 481
484 dev->stats.collisions += (bd->stat >> 4) & 0xf; 482 netdev->stats.collisions += (bd->stat >> 4) & 0xf;
485 dev->stats.tx_bytes += bd->stat >> 16; 483 netdev->stats.tx_bytes += bd->stat >> 16;
486 dev->stats.tx_packets++; 484 netdev->stats.tx_packets++;
487 return 0; 485 return 0;
488} 486}
489 487
@@ -514,7 +512,7 @@ static void ethoc_tx(struct net_device *dev)
514 512
515static irqreturn_t ethoc_interrupt(int irq, void *dev_id) 513static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
516{ 514{
517 struct net_device *dev = (struct net_device *)dev_id; 515 struct net_device *dev = dev_id;
518 struct ethoc *priv = netdev_priv(dev); 516 struct ethoc *priv = netdev_priv(dev);
519 u32 pending; 517 u32 pending;
520 518
@@ -529,7 +527,7 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
529 527
530 if (pending & INT_MASK_BUSY) { 528 if (pending & INT_MASK_BUSY) {
531 dev_err(&dev->dev, "packet dropped\n"); 529 dev_err(&dev->dev, "packet dropped\n");
532 priv->stats.rx_dropped++; 530 dev->stats.rx_dropped++;
533 } 531 }
534 532
535 if (pending & INT_MASK_RX) { 533 if (pending & INT_MASK_RX) {
@@ -810,8 +808,7 @@ static void ethoc_tx_timeout(struct net_device *dev)
810 808
811static struct net_device_stats *ethoc_stats(struct net_device *dev) 809static struct net_device_stats *ethoc_stats(struct net_device *dev)
812{ 810{
813 struct ethoc *priv = netdev_priv(dev); 811 return &dev->stats;
814 return &priv->stats;
815} 812}
816 813
817static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev) 814static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -822,7 +819,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
822 void *dest; 819 void *dest;
823 820
824 if (unlikely(skb->len > ETHOC_BUFSIZ)) { 821 if (unlikely(skb->len > ETHOC_BUFSIZ)) {
825 priv->stats.tx_errors++; 822 dev->stats.tx_errors++;
826 goto out; 823 goto out;
827 } 824 }
828 825