-rw-r--r--	drivers/net/ethernet/nvidia/forcedeth.c	35
1 file changed, 1 insertion(+), 34 deletions(-)
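
This change strips the per-packet software statistics updates (tx_errors, tx_fifo_errors, tx_carrier_errors, tx_packets, tx_bytes, rx_errors, rx_crc_errors, rx_over_errors and rx_bytes) out of the forcedeth TX/RX completion paths; only the retry-backoff reseeding and the software rx_packets/rx_missed_errors counts remain. As a rough sketch of the result, reconstructed purely from the hunk context below (the surrounding nv_tx_done() loop is assumed unchanged, not taken verbatim from the tree), the DESC_VER_1 TX completion branch reduces to:

	/* Sketch, not verbatim tree contents: on error only the retry-backoff
	 * reseed remains, and the skb is freed the same way in either case. */
	if (np->desc_ver == DESC_VER_1) {
		if (flags & NV_TX_LASTPACKET) {
			if (flags & NV_TX_ERROR) {
				if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
					nv_legacybackoff_reseed(dev);
			}
			dev_kfree_skb_any(np->get_tx_ctx->skb);
			np->get_tx_ctx->skb = NULL;
		}
	}
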
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index b7cf4b6e15ec..2f1eaee5cf00 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2374,16 +2374,8 @@ static int nv_tx_done(struct net_device *dev, int limit)
 		if (np->desc_ver == DESC_VER_1) {
 			if (flags & NV_TX_LASTPACKET) {
 				if (flags & NV_TX_ERROR) {
-					if (flags & NV_TX_UNDERFLOW)
-						dev->stats.tx_fifo_errors++;
-					if (flags & NV_TX_CARRIERLOST)
-						dev->stats.tx_carrier_errors++;
 					if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
 						nv_legacybackoff_reseed(dev);
-					dev->stats.tx_errors++;
-				} else {
-					dev->stats.tx_packets++;
-					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
 				}
 				dev_kfree_skb_any(np->get_tx_ctx->skb);
 				np->get_tx_ctx->skb = NULL;
@@ -2392,16 +2384,8 @@ static int nv_tx_done(struct net_device *dev, int limit)
 		} else {
 			if (flags & NV_TX2_LASTPACKET) {
 				if (flags & NV_TX2_ERROR) {
-					if (flags & NV_TX2_UNDERFLOW)
-						dev->stats.tx_fifo_errors++;
-					if (flags & NV_TX2_CARRIERLOST)
-						dev->stats.tx_carrier_errors++;
 					if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
 						nv_legacybackoff_reseed(dev);
-					dev->stats.tx_errors++;
-				} else {
-					dev->stats.tx_packets++;
-					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
 				}
 				dev_kfree_skb_any(np->get_tx_ctx->skb);
 				np->get_tx_ctx->skb = NULL;
@@ -2434,9 +2418,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
 		nv_unmap_txskb(np, np->get_tx_ctx);
 
 		if (flags & NV_TX2_LASTPACKET) {
-			if (!(flags & NV_TX2_ERROR))
-				dev->stats.tx_packets++;
-			else {
+			if (flags & NV_TX2_ERROR) {
 				if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
 					if (np->driver_data & DEV_HAS_GEAR_MODE)
 						nv_gear_backoff_reseed(dev);
@@ -2636,7 +2618,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
 					if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
 						len = nv_getlen(dev, skb->data, len);
 						if (len < 0) {
-							dev->stats.rx_errors++;
 							dev_kfree_skb(skb);
 							goto next_pkt;
 						}
@@ -2650,11 +2631,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
 					else {
 						if (flags & NV_RX_MISSEDFRAME)
 							dev->stats.rx_missed_errors++;
-						if (flags & NV_RX_CRCERR)
-							dev->stats.rx_crc_errors++;
-						if (flags & NV_RX_OVERFLOW)
-							dev->stats.rx_over_errors++;
-						dev->stats.rx_errors++;
 						dev_kfree_skb(skb);
 						goto next_pkt;
 					}
@@ -2670,7 +2646,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
 					if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
 						len = nv_getlen(dev, skb->data, len);
 						if (len < 0) {
-							dev->stats.rx_errors++;
 							dev_kfree_skb(skb);
 							goto next_pkt;
 						}
@@ -2682,11 +2657,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
 					}
 					/* the rest are hard errors */
 					else {
-						if (flags & NV_RX2_CRCERR)
-							dev->stats.rx_crc_errors++;
-						if (flags & NV_RX2_OVERFLOW)
-							dev->stats.rx_over_errors++;
-						dev->stats.rx_errors++;
 						dev_kfree_skb(skb);
 						goto next_pkt;
 					}
@@ -2704,7 +2674,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
 		skb->protocol = eth_type_trans(skb, dev);
 		napi_gro_receive(&np->napi, skb);
 		dev->stats.rx_packets++;
-		dev->stats.rx_bytes += len;
 next_pkt:
 		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
 			np->get_rx.orig = np->first_rx.orig;
@@ -2787,9 +2756,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 				__vlan_hwaccel_put_tag(skb, vid);
 			}
 			napi_gro_receive(&np->napi, skb);
-
 			dev->stats.rx_packets++;
-			dev->stats.rx_bytes += len;
 		} else {
 			dev_kfree_skb(skb);
 		}
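
For the RX side, a similarly hedged sketch (reconstructed only from the hunk context above, with the untouched lines of nv_rx_process() assumed intact): after this patch the hard-error branches simply drop the frame without touching dev->stats, and the delivery path keeps just the software rx_packets count.

	/* Sketch of the post-patch RX paths, not verbatim tree contents. */
	/* the rest are hard errors */
	else {
		dev_kfree_skb(skb);
		goto next_pkt;
	}
	...
	skb->protocol = eth_type_trans(skb, dev);
	napi_gro_receive(&np->napi, skb);
	dev->stats.rx_packets++;
next_pkt: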