author	Linus Torvalds <torvalds@linux-foundation.org>	2011-11-07 13:55:33 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-11-07 13:55:33 -0500
commit	94956eed14b4b16d401c8ad36d68df0608f968cb (patch)
tree	60e4e1a3c2c44e8f6616db78cd6b22737b2a1f37 /drivers/net/ethernet
parent	50e696308c3fb18a4a0dae7b3a4d47469149c919 (diff)
parent	e45a618753d5a8bc9086382f73bbc2d6a3399250 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (47 commits)
  forcedeth: fix a few sparse warnings (variable shadowing)
  forcedeth: Improve stats counters
  forcedeth: remove unneeded stats updates
  forcedeth: Acknowledge only interrupts that are being processed
  forcedeth: fix race when unloading module
  MAINTAINERS/rds: update maintainer
  wanrouter: Remove kernel_lock annotations
  usbnet: fix oops in usbnet_start_xmit
  ixgbe: Fix compile for kernel without CONFIG_PCI_IOV defined
  etherh: Add MAINTAINERS entry for etherh
  bonding: comparing a u8 with -1 is always false
  sky2: fix regression on Yukon Optima
  netlink: clarify attribute length check documentation
  netlink: validate NLA_MSECS length
  i825xx:xscale:8390:freescale: Fix Kconfig dependancies
  macvlan: receive multicast with local address
  tg3: Update version to 3.121
  tg3: Eliminate timer race with reset_task
  tg3: Schedule at most one tg3_reset_task run
  tg3: Obtain PCI function number from device
  ...
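Several of the tg3 commits in this pull ("Schedule at most one tg3_reset_task run", "Eliminate timer race with reset_task") hinge on one pattern, visible in the tg3.c hunks below: gate schedule_work() behind an atomic test_and_set_bit() so the reset task is queued at most once, and pair teardown with cancel_work_sync(). A minimal standalone sketch of that pattern follows; the my_dev structure and RESET_TASK_PENDING bit index are hypothetical stand-ins for tg3's flag machinery, not the driver's actual types.

#include <linux/bitops.h>
#include <linux/workqueue.h>

#define RESET_TASK_PENDING	0	/* bit index in dev->flags (hypothetical) */

struct my_dev {
	unsigned long flags;
	struct work_struct reset_task;
};

/* Schedule the reset task only if it is not already pending: the
 * test_and_set_bit() is atomic, so concurrent callers (timer, NAPI
 * poll, tx timeout) collapse into a single queued work item. */
static inline void my_reset_task_schedule(struct my_dev *dev)
{
	if (!test_and_set_bit(RESET_TASK_PENDING, &dev->flags))
		schedule_work(&dev->reset_task);
}

/* Cancel and wait for a possibly-running reset task, then clear the
 * pending bit so a later schedule starts from a clean state. */
static inline void my_reset_task_cancel(struct my_dev *dev)
{
	cancel_work_sync(&dev->reset_task);
	clear_bit(RESET_TASK_PENDING, &dev->flags);
}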
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--	drivers/net/ethernet/broadcom/tg3.c            | 195
-rw-r--r--	drivers/net/ethernet/broadcom/tg3.h            |  21
-rw-r--r--	drivers/net/ethernet/freescale/Kconfig         |   3
-rw-r--r--	drivers/net/ethernet/intel/Kconfig             |   6
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c |   2
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h |   4
-rw-r--r--	drivers/net/ethernet/marvell/sky2.c            |  11
-rw-r--r--	drivers/net/ethernet/natsemi/Kconfig           |   5
-rw-r--r--	drivers/net/ethernet/nvidia/forcedeth.c        |  88
9 files changed, 172 insertions(+), 163 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 161cbbb4814a..bf4074167d6a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -89,10 +89,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 
 #define DRV_MODULE_NAME		"tg3"
 #define TG3_MAJ_NUM		3
-#define TG3_MIN_NUM		120
+#define TG3_MIN_NUM		121
 #define DRV_MODULE_VERSION	\
 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE	"August 18, 2011"
+#define DRV_MODULE_RELDATE	"November 2, 2011"
 
 #define RESET_KIND_SHUTDOWN	0
 #define RESET_KIND_INIT		1
@@ -628,19 +628,23 @@ static void tg3_ape_lock_init(struct tg3 *tp)
 		regbase = TG3_APE_PER_LOCK_GRANT;
 
 	/* Make sure the driver hasn't any stale locks. */
-	for (i = 0; i < 8; i++) {
-		if (i == TG3_APE_LOCK_GPIO)
-			continue;
-		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
+	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
+		switch (i) {
+		case TG3_APE_LOCK_PHY0:
+		case TG3_APE_LOCK_PHY1:
+		case TG3_APE_LOCK_PHY2:
+		case TG3_APE_LOCK_PHY3:
+			bit = APE_LOCK_GRANT_DRIVER;
+			break;
+		default:
+			if (!tp->pci_fn)
+				bit = APE_LOCK_GRANT_DRIVER;
+			else
+				bit = 1 << tp->pci_fn;
+		}
+		tg3_ape_write32(tp, regbase + 4 * i, bit);
 	}
 
-	/* Clear the correct bit of the GPIO lock too. */
-	if (!tp->pci_fn)
-		bit = APE_LOCK_GRANT_DRIVER;
-	else
-		bit = 1 << tp->pci_fn;
-
-	tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
 }
 
 static int tg3_ape_lock(struct tg3 *tp, int locknum)
@@ -658,6 +662,10 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
 		return 0;
 	case TG3_APE_LOCK_GRC:
 	case TG3_APE_LOCK_MEM:
+		if (!tp->pci_fn)
+			bit = APE_LOCK_REQ_DRIVER;
+		else
+			bit = 1 << tp->pci_fn;
 		break;
 	default:
 		return -EINVAL;
@@ -673,11 +681,6 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
 
 	off = 4 * locknum;
 
-	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
-		bit = APE_LOCK_REQ_DRIVER;
-	else
-		bit = 1 << tp->pci_fn;
-
 	tg3_ape_write32(tp, req + off, bit);
 
 	/* Wait for up to 1 millisecond to acquire lock. */
@@ -710,6 +713,10 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
 		return;
 	case TG3_APE_LOCK_GRC:
 	case TG3_APE_LOCK_MEM:
+		if (!tp->pci_fn)
+			bit = APE_LOCK_GRANT_DRIVER;
+		else
+			bit = 1 << tp->pci_fn;
 		break;
 	default:
 		return;
@@ -720,11 +727,6 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
 	else
 		gnt = TG3_APE_PER_LOCK_GRANT;
 
-	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
-		bit = APE_LOCK_GRANT_DRIVER;
-	else
-		bit = 1 << tp->pci_fn;
-
 	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
 }
 
@@ -5927,6 +5929,18 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
 	return work_done;
 }
 
+static inline void tg3_reset_task_schedule(struct tg3 *tp)
+{
+	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
+		schedule_work(&tp->reset_task);
+}
+
+static inline void tg3_reset_task_cancel(struct tg3 *tp)
+{
+	cancel_work_sync(&tp->reset_task);
+	tg3_flag_clear(tp, RESET_TASK_PENDING);
+}
+
 static int tg3_poll_msix(struct napi_struct *napi, int budget)
 {
 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
@@ -5967,7 +5981,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
 tx_recovery:
 	/* work_done is guaranteed to be less than budget. */
 	napi_complete(napi);
-	schedule_work(&tp->reset_task);
+	tg3_reset_task_schedule(tp);
 	return work_done;
 }
 
@@ -6002,7 +6016,7 @@ static void tg3_process_error(struct tg3 *tp)
 	tg3_dump_state(tp);
 
 	tg3_flag_set(tp, ERROR_PROCESSED);
-	schedule_work(&tp->reset_task);
+	tg3_reset_task_schedule(tp);
 }
 
 static int tg3_poll(struct napi_struct *napi, int budget)
@@ -6049,7 +6063,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
 tx_recovery:
 	/* work_done is guaranteed to be less than budget. */
 	napi_complete(napi);
-	schedule_work(&tp->reset_task);
+	tg3_reset_task_schedule(tp);
 	return work_done;
 }
 
@@ -6338,11 +6352,11 @@ static void tg3_reset_task(struct work_struct *work)
 {
 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
 	int err;
-	unsigned int restart_timer;
 
 	tg3_full_lock(tp, 0);
 
 	if (!netif_running(tp->dev)) {
+		tg3_flag_clear(tp, RESET_TASK_PENDING);
 		tg3_full_unlock(tp);
 		return;
 	}
@@ -6355,9 +6369,6 @@ static void tg3_reset_task(struct work_struct *work)
 
 	tg3_full_lock(tp, 1);
 
-	restart_timer = tg3_flag(tp, RESTART_TIMER);
-	tg3_flag_clear(tp, RESTART_TIMER);
-
 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
 		tp->write32_rx_mbox = tg3_write_flush_reg32;
@@ -6372,14 +6383,13 @@ static void tg3_reset_task(struct work_struct *work)
 
 	tg3_netif_start(tp);
 
-	if (restart_timer)
-		mod_timer(&tp->timer, jiffies + 1);
-
 out:
 	tg3_full_unlock(tp);
 
 	if (!err)
 		tg3_phy_start(tp);
+
+	tg3_flag_clear(tp, RESET_TASK_PENDING);
 }
 
 static void tg3_tx_timeout(struct net_device *dev)
@@ -6391,7 +6401,7 @@ static void tg3_tx_timeout(struct net_device *dev)
 		tg3_dump_state(tp);
 	}
 
-	schedule_work(&tp->reset_task);
+	tg3_reset_task_schedule(tp);
 }
 
 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
@@ -6442,31 +6452,26 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
 			hwbug = 1;
 
 		if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
+			u32 prvidx = *entry;
 			u32 tmp_flag = flags & ~TXD_FLAG_END;
-			while (len > TG3_TX_BD_DMA_MAX) {
+			while (len > TG3_TX_BD_DMA_MAX && *budget) {
 				u32 frag_len = TG3_TX_BD_DMA_MAX;
 				len -= TG3_TX_BD_DMA_MAX;
 
-				if (len) {
-					tnapi->tx_buffers[*entry].fragmented = true;
-					/* Avoid the 8byte DMA problem */
-					if (len <= 8) {
-						len += TG3_TX_BD_DMA_MAX / 2;
-						frag_len = TG3_TX_BD_DMA_MAX / 2;
-					}
-				} else
-					tmp_flag = flags;
-
-				if (*budget) {
-					tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
-						      frag_len, tmp_flag, mss, vlan);
-					(*budget)--;
-					*entry = NEXT_TX(*entry);
-				} else {
-					hwbug = 1;
-					break;
+				/* Avoid the 8byte DMA problem */
+				if (len <= 8) {
+					len += TG3_TX_BD_DMA_MAX / 2;
+					frag_len = TG3_TX_BD_DMA_MAX / 2;
 				}
 
+				tnapi->tx_buffers[*entry].fragmented = true;
+
+				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+					      frag_len, tmp_flag, mss, vlan);
+				*budget -= 1;
+				prvidx = *entry;
+				*entry = NEXT_TX(*entry);
+
 				map += frag_len;
 			}
 
@@ -6474,10 +6479,11 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
 			if (*budget) {
 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
 					      len, flags, mss, vlan);
-				(*budget)--;
+				*budget -= 1;
 				*entry = NEXT_TX(*entry);
 			} else {
 				hwbug = 1;
+				tnapi->tx_buffers[prvidx].fragmented = false;
 			}
 		}
 	} else {
@@ -6509,7 +6515,7 @@ static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
 		txb = &tnapi->tx_buffers[entry];
 	}
 
-	for (i = 0; i < last; i++) {
+	for (i = 0; i <= last; i++) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 		entry = NEXT_TX(entry);
@@ -6559,6 +6565,8 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 		dev_kfree_skb(new_skb);
 		ret = -1;
 	} else {
+		u32 save_entry = *entry;
+
 		base_flags |= TXD_FLAG_END;
 
 		tnapi->tx_buffers[*entry].skb = new_skb;
@@ -6568,7 +6576,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 		if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
 				    new_skb->len, base_flags,
 				    mss, vlan)) {
-			tg3_tx_skb_unmap(tnapi, *entry, 0);
+			tg3_tx_skb_unmap(tnapi, save_entry, -1);
 			dev_kfree_skb(new_skb);
 			ret = -1;
 		}
@@ -6758,11 +6766,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
 			    ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
-			    mss, vlan))
+			    mss, vlan)) {
 		would_hit_hwbug = 1;
-
 	/* Now loop through additional data fragments, and queue them. */
-	if (skb_shinfo(skb)->nr_frags > 0) {
+	} else if (skb_shinfo(skb)->nr_frags > 0) {
 		u32 tmp_mss = mss;
 
 		if (!tg3_flag(tp, HW_TSO_1) &&
@@ -6784,11 +6791,14 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			if (dma_mapping_error(&tp->pdev->dev, mapping))
 				goto dma_error;
 
-			if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
+			if (!budget ||
+			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
 					    len, base_flags |
 					    ((i == last) ? TXD_FLAG_END : 0),
-					    tmp_mss, vlan))
-				would_hit_hwbug = 1;
+					    tmp_mss, vlan)) {
+				would_hit_hwbug = 1;
+				break;
+			}
 		}
 	}
 
@@ -6828,7 +6838,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 
 dma_error:
-	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
+	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
 drop:
 	dev_kfree_skb(skb);
@@ -7281,7 +7291,8 @@ static void tg3_free_rings(struct tg3 *tp)
 			if (!skb)
 				continue;
 
-			tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
+			tg3_tx_skb_unmap(tnapi, i,
+					 skb_shinfo(skb)->nr_frags - 1);
 
 			dev_kfree_skb_any(skb);
 		}
@@ -9200,7 +9211,7 @@ static void tg3_timer(unsigned long __opaque)
 {
 	struct tg3 *tp = (struct tg3 *) __opaque;
 
-	if (tp->irq_sync)
+	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
 		goto restart_timer;
 
 	spin_lock(&tp->lock);
@@ -9223,10 +9234,9 @@ static void tg3_timer(unsigned long __opaque)
 		}
 
 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
-			tg3_flag_set(tp, RESTART_TIMER);
 			spin_unlock(&tp->lock);
-			schedule_work(&tp->reset_task);
-			return;
+			tg3_reset_task_schedule(tp);
+			goto restart_timer;
 		}
 	}
 
@@ -9674,15 +9684,14 @@ static int tg3_open(struct net_device *dev)
 		struct tg3_napi *tnapi = &tp->napi[i];
 		err = tg3_request_irq(tp, i);
 		if (err) {
-			for (i--; i >= 0; i--)
+			for (i--; i >= 0; i--) {
+				tnapi = &tp->napi[i];
 				free_irq(tnapi->irq_vec, tnapi);
-			break;
+			}
+			goto err_out2;
 		}
 	}
 
-	if (err)
-		goto err_out2;
-
 	tg3_full_lock(tp, 0);
 
 	err = tg3_init_hw(tp, 1);
@@ -9783,7 +9792,7 @@ static int tg3_close(struct net_device *dev)
 	struct tg3 *tp = netdev_priv(dev);
 
 	tg3_napi_disable(tp);
-	cancel_work_sync(&tp->reset_task);
+	tg3_reset_task_cancel(tp);
 
 	netif_tx_stop_all_queues(dev);
 
@@ -11520,7 +11529,7 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
 		break;
 	}
 
-	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
+	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
 	dev_kfree_skb(skb);
 
 	if (tx_idx != tnapi->tx_prod)
@@ -14228,12 +14237,30 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	val = tr32(MEMARB_MODE);
 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
 
-	if (tg3_flag(tp, PCIX_MODE)) {
-		pci_read_config_dword(tp->pdev,
-				      tp->pcix_cap + PCI_X_STATUS, &val);
-		tp->pci_fn = val & 0x7;
-	} else {
-		tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
+	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+	    tg3_flag(tp, 5780_CLASS)) {
+		if (tg3_flag(tp, PCIX_MODE)) {
+			pci_read_config_dword(tp->pdev,
+					      tp->pcix_cap + PCI_X_STATUS,
+					      &val);
+			tp->pci_fn = val & 0x7;
+		}
+	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
+		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
+		    NIC_SRAM_CPMUSTAT_SIG) {
+			tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
+			tp->pci_fn = tp->pci_fn ? 1 : 0;
+		}
+	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
+		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
+		    NIC_SRAM_CPMUSTAT_SIG) {
+			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
+				     TG3_CPMU_STATUS_FSHFT_5719;
+		}
 	}
 
 	/* Get eeprom hw config before calling tg3_set_power_state().
@@ -15665,7 +15692,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
 	if (tp->fw)
 		release_firmware(tp->fw);
 
-	cancel_work_sync(&tp->reset_task);
+	tg3_reset_task_cancel(tp);
 
 	if (tg3_flag(tp, USE_PHYLIB)) {
 		tg3_phy_fini(tp);
@@ -15699,7 +15726,7 @@ static int tg3_suspend(struct device *device)
 	if (!netif_running(dev))
 		return 0;
 
-	flush_work_sync(&tp->reset_task);
+	tg3_reset_task_cancel(tp);
 	tg3_phy_stop(tp);
 	tg3_netif_stop(tp);
 
@@ -15812,12 +15839,10 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 	tg3_netif_stop(tp);
 
 	del_timer_sync(&tp->timer);
-	tg3_flag_clear(tp, RESTART_TIMER);
 
 	/* Want to make sure that the reset task doesn't run */
-	cancel_work_sync(&tp->reset_task);
+	tg3_reset_task_cancel(tp);
 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
-	tg3_flag_clear(tp, RESTART_TIMER);
 
 	netif_device_detach(netdev);
 
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index f32f288134c7..94b4bd049a33 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -1095,6 +1095,11 @@
 #define TG3_CPMU_CLCK_ORIDE		0x00003624
 #define  CPMU_CLCK_ORIDE_MAC_ORIDE_EN	 0x80000000
 
+#define TG3_CPMU_STATUS			0x0000362c
+#define  TG3_CPMU_STATUS_FMSK_5717	 0x20000000
+#define  TG3_CPMU_STATUS_FMSK_5719	 0xc0000000
+#define  TG3_CPMU_STATUS_FSHFT_5719	 30
+
 #define TG3_CPMU_CLCK_STAT		0x00003630
 #define  CPMU_CLCK_STAT_MAC_CLCK_MASK	 0x001f0000
 #define  CPMU_CLCK_STAT_MAC_CLCK_62_5	 0x00000000
@@ -2128,6 +2133,10 @@
 #define  NIC_SRAM_RGMII_EXT_IBND_RX_EN	 0x00000008
 #define  NIC_SRAM_RGMII_EXT_IBND_TX_EN	 0x00000010
 
+#define NIC_SRAM_CPMU_STATUS		0x00000e00
+#define  NIC_SRAM_CPMUSTAT_SIG		 0x0000362c
+#define  NIC_SRAM_CPMUSTAT_SIG_MSK	 0x0000ffff
+
 #define NIC_SRAM_RX_MINI_BUFFER_DESC	0x00001000
 
 #define NIC_SRAM_DMA_DESC_POOL_BASE	0x00002000
@@ -2344,9 +2353,13 @@
 #define  APE_PER_LOCK_GRANT_DRIVER	 0x00001000
 
 /* APE convenience enumerations. */
-#define TG3_APE_LOCK_GRC		1
-#define TG3_APE_LOCK_MEM		4
-#define TG3_APE_LOCK_GPIO		7
+#define TG3_APE_LOCK_PHY0		0
+#define TG3_APE_LOCK_GRC		1
+#define TG3_APE_LOCK_PHY1		2
+#define TG3_APE_LOCK_PHY2		3
+#define TG3_APE_LOCK_MEM		4
+#define TG3_APE_LOCK_PHY3		5
+#define TG3_APE_LOCK_GPIO		7
 
 #define TG3_EEPROM_SB_F1R2_MBA_OFF	0x10
 
@@ -2866,7 +2879,6 @@ enum TG3_FLAGS {
 	TG3_FLAG_JUMBO_CAPABLE,
 	TG3_FLAG_CHIP_RESETTING,
 	TG3_FLAG_INIT_COMPLETE,
-	TG3_FLAG_RESTART_TIMER,
 	TG3_FLAG_TSO_BUG,
 	TG3_FLAG_IS_5788,
 	TG3_FLAG_MAX_RXPEND_64,
@@ -2909,6 +2921,7 @@ enum TG3_FLAGS {
 	TG3_FLAG_APE_HAS_NCSI,
 	TG3_FLAG_5717_PLUS,
 	TG3_FLAG_4K_FIFO_LIMIT,
+	TG3_FLAG_RESET_TASK_PENDING,
 
 	/* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
 	TG3_FLAG_NUMBER_OF_FLAGS,	/* Last entry in enum TG3_FLAGS */
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 1cf671643d1f..c520cfd3b298 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -7,8 +7,7 @@ config NET_VENDOR_FREESCALE
 	default y
 	depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
 		   M523x || M527x || M5272 || M528x || M520x || M532x || \
-		   ARCH_MXC || ARCH_MXS || \
-		   (PPC_MPC52xx && PPC_BESTCOMM)
+		   ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM)
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y
 	  and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 61029dc7fa6f..76213162fbe3 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -5,7 +5,11 @@
 config NET_VENDOR_INTEL
 	bool "Intel devices"
 	default y
-	depends on PCI || PCI_MSI
+	depends on PCI || PCI_MSI || ISA || ISA_DMA_API || ARM || \
+		   ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \
+		   GSC || BVME6000 || MVME16x || ARCH_ENP2611 || \
+		   (ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR) || \
+		   EXPERIMENTAL
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y
 	  and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index db95731863d7..00fcd39ad666 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -442,12 +442,14 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
 
 int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter)
 {
+#ifdef CONFIG_PCI_IOV
 	int i;
 	for (i = 0; i < adapter->num_vfs; i++) {
 		if (adapter->vfinfo[i].vfdev->dev_flags &
 				PCI_DEV_FLAGS_ASSIGNED)
 			return true;
 	}
+#endif
 	return false;
 }
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 4a5d8897faab..df04f1a3857c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -42,11 +42,11 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
 int ixgbe_ndo_get_vf_config(struct net_device *netdev,
 			    int vf, struct ifla_vf_info *ivi);
 void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
-#ifdef CONFIG_PCI_IOV
 void ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
+int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter);
+#ifdef CONFIG_PCI_IOV
 void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
 			const struct ixgbe_info *ii);
-int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter);
 #endif
 
 
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index cbd026f3bc57..fdc6c394c683 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -366,17 +366,6 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 			gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
 		}
 	} else {
-		if (hw->chip_id >= CHIP_ID_YUKON_OPT) {
-			u16 ctrl2 = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL_2);
-
-			/* enable PHY Reverse Auto-Negotiation */
-			ctrl2 |= 1u << 13;
-
-			/* Write PHY changes (SW-reset must follow) */
-			gm_phy_write(hw, port, PHY_MARV_EXT_CTRL_2, ctrl2);
-		}
-
-
 		/* disable energy detect */
 		ctrl &= ~PHY_M_PC_EN_DET_MSK;
 
diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig
index 4a6b9fd073b6..eb836f770f50 100644
--- a/drivers/net/ethernet/natsemi/Kconfig
+++ b/drivers/net/ethernet/natsemi/Kconfig
@@ -5,7 +5,10 @@
 config NET_VENDOR_NATSEMI
 	bool "National Semi-conductor devices"
 	default y
-	depends on MCA || MAC || MACH_JAZZ || PCI || XTENSA_PLATFORM_XT2000
+	depends on AMIGA_PCMCIA || ARM || EISA || EXPERIMENTAL || H8300 || \
+		   ISA || M32R || MAC || MACH_JAZZ || MACH_TX49XX || MCA || \
+		   MCA_LEGACY || MIPS || PCI || PCMCIA || SUPERH || \
+		   XTENSA_PLATFORM_XT2000 || ZORRO
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y
 	  and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 1e37eb98c4e2..1dca57013cb2 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1682,6 +1682,7 @@ static void nv_get_hw_stats(struct net_device *dev)
 		np->estats.tx_pause += readl(base + NvRegTxPause);
 		np->estats.rx_pause += readl(base + NvRegRxPause);
 		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
+		np->estats.rx_errors_total += np->estats.rx_drop_frame;
 	}
 
 	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
@@ -1706,11 +1707,14 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
 		nv_get_hw_stats(dev);
 
 		/* copy to net_device stats */
+		dev->stats.tx_packets = np->estats.tx_packets;
+		dev->stats.rx_bytes = np->estats.rx_bytes;
 		dev->stats.tx_bytes = np->estats.tx_bytes;
 		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
 		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
 		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
 		dev->stats.rx_over_errors = np->estats.rx_over_errors;
+		dev->stats.rx_fifo_errors = np->estats.rx_drop_frame;
 		dev->stats.rx_errors = np->estats.rx_errors_total;
 		dev->stats.tx_errors = np->estats.tx_errors_total;
 	}
@@ -2099,10 +2103,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* add fragments to entries count */
 	for (i = 0; i < fragments; i++) {
-		u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
 
-		entries += (size >> NV_TX2_TSO_MAX_SHIFT) +
-			   ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+		entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
+			   ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 	}
 
 	spin_lock_irqsave(&np->lock, flags);
@@ -2141,13 +2145,13 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* setup the fragments */
 	for (i = 0; i < fragments; i++) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		u32 size = skb_frag_size(frag);
+		u32 frag_size = skb_frag_size(frag);
 		offset = 0;
 
 		do {
 			prev_tx = put_tx;
 			prev_tx_ctx = np->put_tx_ctx;
-			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
 			np->put_tx_ctx->dma = skb_frag_dma_map(
 							&np->pci_dev->dev,
 							frag, offset,
@@ -2159,12 +2163,12 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 
 			offset += bcnt;
-			size -= bcnt;
+			frag_size -= bcnt;
 			if (unlikely(put_tx++ == np->last_tx.orig))
 				put_tx = np->first_tx.orig;
 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
 				np->put_tx_ctx = np->first_tx_ctx;
-		} while (size);
+		} while (frag_size);
 	}
 
 	/* set last fragment flag */
@@ -2213,10 +2217,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 
 	/* add fragments to entries count */
 	for (i = 0; i < fragments; i++) {
-		u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
 
-		entries += (size >> NV_TX2_TSO_MAX_SHIFT) +
-			   ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+		entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
+			   ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 	}
 
 	spin_lock_irqsave(&np->lock, flags);
@@ -2257,13 +2261,13 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 	/* setup the fragments */
 	for (i = 0; i < fragments; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		u32 size = skb_frag_size(frag);
+		u32 frag_size = skb_frag_size(frag);
 		offset = 0;
 
 		do {
 			prev_tx = put_tx;
 			prev_tx_ctx = np->put_tx_ctx;
-			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
 			np->put_tx_ctx->dma = skb_frag_dma_map(
 							&np->pci_dev->dev,
 							frag, offset,
@@ -2276,12 +2280,12 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 
 			offset += bcnt;
-			size -= bcnt;
+			frag_size -= bcnt;
 			if (unlikely(put_tx++ == np->last_tx.ex))
 				put_tx = np->first_tx.ex;
 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
 				np->put_tx_ctx = np->first_tx_ctx;
-		} while (size);
+		} while (frag_size);
 	}
 
 	/* set last fragment flag */
@@ -2374,16 +2378,8 @@ static int nv_tx_done(struct net_device *dev, int limit)
 		if (np->desc_ver == DESC_VER_1) {
 			if (flags & NV_TX_LASTPACKET) {
 				if (flags & NV_TX_ERROR) {
-					if (flags & NV_TX_UNDERFLOW)
-						dev->stats.tx_fifo_errors++;
-					if (flags & NV_TX_CARRIERLOST)
-						dev->stats.tx_carrier_errors++;
 					if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
 						nv_legacybackoff_reseed(dev);
-					dev->stats.tx_errors++;
-				} else {
-					dev->stats.tx_packets++;
-					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
 				}
 				dev_kfree_skb_any(np->get_tx_ctx->skb);
 				np->get_tx_ctx->skb = NULL;
@@ -2392,16 +2388,8 @@ static int nv_tx_done(struct net_device *dev, int limit)
 		} else {
 			if (flags & NV_TX2_LASTPACKET) {
 				if (flags & NV_TX2_ERROR) {
-					if (flags & NV_TX2_UNDERFLOW)
-						dev->stats.tx_fifo_errors++;
-					if (flags & NV_TX2_CARRIERLOST)
-						dev->stats.tx_carrier_errors++;
 					if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
 						nv_legacybackoff_reseed(dev);
-					dev->stats.tx_errors++;
-				} else {
-					dev->stats.tx_packets++;
-					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
 				}
 				dev_kfree_skb_any(np->get_tx_ctx->skb);
 				np->get_tx_ctx->skb = NULL;
@@ -2434,9 +2422,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
 		nv_unmap_txskb(np, np->get_tx_ctx);
 
 		if (flags & NV_TX2_LASTPACKET) {
-			if (!(flags & NV_TX2_ERROR))
-				dev->stats.tx_packets++;
-			else {
+			if (flags & NV_TX2_ERROR) {
 				if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
 					if (np->driver_data & DEV_HAS_GEAR_MODE)
 						nv_gear_backoff_reseed(dev);
@@ -2636,7 +2622,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
 				if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
 					len = nv_getlen(dev, skb->data, len);
 					if (len < 0) {
-						dev->stats.rx_errors++;
 						dev_kfree_skb(skb);
 						goto next_pkt;
 					}
@@ -2650,11 +2635,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
 				else {
 					if (flags & NV_RX_MISSEDFRAME)
 						dev->stats.rx_missed_errors++;
-					if (flags & NV_RX_CRCERR)
-						dev->stats.rx_crc_errors++;
-					if (flags & NV_RX_OVERFLOW)
-						dev->stats.rx_over_errors++;
-					dev->stats.rx_errors++;
 					dev_kfree_skb(skb);
 					goto next_pkt;
 				}
@@ -2670,7 +2650,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
 				if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
 					len = nv_getlen(dev, skb->data, len);
 					if (len < 0) {
-						dev->stats.rx_errors++;
 						dev_kfree_skb(skb);
 						goto next_pkt;
 					}
@@ -2682,11 +2661,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
 				}
 				/* the rest are hard errors */
 				else {
-					if (flags & NV_RX2_CRCERR)
-						dev->stats.rx_crc_errors++;
-					if (flags & NV_RX2_OVERFLOW)
-						dev->stats.rx_over_errors++;
-					dev->stats.rx_errors++;
 					dev_kfree_skb(skb);
 					goto next_pkt;
 				}
@@ -2704,7 +2678,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
 		skb->protocol = eth_type_trans(skb, dev);
 		napi_gro_receive(&np->napi, skb);
 		dev->stats.rx_packets++;
-		dev->stats.rx_bytes += len;
 next_pkt:
 		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
 			np->get_rx.orig = np->first_rx.orig;
@@ -2787,9 +2760,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 				__vlan_hwaccel_put_tag(skb, vid);
 			}
 			napi_gro_receive(&np->napi, skb);
-
 			dev->stats.rx_packets++;
-			dev->stats.rx_bytes += len;
 		} else {
 			dev_kfree_skb(skb);
 		}
@@ -2962,11 +2933,11 @@ static void nv_set_multicast(struct net_device *dev)
 			struct netdev_hw_addr *ha;
 
 			netdev_for_each_mc_addr(ha, dev) {
-				unsigned char *addr = ha->addr;
+				unsigned char *hw_addr = ha->addr;
 				u32 a, b;
 
-				a = le32_to_cpu(*(__le32 *) addr);
-				b = le16_to_cpu(*(__le16 *) (&addr[4]));
+				a = le32_to_cpu(*(__le32 *) hw_addr);
+				b = le16_to_cpu(*(__le16 *) (&hw_addr[4]));
 				alwaysOn[0] &= a;
 				alwaysOff[0] &= ~a;
 				alwaysOn[1] &= b;
@@ -3398,7 +3369,8 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
 
 	for (i = 0;; i++) {
 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
-		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
+		writel(events, base + NvRegMSIXIrqStatus);
+		netdev_dbg(dev, "tx irq events: %08x\n", events);
 		if (!(events & np->irqmask))
 			break;
 
@@ -3509,7 +3481,8 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
 
 	for (i = 0;; i++) {
 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
-		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
+		writel(events, base + NvRegMSIXIrqStatus);
+		netdev_dbg(dev, "rx irq events: %08x\n", events);
 		if (!(events & np->irqmask))
 			break;
 
@@ -3553,7 +3526,8 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
 
 	for (i = 0;; i++) {
 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
-		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
+		writel(events, base + NvRegMSIXIrqStatus);
+		netdev_dbg(dev, "irq events: %08x\n", events);
 		if (!(events & np->irqmask))
 			break;
 
@@ -3617,10 +3591,10 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data)
 
 	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
 		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
-		writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
+		writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus);
 	} else {
 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
-		writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
+		writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
 	}
 	pci_push(base);
 	if (!(events & NVREG_IRQ_TIMER))
@@ -4566,7 +4540,7 @@ static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *e
 	struct fe_priv *np = netdev_priv(dev);
 
 	/* update stats */
-	nv_do_stats_poll((unsigned long)dev);
+	nv_get_hw_stats(dev);
 
 	memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
 }
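The forcedeth interrupt hunks above ("Acknowledge only interrupts that are being processed") all make the same change: write back only the events bits that were just read, instead of the whole NVREG_IRQ_*_ALL mask. A minimal sketch of why that matters, using a hypothetical register layout for illustration; only the writel(events, ...) write-back mirrors the actual diff:

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical MMIO layout, for illustration only. */
#define IRQ_STATUS_REG	0x000
#define IRQ_TX_ALL	0x00ff

static u32 ack_tx_events(void __iomem *base)
{
	u32 events = readl(base + IRQ_STATUS_REG) & IRQ_TX_ALL;

	/* Ack exactly what we observed; acking the full mask could
	 * clear an event that was raised after the readl() and was
	 * never processed, losing an interrupt. */
	writel(events, base + IRQ_STATUS_REG);
	return events;
}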