author    Anton Altaparmakov <aia21@cantab.net>  2005-06-26 17:19:40 -0400
committer Anton Altaparmakov <aia21@cantab.net>  2005-06-26 17:19:40 -0400
commit    2a322e4c08be4e7cb0c04b427ddaaa679fd88863 (patch)
tree      ad8cc17bfd3b5e57e36f07a249028667d72f0b96 /drivers/net
parent    ba6d2377c85c9b8a793f455d8c9b6cf31985d70f (diff)
parent    8678887e7fb43cd6c9be6c9807b05e77848e0920 (diff)
Automatic merge with /usr/src/ntfs-2.6.git.
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/8139too.c           |   2
-rw-r--r--  drivers/net/irda/sir_kthread.c  |   3
-rw-r--r--  drivers/net/irda/stir4200.c     |   4
-rw-r--r--  drivers/net/tg3.c               | 337
-rw-r--r--  drivers/net/tg3.h               |  24
-rw-r--r--  drivers/net/wan/Kconfig         |   2
-rw-r--r--  drivers/net/wireless/airo.c     |   2
7 files changed, 176 insertions, 198 deletions
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 047202c4d9a8..5a4a08a7c951 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1606,7 +1606,7 @@ static int rtl8139_thread (void *data)
 	do {
 		timeout = interruptible_sleep_on_timeout (&tp->thr_wait, timeout);
 		/* make swsusp happy with our thread */
-		try_to_freeze(PF_FREEZE);
+		try_to_freeze();
 	} while (!signal_pending (current) && (timeout > 0));
 
 	if (signal_pending (current)) {
diff --git a/drivers/net/irda/sir_kthread.c b/drivers/net/irda/sir_kthread.c
index 18cea1099530..c65054364bca 100644
--- a/drivers/net/irda/sir_kthread.c
+++ b/drivers/net/irda/sir_kthread.c
@@ -135,8 +135,7 @@ static int irda_thread(void *startup)
 		remove_wait_queue(&irda_rq_queue.kick, &wait);
 
 		/* make swsusp happy with our thread */
-		if (current->flags & PF_FREEZE)
-			refrigerator(PF_FREEZE);
+		try_to_freeze();
 
 		run_irda_queue();
 	}
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 66f488c13717..15f207323d97 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -763,7 +763,7 @@ static int stir_transmit_thread(void *arg)
 	{
 #ifdef CONFIG_PM
 		/* if suspending, then power off and wait */
-		if (unlikely(current->flags & PF_FREEZE)) {
+		if (unlikely(freezing(current))) {
 			if (stir->receiving)
 				receive_stop(stir);
 			else
@@ -771,7 +771,7 @@ static int stir_transmit_thread(void *arg)
 
 			write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD);
 
-			refrigerator(PF_FREEZE);
+			refrigerator();
 
 			if (change_speed(stir, stir->speed))
 				break;
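These three hunks (plus the airo.c one at the end of this diff) are all the same freezer API cleanup: the PF_FREEZE argument is gone, the open-coded current->flags & PF_FREEZE test becomes freezing(current), and refrigerator()/try_to_freeze() are called with no arguments. As a rough sketch of the resulting idiom in a driver kernel thread (illustrative only; the thread itself is hypothetical, not code from this patch):

	static int example_thread(void *data)
	{
		daemonize("example");
		allow_signal(SIGTERM);

		while (!signal_pending(current)) {
			/* Cooperate with swsusp: if the freezer marked this
			 * task (freezing(current) is true), try_to_freeze()
			 * parks it in the refrigerator until resume;
			 * otherwise it is a cheap no-op.
			 */
			try_to_freeze();

			/* ... sleep for work, then service one request ... */
		}
		return 0;
	}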
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index a0b8848049c9..7e371b1209a1 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -66,8 +66,8 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.31"
-#define DRV_MODULE_RELDATE	"June 8, 2005"
+#define DRV_MODULE_VERSION	"3.32"
+#define DRV_MODULE_RELDATE	"June 24, 2005"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -337,12 +337,10 @@ static struct {
 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
 {
 	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&tp->indirect_lock, flags);
+		spin_lock_bh(&tp->indirect_lock);
 		pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
 		pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
-		spin_unlock_irqrestore(&tp->indirect_lock, flags);
+		spin_unlock_bh(&tp->indirect_lock);
 	} else {
 		writel(val, tp->regs + off);
 		if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
@@ -353,12 +351,10 @@ static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
 {
 	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&tp->indirect_lock, flags);
+		spin_lock_bh(&tp->indirect_lock);
 		pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
 		pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
-		spin_unlock_irqrestore(&tp->indirect_lock, flags);
+		spin_unlock_bh(&tp->indirect_lock);
 	} else {
 		void __iomem *dest = tp->regs + off;
 		writel(val, dest);
@@ -398,28 +394,24 @@ static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
 
 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&tp->indirect_lock, flags);
+	spin_lock_bh(&tp->indirect_lock);
 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
 
 	/* Always leave this as zero. */
 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
-	spin_unlock_irqrestore(&tp->indirect_lock, flags);
+	spin_unlock_bh(&tp->indirect_lock);
 }
 
 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&tp->indirect_lock, flags);
+	spin_lock_bh(&tp->indirect_lock);
 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
 	pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
 
 	/* Always leave this as zero. */
 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
-	spin_unlock_irqrestore(&tp->indirect_lock, flags);
+	spin_unlock_bh(&tp->indirect_lock);
 }
 
 static void tg3_disable_ints(struct tg3 *tp)
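The spin_lock_irqsave() to spin_lock_bh() conversions above (and throughout the rest of this patch) all rest on one fact established further down: the hardware IRQ handlers no longer take any driver locks, so the only contexts left to exclude are process context and softirqs (the driver timer and the NAPI poll), and disabling bottom halves is enough. Schematically, with a hypothetical my_lock standing in for tp->indirect_lock (a sketch, not diff content):

	/* Before: the lock had to be hw-IRQ safe. */
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);
	/* ... indirect register/config-space access ... */
	spin_unlock_irqrestore(&my_lock, flags);

	/* After: no hw IRQ path takes my_lock any more, so shutting out
	 * softirqs is sufficient, and cheaper than disabling interrupts.
	 */
	spin_lock_bh(&my_lock);
	/* ... same indirect access ... */
	spin_unlock_bh(&my_lock);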
@@ -438,12 +430,14 @@ static inline void tg3_cond_int(struct tg3 *tp)
 
 static void tg3_enable_ints(struct tg3 *tp)
 {
+	tp->irq_sync = 0;
+	wmb();
+
 	tw32(TG3PCI_MISC_HOST_CTRL,
 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
 		     (tp->last_tag << 24));
 	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
-
 	tg3_cond_int(tp);
 }
 
@@ -492,6 +486,7 @@ static void tg3_restart_ints(struct tg3 *tp)
 
 static inline void tg3_netif_stop(struct tg3 *tp)
 {
+	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
 	netif_poll_disable(tp->dev);
 	netif_tx_disable(tp->dev);
 }
@@ -504,7 +499,8 @@ static inline void tg3_netif_start(struct tg3 *tp)
 	 * (such as after tg3_init_hw)
 	 */
 	netif_poll_enable(tp->dev);
-	tg3_cond_int(tp);
+	tp->hw_status->status |= SD_STATUS_UPDATED;
+	tg3_enable_ints(tp);
 }
 
 static void tg3_switch_clocks(struct tg3 *tp)
@@ -2578,7 +2574,7 @@ static void tg3_tx(struct tg3 *tp)
 			sw_idx = NEXT_TX(sw_idx);
 		}
 
-		dev_kfree_skb_irq(skb);
+		dev_kfree_skb(skb);
 	}
 
 	tp->tx_cons = sw_idx;
@@ -2884,11 +2880,8 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 {
 	struct tg3 *tp = netdev_priv(netdev);
 	struct tg3_hw_status *sblk = tp->hw_status;
-	unsigned long flags;
 	int done;
 
-	spin_lock_irqsave(&tp->lock, flags);
-
 	/* handle link change and other phy events */
 	if (!(tp->tg3_flags &
 	      (TG3_FLAG_USE_LINKCHG_REG |
@@ -2896,7 +2889,9 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 		if (sblk->status & SD_STATUS_LINK_CHG) {
 			sblk->status = SD_STATUS_UPDATED |
 				(sblk->status & ~SD_STATUS_LINK_CHG);
+			spin_lock(&tp->lock);
 			tg3_setup_phy(tp, 0);
+			spin_unlock(&tp->lock);
 		}
 	}
 
@@ -2907,8 +2902,6 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 		spin_unlock(&tp->tx_lock);
 	}
 
-	spin_unlock_irqrestore(&tp->lock, flags);
-
 	/* run RX thread, within the bounds set by NAPI.
 	 * All RX "locking" is done by ensuring outside
 	 * code synchronizes with dev->poll()
@@ -2929,19 +2922,54 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
 		tp->last_tag = sblk->status_tag;
 	rmb();
+	sblk->status &= ~SD_STATUS_UPDATED;
 
 	/* if no more work, tell net stack and NIC we're done */
 	done = !tg3_has_work(tp);
 	if (done) {
-		spin_lock_irqsave(&tp->lock, flags);
-		__netif_rx_complete(netdev);
+		spin_lock(&tp->lock);
+		netif_rx_complete(netdev);
 		tg3_restart_ints(tp);
-		spin_unlock_irqrestore(&tp->lock, flags);
+		spin_unlock(&tp->lock);
 	}
 
 	return (done ? 0 : 1);
 }
 
+static void tg3_irq_quiesce(struct tg3 *tp)
+{
+	BUG_ON(tp->irq_sync);
+
+	tp->irq_sync = 1;
+	smp_mb();
+
+	synchronize_irq(tp->pdev->irq);
+}
+
+static inline int tg3_irq_sync(struct tg3 *tp)
+{
+	return tp->irq_sync;
+}
+
+/* Fully shutdown all tg3 driver activity elsewhere in the system.
+ * If irq_sync is non-zero, then the IRQ handler must be synchronized
+ * with as well.  Most of the time, this is not necessary except when
+ * shutting down the device.
+ */
+static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
+{
+	if (irq_sync)
+		tg3_irq_quiesce(tp);
+	spin_lock_bh(&tp->lock);
+	spin_lock(&tp->tx_lock);
+}
+
+static inline void tg3_full_unlock(struct tg3 *tp)
+{
+	spin_unlock(&tp->tx_lock);
+	spin_unlock_bh(&tp->lock);
+}
+
 /* MSI ISR - No need to check for interrupt sharing and no need to
  * flush status block and interrupt mailbox.  PCI ordering rules
  * guarantee that MSI will arrive after the status block.
@@ -2951,9 +2979,6 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
 	struct net_device *dev = dev_id;
 	struct tg3 *tp = netdev_priv(dev);
 	struct tg3_hw_status *sblk = tp->hw_status;
-	unsigned long flags;
-
-	spin_lock_irqsave(&tp->lock, flags);
 
 	/*
 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
@@ -2964,6 +2989,9 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
 	 */
 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
 	tp->last_tag = sblk->status_tag;
+	rmb();
+	if (tg3_irq_sync(tp))
+		goto out;
 	sblk->status &= ~SD_STATUS_UPDATED;
 	if (likely(tg3_has_work(tp)))
 		netif_rx_schedule(dev);		/* schedule NAPI poll */
@@ -2972,9 +3000,7 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
 		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
 			     tp->last_tag << 24);
 	}
-
-	spin_unlock_irqrestore(&tp->lock, flags);
-
+out:
 	return IRQ_RETVAL(1);
 }
 
@@ -2983,11 +3009,8 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	struct net_device *dev = dev_id;
 	struct tg3 *tp = netdev_priv(dev);
 	struct tg3_hw_status *sblk = tp->hw_status;
-	unsigned long flags;
 	unsigned int handled = 1;
 
-	spin_lock_irqsave(&tp->lock, flags);
-
 	/* In INTx mode, it is possible for the interrupt to arrive at
 	 * the CPU before the status block posted prior to the interrupt.
 	 * Reading the PCI State register will confirm whether the
@@ -3004,6 +3027,8 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 		 */
 		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
 			     0x00000001);
+		if (tg3_irq_sync(tp))
+			goto out;
 		sblk->status &= ~SD_STATUS_UPDATED;
 		if (likely(tg3_has_work(tp)))
 			netif_rx_schedule(dev);		/* schedule NAPI poll */
@@ -3018,9 +3043,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	} else {	/* shared interrupt */
 		handled = 0;
 	}
-
-	spin_unlock_irqrestore(&tp->lock, flags);
-
+out:
 	return IRQ_RETVAL(handled);
 }
 
@@ -3029,11 +3052,8 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
 	struct net_device *dev = dev_id;
 	struct tg3 *tp = netdev_priv(dev);
 	struct tg3_hw_status *sblk = tp->hw_status;
-	unsigned long flags;
 	unsigned int handled = 1;
 
-	spin_lock_irqsave(&tp->lock, flags);
-
 	/* In INTx mode, it is possible for the interrupt to arrive at
 	 * the CPU before the status block posted prior to the interrupt.
 	 * Reading the PCI State register will confirm whether the
@@ -3051,6 +3071,9 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
 		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
 			     0x00000001);
 		tp->last_tag = sblk->status_tag;
+		rmb();
+		if (tg3_irq_sync(tp))
+			goto out;
 		sblk->status &= ~SD_STATUS_UPDATED;
 		if (likely(tg3_has_work(tp)))
 			netif_rx_schedule(dev);		/* schedule NAPI poll */
@@ -3065,9 +3088,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
 	} else {	/* shared interrupt */
 		handled = 0;
 	}
-
-	spin_unlock_irqrestore(&tp->lock, flags);
-
+out:
 	return IRQ_RETVAL(handled);
 }
 
@@ -3106,8 +3127,7 @@ static void tg3_reset_task(void *_data)
 
 	tg3_netif_stop(tp);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 1);
 
 	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
 	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
@@ -3117,8 +3137,7 @@ static void tg3_reset_task(void *_data)
 
 	tg3_netif_start(tp);
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	if (restart_timer)
 		mod_timer(&tp->timer, jiffies + 1);
@@ -3224,39 +3243,21 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned int i;
 	u32 len, entry, base_flags, mss;
 	int would_hit_hwbug;
-	unsigned long flags;
 
 	len = skb_headlen(skb);
 
 	/* No BH disabling for tx_lock here.  We are running in BH disabled
 	 * context and TX reclaim runs via tp->poll inside of a software
-	 * interrupt.  Rejoice!
-	 *
-	 * Actually, things are not so simple.  If we are to take a hw
-	 * IRQ here, we can deadlock, consider:
-	 *
-	 *       CPU1			CPU2
-	 *   tg3_start_xmit
-	 *   take tp->tx_lock
-	 *			tg3_timer
-	 *			take tp->lock
-	 *   tg3_interrupt
-	 *   spin on tp->lock
-	 *			spin on tp->tx_lock
-	 *
-	 * So we really do need to disable interrupts when taking
-	 * tx_lock here.
+	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
+	 * no IRQ context deadlocks to worry about either.  Rejoice!
 	 */
-	local_irq_save(flags);
-	if (!spin_trylock(&tp->tx_lock)) {
-		local_irq_restore(flags);
+	if (!spin_trylock(&tp->tx_lock))
 		return NETDEV_TX_LOCKED;
-	}
 
 	/* This is a hard error, log it. */
 	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		netif_stop_queue(dev);
-		spin_unlock_irqrestore(&tp->tx_lock, flags);
+		spin_unlock(&tp->tx_lock);
 		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
 		       dev->name);
 		return NETDEV_TX_BUSY;
@@ -3421,7 +3422,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 out_unlock:
 	mmiowb();
-	spin_unlock_irqrestore(&tp->tx_lock, flags);
+	spin_unlock(&tp->tx_lock);
 
 	dev->trans_start = jiffies;
 
@@ -3455,8 +3456,8 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
 	}
 
 	tg3_netif_stop(tp);
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+
+	tg3_full_lock(tp, 1);
 
 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 
@@ -3466,8 +3467,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
 
 	tg3_netif_start(tp);
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	return 0;
 }
@@ -5088,9 +5088,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
 
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 
-	spin_lock_irq(&tp->lock);
+	spin_lock_bh(&tp->lock);
 	__tg3_set_mac_addr(tp);
-	spin_unlock_irq(&tp->lock);
+	spin_unlock_bh(&tp->lock);
 
 	return 0;
 }
@@ -5727,9 +5727,6 @@ static int tg3_reset_hw(struct tg3 *tp)
 
 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
 
-	if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
-		tg3_enable_ints(tp);
-
 	return 0;
 }
 
@@ -5802,10 +5799,8 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
 static void tg3_timer(unsigned long __opaque)
 {
 	struct tg3 *tp = (struct tg3 *) __opaque;
-	unsigned long flags;
 
-	spin_lock_irqsave(&tp->lock, flags);
-	spin_lock(&tp->tx_lock);
+	spin_lock(&tp->lock);
 
 	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
 		/* All of this garbage is because when using non-tagged
@@ -5822,8 +5817,7 @@ static void tg3_timer(unsigned long __opaque)
 
 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
 			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
-			spin_unlock(&tp->tx_lock);
-			spin_unlock_irqrestore(&tp->lock, flags);
+			spin_unlock(&tp->lock);
 			schedule_work(&tp->reset_task);
 			return;
 		}
@@ -5891,8 +5885,7 @@ static void tg3_timer(unsigned long __opaque)
 		tp->asf_counter = tp->asf_multiplier;
 	}
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irqrestore(&tp->lock, flags);
+	spin_unlock(&tp->lock);
 
 	tp->timer.expires = jiffies + tp->timer_offset;
 	add_timer(&tp->timer);
@@ -6007,14 +6000,12 @@ static int tg3_test_msi(struct tg3 *tp)
 	/* Need to reset the chip because the MSI cycle may have terminated
 	 * with Master Abort.
 	 */
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 1);
 
 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 	err = tg3_init_hw(tp);
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	if (err)
 		free_irq(tp->pdev->irq, dev);
@@ -6027,14 +6018,12 @@ static int tg3_open(struct net_device *dev)
 	struct tg3 *tp = netdev_priv(dev);
 	int err;
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 
 	tg3_disable_ints(tp);
 	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	/* The placement of this call is tied
 	 * to the setup and use of Host TX descriptors.
@@ -6081,8 +6070,7 @@ static int tg3_open(struct net_device *dev)
 		return err;
 	}
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 
 	err = tg3_init_hw(tp);
 	if (err) {
@@ -6106,8 +6094,7 @@ static int tg3_open(struct net_device *dev)
 		tp->timer.function = tg3_timer;
 	}
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	if (err) {
 		free_irq(tp->pdev->irq, dev);
@@ -6123,8 +6110,7 @@ static int tg3_open(struct net_device *dev)
 		err = tg3_test_msi(tp);
 
 		if (err) {
-			spin_lock_irq(&tp->lock);
-			spin_lock(&tp->tx_lock);
+			tg3_full_lock(tp, 0);
 
 			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
 				pci_disable_msi(tp->pdev);
@@ -6134,22 +6120,19 @@ static int tg3_open(struct net_device *dev)
 			tg3_free_rings(tp);
 			tg3_free_consistent(tp);
 
-			spin_unlock(&tp->tx_lock);
-			spin_unlock_irq(&tp->lock);
+			tg3_full_unlock(tp);
 
 			return err;
 		}
 	}
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 
 	add_timer(&tp->timer);
 	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
 	tg3_enable_ints(tp);
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	netif_start_queue(dev);
 
@@ -6395,8 +6378,7 @@ static int tg3_close(struct net_device *dev)
 
 	del_timer_sync(&tp->timer);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 1);
 #if 0
 	tg3_dump_state(tp);
 #endif
@@ -6410,8 +6392,7 @@ static int tg3_close(struct net_device *dev)
 			  TG3_FLAG_GOT_SERDES_FLOWCTL);
 	netif_carrier_off(tp->dev);
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	free_irq(tp->pdev->irq, dev);
 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
@@ -6448,16 +6429,15 @@ static unsigned long calc_crc_errors(struct tg3 *tp)
 	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
-		unsigned long flags;
 		u32 val;
 
-		spin_lock_irqsave(&tp->lock, flags);
+		spin_lock_bh(&tp->lock);
 		if (!tg3_readphy(tp, 0x1e, &val)) {
 			tg3_writephy(tp, 0x1e, val | 0x8000);
 			tg3_readphy(tp, 0x14, &val);
 		} else
 			val = 0;
-		spin_unlock_irqrestore(&tp->lock, flags);
+		spin_unlock_bh(&tp->lock);
 
 		tp->phy_crc_errors += val;
 
@@ -6719,11 +6699,9 @@ static void tg3_set_rx_mode(struct net_device *dev)
 {
 	struct tg3 *tp = netdev_priv(dev);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 	__tg3_set_rx_mode(dev);
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 }
 
 #define TG3_REGDUMP_LEN		(32 * 1024)
@@ -6745,8 +6723,7 @@ static void tg3_get_regs(struct net_device *dev,
 
 	memset(p, 0, TG3_REGDUMP_LEN);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 
 #define __GET_REG32(reg)	(*(p)++ = tr32(reg))
 #define GET_REG32_LOOP(base,len) \
@@ -6796,8 +6773,7 @@ do { p = (u32 *)(orig_p + (reg)); \
 #undef GET_REG32_LOOP
 #undef GET_REG32_1
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 }
 
 static int tg3_get_eeprom_len(struct net_device *dev)
@@ -6973,8 +6949,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 		return -EINVAL;
 	}
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 
 	tp->link_config.autoneg = cmd->autoneg;
 	if (cmd->autoneg == AUTONEG_ENABLE) {
@@ -6990,8 +6965,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	if (netif_running(dev))
 		tg3_setup_phy(tp, 1);
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	return 0;
 }
@@ -7027,12 +7001,12 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	    !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
 		return -EINVAL;
 
-	spin_lock_irq(&tp->lock);
+	spin_lock_bh(&tp->lock);
 	if (wol->wolopts & WAKE_MAGIC)
 		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
 	else
 		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
-	spin_unlock_irq(&tp->lock);
+	spin_unlock_bh(&tp->lock);
 
 	return 0;
 }
@@ -7072,7 +7046,7 @@ static int tg3_nway_reset(struct net_device *dev)
 	if (!netif_running(dev))
 		return -EAGAIN;
 
-	spin_lock_irq(&tp->lock);
+	spin_lock_bh(&tp->lock);
 	r = -EINVAL;
 	tg3_readphy(tp, MII_BMCR, &bmcr);
 	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
@@ -7080,7 +7054,7 @@ static int tg3_nway_reset(struct net_device *dev)
 		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
 		r = 0;
 	}
-	spin_unlock_irq(&tp->lock);
+	spin_unlock_bh(&tp->lock);
 
 	return r;
 }
@@ -7102,17 +7076,19 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
 {
 	struct tg3 *tp = netdev_priv(dev);
+	int irq_sync = 0;
 
 	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
 	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1))
 		return -EINVAL;
 
-	if (netif_running(dev))
+	if (netif_running(dev)) {
 		tg3_netif_stop(tp);
+		irq_sync = 1;
+	}
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, irq_sync);
 
 	tp->rx_pending = ering->rx_pending;
 
@@ -7128,8 +7104,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
 		tg3_netif_start(tp);
 	}
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	return 0;
 }
@@ -7146,12 +7121,15 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
 {
 	struct tg3 *tp = netdev_priv(dev);
+	int irq_sync = 0;
 
-	if (netif_running(dev))
+	if (netif_running(dev)) {
 		tg3_netif_stop(tp);
+		irq_sync = 1;
+	}
+
+	tg3_full_lock(tp, irq_sync);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
 	if (epause->autoneg)
 		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
 	else
@@ -7170,8 +7148,8 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
 		tg3_init_hw(tp);
 		tg3_netif_start(tp);
 	}
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+
+	tg3_full_unlock(tp);
 
 	return 0;
 }
@@ -7192,12 +7170,12 @@ static int tg3_set_rx_csum(struct net_device *dev, u32 data)
 		return 0;
 	}
 
-	spin_lock_irq(&tp->lock);
+	spin_lock_bh(&tp->lock);
 	if (data)
 		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
 	else
 		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
-	spin_unlock_irq(&tp->lock);
+	spin_unlock_bh(&tp->lock);
 
 	return 0;
 }
@@ -7606,8 +7584,6 @@ static int tg3_test_loopback(struct tg3 *tp)
 
 	tg3_abort_hw(tp, 1);
 
-	/* Clearing this flag to keep interrupts disabled */
-	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
 	tg3_reset_hw(tp);
 
 	mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
@@ -7716,11 +7692,14 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
 		data[1] = 1;
 	}
 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
-		if (netif_running(dev))
+		int irq_sync = 0;
+
+		if (netif_running(dev)) {
 			tg3_netif_stop(tp);
+			irq_sync = 1;
+		}
 
-		spin_lock_irq(&tp->lock);
-		spin_lock(&tp->tx_lock);
+		tg3_full_lock(tp, irq_sync);
 
 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
 		tg3_nvram_lock(tp);
@@ -7742,14 +7721,14 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
 			data[4] = 1;
 		}
 
-		spin_unlock(&tp->tx_lock);
-		spin_unlock_irq(&tp->lock);
+		tg3_full_unlock(tp);
+
 		if (tg3_test_interrupt(tp) != 0) {
 			etest->flags |= ETH_TEST_FL_FAILED;
 			data[5] = 1;
 		}
-		spin_lock_irq(&tp->lock);
-		spin_lock(&tp->tx_lock);
+
+		tg3_full_lock(tp, 0);
 
 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 		if (netif_running(dev)) {
@@ -7757,8 +7736,8 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
 			tg3_init_hw(tp);
 			tg3_netif_start(tp);
 		}
-		spin_unlock(&tp->tx_lock);
-		spin_unlock_irq(&tp->lock);
+
+		tg3_full_unlock(tp);
 	}
 }
 
@@ -7779,9 +7758,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
 			break;			/* We have no PHY */
 
-		spin_lock_irq(&tp->lock);
+		spin_lock_bh(&tp->lock);
 		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
-		spin_unlock_irq(&tp->lock);
+		spin_unlock_bh(&tp->lock);
 
 		data->val_out = mii_regval;
 
@@ -7795,9 +7774,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 
-		spin_lock_irq(&tp->lock);
+		spin_lock_bh(&tp->lock);
 		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
-		spin_unlock_irq(&tp->lock);
+		spin_unlock_bh(&tp->lock);
 
 		return err;
 
@@ -7813,28 +7792,24 @@ static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
 {
 	struct tg3 *tp = netdev_priv(dev);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 
 	tp->vlgrp = grp;
 
 	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
 	__tg3_set_rx_mode(dev);
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 }
 
 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
 	struct tg3 *tp = netdev_priv(dev);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 	if (tp->vlgrp)
 		tp->vlgrp->vlan_devices[vid] = NULL;
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 }
 #endif
 
@@ -10165,24 +10140,19 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
 
 	del_timer_sync(&tp->timer);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 1);
 	tg3_disable_ints(tp);
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	netif_device_detach(dev);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
 	if (err) {
-		spin_lock_irq(&tp->lock);
-		spin_lock(&tp->tx_lock);
+		tg3_full_lock(tp, 0);
 
 		tg3_init_hw(tp);
 
@@ -10192,8 +10162,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
 		netif_device_attach(dev);
 		tg3_netif_start(tp);
 
-		spin_unlock(&tp->tx_lock);
-		spin_unlock_irq(&tp->lock);
+		tg3_full_unlock(tp);
 	}
 
 	return err;
@@ -10216,20 +10185,16 @@ static int tg3_resume(struct pci_dev *pdev)
 
 	netif_device_attach(dev);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 
 	tg3_init_hw(tp);
 
 	tp->timer.expires = jiffies + tp->timer_offset;
 	add_timer(&tp->timer);
 
-	tg3_enable_ints(tp);
-
 	tg3_netif_start(tp);
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	return 0;
 }
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 993f84c93dc4..99c5f9675a56 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2006,17 +2006,31 @@ struct tg3_ethtool_stats {
 struct tg3 {
 	/* begin "general, frequently-used members" cacheline section */
 
+	/* If the IRQ handler (which runs lockless) needs to be
+	 * quiesced, the following bitmask state is used.  The
+	 * SYNC flag is set by non-IRQ context code to initiate
+	 * the quiescence.
+	 *
+	 * When the IRQ handler notices that SYNC is set, it
+	 * disables interrupts and returns.
+	 *
+	 * When all outstanding IRQ handlers have returned after
+	 * the SYNC flag has been set, the setter can be assured
+	 * that interrupts will no longer get run.
+	 *
+	 * In this way all SMP driver locks are never acquired
+	 * in hw IRQ context, only sw IRQ context or lower.
+	 */
+	unsigned int			irq_sync;
+
 	/* SMP locking strategy:
 	 *
 	 * lock: Held during all operations except TX packet
 	 *       processing.
 	 *
-	 * tx_lock: Held during tg3_start_xmit{,_4gbug} and tg3_tx
+	 * tx_lock: Held during tg3_start_xmit and tg3_tx
 	 *
-	 * If you want to shut up all asynchronous processing you must
-	 * acquire both locks, 'lock' taken before 'tx_lock'.  IRQs must
-	 * be disabled to take 'lock' but only softirq disabling is
-	 * necessary for acquisition of 'tx_lock'.
+	 * Both of these locks are to be held with BH safety.
 	 */
 	spinlock_t			lock;
 	spinlock_t			indirect_lock;
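The new comment is the whole scheme in miniature. Condensed into one place (paraphrasing the tg3.c hunks above; a sketch, not a compilable excerpt), the handshake between the quiescing side and the lockless IRQ handler is:

	/* Non-IRQ context asking for quiescence,
	 * i.e. tg3_full_lock(tp, 1) -> tg3_irq_quiesce():
	 */
	tp->irq_sync = 1;
	smp_mb();			/* publish SYNC before waiting */
	synchronize_irq(tp->pdev->irq);	/* wait out in-flight handlers */

	/* Hardware IRQ handler: ack/mask in hardware first, then do no
	 * further work once SYNC is visible.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;

	/* Re-arming in tg3_enable_ints(): clear SYNC strictly before the
	 * mailbox write that unmasks interrupts.
	 */
	tp->irq_sync = 0;
	wmb();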
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 66b94668ddd8..18c27e1e7884 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -435,7 +435,7 @@ config VENDOR_SANGOMA
 	  the driver to support.
 
 	  If you have one or more of these cards, say M to this option;
-	  and read <file:Documentation/networking/wanpipe.txt>.
+	  and read <file:Documentation/networking/wan-router.txt>.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called wanpipe.
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index fb10a2db63ad..d72e0385e4f2 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2918,7 +2918,7 @@ static int airo_thread(void *data) {
 		flush_signals(current);
 
 		/* make swsusp happy with our thread */
-		try_to_freeze(PF_FREEZE);
+		try_to_freeze();
 
 		if (test_bit(JOB_DIE, &ai->flags))
 			break;