author	David S. Miller <davem@davemloft.net>	2005-06-24 23:18:35 -0400
committer	David S. Miller <davem@davemloft.net>	2005-06-24 23:18:35 -0400
commit	f47c11eeccc8820010992eb32dbe7370a08f8bd3 (patch)
tree	7cd6308a1796fae227c61208545593b79b12f21a
parent	cd024c8baf9756759c57f0a19be639da8d3d4f8c (diff)
[TG3]: Eliminate all hw IRQ handler spinlocks.
Move all driver spinlocks to be taken at sw IRQ context only.

This fixes the skb_copy() we were doing with hw IRQs disabled
(which is illegal and triggers a BUG() with HIGHMEM enabled).
It also simplifies the locking all over the driver tremendously.

We accomplish this feat by creating a special sequence to
synchronize with the hw IRQ handler using a binary state and
synchronize_irq().  This idea is from Herbert Xu.

Thanks to Michael Chan for helping to track down all of the
race conditions in initial versions of this code.

Signed-off-by: David S. Miller <davem@davemloft.net>
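[Editor's note: the handshake reduces to a few lines. The sketch below condenses the new tg3_irq_quiesce()/tg3_irq_sync() helpers and the early-return check added to the interrupt handlers in the diff that follows; the struct and the handler body are illustrative stand-ins rather than the full driver code, and the three-argument ISR signature matches the 2.6.12-era API this patch targets.

#include <linux/interrupt.h>	/* synchronize_irq(), irqreturn_t */

/* Illustrative stand-in for the relevant tg3 fields. */
struct tg3_sketch {
	unsigned int irq_sync;	/* nonzero: IRQ handler must stand down */
	unsigned int irq;	/* tp->pdev->irq in the real driver */
};

/* sw IRQ/process context: publish the flag, then wait until any
 * handler instance already executing has returned.
 */
static void irq_quiesce(struct tg3_sketch *tp)
{
	tp->irq_sync = 1;
	smp_mb();		/* flag must be visible before we wait */
	synchronize_irq(tp->irq);
	/* From here on, no hw IRQ handler touches driver state. */
}

/* hw IRQ context: ACK the chip first (the interrupt mailbox write in
 * the real driver), then bail out if a quiesce is pending, so the
 * handler never needs to take any driver spinlock.
 */
static irqreturn_t isr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct tg3_sketch *tp = dev_id;

	/* ...ACK/mask the chip via the interrupt mailbox... */
	if (tp->irq_sync)
		return IRQ_HANDLED;
	/* ...normal work: clear SD_STATUS_UPDATED, schedule NAPI poll... */
	return IRQ_HANDLED;
}

The ordering matters: per the driver's own comment, writing any value to intr-mbox-0 clears PCI INTA# and disables chip interrupts, so a quiesced handler leaves the chip silenced until tg3_enable_ints() clears irq_sync and re-arms it.]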
-rw-r--r--	drivers/net/tg3.c	304
-rw-r--r--	drivers/net/tg3.h	24
2 files changed, 149 insertions(+), 179 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index fef1d087107c..8b8aa2ad578b 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -337,12 +337,10 @@ static struct {
 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
 {
 	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&tp->indirect_lock, flags);
+		spin_lock_bh(&tp->indirect_lock);
 		pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
 		pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
-		spin_unlock_irqrestore(&tp->indirect_lock, flags);
+		spin_unlock_bh(&tp->indirect_lock);
 	} else {
 		writel(val, tp->regs + off);
 		if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
@@ -353,12 +351,10 @@ static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
 {
 	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&tp->indirect_lock, flags);
+		spin_lock_bh(&tp->indirect_lock);
 		pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
 		pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
-		spin_unlock_irqrestore(&tp->indirect_lock, flags);
+		spin_unlock_bh(&tp->indirect_lock);
 	} else {
 		void __iomem *dest = tp->regs + off;
 		writel(val, dest);
@@ -398,28 +394,24 @@ static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
 
 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&tp->indirect_lock, flags);
+	spin_lock_bh(&tp->indirect_lock);
 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
 
 	/* Always leave this as zero. */
 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
-	spin_unlock_irqrestore(&tp->indirect_lock, flags);
+	spin_unlock_bh(&tp->indirect_lock);
 }
 
 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&tp->indirect_lock, flags);
+	spin_lock_bh(&tp->indirect_lock);
 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
 	pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
 
 	/* Always leave this as zero. */
 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
-	spin_unlock_irqrestore(&tp->indirect_lock, flags);
+	spin_unlock_bh(&tp->indirect_lock);
 }
 
 static void tg3_disable_ints(struct tg3 *tp)
@@ -443,7 +435,7 @@ static void tg3_enable_ints(struct tg3 *tp)
 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
 		     (tp->last_tag << 24));
 	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
-
+	tp->irq_sync = 0;
 	tg3_cond_int(tp);
 }
 
@@ -504,7 +496,8 @@ static inline void tg3_netif_start(struct tg3 *tp)
 	 * (such as after tg3_init_hw)
 	 */
 	netif_poll_enable(tp->dev);
-	tg3_cond_int(tp);
+	tp->hw_status->status |= SD_STATUS_UPDATED;
+	tg3_enable_ints(tp);
 }
 
 static void tg3_switch_clocks(struct tg3 *tp)
@@ -2578,7 +2571,7 @@ static void tg3_tx(struct tg3 *tp)
 			sw_idx = NEXT_TX(sw_idx);
 		}
 
-		dev_kfree_skb_irq(skb);
+		dev_kfree_skb(skb);
 	}
 
 	tp->tx_cons = sw_idx;
@@ -2884,11 +2877,8 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 {
 	struct tg3 *tp = netdev_priv(netdev);
 	struct tg3_hw_status *sblk = tp->hw_status;
-	unsigned long flags;
 	int done;
 
-	spin_lock_irqsave(&tp->lock, flags);
-
 	/* handle link change and other phy events */
 	if (!(tp->tg3_flags &
 	      (TG3_FLAG_USE_LINKCHG_REG |
@@ -2896,7 +2886,9 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 		if (sblk->status & SD_STATUS_LINK_CHG) {
 			sblk->status = SD_STATUS_UPDATED |
 				(sblk->status & ~SD_STATUS_LINK_CHG);
+			spin_lock(&tp->lock);
 			tg3_setup_phy(tp, 0);
+			spin_unlock(&tp->lock);
 		}
 	}
 
@@ -2907,8 +2899,6 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 		spin_unlock(&tp->tx_lock);
 	}
 
-	spin_unlock_irqrestore(&tp->lock, flags);
-
 	/* run RX thread, within the bounds set by NAPI.
 	 * All RX "locking" is done by ensuring outside
 	 * code synchronizes with dev->poll()
@@ -2934,15 +2924,49 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 	/* if no more work, tell net stack and NIC we're done */
 	done = !tg3_has_work(tp);
 	if (done) {
-		spin_lock_irqsave(&tp->lock, flags);
-		__netif_rx_complete(netdev);
+		spin_lock(&tp->lock);
+		netif_rx_complete(netdev);
 		tg3_restart_ints(tp);
-		spin_unlock_irqrestore(&tp->lock, flags);
+		spin_unlock(&tp->lock);
 	}
 
 	return (done ? 0 : 1);
 }
 
+static void tg3_irq_quiesce(struct tg3 *tp)
+{
+	BUG_ON(tp->irq_sync);
+
+	tp->irq_sync = 1;
+	smp_mb();
+
+	synchronize_irq(tp->pdev->irq);
+}
+
+static inline int tg3_irq_sync(struct tg3 *tp)
+{
+	return tp->irq_sync;
+}
+
+/* Fully shutdown all tg3 driver activity elsewhere in the system.
+ * If irq_sync is non-zero, then the IRQ handler must be synchronized
+ * with as well.  Most of the time, this is not necessary except when
+ * shutting down the device.
+ */
+static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
+{
+	if (irq_sync)
+		tg3_irq_quiesce(tp);
+	spin_lock_bh(&tp->lock);
+	spin_lock(&tp->tx_lock);
+}
+
+static inline void tg3_full_unlock(struct tg3 *tp)
+{
+	spin_unlock(&tp->tx_lock);
+	spin_unlock_bh(&tp->lock);
+}
+
 /* MSI ISR - No need to check for interrupt sharing and no need to
  * flush status block and interrupt mailbox.  PCI ordering rules
  * guarantee that MSI will arrive after the status block.
@@ -2952,9 +2976,6 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
 	struct net_device *dev = dev_id;
 	struct tg3 *tp = netdev_priv(dev);
 	struct tg3_hw_status *sblk = tp->hw_status;
-	unsigned long flags;
-
-	spin_lock_irqsave(&tp->lock, flags);
 
 	/*
 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
@@ -2966,6 +2987,8 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
 	tp->last_tag = sblk->status_tag;
 	rmb();
+	if (tg3_irq_sync(tp))
+		goto out;
 	sblk->status &= ~SD_STATUS_UPDATED;
 	if (likely(tg3_has_work(tp)))
 		netif_rx_schedule(dev);		/* schedule NAPI poll */
@@ -2974,9 +2997,7 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
 		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
 			     tp->last_tag << 24);
 	}
-
-	spin_unlock_irqrestore(&tp->lock, flags);
-
+out:
 	return IRQ_RETVAL(1);
 }
 
@@ -2985,11 +3006,8 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	struct net_device *dev = dev_id;
 	struct tg3 *tp = netdev_priv(dev);
 	struct tg3_hw_status *sblk = tp->hw_status;
-	unsigned long flags;
 	unsigned int handled = 1;
 
-	spin_lock_irqsave(&tp->lock, flags);
-
 	/* In INTx mode, it is possible for the interrupt to arrive at
 	 * the CPU before the status block posted prior to the interrupt.
 	 * Reading the PCI State register will confirm whether the
@@ -3006,6 +3024,8 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 		 */
 		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
 			     0x00000001);
+		if (tg3_irq_sync(tp))
+			goto out;
 		sblk->status &= ~SD_STATUS_UPDATED;
 		if (likely(tg3_has_work(tp)))
 			netif_rx_schedule(dev);		/* schedule NAPI poll */
@@ -3020,9 +3040,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	} else {	/* shared interrupt */
 		handled = 0;
 	}
-
-	spin_unlock_irqrestore(&tp->lock, flags);
-
+out:
 	return IRQ_RETVAL(handled);
 }
 
@@ -3031,11 +3049,8 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *r
 	struct net_device *dev = dev_id;
 	struct tg3 *tp = netdev_priv(dev);
 	struct tg3_hw_status *sblk = tp->hw_status;
-	unsigned long flags;
 	unsigned int handled = 1;
 
-	spin_lock_irqsave(&tp->lock, flags);
-
 	/* In INTx mode, it is possible for the interrupt to arrive at
 	 * the CPU before the status block posted prior to the interrupt.
 	 * Reading the PCI State register will confirm whether the
@@ -3054,6 +3069,8 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *r
 			     0x00000001);
 		tp->last_tag = sblk->status_tag;
 		rmb();
+		if (tg3_irq_sync(tp))
+			goto out;
 		sblk->status &= ~SD_STATUS_UPDATED;
 		if (likely(tg3_has_work(tp)))
 			netif_rx_schedule(dev);		/* schedule NAPI poll */
@@ -3068,9 +3085,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *r
 	} else {	/* shared interrupt */
 		handled = 0;
 	}
-
-	spin_unlock_irqrestore(&tp->lock, flags);
-
+out:
 	return IRQ_RETVAL(handled);
 }
 
@@ -3109,8 +3124,7 @@ static void tg3_reset_task(void *_data)
 
 	tg3_netif_stop(tp);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 1);
 
 	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
 	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
@@ -3120,8 +3134,7 @@ static void tg3_reset_task(void *_data)
 
 	tg3_netif_start(tp);
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	if (restart_timer)
 		mod_timer(&tp->timer, jiffies + 1);
@@ -3227,39 +3240,21 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned int i;
 	u32 len, entry, base_flags, mss;
 	int would_hit_hwbug;
-	unsigned long flags;
 
 	len = skb_headlen(skb);
 
 	/* No BH disabling for tx_lock here.  We are running in BH disabled
 	 * context and TX reclaim runs via tp->poll inside of a software
-	 * interrupt.  Rejoice!
-	 *
-	 * Actually, things are not so simple.  If we are to take a hw
-	 * IRQ here, we can deadlock, consider:
-	 *
-	 *       CPU1                CPU2
-	 *   tg3_start_xmit
-	 *   take tp->tx_lock
-	 *                       tg3_timer
-	 *                       take tp->lock
-	 *   tg3_interrupt
-	 *   spin on tp->lock
-	 *                       spin on tp->tx_lock
-	 *
-	 * So we really do need to disable interrupts when taking
-	 * tx_lock here.
+	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
+	 * no IRQ context deadlocks to worry about either.  Rejoice!
 	 */
-	local_irq_save(flags);
-	if (!spin_trylock(&tp->tx_lock)) {
-		local_irq_restore(flags);
+	if (!spin_trylock(&tp->tx_lock))
 		return NETDEV_TX_LOCKED;
-	}
 
 	/* This is a hard error, log it. */
 	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		netif_stop_queue(dev);
-		spin_unlock_irqrestore(&tp->tx_lock, flags);
+		spin_unlock(&tp->tx_lock);
 		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
 		       dev->name);
 		return NETDEV_TX_BUSY;
@@ -3424,7 +3419,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 out_unlock:
 	mmiowb();
-	spin_unlock_irqrestore(&tp->tx_lock, flags);
+	spin_unlock(&tp->tx_lock);
 
 	dev->trans_start = jiffies;
 
@@ -3458,8 +3453,8 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
 	}
 
 	tg3_netif_stop(tp);
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+
+	tg3_full_lock(tp, 1);
 
 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 
@@ -3469,8 +3464,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
 
 	tg3_netif_start(tp);
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	return 0;
 }
@@ -5091,9 +5085,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
 
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 
-	spin_lock_irq(&tp->lock);
+	spin_lock_bh(&tp->lock);
 	__tg3_set_mac_addr(tp);
-	spin_unlock_irq(&tp->lock);
+	spin_unlock_bh(&tp->lock);
 
 	return 0;
 }
@@ -5805,10 +5799,8 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
 static void tg3_timer(unsigned long __opaque)
 {
 	struct tg3 *tp = (struct tg3 *) __opaque;
-	unsigned long flags;
 
-	spin_lock_irqsave(&tp->lock, flags);
-	spin_lock(&tp->tx_lock);
+	spin_lock(&tp->lock);
 
 	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
 		/* All of this garbage is because when using non-tagged
@@ -5825,8 +5817,7 @@ static void tg3_timer(unsigned long __opaque)
 
 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
 			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
-			spin_unlock(&tp->tx_lock);
-			spin_unlock_irqrestore(&tp->lock, flags);
+			spin_unlock(&tp->lock);
 			schedule_work(&tp->reset_task);
 			return;
 		}
@@ -5894,8 +5885,7 @@ static void tg3_timer(unsigned long __opaque)
 			tp->asf_counter = tp->asf_multiplier;
 	}
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irqrestore(&tp->lock, flags);
+	spin_unlock(&tp->lock);
 
 	tp->timer.expires = jiffies + tp->timer_offset;
 	add_timer(&tp->timer);
@@ -6010,14 +6000,12 @@ static int tg3_test_msi(struct tg3 *tp)
 	/* Need to reset the chip because the MSI cycle may have terminated
 	 * with Master Abort.
 	 */
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 1);
 
 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 	err = tg3_init_hw(tp);
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	if (err)
 		free_irq(tp->pdev->irq, dev);
@@ -6030,14 +6018,12 @@ static int tg3_open(struct net_device *dev)
 	struct tg3 *tp = netdev_priv(dev);
 	int err;
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 
 	tg3_disable_ints(tp);
 	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	/* The placement of this call is tied
 	 * to the setup and use of Host TX descriptors.
@@ -6084,8 +6070,7 @@ static int tg3_open(struct net_device *dev)
 		return err;
 	}
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 
 	err = tg3_init_hw(tp);
 	if (err) {
@@ -6109,8 +6094,7 @@ static int tg3_open(struct net_device *dev)
 		tp->timer.function = tg3_timer;
 	}
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	if (err) {
 		free_irq(tp->pdev->irq, dev);
@@ -6126,8 +6110,7 @@ static int tg3_open(struct net_device *dev)
 		err = tg3_test_msi(tp);
 
 		if (err) {
-			spin_lock_irq(&tp->lock);
-			spin_lock(&tp->tx_lock);
+			tg3_full_lock(tp, 0);
 
 			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
 				pci_disable_msi(tp->pdev);
@@ -6137,22 +6120,19 @@ static int tg3_open(struct net_device *dev)
 			tg3_free_rings(tp);
 			tg3_free_consistent(tp);
 
-			spin_unlock(&tp->tx_lock);
-			spin_unlock_irq(&tp->lock);
+			tg3_full_unlock(tp);
 
 			return err;
 		}
 	}
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 
 	add_timer(&tp->timer);
 	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
 	tg3_enable_ints(tp);
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	netif_start_queue(dev);
 
@@ -6398,8 +6378,7 @@ static int tg3_close(struct net_device *dev)
 
 	del_timer_sync(&tp->timer);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 1);
 #if 0
 	tg3_dump_state(tp);
 #endif
@@ -6413,8 +6392,7 @@ static int tg3_close(struct net_device *dev)
 		       TG3_FLAG_GOT_SERDES_FLOWCTL);
 	netif_carrier_off(tp->dev);
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	free_irq(tp->pdev->irq, dev);
 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
@@ -6451,16 +6429,15 @@ static unsigned long calc_crc_errors(struct tg3 *tp)
 	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
-		unsigned long flags;
 		u32 val;
 
-		spin_lock_irqsave(&tp->lock, flags);
+		spin_lock_bh(&tp->lock);
 		if (!tg3_readphy(tp, 0x1e, &val)) {
 			tg3_writephy(tp, 0x1e, val | 0x8000);
 			tg3_readphy(tp, 0x14, &val);
 		} else
 			val = 0;
-		spin_unlock_irqrestore(&tp->lock, flags);
+		spin_unlock_bh(&tp->lock);
 
 		tp->phy_crc_errors += val;
 
@@ -6722,11 +6699,9 @@ static void tg3_set_rx_mode(struct net_device *dev)
 {
 	struct tg3 *tp = netdev_priv(dev);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 	__tg3_set_rx_mode(dev);
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 }
 
 #define TG3_REGDUMP_LEN		(32 * 1024)
@@ -6748,8 +6723,7 @@ static void tg3_get_regs(struct net_device *dev,
 
 	memset(p, 0, TG3_REGDUMP_LEN);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 
 #define __GET_REG32(reg)	(*(p)++ = tr32(reg))
 #define GET_REG32_LOOP(base,len)		\
@@ -6799,8 +6773,7 @@ do { p = (u32 *)(orig_p + (reg)); \
 #undef GET_REG32_LOOP
 #undef GET_REG32_1
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 }
 
 static int tg3_get_eeprom_len(struct net_device *dev)
@@ -6976,8 +6949,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 			return -EINVAL;
 	}
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 
 	tp->link_config.autoneg = cmd->autoneg;
 	if (cmd->autoneg == AUTONEG_ENABLE) {
@@ -6993,8 +6965,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	if (netif_running(dev))
 		tg3_setup_phy(tp, 1);
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	return 0;
 }
@@ -7030,12 +7001,12 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	    !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
 		return -EINVAL;
 
-	spin_lock_irq(&tp->lock);
+	spin_lock_bh(&tp->lock);
 	if (wol->wolopts & WAKE_MAGIC)
 		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
 	else
 		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
-	spin_unlock_irq(&tp->lock);
+	spin_unlock_bh(&tp->lock);
 
 	return 0;
 }
@@ -7075,7 +7046,7 @@ static int tg3_nway_reset(struct net_device *dev)
 	if (!netif_running(dev))
 		return -EAGAIN;
 
-	spin_lock_irq(&tp->lock);
+	spin_lock_bh(&tp->lock);
 	r = -EINVAL;
 	tg3_readphy(tp, MII_BMCR, &bmcr);
 	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
@@ -7083,7 +7054,7 @@ static int tg3_nway_reset(struct net_device *dev)
 		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
 		r = 0;
 	}
-	spin_unlock_irq(&tp->lock);
+	spin_unlock_bh(&tp->lock);
 
 	return r;
 }
@@ -7114,8 +7085,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
 	if (netif_running(dev))
 		tg3_netif_stop(tp);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 
 	tp->rx_pending = ering->rx_pending;
 
@@ -7131,8 +7101,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
 		tg3_netif_start(tp);
 	}
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	return 0;
 }
@@ -7153,8 +7122,8 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 	if (netif_running(dev))
 		tg3_netif_stop(tp);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 1);
+
 	if (epause->autoneg)
 		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
 	else
@@ -7173,8 +7142,8 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 		tg3_init_hw(tp);
 		tg3_netif_start(tp);
 	}
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+
+	tg3_full_unlock(tp);
 
 	return 0;
 }
@@ -7195,12 +7164,12 @@ static int tg3_set_rx_csum(struct net_device *dev, u32 data)
 		return 0;
 	}
 
-	spin_lock_irq(&tp->lock);
+	spin_lock_bh(&tp->lock);
 	if (data)
 		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
 	else
 		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
-	spin_unlock_irq(&tp->lock);
+	spin_unlock_bh(&tp->lock);
 
 	return 0;
 }
@@ -7722,8 +7691,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
 		if (netif_running(dev))
 			tg3_netif_stop(tp);
 
-		spin_lock_irq(&tp->lock);
-		spin_lock(&tp->tx_lock);
+		tg3_full_lock(tp, 1);
 
 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
 		tg3_nvram_lock(tp);
@@ -7745,14 +7713,14 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
 			data[4] = 1;
 		}
 
-		spin_unlock(&tp->tx_lock);
-		spin_unlock_irq(&tp->lock);
+		tg3_full_unlock(tp);
+
 		if (tg3_test_interrupt(tp) != 0) {
 			etest->flags |= ETH_TEST_FL_FAILED;
 			data[5] = 1;
 		}
-		spin_lock_irq(&tp->lock);
-		spin_lock(&tp->tx_lock);
+
+		tg3_full_lock(tp, 0);
 
 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 		if (netif_running(dev)) {
@@ -7760,8 +7728,8 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
 			tg3_init_hw(tp);
 			tg3_netif_start(tp);
 		}
-		spin_unlock(&tp->tx_lock);
-		spin_unlock_irq(&tp->lock);
+
+		tg3_full_unlock(tp);
 	}
 }
 
@@ -7782,9 +7750,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
 			break;			/* We have no PHY */
 
-		spin_lock_irq(&tp->lock);
+		spin_lock_bh(&tp->lock);
 		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
-		spin_unlock_irq(&tp->lock);
+		spin_unlock_bh(&tp->lock);
 
 		data->val_out = mii_regval;
 
@@ -7798,9 +7766,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 
-		spin_lock_irq(&tp->lock);
+		spin_lock_bh(&tp->lock);
 		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
-		spin_unlock_irq(&tp->lock);
+		spin_unlock_bh(&tp->lock);
 
 		return err;
 
@@ -7816,28 +7784,24 @@ static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
 {
 	struct tg3 *tp = netdev_priv(dev);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 
 	tp->vlgrp = grp;
 
 	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
 	__tg3_set_rx_mode(dev);
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 }
 
 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
 	struct tg3 *tp = netdev_priv(dev);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 	if (tp->vlgrp)
 		tp->vlgrp->vlan_devices[vid] = NULL;
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 }
 #endif
 
@@ -10168,24 +10132,19 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
 
 	del_timer_sync(&tp->timer);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 1);
 	tg3_disable_ints(tp);
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	netif_device_detach(dev);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
 	if (err) {
-		spin_lock_irq(&tp->lock);
-		spin_lock(&tp->tx_lock);
+		tg3_full_lock(tp, 0);
 
 		tg3_init_hw(tp);
 
@@ -10195,8 +10154,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
 		netif_device_attach(dev);
 		tg3_netif_start(tp);
 
-		spin_unlock(&tp->tx_lock);
-		spin_unlock_irq(&tp->lock);
+		tg3_full_unlock(tp);
 	}
 
 	return err;
@@ -10219,8 +10177,7 @@ static int tg3_resume(struct pci_dev *pdev)
 
 	netif_device_attach(dev);
 
-	spin_lock_irq(&tp->lock);
-	spin_lock(&tp->tx_lock);
+	tg3_full_lock(tp, 0);
 
 	tg3_init_hw(tp);
 
@@ -10231,8 +10188,7 @@ static int tg3_resume(struct pci_dev *pdev)
 
 	tg3_netif_start(tp);
 
-	spin_unlock(&tp->tx_lock);
-	spin_unlock_irq(&tp->lock);
+	tg3_full_unlock(tp);
 
 	return 0;
 }
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 993f84c93dc4..99c5f9675a56 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2006,17 +2006,31 @@ struct tg3_ethtool_stats {
 struct tg3 {
 	/* begin "general, frequently-used members" cacheline section */
 
+	/* If the IRQ handler (which runs lockless) needs to be
+	 * quiesced, the following bitmask state is used.  The
+	 * SYNC flag is set by non-IRQ context code to initiate
+	 * the quiescence.
+	 *
+	 * When the IRQ handler notices that SYNC is set, it
+	 * disables interrupts and returns.
+	 *
+	 * When all outstanding IRQ handlers have returned after
+	 * the SYNC flag has been set, the setter can be assured
+	 * that interrupts will no longer get run.
+	 *
+	 * In this way all SMP driver locks are never acquired
+	 * in hw IRQ context, only sw IRQ context or lower.
+	 */
+	unsigned int			irq_sync;
+
 	/* SMP locking strategy:
 	 *
 	 * lock: Held during all operations except TX packet
 	 *       processing.
 	 *
-	 * tx_lock: Held during tg3_start_xmit{,_4gbug} and tg3_tx
+	 * tx_lock: Held during tg3_start_xmit and tg3_tx
 	 *
-	 * If you want to shut up all asynchronous processing you must
-	 * acquire both locks, 'lock' taken before 'tx_lock'.  IRQs must
-	 * be disabled to take 'lock' but only softirq disabling is
-	 * necessary for acquisition of 'tx_lock'.
+	 * Both of these locks are to be held with BH safety.
 	 */
 	spinlock_t			lock;
 	spinlock_t			indirect_lock;
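[Editor's footnote: the updated comment above pairs with the tg3_full_lock()/tg3_full_unlock() helpers added in tg3.c. A minimal sketch of that lock discipline, assuming an illustrative struct in place of the real struct tg3:

#include <linux/spinlock.h>

/* Illustrative stand-in for the two tg3 driver locks. */
struct tg3_locks {
	spinlock_t lock;	/* everything except TX packet processing */
	spinlock_t tx_lock;	/* tg3_start_xmit() and tg3_tx() only */
};

/* Full exclusion, mirroring tg3_full_lock(tp, 0): disable BHs first
 * (the NAPI poll and the timer run in sw IRQ context, and the hw IRQ
 * handler is lockless after this patch), then take 'lock' strictly
 * before 'tx_lock'.
 */
static void full_lock(struct tg3_locks *tp)
{
	spin_lock_bh(&tp->lock);
	spin_lock(&tp->tx_lock);
}

/* Release in exactly the reverse order, as tg3_full_unlock() does. */
static void full_unlock(struct tg3_locks *tp)
{
	spin_unlock(&tp->tx_lock);
	spin_unlock_bh(&tp->lock);
}

Callers in the patch that are about to halt or reset the chip pass irq_sync=1 so the quiesce handshake runs before the locks are taken (tg3_close(), tg3_reset_task(), tg3_change_mtu(), tg3_self_test(), tg3_suspend()); pure configuration paths such as tg3_open(), tg3_set_rx_mode() and most ethtool setters pass 0 and rely on BH exclusion alone.]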