author     David S. Miller <davem@davemloft.net>   2010-09-10 01:27:33 -0400
committer  David S. Miller <davem@davemloft.net>   2010-09-10 01:27:33 -0400
commit     e548833df83c3554229eff0672900bfe958b45fd (patch)
tree       85efc4a76dc356593d6d394776aeb845dc580fb6 /drivers/net/3c59x.c
parent     cbd9da7be869f676afc204e1a664163778c770bd (diff)
parent     053d8f6622701f849fda2ca2c9ae596c13599ba9 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
net/mac80211/main.c
Diffstat (limited to 'drivers/net/3c59x.c')
-rw-r--r--	drivers/net/3c59x.c	30
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 7a01588fb6fb..e31a6d1919c6 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -633,7 +633,8 @@ struct vortex_private {
 	open:1,
 	medialock:1,
 	must_free_region:1,	/* Flag: if zero, Cardbus owns the I/O region */
-	large_frames:1;		/* accept large frames */
+	large_frames:1,		/* accept large frames */
+	handling_irq:1;		/* private in_irq indicator */
 	int drv_flags;
 	u16 status_enable;
 	u16 intr_enable;
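
The semicolon that used to terminate large_frames:1 becomes a comma, so the new handling_irq:1 bit joins the same bitfield declaration instead of starting a separate member. Read as post-patch source rather than as a delta, the tail of that declaration is roughly the following (a sketch reconstructed from the hunk; earlier flag bits elided):

	open:1,
	medialock:1,
	must_free_region:1,	/* Flag: if zero, Cardbus owns the I/O region */
	large_frames:1,		/* accept large frames */
	handling_irq:1;		/* private in_irq indicator */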
@@ -646,7 +647,7 @@ struct vortex_private {
 	u16 io_size;			/* Size of PCI region (for release_region) */
 
 	/* Serialises access to hardware other than MII and variables below.
-	 * The lock hierarchy is rtnl_lock > lock > mii_lock > window_lock. */
+	 * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */
 	spinlock_t lock;
 
 	spinlock_t mii_lock;		/* Serialises access to MII */
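
The reworded comment flattens the documented hierarchy: vp->lock and vp->mii_lock are now siblings under rtnl_lock rather than nested, with window_lock still innermost. In practice that means MII access no longer implies holding vp->lock, which is what the vortex_ioctl() hunks at the end of this diff rely on. A minimal sketch of the two acquisition patterns the new comment describes (illustrative only; lock flavours such as _irqsave/_bh are omitted):

	/* hardware path: rtnl_lock > vp->lock > vp->window_lock */
	spin_lock(&vp->lock);
	spin_lock(&vp->window_lock);
	/* ... windowed register access ... */
	spin_unlock(&vp->window_lock);
	spin_unlock(&vp->lock);

	/* MII path: rtnl_lock > vp->mii_lock, without vp->lock */
	spin_lock(&vp->mii_lock);
	/* ... MII access ... */
	spin_unlock(&vp->mii_lock);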
@@ -1993,10 +1994,9 @@ vortex_error(struct net_device *dev, int status)
 		}
 	}
 
-	if (status & RxEarly) {				/* Rx early is unused. */
-		vortex_rx(dev);
+	if (status & RxEarly)				/* Rx early is unused. */
 		iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);
-	}
+
 	if (status & StatsFull) {			/* Empty statistics. */
 		static int DoneDidThat;
 		if (vortex_debug > 4)
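
With the vortex_rx() call removed, vortex_error() now only acknowledges the (unused) RxEarly interrupt; the receive work it used to trigger is done in vortex_interrupt() before vortex_error() is called, as the -2288/+2297 hunk below shows. After the patch the branch reads simply:

	if (status & RxEarly)				/* Rx early is unused. */
		iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);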
@@ -2133,6 +2133,15 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			   dev->name, vp->cur_tx);
 	}
 
+	/*
+	 * We can't allow a recursion from our interrupt handler back into the
+	 * tx routine, as they take the same spin lock, and that causes
+	 * deadlock. Just return NETDEV_TX_BUSY and let the stack try again in
+	 * a bit
+	 */
+	if (vp->handling_irq)
+		return NETDEV_TX_BUSY;
+
 	if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
 		if (vortex_debug > 0)
 			pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n",
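
The added comment and test are one half of a re-entrancy guard: boomerang_start_xmit() and boomerang_interrupt() take the same vp->lock, so reaching the tx routine from inside the interrupt handler would deadlock on the second acquisition, and returning NETDEV_TX_BUSY instead lets the networking core requeue the skb and retry shortly. The other half, setting and clearing vp->handling_irq, appears in the boomerang_interrupt() hunks below. Condensed into one place, the pattern the combined hunks implement is roughly (a sketch, unrelated processing elided):

	/* boomerang_interrupt(): mark the handler active while vp->lock is held */
	spin_lock(&vp->lock);
	vp->handling_irq = 1;
	/* ... tx/rx completion handling that could otherwise re-enter the tx path ... */
handler_exit:
	vp->handling_irq = 0;
	spin_unlock(&vp->lock);

	/* boomerang_start_xmit(): refuse to recurse while the handler owns the lock */
	if (vp->handling_irq)
		return NETDEV_TX_BUSY;	/* the stack retries the skb later */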
@@ -2288,7 +2297,12 @@ vortex_interrupt(int irq, void *dev_id)
 		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
 			if (status == 0xffff)
 				break;
+			if (status & RxEarly)
+				vortex_rx(dev);
+			spin_unlock(&vp->window_lock);
 			vortex_error(dev, status);
+			spin_lock(&vp->window_lock);
+			window_set(vp, 7);
 		}
 
 		if (--work_done < 0) {
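
vortex_interrupt() now performs the RxEarly receive work itself (the call removed from vortex_error() above) and drops window_lock around the vortex_error() call, re-taking it and re-selecting window 7 before continuing, presumably because vortex_error() can leave a different register window selected. Read as post-patch source, the guarded block is roughly:

	if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
		if (status == 0xffff)
			break;
		if (status & RxEarly)
			vortex_rx(dev);		/* receive work moved out of vortex_error() */
		spin_unlock(&vp->window_lock);	/* vortex_error() runs without window_lock */
		vortex_error(dev, status);
		spin_lock(&vp->window_lock);
		window_set(vp, 7);		/* restore the window this handler uses */
	}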
@@ -2335,11 +2349,13 @@ boomerang_interrupt(int irq, void *dev_id)
 
 	ioaddr = vp->ioaddr;
 
+
 	/*
 	 * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
 	 * and boomerang_start_xmit
 	 */
 	spin_lock(&vp->lock);
+	vp->handling_irq = 1;
 
 	status = ioread16(ioaddr + EL3_STATUS);
 
@@ -2447,6 +2463,7 @@ boomerang_interrupt(int irq, void *dev_id)
 		pr_debug("%s: exiting interrupt, status %4.4x.\n",
 			   dev->name, status);
 handler_exit:
+	vp->handling_irq = 0;
 	spin_unlock(&vp->lock);
 	return IRQ_HANDLED;
 }
@@ -2971,7 +2988,6 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 	int err;
 	struct vortex_private *vp = netdev_priv(dev);
-	unsigned long flags;
 	pci_power_t state = 0;
 
 	if(VORTEX_PCI(vp))
@@ -2981,9 +2997,7 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 
 	if(state != 0)
 		pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
-	spin_lock_irqsave(&vp->lock, flags);
 	err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
-	spin_unlock_irqrestore(&vp->lock, flags);
 	if(state != 0)
 		pci_set_power_state(VORTEX_PCI(vp), state);
 
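
Both ioctl hunks drop vp->lock (and the now-unused flags variable) from around generic_mii_ioctl(), consistent with the revised hierarchy comment near the top of this diff: MII access is serialised by vp->mii_lock rather than vp->lock. A minimal sketch of what that implies for the MDIO accessor that generic_mii_ioctl() ultimately calls, assuming it takes vp->mii_lock internally; the lock flavour and the body are placeholders, not the driver's actual bit-banging code:

	static int mdio_read(struct net_device *dev, int phy_id, int location)
	{
		struct vortex_private *vp = netdev_priv(dev);
		int val = 0;

		spin_lock_bh(&vp->mii_lock);	/* MII serialisation, independent of vp->lock */
		/* ... read the MII management register for (phy_id, location) into val ... */
		spin_unlock_bh(&vp->mii_lock);

		return val;
	}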