author     Michael Buesch <mb@bu3sch.de>                  2007-09-28 10:19:03 -0400
committer  David S. Miller <davem@sunset.davemloft.net>   2007-10-10 19:54:13 -0400
commit     05b64b364822863974c0121359b01d7ba0f22205
tree       9f11ab39989780f93bdd953d798e070ada6ed069
parent     42bb4cd5ae320dd46630533fecb91b940d4468e2
[B43]: Rewrite pwork locking policy.
Implement a much simpler and more lightweight locking scheme for
the periodic work.
This also removes the last big busy-wait loop and replaces it
with a sleeping loop.
Signed-off-by: Michael Buesch <mb@bu3sch.de>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--   drivers/net/wireless/b43/main.c | 88
1 file changed, 32 insertions(+), 56 deletions(-)
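For reference, the "busy-wait loop" the commit message refers to is the MAC-suspend poll in b43_mac_suspend(). Condensed from the third hunk below (a sketch, not the full context): the loop goes from spinning with udelay() to sleeping with msleep(), which is only legal because the function now runs in process context with IRQs enabled, hence the might_sleep() and B43_WARN_ON(irqs_disabled()) added just above it.

    /* Old: busy-waits up to 10 ms with the CPU spinning. */
    for (i = 10000; i; i--) {
            tmp = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON);
            if (tmp & B43_IRQ_MAC_SUSPENDED)
                    goto out;
            udelay(1);
    }

    /* New: up to 40 iterations, sleeping at least 1 ms each time,
     * so other work can run while we wait for the MAC to suspend. */
    for (i = 40; i; i--) {
            tmp = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON);
            if (tmp & B43_IRQ_MAC_SUSPENDED)
                    goto out;
            msleep(1);
    }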
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index a603a154d496..6c80f2e2f4ee 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1976,6 +1976,7 @@ void b43_mac_enable(struct b43_wldev *dev)
 {
 	dev->mac_suspended--;
 	B43_WARN_ON(dev->mac_suspended < 0);
+	B43_WARN_ON(irqs_disabled());
 	if (dev->mac_suspended == 0) {
 		b43_write32(dev, B43_MMIO_MACCTL,
 			    b43_read32(dev, B43_MMIO_MACCTL)
@@ -1986,6 +1987,11 @@ void b43_mac_enable(struct b43_wldev *dev)
 		b43_read32(dev, B43_MMIO_MACCTL);
 		b43_read32(dev, B43_MMIO_GEN_IRQ_REASON);
 		b43_power_saving_ctl_bits(dev, 0);
+
+		/* Re-enable IRQs. */
+		spin_lock_irq(&dev->wl->irq_lock);
+		b43_interrupt_enable(dev, dev->irq_savedstate);
+		spin_unlock_irq(&dev->wl->irq_lock);
 	}
 }
 
@@ -1995,23 +2001,34 @@ void b43_mac_suspend(struct b43_wldev *dev)
 	int i;
 	u32 tmp;
 
+	might_sleep();
+	B43_WARN_ON(irqs_disabled());
 	B43_WARN_ON(dev->mac_suspended < 0);
+
 	if (dev->mac_suspended == 0) {
+		/* Mask IRQs before suspending MAC. Otherwise
+		 * the MAC stays busy and won't suspend. */
+		spin_lock_irq(&dev->wl->irq_lock);
+		tmp = b43_interrupt_disable(dev, B43_IRQ_ALL);
+		spin_unlock_irq(&dev->wl->irq_lock);
+		b43_synchronize_irq(dev);
+		dev->irq_savedstate = tmp;
+
 		b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
 		b43_write32(dev, B43_MMIO_MACCTL,
 			    b43_read32(dev, B43_MMIO_MACCTL)
 			    & ~B43_MACCTL_ENABLED);
 		/* force pci to flush the write */
 		b43_read32(dev, B43_MMIO_MACCTL);
-		for (i = 10000; i; i--) {
+		for (i = 40; i; i--) {
 			tmp = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON);
 			if (tmp & B43_IRQ_MAC_SUSPENDED)
 				goto out;
-			udelay(1);
+			msleep(1);
 		}
 		b43err(dev->wl, "MAC suspend failed\n");
 	}
 out:
 	dev->mac_suspended++;
 }
 
@@ -2349,77 +2366,36 @@ static void do_periodic_work(struct b43_wldev *dev)
 	b43_periodic_every15sec(dev);
 }
 
-/* Estimate a "Badness" value based on the periodic work
- * state-machine state. "Badness" is worse (bigger), if the
- * periodic work will take longer.
+/* Periodic work locking policy:
+ * The whole periodic work handler is protected by
+ * wl->mutex. If another lock is needed somewhere in the
+ * pwork callchain, it's aquired in-place, where it's needed.
  */
-static int estimate_periodic_work_badness(unsigned int state)
-{
-	int badness = 0;
-
-	if (state % 8 == 0) /* every 120 sec */
-		badness += 10;
-	if (state % 4 == 0) /* every 60 sec */
-		badness += 5;
-	if (state % 2 == 0) /* every 30 sec */
-		badness += 1;
-
-#define BADNESS_LIMIT	4
-	return badness;
-}
-
 static void b43_periodic_work_handler(struct work_struct *work)
 {
-	struct b43_wldev *dev =
-	    container_of(work, struct b43_wldev, periodic_work.work);
-	unsigned long flags, delay;
-	u32 savedirqs = 0;
-	int badness;
+	struct b43_wldev *dev = container_of(work, struct b43_wldev,
+					     periodic_work.work);
+	struct b43_wl *wl = dev->wl;
+	unsigned long delay;
 
-	mutex_lock(&dev->wl->mutex);
+	mutex_lock(&wl->mutex);
 
 	if (unlikely(b43_status(dev) != B43_STAT_STARTED))
 		goto out;
 	if (b43_debug(dev, B43_DBG_PWORK_STOP))
 		goto out_requeue;
 
-	badness = estimate_periodic_work_badness(dev->periodic_state);
-	if (badness > BADNESS_LIMIT) {
-		spin_lock_irqsave(&dev->wl->irq_lock, flags);
-		/* Suspend TX as we don't want to transmit packets while
-		 * we recalibrate the hardware. */
-		b43_tx_suspend(dev);
-		savedirqs = b43_interrupt_disable(dev, B43_IRQ_ALL);
-		/* Periodic work will take a long time, so we want it to
-		 * be preemtible and release the spinlock. */
-		spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
-		b43_synchronize_irq(dev);
-
-		do_periodic_work(dev);
-
-		spin_lock_irqsave(&dev->wl->irq_lock, flags);
-		b43_interrupt_enable(dev, savedirqs);
-		b43_tx_resume(dev);
-		mmiowb();
-		spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
-	} else {
-		/* Take the global driver lock. This will lock any operation. */
-		spin_lock_irqsave(&dev->wl->irq_lock, flags);
-
-		do_periodic_work(dev);
+	do_periodic_work(dev);
 
-		mmiowb();
-		spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
-	}
 	dev->periodic_state++;
 out_requeue:
 	if (b43_debug(dev, B43_DBG_PWORK_FAST))
 		delay = msecs_to_jiffies(50);
 	else
 		delay = round_jiffies(HZ * 15);
-	queue_delayed_work(dev->wl->hw->workqueue, &dev->periodic_work, delay);
+	queue_delayed_work(wl->hw->workqueue, &dev->periodic_work, delay);
 out:
-	mutex_unlock(&dev->wl->mutex);
+	mutex_unlock(&wl->mutex);
 }
 
 static void b43_periodic_tasks_setup(struct b43_wldev *dev)
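In short, under the new policy the periodic work handler itself takes only wl->mutex; the IRQ spinlock is no longer held across do_periodic_work(). A condensed view of the resulting handler (a sketch with the status checks, debug paths, and fast-requeue branch from the hunk above omitted):

    static void b43_periodic_work_handler(struct work_struct *work)
    {
            struct b43_wldev *dev = container_of(work, struct b43_wldev,
                                                 periodic_work.work);
            struct b43_wl *wl = dev->wl;

            mutex_lock(&wl->mutex);         /* the only lock held across the handler */
            do_periodic_work(dev);          /* callees such as b43_mac_suspend() take
                                             * wl->irq_lock themselves where needed */
            dev->periodic_state++;
            queue_delayed_work(wl->hw->workqueue, &dev->periodic_work,
                               round_jiffies(HZ * 15));
            mutex_unlock(&wl->mutex);
    }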