author     Michael Buesch <mb@bu3sch.de>                 2006-06-05 14:24:21 -0400
committer  John W. Linville <linville@tuxdriver.com>     2006-06-15 15:48:13 -0400
commit     91769e7dd9cef7988dc4280f74ed168351beb5b8
tree       ef854d83ec881882b94a3e88b580e2468f62bac1 /drivers/net/wireless/bcm43xx/bcm43xx_main.c
parent     78ff56a06edc3407996173daf63e48f6b90c7062
[PATCH] bcm43xx: preemptible periodic work
Make the heavy periodic work preemptible to avoid disabling
local IRQs for several msecs.
Signed-off-by: Michael Buesch <mb@bu3sch.de>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
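
The idea, condensed from the patch below: before running the periodic work, the handler estimates how expensive this round will be. Cheap rounds (15/30-second tasks only) keep the old irqsafe spinlock, while expensive rounds (60/120-second tasks) first quiesce the device and then run preemptibly under the sleeping lock, so local IRQs stay enabled for the long part. A simplified sketch of that control flow, with the PIO handling and the restore path elided (all names are taken from the patch itself, this is not the complete handler):

    badness = estimate_periodic_work_badness(bcm->periodic_state);
    if (badness > BADNESS_LIMIT) {
            /* Heavy round: stop TX, mask device IRQs, wait for the
             * IRQ handler and tasklet to finish, then take the
             * sleeping lock so the work can be preempted. */
            bcm43xx_lock_irqonly(bcm, flags);
            netif_stop_queue(bcm->net_dev);
            savedirqs = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
            bcm43xx_unlock_irqonly(bcm, flags);
            bcm43xx_lock_noirq(bcm);
            bcm43xx_synchronize_irq(bcm);
    } else {
            /* Light round: the short irqsafe spinlock path is cheaper. */
            bcm43xx_lock_irqsafe(bcm, flags);
    }
    do_periodic_work(bcm);
    /* ...then the mirror image: re-enable IRQs, wake the queue, unlock. */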
Diffstat (limited to 'drivers/net/wireless/bcm43xx/bcm43xx_main.c')
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.c | 97
1 file changed, 86 insertions, 11 deletions
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 835a2df1fe30..77d0e390b021 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -498,11 +498,21 @@ static inline u32 bcm43xx_interrupt_disable(struct bcm43xx_private *bcm, u32 mas
 	return old_mask;
 }
 
+/* Synchronize IRQ top- and bottom-half.
+ * IRQs must be masked before calling this.
+ * This must not be called with the irq_lock held.
+ */
+static void bcm43xx_synchronize_irq(struct bcm43xx_private *bcm)
+{
+	synchronize_irq(bcm->irq);
+	tasklet_disable(&bcm->isr_tasklet);
+}
+
 /* Make sure we don't receive more data from the device. */
 static int bcm43xx_disable_interrupts_sync(struct bcm43xx_private *bcm, u32 *oldstate)
 {
-	u32 old;
 	unsigned long flags;
+	u32 old;
 
 	bcm43xx_lock_irqonly(bcm, flags);
 	if (unlikely(bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)) {
@@ -510,8 +520,9 @@ static int bcm43xx_disable_interrupts_sync(struct bcm43xx_private *bcm, u32 *old
 		return -EBUSY;
 	}
 	old = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
-	tasklet_disable(&bcm->isr_tasklet);
 	bcm43xx_unlock_irqonly(bcm, flags);
+	bcm43xx_synchronize_irq(bcm);
+
 	if (oldstate)
 		*oldstate = old;
 
@@ -3108,14 +3119,10 @@ static void bcm43xx_periodic_every15sec(struct bcm43xx_private *bcm)
 	//TODO for APHY (temperature?)
 }
 
-static void bcm43xx_periodic_work_handler(void *d)
+static void do_periodic_work(struct bcm43xx_private *bcm)
 {
-	struct bcm43xx_private *bcm = d;
-	unsigned long flags;
 	unsigned int state;
 
-	bcm43xx_lock_irqsafe(bcm, flags);
-
 	state = bcm->periodic_state;
 	if (state % 8 == 0)
 		bcm43xx_periodic_every120sec(bcm);
@@ -3123,13 +3130,79 @@ static void bcm43xx_periodic_work_handler(void *d)
 		bcm43xx_periodic_every60sec(bcm);
 	if (state % 2 == 0)
 		bcm43xx_periodic_every30sec(bcm);
-	bcm43xx_periodic_every15sec(bcm);
+	if (state % 1 == 0)
+		bcm43xx_periodic_every15sec(bcm);
 	bcm->periodic_state = state + 1;
 
 	schedule_delayed_work(&bcm->periodic_work, HZ * 15);
+}
 
-	mmiowb();
-	bcm43xx_unlock_irqsafe(bcm, flags);
+/* Estimate a "Badness" value based on the periodic work
+ * state-machine state. "Badness" is worse (bigger), if the
+ * periodic work will take longer.
+ */
+static int estimate_periodic_work_badness(unsigned int state)
+{
+	int badness = 0;
+
+	if (state % 8 == 0) /* every 120 sec */
+		badness += 10;
+	if (state % 4 == 0) /* every 60 sec */
+		badness += 5;
+	if (state % 2 == 0) /* every 30 sec */
+		badness += 1;
+	if (state % 1 == 0) /* every 15 sec */
+		badness += 1;
+
+#define BADNESS_LIMIT 4
+	return badness;
+}
+
+static void bcm43xx_periodic_work_handler(void *d)
+{
+	struct bcm43xx_private *bcm = d;
+	unsigned long flags;
+	u32 savedirqs = 0;
+	int badness;
+
+	badness = estimate_periodic_work_badness(bcm->periodic_state);
+	if (badness > BADNESS_LIMIT) {
+		/* Periodic work will take a long time, so we want it to
+		 * be preemtible.
+		 */
+		bcm43xx_lock_irqonly(bcm, flags);
+		netif_stop_queue(bcm->net_dev);
+		if (bcm43xx_using_pio(bcm))
+			bcm43xx_pio_freeze_txqueues(bcm);
+		savedirqs = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
+		bcm43xx_unlock_irqonly(bcm, flags);
+		bcm43xx_lock_noirq(bcm);
+		bcm43xx_synchronize_irq(bcm);
+	} else {
+		/* Periodic work should take short time, so we want low
+		 * locking overhead.
+		 */
+		bcm43xx_lock_irqsafe(bcm, flags);
+	}
+
+	do_periodic_work(bcm);
+
+	if (badness > BADNESS_LIMIT) {
+		bcm43xx_lock_irqonly(bcm, flags);
+		if (likely(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)) {
+			tasklet_enable(&bcm->isr_tasklet);
+			bcm43xx_interrupt_enable(bcm, savedirqs);
+			if (bcm43xx_using_pio(bcm))
+				bcm43xx_pio_thaw_txqueues(bcm);
+		}
+		netif_wake_queue(bcm->net_dev);
+		mmiowb();
+		bcm43xx_unlock_irqonly(bcm, flags);
+		bcm43xx_unlock_noirq(bcm);
+	} else {
+		mmiowb();
+		bcm43xx_unlock_irqsafe(bcm, flags);
+	}
 }
 
 static void bcm43xx_periodic_tasks_delete(struct bcm43xx_private *bcm)
@@ -3670,9 +3743,11 @@ static int bcm43xx_net_open(struct net_device *net_dev)
 static int bcm43xx_net_stop(struct net_device *net_dev)
 {
 	struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
+	int err;
 
 	ieee80211softmac_stop(net_dev);
-	bcm43xx_disable_interrupts_sync(bcm, NULL);
+	err = bcm43xx_disable_interrupts_sync(bcm, NULL);
+	assert(!err);
 	bcm43xx_free_board(bcm);
 
 	return 0;
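
A note on the BADNESS_LIMIT choice, worked out from the weights above: a round that only runs the 15- and 30-second tasks scores 1 + 1 = 2, which stays below the limit of 4 and keeps the cheap irqsafe spinlock path; any round that includes the 60-second tasks scores at least 5 + 1 + 1 = 7, and a 120-second round scores 10 + 5 + 1 + 1 = 17, so exactly the rounds doing the heavy 60/120-second work take the preemptible path.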