Diffstat (limited to 'net')
 net/rfkill/rfkill-input.c | 49
 1 file changed, 41 insertions(+), 8 deletions(-)
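This patch converts rfkill-input's work item from a plain work_struct to a delayed_work and rate-limits its scheduling: consecutive switch operations are spaced at least RFKILL_OPS_DELAY (200 ms) apart, while an emergency power off (EPO) is allowed to bypass the limiter. On module exit, the now-delayed work is cancelled synchronously instead of flushing the shared workqueue.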
diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c
index 7706541f9f79..84efde97c5a7 100644
--- a/net/rfkill/rfkill-input.c
+++ b/net/rfkill/rfkill-input.c
@@ -31,6 +31,9 @@ enum rfkill_input_master_mode {
 	RFKILL_INPUT_MASTER_MAX,	/* marker */
 };
 
+/* Delay (in ms) between consecutive switch ops */
+#define RFKILL_OPS_DELAY 200
+
 static enum rfkill_input_master_mode rfkill_master_switch_mode =
 				RFKILL_INPUT_MASTER_UNBLOCKALL;
 module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0);
@@ -51,7 +54,7 @@ enum rfkill_global_sched_op {
  */
 
 struct rfkill_task {
-	struct work_struct work;
+	struct delayed_work dwork;
 
 	/* ensures that task is serialized */
 	struct mutex mutex;
@@ -75,6 +78,9 @@ struct rfkill_task {
 
 	bool global_op_pending;
 	enum rfkill_global_sched_op op;
+
+	/* last time it was scheduled */
+	unsigned long last_scheduled;
 };
 
 static void __rfkill_handle_global_op(enum rfkill_global_sched_op op)
@@ -138,8 +144,8 @@ static void __rfkill_handle_normal_op(const enum rfkill_type type,
 
 static void rfkill_task_handler(struct work_struct *work)
 {
-	struct rfkill_task *task =
-			container_of(work, struct rfkill_task, work);
+	struct rfkill_task *task = container_of(work,
+					struct rfkill_task, dwork.work);
 	bool doit = true;
 
 	mutex_lock(&task->mutex);
@@ -194,12 +200,27 @@ static void rfkill_task_handler(struct work_struct *work)
 }
 
 static struct rfkill_task rfkill_task = {
-	.work = __WORK_INITIALIZER(rfkill_task.work,
-				rfkill_task_handler),
+	.dwork = __DELAYED_WORK_INITIALIZER(rfkill_task.dwork,
+				rfkill_task_handler),
 	.mutex = __MUTEX_INITIALIZER(rfkill_task.mutex),
 	.lock = __SPIN_LOCK_UNLOCKED(rfkill_task.lock),
 };
 
+static unsigned long rfkill_ratelimit(const unsigned long last)
+{
+	const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);
+	return (time_after(jiffies, last + delay)) ? 0 : delay;
+}
+
+static void rfkill_schedule_ratelimited(void)
+{
+	if (!delayed_work_pending(&rfkill_task.dwork)) {
+		schedule_delayed_work(&rfkill_task.dwork,
+			rfkill_ratelimit(rfkill_task.last_scheduled));
+		rfkill_task.last_scheduled = jiffies;
+	}
+}
+
 static void rfkill_schedule_global_op(enum rfkill_global_sched_op op)
 {
 	unsigned long flags;
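The two helpers added above carry the whole policy: rfkill_ratelimit() returns 0 once RFKILL_OPS_DELAY jiffies have passed since the last scheduling and the full delay otherwise, and rfkill_schedule_ratelimited() queues the delayed work only when none is already pending. A minimal userspace sketch of the same arithmetic, with monotonic milliseconds standing in for jiffies (OPS_DELAY_MS, now_ms() and ratelimit() are hypothetical names, not part of the patch):

#include <stdio.h>
#include <time.h>

#define OPS_DELAY_MS 200UL

/* Monotonic timestamp in milliseconds, immune to wall-clock changes. */
static unsigned long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long)ts.tv_sec * 1000UL +
	       (unsigned long)ts.tv_nsec / 1000000UL;
}

/*
 * Same decision as rfkill_ratelimit(): no delay once a full window
 * has elapsed since the last scheduling, the whole window otherwise.
 */
static unsigned long ratelimit(unsigned long last_scheduled)
{
	return (now_ms() > last_scheduled + OPS_DELAY_MS) ? 0 : OPS_DELAY_MS;
}

int main(void)
{
	unsigned long last = now_ms();

	/* An op right after a previous one waits out the full window. */
	printf("back-to-back op waits %lu ms\n", ratelimit(last));

	/*
	 * Pre-aging the timestamp, as rfkill_handler_init() does with
	 * "jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1", lets the
	 * very first op run with no delay at all.
	 */
	last = now_ms() - OPS_DELAY_MS - 1;
	printf("first-ever op waits %lu ms\n", ratelimit(last));
	return 0;
}

Because the helper returns the full delay rather than the time remaining, and a second work item is never queued while one is pending, a burst of key presses collapses into at most one handler run per window; the handler then acts on the latest accumulated state bits.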
@@ -207,7 +228,13 @@ static void rfkill_schedule_global_op(enum rfkill_global_sched_op op)
 	spin_lock_irqsave(&rfkill_task.lock, flags);
 	rfkill_task.op = op;
 	rfkill_task.global_op_pending = true;
-	schedule_work(&rfkill_task.work);
+	if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
+		/* bypass the limiter for EPO */
+		cancel_delayed_work(&rfkill_task.dwork);
+		schedule_delayed_work(&rfkill_task.dwork, 0);
+		rfkill_task.last_scheduled = jiffies;
+	} else
+		rfkill_schedule_ratelimited();
 	spin_unlock_irqrestore(&rfkill_task.lock, flags);
 }
 
@@ -231,7 +258,7 @@ static void rfkill_schedule_set(enum rfkill_type type,
 			set_bit(type, rfkill_task.sw_newstate);
 		else
 			clear_bit(type, rfkill_task.sw_newstate);
-		schedule_work(&rfkill_task.work);
+		rfkill_schedule_ratelimited();
 	}
 	spin_unlock_irqrestore(&rfkill_task.lock, flags);
 }
@@ -248,7 +275,7 @@ static void rfkill_schedule_toggle(enum rfkill_type type)
 	if (!rfkill_task.global_op_pending) {
 		set_bit(type, rfkill_task.sw_pending);
 		change_bit(type, rfkill_task.sw_togglestate);
-		schedule_work(&rfkill_task.work);
+		rfkill_schedule_ratelimited();
 	}
 	spin_unlock_irqrestore(&rfkill_task.lock, flags);
 }
@@ -412,13 +439,19 @@ static int __init rfkill_handler_init(void)
 	if (rfkill_master_switch_mode >= RFKILL_INPUT_MASTER_MAX)
 		return -EINVAL;
 
+	/*
+	 * The penalty to not doing this is a possible RFKILL_OPS_DELAY delay
+	 * at the first use.  Acceptable, but if we can avoid it, why not?
+	 */
+	rfkill_task.last_scheduled =
+			jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1;
 	return input_register_handler(&rfkill_handler);
 }
 
 static void __exit rfkill_handler_exit(void)
 {
 	input_unregister_handler(&rfkill_handler);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&rfkill_task.dwork);
 	rfkill_remove_epo_lock();
 }
 
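The exit path changes accordingly: flush_scheduled_work() only flushes work already queued on the shared workqueue, whereas cancel_delayed_work_sync() also cancels a delayed-work timer that has not fired yet and waits for any in-flight handler to finish before the module goes away.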