Diffstat (limited to 'net/rfkill/rfkill-input.c')
-rw-r--r--	net/rfkill/rfkill-input.c	337
1 file changed, 261 insertions(+), 76 deletions(-)
diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c
index bfdade72e066..84efde97c5a7 100644
--- a/net/rfkill/rfkill-input.c
+++ b/net/rfkill/rfkill-input.c
@@ -24,138 +24,318 @@ MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>");
 MODULE_DESCRIPTION("Input layer to RF switch connector");
 MODULE_LICENSE("GPL");
 
+enum rfkill_input_master_mode {
+	RFKILL_INPUT_MASTER_DONOTHING = 0,
+	RFKILL_INPUT_MASTER_RESTORE = 1,
+	RFKILL_INPUT_MASTER_UNBLOCKALL = 2,
+	RFKILL_INPUT_MASTER_MAX,	/* marker */
+};
+
+/* Delay (in ms) between consecutive switch ops */
+#define RFKILL_OPS_DELAY 200
+
+static enum rfkill_input_master_mode rfkill_master_switch_mode =
+					RFKILL_INPUT_MASTER_UNBLOCKALL;
+module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0);
+MODULE_PARM_DESC(master_switch_mode,
+	"SW_RFKILL_ALL ON should: 0=do nothing; 1=restore; 2=unblock all");
+
+enum rfkill_global_sched_op {
+	RFKILL_GLOBAL_OP_EPO = 0,
+	RFKILL_GLOBAL_OP_RESTORE,
+	RFKILL_GLOBAL_OP_UNLOCK,
+	RFKILL_GLOBAL_OP_UNBLOCK,
+};
+
+/*
+ * Currently, the code marked with RFKILL_NEED_SWSET is inactive.
+ * If handling of EV_SW SW_WLAN/WWAN/BLUETOOTH/etc is needed in the
+ * future, when such events are added, that code will be necessary.
+ */
+
 struct rfkill_task {
-	struct work_struct work;
-	enum rfkill_type type;
-	struct mutex mutex; /* ensures that task is serialized */
-	spinlock_t lock; /* for accessing last and desired state */
-	unsigned long last; /* last schedule */
-	enum rfkill_state desired_state; /* on/off */
+	struct delayed_work dwork;
+
+	/* ensures that task is serialized */
+	struct mutex mutex;
+
+	/* protects everything below */
+	spinlock_t lock;
+
+	/* pending regular switch operations (1=pending) */
+	unsigned long sw_pending[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
+
+#ifdef RFKILL_NEED_SWSET
+	/* set operation pending (1=pending) */
+	unsigned long sw_setpending[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
+
+	/* desired state for pending set operation (1=unblock) */
+	unsigned long sw_newstate[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
+#endif
+
+	/* should the state be complemented (1=yes) */
+	unsigned long sw_togglestate[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
+
+	bool global_op_pending;
+	enum rfkill_global_sched_op op;
+
+	/* last time it was scheduled */
+	unsigned long last_scheduled;
 };
 
+static void __rfkill_handle_global_op(enum rfkill_global_sched_op op)
+{
+	unsigned int i;
+
+	switch (op) {
+	case RFKILL_GLOBAL_OP_EPO:
+		rfkill_epo();
+		break;
+	case RFKILL_GLOBAL_OP_RESTORE:
+		rfkill_restore_states();
+		break;
+	case RFKILL_GLOBAL_OP_UNLOCK:
+		rfkill_remove_epo_lock();
+		break;
+	case RFKILL_GLOBAL_OP_UNBLOCK:
+		rfkill_remove_epo_lock();
+		for (i = 0; i < RFKILL_TYPE_MAX; i++)
+			rfkill_switch_all(i, RFKILL_STATE_UNBLOCKED);
+		break;
+	default:
+		/* memory corruption or bug, fail safely */
+		rfkill_epo();
+		WARN(1, "Unknown requested operation %d! "
+			"rfkill Emergency Power Off activated\n",
+			op);
+	}
+}
+
+#ifdef RFKILL_NEED_SWSET
+static void __rfkill_handle_normal_op(const enum rfkill_type type,
+			const bool sp, const bool s, const bool c)
+{
+	enum rfkill_state state;
+
+	if (sp)
+		state = (s) ? RFKILL_STATE_UNBLOCKED :
+			      RFKILL_STATE_SOFT_BLOCKED;
+	else
+		state = rfkill_get_global_state(type);
+
+	if (c)
+		state = rfkill_state_complement(state);
+
+	rfkill_switch_all(type, state);
+}
+#else
+static void __rfkill_handle_normal_op(const enum rfkill_type type,
+			const bool c)
+{
+	enum rfkill_state state;
+
+	state = rfkill_get_global_state(type);
+	if (c)
+		state = rfkill_state_complement(state);
+
+	rfkill_switch_all(type, state);
+}
+#endif
+
 static void rfkill_task_handler(struct work_struct *work)
 {
-	struct rfkill_task *task = container_of(work, struct rfkill_task, work);
+	struct rfkill_task *task = container_of(work,
+					struct rfkill_task, dwork.work);
+	bool doit = true;
 
 	mutex_lock(&task->mutex);
 
-	rfkill_switch_all(task->type, task->desired_state);
+	spin_lock_irq(&task->lock);
+	while (doit) {
+		if (task->global_op_pending) {
+			enum rfkill_global_sched_op op = task->op;
+			task->global_op_pending = false;
+			memset(task->sw_pending, 0, sizeof(task->sw_pending));
+			spin_unlock_irq(&task->lock);
+
+			__rfkill_handle_global_op(op);
+
+			/* make sure we do at least one pass with
+			 * !task->global_op_pending */
+			spin_lock_irq(&task->lock);
+			continue;
+		} else if (!rfkill_is_epo_lock_active()) {
+			unsigned int i = 0;
+
+			while (!task->global_op_pending &&
+						i < RFKILL_TYPE_MAX) {
+				if (test_and_clear_bit(i, task->sw_pending)) {
+					bool c;
+#ifdef RFKILL_NEED_SWSET
+					bool sp, s;
+					sp = test_and_clear_bit(i,
+							task->sw_setpending);
+					s = test_bit(i, task->sw_newstate);
+#endif
+					c = test_and_clear_bit(i,
+							task->sw_togglestate);
+					spin_unlock_irq(&task->lock);
+
+#ifdef RFKILL_NEED_SWSET
+					__rfkill_handle_normal_op(i, sp, s, c);
+#else
+					__rfkill_handle_normal_op(i, c);
+#endif
+
+					spin_lock_irq(&task->lock);
+				}
+				i++;
+			}
+		}
+		doit = task->global_op_pending;
+	}
+	spin_unlock_irq(&task->lock);
 
 	mutex_unlock(&task->mutex);
 }
 
-static void rfkill_task_epo_handler(struct work_struct *work)
+static struct rfkill_task rfkill_task = {
+	.dwork = __DELAYED_WORK_INITIALIZER(rfkill_task.dwork,
+				rfkill_task_handler),
+	.mutex = __MUTEX_INITIALIZER(rfkill_task.mutex),
+	.lock = __SPIN_LOCK_UNLOCKED(rfkill_task.lock),
+};
+
+static unsigned long rfkill_ratelimit(const unsigned long last)
 {
-	rfkill_epo();
+	const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);
+	return (time_after(jiffies, last + delay)) ? 0 : delay;
 }
 
-static DECLARE_WORK(epo_work, rfkill_task_epo_handler);
+static void rfkill_schedule_ratelimited(void)
+{
+	if (!delayed_work_pending(&rfkill_task.dwork)) {
+		schedule_delayed_work(&rfkill_task.dwork,
+				rfkill_ratelimit(rfkill_task.last_scheduled));
+		rfkill_task.last_scheduled = jiffies;
+	}
+}
 
-static void rfkill_schedule_epo(void)
+static void rfkill_schedule_global_op(enum rfkill_global_sched_op op)
 {
-	schedule_work(&epo_work);
+	unsigned long flags;
+
+	spin_lock_irqsave(&rfkill_task.lock, flags);
+	rfkill_task.op = op;
+	rfkill_task.global_op_pending = true;
+	if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
+		/* bypass the limiter for EPO */
+		cancel_delayed_work(&rfkill_task.dwork);
+		schedule_delayed_work(&rfkill_task.dwork, 0);
+		rfkill_task.last_scheduled = jiffies;
+	} else
+		rfkill_schedule_ratelimited();
+	spin_unlock_irqrestore(&rfkill_task.lock, flags);
 }
 
-static void rfkill_schedule_set(struct rfkill_task *task,
+#ifdef RFKILL_NEED_SWSET
+/* Use this if you need to add EV_SW SW_WLAN/WWAN/BLUETOOTH/etc handling */
+
+static void rfkill_schedule_set(enum rfkill_type type,
 			enum rfkill_state desired_state)
 {
 	unsigned long flags;
 
-	if (unlikely(work_pending(&epo_work)))
+	if (rfkill_is_epo_lock_active())
 		return;
 
-	spin_lock_irqsave(&task->lock, flags);
-
-	if (time_after(jiffies, task->last + msecs_to_jiffies(200))) {
-		task->desired_state = desired_state;
-		task->last = jiffies;
-		schedule_work(&task->work);
+	spin_lock_irqsave(&rfkill_task.lock, flags);
+	if (!rfkill_task.global_op_pending) {
+		set_bit(type, rfkill_task.sw_pending);
+		set_bit(type, rfkill_task.sw_setpending);
+		clear_bit(type, rfkill_task.sw_togglestate);
+		if (desired_state)
+			set_bit(type, rfkill_task.sw_newstate);
+		else
+			clear_bit(type, rfkill_task.sw_newstate);
+		rfkill_schedule_ratelimited();
 	}
-
-	spin_unlock_irqrestore(&task->lock, flags);
+	spin_unlock_irqrestore(&rfkill_task.lock, flags);
 }
+#endif
 
-static void rfkill_schedule_toggle(struct rfkill_task *task)
+static void rfkill_schedule_toggle(enum rfkill_type type)
 {
 	unsigned long flags;
 
-	if (unlikely(work_pending(&epo_work)))
+	if (rfkill_is_epo_lock_active())
 		return;
 
-	spin_lock_irqsave(&task->lock, flags);
-
-	if (time_after(jiffies, task->last + msecs_to_jiffies(200))) {
-		task->desired_state =
-				rfkill_state_complement(task->desired_state);
-		task->last = jiffies;
-		schedule_work(&task->work);
+	spin_lock_irqsave(&rfkill_task.lock, flags);
+	if (!rfkill_task.global_op_pending) {
+		set_bit(type, rfkill_task.sw_pending);
+		change_bit(type, rfkill_task.sw_togglestate);
+		rfkill_schedule_ratelimited();
 	}
-
-	spin_unlock_irqrestore(&task->lock, flags);
+	spin_unlock_irqrestore(&rfkill_task.lock, flags);
 }
 
-#define DEFINE_RFKILL_TASK(n, t) \
-	struct rfkill_task n = { \
-		.work = __WORK_INITIALIZER(n.work, \
-				rfkill_task_handler), \
-		.type = t, \
-		.mutex = __MUTEX_INITIALIZER(n.mutex), \
-		.lock = __SPIN_LOCK_UNLOCKED(n.lock), \
-		.desired_state = RFKILL_STATE_UNBLOCKED, \
-	}
-
-static DEFINE_RFKILL_TASK(rfkill_wlan, RFKILL_TYPE_WLAN);
-static DEFINE_RFKILL_TASK(rfkill_bt, RFKILL_TYPE_BLUETOOTH);
-static DEFINE_RFKILL_TASK(rfkill_uwb, RFKILL_TYPE_UWB);
-static DEFINE_RFKILL_TASK(rfkill_wimax, RFKILL_TYPE_WIMAX);
-static DEFINE_RFKILL_TASK(rfkill_wwan, RFKILL_TYPE_WWAN);
-
 static void rfkill_schedule_evsw_rfkillall(int state)
 {
-	/* EVERY radio type. state != 0 means radios ON */
-	/* handle EPO (emergency power off) through shortcut */
 	if (state) {
-		rfkill_schedule_set(&rfkill_wwan,
-				RFKILL_STATE_UNBLOCKED);
-		rfkill_schedule_set(&rfkill_wimax,
-				RFKILL_STATE_UNBLOCKED);
-		rfkill_schedule_set(&rfkill_uwb,
-				RFKILL_STATE_UNBLOCKED);
-		rfkill_schedule_set(&rfkill_bt,
-				RFKILL_STATE_UNBLOCKED);
-		rfkill_schedule_set(&rfkill_wlan,
-				RFKILL_STATE_UNBLOCKED);
+		switch (rfkill_master_switch_mode) {
+		case RFKILL_INPUT_MASTER_UNBLOCKALL:
+			rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNBLOCK);
+			break;
+		case RFKILL_INPUT_MASTER_RESTORE:
+			rfkill_schedule_global_op(RFKILL_GLOBAL_OP_RESTORE);
+			break;
+		case RFKILL_INPUT_MASTER_DONOTHING:
+			rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNLOCK);
+			break;
+		default:
+			/* memory corruption or driver bug! fail safely */
+			rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
+			WARN(1, "Unknown rfkill_master_switch_mode (%d), "
+				"driver bug or memory corruption detected!\n",
+				rfkill_master_switch_mode);
+			break;
+		}
 	} else
-		rfkill_schedule_epo();
+		rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
 }
 
 static void rfkill_event(struct input_handle *handle, unsigned int type,
 			unsigned int code, int data)
 {
 	if (type == EV_KEY && data == 1) {
+		enum rfkill_type t;
+
 		switch (code) {
 		case KEY_WLAN:
-			rfkill_schedule_toggle(&rfkill_wlan);
+			t = RFKILL_TYPE_WLAN;
 			break;
 		case KEY_BLUETOOTH:
-			rfkill_schedule_toggle(&rfkill_bt);
+			t = RFKILL_TYPE_BLUETOOTH;
 			break;
 		case KEY_UWB:
-			rfkill_schedule_toggle(&rfkill_uwb);
+			t = RFKILL_TYPE_UWB;
 			break;
 		case KEY_WIMAX:
-			rfkill_schedule_toggle(&rfkill_wimax);
+			t = RFKILL_TYPE_WIMAX;
 			break;
 		default:
-			break;
+			return;
 		}
+		rfkill_schedule_toggle(t);
+		return;
 	} else if (type == EV_SW) {
 		switch (code) {
 		case SW_RFKILL_ALL:
 			rfkill_schedule_evsw_rfkillall(data);
-			break;
+			return;
 		default:
-			break;
+			return;
 		}
 	}
 }
@@ -256,18 +436,23 @@ static struct input_handler rfkill_handler = {
 
 static int __init rfkill_handler_init(void)
 {
-	unsigned long last_run = jiffies - msecs_to_jiffies(500);
-	rfkill_wlan.last = last_run;
-	rfkill_bt.last = last_run;
-	rfkill_uwb.last = last_run;
-	rfkill_wimax.last = last_run;
+	if (rfkill_master_switch_mode >= RFKILL_INPUT_MASTER_MAX)
+		return -EINVAL;
+
+	/*
+	 * The penalty to not doing this is a possible RFKILL_OPS_DELAY delay
+	 * at the first use. Acceptable, but if we can avoid it, why not?
+	 */
+	rfkill_task.last_scheduled =
+			jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1;
 	return input_register_handler(&rfkill_handler);
 }
 
 static void __exit rfkill_handler_exit(void)
 {
 	input_unregister_handler(&rfkill_handler);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&rfkill_task.dwork);
+	rfkill_remove_epo_lock();
 }
 
 module_init(rfkill_handler_init);
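
Not part of the diff above: a usage note and an illustrative sketch.

Usage note: master_switch_mode is declared with module_param_named() and permissions 0, so it can only be chosen at load time (or on the kernel command line when the handler is built in). Per the code in the hunk above, SW_RFKILL_ALL ON then means: 0 = leave radio states alone and only lift the EPO lock, 1 = restore the states saved when EPO engaged, 2 = unblock every radio type; SW_RFKILL_ALL OFF always schedules RFKILL_GLOBAL_OP_EPO. If built as a module, passing something like master_switch_mode=1 on the modprobe line would select the restore behaviour.

The C sketch below is a minimal, hypothetical event source showing the producer side of what this handler consumes (every demo_* identifier is invented here, and the EC/GPIO plumbing that would actually detect the key is elided). An input device reporting KEY_WLAN presses and the SW_RFKILL_ALL switch is what rfkill_event() above maps to rfkill_schedule_toggle(RFKILL_TYPE_WLAN) and rfkill_schedule_evsw_rfkillall() respectively.

/*
 * Hypothetical event source for rfkill-input (illustration only).
 */
#include <linux/init.h>
#include <linux/input.h>
#include <linux/module.h>

static struct input_dev *demo_idev;

static int __init demo_rf_hotkey_init(void)
{
	int err;

	demo_idev = input_allocate_device();
	if (!demo_idev)
		return -ENOMEM;

	demo_idev->name = "Demo RF hotkeys";

	/* declare the event types/codes rfkill-input binds to */
	input_set_capability(demo_idev, EV_KEY, KEY_WLAN);
	input_set_capability(demo_idev, EV_SW, SW_RFKILL_ALL);

	err = input_register_device(demo_idev);
	if (err) {
		input_free_device(demo_idev);
		return err;
	}

	/* KEY_WLAN press + release: rfkill-input toggles RFKILL_TYPE_WLAN */
	input_report_key(demo_idev, KEY_WLAN, 1);
	input_sync(demo_idev);
	input_report_key(demo_idev, KEY_WLAN, 0);
	input_sync(demo_idev);

	/* master switch OFF (0): rfkill-input schedules the EPO operation */
	input_report_switch(demo_idev, SW_RFKILL_ALL, 0);
	input_sync(demo_idev);

	return 0;
}

static void __exit demo_rf_hotkey_exit(void)
{
	input_unregister_device(demo_idev);
}

module_init(demo_rf_hotkey_init);
module_exit(demo_rf_hotkey_exit);
MODULE_LICENSE("GPL");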