diff options
Diffstat (limited to 'net/rfkill')
-rw-r--r-- | net/rfkill/Kconfig | 21 | ||||
-rw-r--r-- | net/rfkill/Makefile | 5 | ||||
-rw-r--r-- | net/rfkill/core.c | 896 | ||||
-rw-r--r-- | net/rfkill/input.c | 342 | ||||
-rw-r--r-- | net/rfkill/rfkill-input.c | 390 | ||||
-rw-r--r-- | net/rfkill/rfkill.c | 855 | ||||
-rw-r--r-- | net/rfkill/rfkill.h (renamed from net/rfkill/rfkill-input.h) | 10 |
7 files changed, 1256 insertions, 1263 deletions
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig index 7f807b30cfbb..b47f72fae05d 100644 --- a/net/rfkill/Kconfig +++ b/net/rfkill/Kconfig | |||
@@ -10,22 +10,15 @@ menuconfig RFKILL | |||
10 | To compile this driver as a module, choose M here: the | 10 | To compile this driver as a module, choose M here: the |
11 | module will be called rfkill. | 11 | module will be called rfkill. |
12 | 12 | ||
13 | config RFKILL_INPUT | ||
14 | tristate "Input layer to RF switch connector" | ||
15 | depends on RFKILL && INPUT | ||
16 | help | ||
17 | Say Y here if you want kernel automatically toggle state | ||
18 | of RF switches on and off when user presses appropriate | ||
19 | button or a key on the keyboard. Without this module you | ||
20 | need a some kind of userspace application to control | ||
21 | state of the switches. | ||
22 | |||
23 | To compile this driver as a module, choose M here: the | ||
24 | module will be called rfkill-input. | ||
25 | |||
26 | # LED trigger support | 13 | # LED trigger support |
27 | config RFKILL_LEDS | 14 | config RFKILL_LEDS |
28 | bool | 15 | bool |
29 | depends on RFKILL && LEDS_TRIGGERS | 16 | depends on RFKILL |
17 | depends on LEDS_TRIGGERS = y || RFKILL = LEDS_TRIGGERS | ||
30 | default y | 18 | default y |
31 | 19 | ||
20 | config RFKILL_INPUT | ||
21 | bool | ||
22 | depends on RFKILL | ||
23 | depends on INPUT = y || RFKILL = INPUT | ||
24 | default y | ||
diff --git a/net/rfkill/Makefile b/net/rfkill/Makefile index b38c430be057..662105352691 100644 --- a/net/rfkill/Makefile +++ b/net/rfkill/Makefile | |||
@@ -2,5 +2,6 @@ | |||
2 | # Makefile for the RF switch subsystem. | 2 | # Makefile for the RF switch subsystem. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_RFKILL) += rfkill.o | 5 | rfkill-y += core.o |
6 | obj-$(CONFIG_RFKILL_INPUT) += rfkill-input.o | 6 | rfkill-$(CONFIG_RFKILL_INPUT) += input.o |
7 | obj-$(CONFIG_RFKILL) += rfkill.o | ||
diff --git a/net/rfkill/core.c b/net/rfkill/core.c new file mode 100644 index 000000000000..30a6f8d819b2 --- /dev/null +++ b/net/rfkill/core.c | |||
@@ -0,0 +1,896 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 - 2007 Ivo van Doorn | ||
3 | * Copyright (C) 2007 Dmitry Torokhov | ||
4 | * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the | ||
18 | * Free Software Foundation, Inc., | ||
19 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | */ | ||
21 | |||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/workqueue.h> | ||
26 | #include <linux/capability.h> | ||
27 | #include <linux/list.h> | ||
28 | #include <linux/mutex.h> | ||
29 | #include <linux/rfkill.h> | ||
30 | #include <linux/spinlock.h> | ||
31 | |||
32 | #include "rfkill.h" | ||
33 | |||
34 | #define POLL_INTERVAL (5 * HZ) | ||
35 | |||
36 | #define RFKILL_BLOCK_HW BIT(0) | ||
37 | #define RFKILL_BLOCK_SW BIT(1) | ||
38 | #define RFKILL_BLOCK_SW_PREV BIT(2) | ||
39 | #define RFKILL_BLOCK_ANY (RFKILL_BLOCK_HW |\ | ||
40 | RFKILL_BLOCK_SW |\ | ||
41 | RFKILL_BLOCK_SW_PREV) | ||
42 | #define RFKILL_BLOCK_SW_SETCALL BIT(31) | ||
43 | |||
44 | struct rfkill { | ||
45 | spinlock_t lock; | ||
46 | |||
47 | const char *name; | ||
48 | enum rfkill_type type; | ||
49 | |||
50 | unsigned long state; | ||
51 | |||
52 | bool registered; | ||
53 | bool suspended; | ||
54 | |||
55 | const struct rfkill_ops *ops; | ||
56 | void *data; | ||
57 | |||
58 | #ifdef CONFIG_RFKILL_LEDS | ||
59 | struct led_trigger led_trigger; | ||
60 | const char *ledtrigname; | ||
61 | #endif | ||
62 | |||
63 | struct device dev; | ||
64 | struct list_head node; | ||
65 | |||
66 | struct delayed_work poll_work; | ||
67 | struct work_struct uevent_work; | ||
68 | struct work_struct sync_work; | ||
69 | }; | ||
70 | #define to_rfkill(d) container_of(d, struct rfkill, dev) | ||
71 | |||
72 | |||
73 | |||
74 | MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>"); | ||
75 | MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>"); | ||
76 | MODULE_DESCRIPTION("RF switch support"); | ||
77 | MODULE_LICENSE("GPL"); | ||
78 | |||
79 | |||
80 | /* | ||
81 | * The locking here should be made much smarter, we currently have | ||
82 | * a bit of a stupid situation because drivers might want to register | ||
83 | * the rfkill struct under their own lock, and take this lock during | ||
84 | * rfkill method calls -- which will cause an AB-BA deadlock situation. | ||
85 | * | ||
86 | * To fix that, we need to rework this code here to be mostly lock-free | ||
87 | * and only use the mutex for list manipulations, not to protect the | ||
88 | * various other global variables. Then we can avoid holding the mutex | ||
89 | * around driver operations, and all is happy. | ||
90 | */ | ||
91 | static LIST_HEAD(rfkill_list); /* list of registered rf switches */ | ||
92 | static DEFINE_MUTEX(rfkill_global_mutex); | ||
93 | |||
94 | static unsigned int rfkill_default_state = 1; | ||
95 | module_param_named(default_state, rfkill_default_state, uint, 0444); | ||
96 | MODULE_PARM_DESC(default_state, | ||
97 | "Default initial state for all radio types, 0 = radio off"); | ||
98 | |||
99 | static struct { | ||
100 | bool cur, def; | ||
101 | } rfkill_global_states[NUM_RFKILL_TYPES]; | ||
102 | |||
103 | static unsigned long rfkill_states_default_locked; | ||
104 | |||
105 | static bool rfkill_epo_lock_active; | ||
106 | |||
107 | |||
108 | #ifdef CONFIG_RFKILL_LEDS | ||
109 | static void rfkill_led_trigger_event(struct rfkill *rfkill) | ||
110 | { | ||
111 | struct led_trigger *trigger; | ||
112 | |||
113 | if (!rfkill->registered) | ||
114 | return; | ||
115 | |||
116 | trigger = &rfkill->led_trigger; | ||
117 | |||
118 | if (rfkill->state & RFKILL_BLOCK_ANY) | ||
119 | led_trigger_event(trigger, LED_OFF); | ||
120 | else | ||
121 | led_trigger_event(trigger, LED_FULL); | ||
122 | } | ||
123 | |||
124 | static void rfkill_led_trigger_activate(struct led_classdev *led) | ||
125 | { | ||
126 | struct rfkill *rfkill; | ||
127 | |||
128 | rfkill = container_of(led->trigger, struct rfkill, led_trigger); | ||
129 | |||
130 | rfkill_led_trigger_event(rfkill); | ||
131 | } | ||
132 | |||
133 | const char *rfkill_get_led_trigger_name(struct rfkill *rfkill) | ||
134 | { | ||
135 | return rfkill->led_trigger.name; | ||
136 | } | ||
137 | EXPORT_SYMBOL(rfkill_get_led_trigger_name); | ||
138 | |||
139 | void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name) | ||
140 | { | ||
141 | BUG_ON(!rfkill); | ||
142 | |||
143 | rfkill->ledtrigname = name; | ||
144 | } | ||
145 | EXPORT_SYMBOL(rfkill_set_led_trigger_name); | ||
146 | |||
147 | static int rfkill_led_trigger_register(struct rfkill *rfkill) | ||
148 | { | ||
149 | rfkill->led_trigger.name = rfkill->ledtrigname | ||
150 | ? : dev_name(&rfkill->dev); | ||
151 | rfkill->led_trigger.activate = rfkill_led_trigger_activate; | ||
152 | return led_trigger_register(&rfkill->led_trigger); | ||
153 | } | ||
154 | |||
155 | static void rfkill_led_trigger_unregister(struct rfkill *rfkill) | ||
156 | { | ||
157 | led_trigger_unregister(&rfkill->led_trigger); | ||
158 | } | ||
159 | #else | ||
160 | static void rfkill_led_trigger_event(struct rfkill *rfkill) | ||
161 | { | ||
162 | } | ||
163 | |||
164 | static inline int rfkill_led_trigger_register(struct rfkill *rfkill) | ||
165 | { | ||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill) | ||
170 | { | ||
171 | } | ||
172 | #endif /* CONFIG_RFKILL_LEDS */ | ||
173 | |||
174 | static void rfkill_uevent(struct rfkill *rfkill) | ||
175 | { | ||
176 | if (!rfkill->registered || rfkill->suspended) | ||
177 | return; | ||
178 | |||
179 | kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE); | ||
180 | } | ||
181 | |||
182 | static bool __rfkill_set_hw_state(struct rfkill *rfkill, | ||
183 | bool blocked, bool *change) | ||
184 | { | ||
185 | unsigned long flags; | ||
186 | bool prev, any; | ||
187 | |||
188 | BUG_ON(!rfkill); | ||
189 | |||
190 | spin_lock_irqsave(&rfkill->lock, flags); | ||
191 | prev = !!(rfkill->state & RFKILL_BLOCK_HW); | ||
192 | if (blocked) | ||
193 | rfkill->state |= RFKILL_BLOCK_HW; | ||
194 | else | ||
195 | rfkill->state &= ~RFKILL_BLOCK_HW; | ||
196 | *change = prev != blocked; | ||
197 | any = rfkill->state & RFKILL_BLOCK_ANY; | ||
198 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
199 | |||
200 | rfkill_led_trigger_event(rfkill); | ||
201 | |||
202 | return any; | ||
203 | } | ||
204 | |||
205 | /** | ||
206 | * rfkill_set_block - wrapper for set_block method | ||
207 | * | ||
208 | * @rfkill: the rfkill struct to use | ||
209 | * @blocked: the new software state | ||
210 | * | ||
211 | * Calls the set_block method (when applicable) and handles notifications | ||
212 | * etc. as well. | ||
213 | */ | ||
214 | static void rfkill_set_block(struct rfkill *rfkill, bool blocked) | ||
215 | { | ||
216 | unsigned long flags; | ||
217 | int err; | ||
218 | |||
219 | /* | ||
220 | * Some platforms (...!) generate input events which affect the | ||
221 | * _hard_ kill state -- whenever something tries to change the | ||
222 | * current software state query the hardware state too. | ||
223 | */ | ||
224 | if (rfkill->ops->query) | ||
225 | rfkill->ops->query(rfkill, rfkill->data); | ||
226 | |||
227 | spin_lock_irqsave(&rfkill->lock, flags); | ||
228 | if (rfkill->state & RFKILL_BLOCK_SW) | ||
229 | rfkill->state |= RFKILL_BLOCK_SW_PREV; | ||
230 | else | ||
231 | rfkill->state &= ~RFKILL_BLOCK_SW_PREV; | ||
232 | |||
233 | if (blocked) | ||
234 | rfkill->state |= RFKILL_BLOCK_SW; | ||
235 | else | ||
236 | rfkill->state &= ~RFKILL_BLOCK_SW; | ||
237 | |||
238 | rfkill->state |= RFKILL_BLOCK_SW_SETCALL; | ||
239 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
240 | |||
241 | if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP)) | ||
242 | return; | ||
243 | |||
244 | err = rfkill->ops->set_block(rfkill->data, blocked); | ||
245 | |||
246 | spin_lock_irqsave(&rfkill->lock, flags); | ||
247 | if (err) { | ||
248 | /* | ||
249 | * Failed -- reset status to _prev, this may be different | ||
250 | * from what set set _PREV to earlier in this function | ||
251 | * if rfkill_set_sw_state was invoked. | ||
252 | */ | ||
253 | if (rfkill->state & RFKILL_BLOCK_SW_PREV) | ||
254 | rfkill->state |= RFKILL_BLOCK_SW; | ||
255 | else | ||
256 | rfkill->state &= ~RFKILL_BLOCK_SW; | ||
257 | } | ||
258 | rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL; | ||
259 | rfkill->state &= ~RFKILL_BLOCK_SW_PREV; | ||
260 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
261 | |||
262 | rfkill_led_trigger_event(rfkill); | ||
263 | rfkill_uevent(rfkill); | ||
264 | } | ||
265 | |||
266 | /** | ||
267 | * __rfkill_switch_all - Toggle state of all switches of given type | ||
268 | * @type: type of interfaces to be affected | ||
269 | * @state: the new state | ||
270 | * | ||
271 | * This function sets the state of all switches of given type, | ||
272 | * unless a specific switch is claimed by userspace (in which case, | ||
273 | * that switch is left alone) or suspended. | ||
274 | * | ||
275 | * Caller must have acquired rfkill_global_mutex. | ||
276 | */ | ||
277 | static void __rfkill_switch_all(const enum rfkill_type type, bool blocked) | ||
278 | { | ||
279 | struct rfkill *rfkill; | ||
280 | |||
281 | rfkill_global_states[type].cur = blocked; | ||
282 | list_for_each_entry(rfkill, &rfkill_list, node) { | ||
283 | if (rfkill->type != type) | ||
284 | continue; | ||
285 | |||
286 | rfkill_set_block(rfkill, blocked); | ||
287 | } | ||
288 | } | ||
289 | |||
290 | /** | ||
291 | * rfkill_switch_all - Toggle state of all switches of given type | ||
292 | * @type: type of interfaces to be affected | ||
293 | * @state: the new state | ||
294 | * | ||
295 | * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state). | ||
296 | * Please refer to __rfkill_switch_all() for details. | ||
297 | * | ||
298 | * Does nothing if the EPO lock is active. | ||
299 | */ | ||
300 | void rfkill_switch_all(enum rfkill_type type, bool blocked) | ||
301 | { | ||
302 | mutex_lock(&rfkill_global_mutex); | ||
303 | |||
304 | if (!rfkill_epo_lock_active) | ||
305 | __rfkill_switch_all(type, blocked); | ||
306 | |||
307 | mutex_unlock(&rfkill_global_mutex); | ||
308 | } | ||
309 | |||
310 | /** | ||
311 | * rfkill_epo - emergency power off all transmitters | ||
312 | * | ||
313 | * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED, | ||
314 | * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex. | ||
315 | * | ||
316 | * The global state before the EPO is saved and can be restored later | ||
317 | * using rfkill_restore_states(). | ||
318 | */ | ||
319 | void rfkill_epo(void) | ||
320 | { | ||
321 | struct rfkill *rfkill; | ||
322 | int i; | ||
323 | |||
324 | mutex_lock(&rfkill_global_mutex); | ||
325 | |||
326 | rfkill_epo_lock_active = true; | ||
327 | list_for_each_entry(rfkill, &rfkill_list, node) | ||
328 | rfkill_set_block(rfkill, true); | ||
329 | |||
330 | for (i = 0; i < NUM_RFKILL_TYPES; i++) { | ||
331 | rfkill_global_states[i].def = rfkill_global_states[i].cur; | ||
332 | rfkill_global_states[i].cur = true; | ||
333 | } | ||
334 | mutex_unlock(&rfkill_global_mutex); | ||
335 | } | ||
336 | |||
337 | /** | ||
338 | * rfkill_restore_states - restore global states | ||
339 | * | ||
340 | * Restore (and sync switches to) the global state from the | ||
341 | * states in rfkill_default_states. This can undo the effects of | ||
342 | * a call to rfkill_epo(). | ||
343 | */ | ||
344 | void rfkill_restore_states(void) | ||
345 | { | ||
346 | int i; | ||
347 | |||
348 | mutex_lock(&rfkill_global_mutex); | ||
349 | |||
350 | rfkill_epo_lock_active = false; | ||
351 | for (i = 0; i < NUM_RFKILL_TYPES; i++) | ||
352 | __rfkill_switch_all(i, rfkill_global_states[i].def); | ||
353 | mutex_unlock(&rfkill_global_mutex); | ||
354 | } | ||
355 | |||
356 | /** | ||
357 | * rfkill_remove_epo_lock - unlock state changes | ||
358 | * | ||
359 | * Used by rfkill-input manually unlock state changes, when | ||
360 | * the EPO switch is deactivated. | ||
361 | */ | ||
362 | void rfkill_remove_epo_lock(void) | ||
363 | { | ||
364 | mutex_lock(&rfkill_global_mutex); | ||
365 | rfkill_epo_lock_active = false; | ||
366 | mutex_unlock(&rfkill_global_mutex); | ||
367 | } | ||
368 | |||
369 | /** | ||
370 | * rfkill_is_epo_lock_active - returns true EPO is active | ||
371 | * | ||
372 | * Returns 0 (false) if there is NOT an active EPO contidion, | ||
373 | * and 1 (true) if there is an active EPO contition, which | ||
374 | * locks all radios in one of the BLOCKED states. | ||
375 | * | ||
376 | * Can be called in atomic context. | ||
377 | */ | ||
378 | bool rfkill_is_epo_lock_active(void) | ||
379 | { | ||
380 | return rfkill_epo_lock_active; | ||
381 | } | ||
382 | |||
383 | /** | ||
384 | * rfkill_get_global_sw_state - returns global state for a type | ||
385 | * @type: the type to get the global state of | ||
386 | * | ||
387 | * Returns the current global state for a given wireless | ||
388 | * device type. | ||
389 | */ | ||
390 | bool rfkill_get_global_sw_state(const enum rfkill_type type) | ||
391 | { | ||
392 | return rfkill_global_states[type].cur; | ||
393 | } | ||
394 | |||
395 | void rfkill_set_global_sw_state(const enum rfkill_type type, bool blocked) | ||
396 | { | ||
397 | mutex_lock(&rfkill_global_mutex); | ||
398 | |||
399 | /* don't allow unblock when epo */ | ||
400 | if (rfkill_epo_lock_active && !blocked) | ||
401 | goto out; | ||
402 | |||
403 | /* too late */ | ||
404 | if (rfkill_states_default_locked & BIT(type)) | ||
405 | goto out; | ||
406 | |||
407 | rfkill_states_default_locked |= BIT(type); | ||
408 | |||
409 | rfkill_global_states[type].cur = blocked; | ||
410 | rfkill_global_states[type].def = blocked; | ||
411 | out: | ||
412 | mutex_unlock(&rfkill_global_mutex); | ||
413 | } | ||
414 | EXPORT_SYMBOL(rfkill_set_global_sw_state); | ||
415 | |||
416 | |||
417 | bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) | ||
418 | { | ||
419 | bool ret, change; | ||
420 | |||
421 | ret = __rfkill_set_hw_state(rfkill, blocked, &change); | ||
422 | |||
423 | if (!rfkill->registered) | ||
424 | return ret; | ||
425 | |||
426 | if (change) | ||
427 | schedule_work(&rfkill->uevent_work); | ||
428 | |||
429 | return ret; | ||
430 | } | ||
431 | EXPORT_SYMBOL(rfkill_set_hw_state); | ||
432 | |||
433 | static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) | ||
434 | { | ||
435 | u32 bit = RFKILL_BLOCK_SW; | ||
436 | |||
437 | /* if in a ops->set_block right now, use other bit */ | ||
438 | if (rfkill->state & RFKILL_BLOCK_SW_SETCALL) | ||
439 | bit = RFKILL_BLOCK_SW_PREV; | ||
440 | |||
441 | if (blocked) | ||
442 | rfkill->state |= bit; | ||
443 | else | ||
444 | rfkill->state &= ~bit; | ||
445 | } | ||
446 | |||
447 | bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) | ||
448 | { | ||
449 | unsigned long flags; | ||
450 | bool prev, hwblock; | ||
451 | |||
452 | BUG_ON(!rfkill); | ||
453 | |||
454 | spin_lock_irqsave(&rfkill->lock, flags); | ||
455 | prev = !!(rfkill->state & RFKILL_BLOCK_SW); | ||
456 | __rfkill_set_sw_state(rfkill, blocked); | ||
457 | hwblock = !!(rfkill->state & RFKILL_BLOCK_HW); | ||
458 | blocked = blocked || hwblock; | ||
459 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
460 | |||
461 | if (!rfkill->registered) | ||
462 | return blocked; | ||
463 | |||
464 | if (prev != blocked && !hwblock) | ||
465 | schedule_work(&rfkill->uevent_work); | ||
466 | |||
467 | rfkill_led_trigger_event(rfkill); | ||
468 | |||
469 | return blocked; | ||
470 | } | ||
471 | EXPORT_SYMBOL(rfkill_set_sw_state); | ||
472 | |||
473 | void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) | ||
474 | { | ||
475 | unsigned long flags; | ||
476 | bool swprev, hwprev; | ||
477 | |||
478 | BUG_ON(!rfkill); | ||
479 | |||
480 | spin_lock_irqsave(&rfkill->lock, flags); | ||
481 | |||
482 | /* | ||
483 | * No need to care about prev/setblock ... this is for uevent only | ||
484 | * and that will get triggered by rfkill_set_block anyway. | ||
485 | */ | ||
486 | swprev = !!(rfkill->state & RFKILL_BLOCK_SW); | ||
487 | hwprev = !!(rfkill->state & RFKILL_BLOCK_HW); | ||
488 | __rfkill_set_sw_state(rfkill, sw); | ||
489 | |||
490 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
491 | |||
492 | if (!rfkill->registered) | ||
493 | return; | ||
494 | |||
495 | if (swprev != sw || hwprev != hw) | ||
496 | schedule_work(&rfkill->uevent_work); | ||
497 | |||
498 | rfkill_led_trigger_event(rfkill); | ||
499 | } | ||
500 | EXPORT_SYMBOL(rfkill_set_states); | ||
501 | |||
502 | static ssize_t rfkill_name_show(struct device *dev, | ||
503 | struct device_attribute *attr, | ||
504 | char *buf) | ||
505 | { | ||
506 | struct rfkill *rfkill = to_rfkill(dev); | ||
507 | |||
508 | return sprintf(buf, "%s\n", rfkill->name); | ||
509 | } | ||
510 | |||
511 | static const char *rfkill_get_type_str(enum rfkill_type type) | ||
512 | { | ||
513 | switch (type) { | ||
514 | case RFKILL_TYPE_WLAN: | ||
515 | return "wlan"; | ||
516 | case RFKILL_TYPE_BLUETOOTH: | ||
517 | return "bluetooth"; | ||
518 | case RFKILL_TYPE_UWB: | ||
519 | return "ultrawideband"; | ||
520 | case RFKILL_TYPE_WIMAX: | ||
521 | return "wimax"; | ||
522 | case RFKILL_TYPE_WWAN: | ||
523 | return "wwan"; | ||
524 | default: | ||
525 | BUG(); | ||
526 | } | ||
527 | |||
528 | BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_WWAN + 1); | ||
529 | } | ||
530 | |||
531 | static ssize_t rfkill_type_show(struct device *dev, | ||
532 | struct device_attribute *attr, | ||
533 | char *buf) | ||
534 | { | ||
535 | struct rfkill *rfkill = to_rfkill(dev); | ||
536 | |||
537 | return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type)); | ||
538 | } | ||
539 | |||
540 | static u8 user_state_from_blocked(unsigned long state) | ||
541 | { | ||
542 | if (state & RFKILL_BLOCK_HW) | ||
543 | return RFKILL_USER_STATE_HARD_BLOCKED; | ||
544 | if (state & RFKILL_BLOCK_SW) | ||
545 | return RFKILL_USER_STATE_SOFT_BLOCKED; | ||
546 | |||
547 | return RFKILL_USER_STATE_UNBLOCKED; | ||
548 | } | ||
549 | |||
550 | static ssize_t rfkill_state_show(struct device *dev, | ||
551 | struct device_attribute *attr, | ||
552 | char *buf) | ||
553 | { | ||
554 | struct rfkill *rfkill = to_rfkill(dev); | ||
555 | unsigned long flags; | ||
556 | u32 state; | ||
557 | |||
558 | spin_lock_irqsave(&rfkill->lock, flags); | ||
559 | state = rfkill->state; | ||
560 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
561 | |||
562 | return sprintf(buf, "%d\n", user_state_from_blocked(state)); | ||
563 | } | ||
564 | |||
565 | static ssize_t rfkill_state_store(struct device *dev, | ||
566 | struct device_attribute *attr, | ||
567 | const char *buf, size_t count) | ||
568 | { | ||
569 | /* | ||
570 | * The intention was that userspace can only take control over | ||
571 | * a given device when/if rfkill-input doesn't control it due | ||
572 | * to user_claim. Since user_claim is currently unsupported, | ||
573 | * we never support changing the state from userspace -- this | ||
574 | * can be implemented again later. | ||
575 | */ | ||
576 | |||
577 | return -EPERM; | ||
578 | } | ||
579 | |||
580 | static ssize_t rfkill_claim_show(struct device *dev, | ||
581 | struct device_attribute *attr, | ||
582 | char *buf) | ||
583 | { | ||
584 | return sprintf(buf, "%d\n", 0); | ||
585 | } | ||
586 | |||
587 | static ssize_t rfkill_claim_store(struct device *dev, | ||
588 | struct device_attribute *attr, | ||
589 | const char *buf, size_t count) | ||
590 | { | ||
591 | return -EOPNOTSUPP; | ||
592 | } | ||
593 | |||
594 | static struct device_attribute rfkill_dev_attrs[] = { | ||
595 | __ATTR(name, S_IRUGO, rfkill_name_show, NULL), | ||
596 | __ATTR(type, S_IRUGO, rfkill_type_show, NULL), | ||
597 | __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store), | ||
598 | __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store), | ||
599 | __ATTR_NULL | ||
600 | }; | ||
601 | |||
602 | static void rfkill_release(struct device *dev) | ||
603 | { | ||
604 | struct rfkill *rfkill = to_rfkill(dev); | ||
605 | |||
606 | kfree(rfkill); | ||
607 | } | ||
608 | |||
609 | static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env) | ||
610 | { | ||
611 | struct rfkill *rfkill = to_rfkill(dev); | ||
612 | unsigned long flags; | ||
613 | u32 state; | ||
614 | int error; | ||
615 | |||
616 | error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name); | ||
617 | if (error) | ||
618 | return error; | ||
619 | error = add_uevent_var(env, "RFKILL_TYPE=%s", | ||
620 | rfkill_get_type_str(rfkill->type)); | ||
621 | if (error) | ||
622 | return error; | ||
623 | spin_lock_irqsave(&rfkill->lock, flags); | ||
624 | state = rfkill->state; | ||
625 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
626 | error = add_uevent_var(env, "RFKILL_STATE=%d", | ||
627 | user_state_from_blocked(state)); | ||
628 | return error; | ||
629 | } | ||
630 | |||
631 | void rfkill_pause_polling(struct rfkill *rfkill) | ||
632 | { | ||
633 | BUG_ON(!rfkill); | ||
634 | |||
635 | if (!rfkill->ops->poll) | ||
636 | return; | ||
637 | |||
638 | cancel_delayed_work_sync(&rfkill->poll_work); | ||
639 | } | ||
640 | EXPORT_SYMBOL(rfkill_pause_polling); | ||
641 | |||
642 | void rfkill_resume_polling(struct rfkill *rfkill) | ||
643 | { | ||
644 | BUG_ON(!rfkill); | ||
645 | |||
646 | if (!rfkill->ops->poll) | ||
647 | return; | ||
648 | |||
649 | schedule_work(&rfkill->poll_work.work); | ||
650 | } | ||
651 | EXPORT_SYMBOL(rfkill_resume_polling); | ||
652 | |||
653 | static int rfkill_suspend(struct device *dev, pm_message_t state) | ||
654 | { | ||
655 | struct rfkill *rfkill = to_rfkill(dev); | ||
656 | |||
657 | rfkill_pause_polling(rfkill); | ||
658 | |||
659 | rfkill->suspended = true; | ||
660 | |||
661 | return 0; | ||
662 | } | ||
663 | |||
664 | static int rfkill_resume(struct device *dev) | ||
665 | { | ||
666 | struct rfkill *rfkill = to_rfkill(dev); | ||
667 | bool cur; | ||
668 | |||
669 | mutex_lock(&rfkill_global_mutex); | ||
670 | cur = rfkill_global_states[rfkill->type].cur; | ||
671 | rfkill_set_block(rfkill, cur); | ||
672 | mutex_unlock(&rfkill_global_mutex); | ||
673 | |||
674 | rfkill->suspended = false; | ||
675 | |||
676 | schedule_work(&rfkill->uevent_work); | ||
677 | |||
678 | rfkill_resume_polling(rfkill); | ||
679 | |||
680 | return 0; | ||
681 | } | ||
682 | |||
683 | static struct class rfkill_class = { | ||
684 | .name = "rfkill", | ||
685 | .dev_release = rfkill_release, | ||
686 | .dev_attrs = rfkill_dev_attrs, | ||
687 | .dev_uevent = rfkill_dev_uevent, | ||
688 | .suspend = rfkill_suspend, | ||
689 | .resume = rfkill_resume, | ||
690 | }; | ||
691 | |||
692 | |||
693 | struct rfkill * __must_check rfkill_alloc(const char *name, | ||
694 | struct device *parent, | ||
695 | const enum rfkill_type type, | ||
696 | const struct rfkill_ops *ops, | ||
697 | void *ops_data) | ||
698 | { | ||
699 | struct rfkill *rfkill; | ||
700 | struct device *dev; | ||
701 | |||
702 | if (WARN_ON(!ops)) | ||
703 | return NULL; | ||
704 | |||
705 | if (WARN_ON(!ops->set_block)) | ||
706 | return NULL; | ||
707 | |||
708 | if (WARN_ON(!name)) | ||
709 | return NULL; | ||
710 | |||
711 | if (WARN_ON(type >= NUM_RFKILL_TYPES)) | ||
712 | return NULL; | ||
713 | |||
714 | rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL); | ||
715 | if (!rfkill) | ||
716 | return NULL; | ||
717 | |||
718 | spin_lock_init(&rfkill->lock); | ||
719 | INIT_LIST_HEAD(&rfkill->node); | ||
720 | rfkill->type = type; | ||
721 | rfkill->name = name; | ||
722 | rfkill->ops = ops; | ||
723 | rfkill->data = ops_data; | ||
724 | |||
725 | dev = &rfkill->dev; | ||
726 | dev->class = &rfkill_class; | ||
727 | dev->parent = parent; | ||
728 | device_initialize(dev); | ||
729 | |||
730 | return rfkill; | ||
731 | } | ||
732 | EXPORT_SYMBOL(rfkill_alloc); | ||
733 | |||
734 | static void rfkill_poll(struct work_struct *work) | ||
735 | { | ||
736 | struct rfkill *rfkill; | ||
737 | |||
738 | rfkill = container_of(work, struct rfkill, poll_work.work); | ||
739 | |||
740 | /* | ||
741 | * Poll hardware state -- driver will use one of the | ||
742 | * rfkill_set{,_hw,_sw}_state functions and use its | ||
743 | * return value to update the current status. | ||
744 | */ | ||
745 | rfkill->ops->poll(rfkill, rfkill->data); | ||
746 | |||
747 | schedule_delayed_work(&rfkill->poll_work, | ||
748 | round_jiffies_relative(POLL_INTERVAL)); | ||
749 | } | ||
750 | |||
751 | static void rfkill_uevent_work(struct work_struct *work) | ||
752 | { | ||
753 | struct rfkill *rfkill; | ||
754 | |||
755 | rfkill = container_of(work, struct rfkill, uevent_work); | ||
756 | |||
757 | rfkill_uevent(rfkill); | ||
758 | } | ||
759 | |||
760 | static void rfkill_sync_work(struct work_struct *work) | ||
761 | { | ||
762 | struct rfkill *rfkill; | ||
763 | bool cur; | ||
764 | |||
765 | rfkill = container_of(work, struct rfkill, sync_work); | ||
766 | |||
767 | mutex_lock(&rfkill_global_mutex); | ||
768 | cur = rfkill_global_states[rfkill->type].cur; | ||
769 | rfkill_set_block(rfkill, cur); | ||
770 | mutex_unlock(&rfkill_global_mutex); | ||
771 | } | ||
772 | |||
773 | int __must_check rfkill_register(struct rfkill *rfkill) | ||
774 | { | ||
775 | static unsigned long rfkill_no; | ||
776 | struct device *dev = &rfkill->dev; | ||
777 | int error; | ||
778 | |||
779 | BUG_ON(!rfkill); | ||
780 | |||
781 | mutex_lock(&rfkill_global_mutex); | ||
782 | |||
783 | if (rfkill->registered) { | ||
784 | error = -EALREADY; | ||
785 | goto unlock; | ||
786 | } | ||
787 | |||
788 | dev_set_name(dev, "rfkill%lu", rfkill_no); | ||
789 | rfkill_no++; | ||
790 | |||
791 | if (!(rfkill_states_default_locked & BIT(rfkill->type))) { | ||
792 | /* first of its kind */ | ||
793 | BUILD_BUG_ON(NUM_RFKILL_TYPES > | ||
794 | sizeof(rfkill_states_default_locked) * 8); | ||
795 | rfkill_states_default_locked |= BIT(rfkill->type); | ||
796 | rfkill_global_states[rfkill->type].cur = | ||
797 | rfkill_global_states[rfkill->type].def; | ||
798 | } | ||
799 | |||
800 | list_add_tail(&rfkill->node, &rfkill_list); | ||
801 | |||
802 | error = device_add(dev); | ||
803 | if (error) | ||
804 | goto remove; | ||
805 | |||
806 | error = rfkill_led_trigger_register(rfkill); | ||
807 | if (error) | ||
808 | goto devdel; | ||
809 | |||
810 | rfkill->registered = true; | ||
811 | |||
812 | if (rfkill->ops->poll) { | ||
813 | INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll); | ||
814 | schedule_delayed_work(&rfkill->poll_work, | ||
815 | round_jiffies_relative(POLL_INTERVAL)); | ||
816 | } | ||
817 | |||
818 | INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work); | ||
819 | |||
820 | INIT_WORK(&rfkill->sync_work, rfkill_sync_work); | ||
821 | schedule_work(&rfkill->sync_work); | ||
822 | |||
823 | mutex_unlock(&rfkill_global_mutex); | ||
824 | return 0; | ||
825 | |||
826 | devdel: | ||
827 | device_del(&rfkill->dev); | ||
828 | remove: | ||
829 | list_del_init(&rfkill->node); | ||
830 | unlock: | ||
831 | mutex_unlock(&rfkill_global_mutex); | ||
832 | return error; | ||
833 | } | ||
834 | EXPORT_SYMBOL(rfkill_register); | ||
835 | |||
836 | void rfkill_unregister(struct rfkill *rfkill) | ||
837 | { | ||
838 | BUG_ON(!rfkill); | ||
839 | |||
840 | if (rfkill->ops->poll) | ||
841 | cancel_delayed_work_sync(&rfkill->poll_work); | ||
842 | |||
843 | cancel_work_sync(&rfkill->uevent_work); | ||
844 | cancel_work_sync(&rfkill->sync_work); | ||
845 | |||
846 | rfkill->registered = false; | ||
847 | |||
848 | device_del(&rfkill->dev); | ||
849 | |||
850 | mutex_lock(&rfkill_global_mutex); | ||
851 | list_del_init(&rfkill->node); | ||
852 | mutex_unlock(&rfkill_global_mutex); | ||
853 | |||
854 | rfkill_led_trigger_unregister(rfkill); | ||
855 | } | ||
856 | EXPORT_SYMBOL(rfkill_unregister); | ||
857 | |||
858 | void rfkill_destroy(struct rfkill *rfkill) | ||
859 | { | ||
860 | if (rfkill) | ||
861 | put_device(&rfkill->dev); | ||
862 | } | ||
863 | EXPORT_SYMBOL(rfkill_destroy); | ||
864 | |||
865 | |||
866 | static int __init rfkill_init(void) | ||
867 | { | ||
868 | int error; | ||
869 | int i; | ||
870 | |||
871 | for (i = 0; i < NUM_RFKILL_TYPES; i++) | ||
872 | rfkill_global_states[i].def = !rfkill_default_state; | ||
873 | |||
874 | error = class_register(&rfkill_class); | ||
875 | if (error) | ||
876 | goto out; | ||
877 | |||
878 | #ifdef CONFIG_RFKILL_INPUT | ||
879 | error = rfkill_handler_init(); | ||
880 | if (error) | ||
881 | class_unregister(&rfkill_class); | ||
882 | #endif | ||
883 | |||
884 | out: | ||
885 | return error; | ||
886 | } | ||
887 | subsys_initcall(rfkill_init); | ||
888 | |||
889 | static void __exit rfkill_exit(void) | ||
890 | { | ||
891 | #ifdef CONFIG_RFKILL_INPUT | ||
892 | rfkill_handler_exit(); | ||
893 | #endif | ||
894 | class_unregister(&rfkill_class); | ||
895 | } | ||
896 | module_exit(rfkill_exit); | ||
diff --git a/net/rfkill/input.c b/net/rfkill/input.c new file mode 100644 index 000000000000..a7295ad5f9cb --- /dev/null +++ b/net/rfkill/input.c | |||
@@ -0,0 +1,342 @@ | |||
1 | /* | ||
2 | * Input layer to RF Kill interface connector | ||
3 | * | ||
4 | * Copyright (c) 2007 Dmitry Torokhov | ||
5 | * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License version 2 as published | ||
9 | * by the Free Software Foundation. | ||
10 | * | ||
11 | * If you ever run into a situation in which you have a SW_ type rfkill | ||
12 | * input device, then you can revive code that was removed in the patch | ||
13 | * "rfkill-input: remove unused code". | ||
14 | */ | ||
15 | |||
16 | #include <linux/input.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/workqueue.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/rfkill.h> | ||
21 | #include <linux/sched.h> | ||
22 | |||
23 | #include "rfkill.h" | ||
24 | |||
25 | enum rfkill_input_master_mode { | ||
26 | RFKILL_INPUT_MASTER_UNLOCK = 0, | ||
27 | RFKILL_INPUT_MASTER_RESTORE = 1, | ||
28 | RFKILL_INPUT_MASTER_UNBLOCKALL = 2, | ||
29 | NUM_RFKILL_INPUT_MASTER_MODES | ||
30 | }; | ||
31 | |||
32 | /* Delay (in ms) between consecutive switch ops */ | ||
33 | #define RFKILL_OPS_DELAY 200 | ||
34 | |||
35 | static enum rfkill_input_master_mode rfkill_master_switch_mode = | ||
36 | RFKILL_INPUT_MASTER_UNBLOCKALL; | ||
37 | module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0); | ||
38 | MODULE_PARM_DESC(master_switch_mode, | ||
39 | "SW_RFKILL_ALL ON should: 0=do nothing (only unlock); 1=restore; 2=unblock all"); | ||
40 | |||
41 | static spinlock_t rfkill_op_lock; | ||
42 | static bool rfkill_op_pending; | ||
43 | static unsigned long rfkill_sw_pending[BITS_TO_LONGS(NUM_RFKILL_TYPES)]; | ||
44 | static unsigned long rfkill_sw_state[BITS_TO_LONGS(NUM_RFKILL_TYPES)]; | ||
45 | |||
46 | enum rfkill_sched_op { | ||
47 | RFKILL_GLOBAL_OP_EPO = 0, | ||
48 | RFKILL_GLOBAL_OP_RESTORE, | ||
49 | RFKILL_GLOBAL_OP_UNLOCK, | ||
50 | RFKILL_GLOBAL_OP_UNBLOCK, | ||
51 | }; | ||
52 | |||
53 | static enum rfkill_sched_op rfkill_master_switch_op; | ||
54 | static enum rfkill_sched_op rfkill_op; | ||
55 | |||
56 | static void __rfkill_handle_global_op(enum rfkill_sched_op op) | ||
57 | { | ||
58 | unsigned int i; | ||
59 | |||
60 | switch (op) { | ||
61 | case RFKILL_GLOBAL_OP_EPO: | ||
62 | rfkill_epo(); | ||
63 | break; | ||
64 | case RFKILL_GLOBAL_OP_RESTORE: | ||
65 | rfkill_restore_states(); | ||
66 | break; | ||
67 | case RFKILL_GLOBAL_OP_UNLOCK: | ||
68 | rfkill_remove_epo_lock(); | ||
69 | break; | ||
70 | case RFKILL_GLOBAL_OP_UNBLOCK: | ||
71 | rfkill_remove_epo_lock(); | ||
72 | for (i = 0; i < NUM_RFKILL_TYPES; i++) | ||
73 | rfkill_switch_all(i, false); | ||
74 | break; | ||
75 | default: | ||
76 | /* memory corruption or bug, fail safely */ | ||
77 | rfkill_epo(); | ||
78 | WARN(1, "Unknown requested operation %d! " | ||
79 | "rfkill Emergency Power Off activated\n", | ||
80 | op); | ||
81 | } | ||
82 | } | ||
83 | |||
84 | static void __rfkill_handle_normal_op(const enum rfkill_type type, | ||
85 | const bool complement) | ||
86 | { | ||
87 | bool blocked; | ||
88 | |||
89 | blocked = rfkill_get_global_sw_state(type); | ||
90 | if (complement) | ||
91 | blocked = !blocked; | ||
92 | |||
93 | rfkill_switch_all(type, blocked); | ||
94 | } | ||
95 | |||
96 | static void rfkill_op_handler(struct work_struct *work) | ||
97 | { | ||
98 | unsigned int i; | ||
99 | bool c; | ||
100 | |||
101 | spin_lock_irq(&rfkill_op_lock); | ||
102 | do { | ||
103 | if (rfkill_op_pending) { | ||
104 | enum rfkill_sched_op op = rfkill_op; | ||
105 | rfkill_op_pending = false; | ||
106 | memset(rfkill_sw_pending, 0, | ||
107 | sizeof(rfkill_sw_pending)); | ||
108 | spin_unlock_irq(&rfkill_op_lock); | ||
109 | |||
110 | __rfkill_handle_global_op(op); | ||
111 | |||
112 | spin_lock_irq(&rfkill_op_lock); | ||
113 | |||
114 | /* | ||
115 | * handle global ops first -- during unlocked period | ||
116 | * we might have gotten a new global op. | ||
117 | */ | ||
118 | if (rfkill_op_pending) | ||
119 | continue; | ||
120 | } | ||
121 | |||
122 | if (rfkill_is_epo_lock_active()) | ||
123 | continue; | ||
124 | |||
125 | for (i = 0; i < NUM_RFKILL_TYPES; i++) { | ||
126 | if (__test_and_clear_bit(i, rfkill_sw_pending)) { | ||
127 | c = __test_and_clear_bit(i, rfkill_sw_state); | ||
128 | spin_unlock_irq(&rfkill_op_lock); | ||
129 | |||
130 | __rfkill_handle_normal_op(i, c); | ||
131 | |||
132 | spin_lock_irq(&rfkill_op_lock); | ||
133 | } | ||
134 | } | ||
135 | } while (rfkill_op_pending); | ||
136 | spin_unlock_irq(&rfkill_op_lock); | ||
137 | } | ||
138 | |||
139 | static DECLARE_DELAYED_WORK(rfkill_op_work, rfkill_op_handler); | ||
140 | static unsigned long rfkill_last_scheduled; | ||
141 | |||
142 | static unsigned long rfkill_ratelimit(const unsigned long last) | ||
143 | { | ||
144 | const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY); | ||
145 | return (time_after(jiffies, last + delay)) ? 0 : delay; | ||
146 | } | ||
147 | |||
148 | static void rfkill_schedule_ratelimited(void) | ||
149 | { | ||
150 | if (delayed_work_pending(&rfkill_op_work)) | ||
151 | return; | ||
152 | schedule_delayed_work(&rfkill_op_work, | ||
153 | rfkill_ratelimit(rfkill_last_scheduled)); | ||
154 | rfkill_last_scheduled = jiffies; | ||
155 | } | ||
156 | |||
157 | static void rfkill_schedule_global_op(enum rfkill_sched_op op) | ||
158 | { | ||
159 | unsigned long flags; | ||
160 | |||
161 | spin_lock_irqsave(&rfkill_op_lock, flags); | ||
162 | rfkill_op = op; | ||
163 | rfkill_op_pending = true; | ||
164 | if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) { | ||
165 | /* bypass the limiter for EPO */ | ||
166 | cancel_delayed_work(&rfkill_op_work); | ||
167 | schedule_delayed_work(&rfkill_op_work, 0); | ||
168 | rfkill_last_scheduled = jiffies; | ||
169 | } else | ||
170 | rfkill_schedule_ratelimited(); | ||
171 | spin_unlock_irqrestore(&rfkill_op_lock, flags); | ||
172 | } | ||
173 | |||
174 | static void rfkill_schedule_toggle(enum rfkill_type type) | ||
175 | { | ||
176 | unsigned long flags; | ||
177 | |||
178 | if (rfkill_is_epo_lock_active()) | ||
179 | return; | ||
180 | |||
181 | spin_lock_irqsave(&rfkill_op_lock, flags); | ||
182 | if (!rfkill_op_pending) { | ||
183 | __set_bit(type, rfkill_sw_pending); | ||
184 | __change_bit(type, rfkill_sw_state); | ||
185 | rfkill_schedule_ratelimited(); | ||
186 | } | ||
187 | spin_unlock_irqrestore(&rfkill_op_lock, flags); | ||
188 | } | ||
189 | |||
190 | static void rfkill_schedule_evsw_rfkillall(int state) | ||
191 | { | ||
192 | if (state) | ||
193 | rfkill_schedule_global_op(rfkill_master_switch_op); | ||
194 | else | ||
195 | rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO); | ||
196 | } | ||
197 | |||
198 | static void rfkill_event(struct input_handle *handle, unsigned int type, | ||
199 | unsigned int code, int data) | ||
200 | { | ||
201 | if (type == EV_KEY && data == 1) { | ||
202 | switch (code) { | ||
203 | case KEY_WLAN: | ||
204 | rfkill_schedule_toggle(RFKILL_TYPE_WLAN); | ||
205 | break; | ||
206 | case KEY_BLUETOOTH: | ||
207 | rfkill_schedule_toggle(RFKILL_TYPE_BLUETOOTH); | ||
208 | break; | ||
209 | case KEY_UWB: | ||
210 | rfkill_schedule_toggle(RFKILL_TYPE_UWB); | ||
211 | break; | ||
212 | case KEY_WIMAX: | ||
213 | rfkill_schedule_toggle(RFKILL_TYPE_WIMAX); | ||
214 | break; | ||
215 | } | ||
216 | } else if (type == EV_SW && code == SW_RFKILL_ALL) | ||
217 | rfkill_schedule_evsw_rfkillall(data); | ||
218 | } | ||
219 | |||
220 | static int rfkill_connect(struct input_handler *handler, struct input_dev *dev, | ||
221 | const struct input_device_id *id) | ||
222 | { | ||
223 | struct input_handle *handle; | ||
224 | int error; | ||
225 | |||
226 | handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); | ||
227 | if (!handle) | ||
228 | return -ENOMEM; | ||
229 | |||
230 | handle->dev = dev; | ||
231 | handle->handler = handler; | ||
232 | handle->name = "rfkill"; | ||
233 | |||
234 | /* causes rfkill_start() to be called */ | ||
235 | error = input_register_handle(handle); | ||
236 | if (error) | ||
237 | goto err_free_handle; | ||
238 | |||
239 | error = input_open_device(handle); | ||
240 | if (error) | ||
241 | goto err_unregister_handle; | ||
242 | |||
243 | return 0; | ||
244 | |||
245 | err_unregister_handle: | ||
246 | input_unregister_handle(handle); | ||
247 | err_free_handle: | ||
248 | kfree(handle); | ||
249 | return error; | ||
250 | } | ||
251 | |||
252 | static void rfkill_start(struct input_handle *handle) | ||
253 | { | ||
254 | /* | ||
255 | * Take event_lock to guard against configuration changes, we | ||
256 | * should be able to deal with concurrency with rfkill_event() | ||
257 | * just fine (which event_lock will also avoid). | ||
258 | */ | ||
259 | spin_lock_irq(&handle->dev->event_lock); | ||
260 | |||
261 | if (test_bit(EV_SW, handle->dev->evbit) && | ||
262 | test_bit(SW_RFKILL_ALL, handle->dev->swbit)) | ||
263 | rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL, | ||
264 | handle->dev->sw)); | ||
265 | |||
266 | spin_unlock_irq(&handle->dev->event_lock); | ||
267 | } | ||
268 | |||
269 | static void rfkill_disconnect(struct input_handle *handle) | ||
270 | { | ||
271 | input_close_device(handle); | ||
272 | input_unregister_handle(handle); | ||
273 | kfree(handle); | ||
274 | } | ||
275 | |||
276 | static const struct input_device_id rfkill_ids[] = { | ||
277 | { | ||
278 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
279 | .evbit = { BIT_MASK(EV_KEY) }, | ||
280 | .keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) }, | ||
281 | }, | ||
282 | { | ||
283 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
284 | .evbit = { BIT_MASK(EV_KEY) }, | ||
285 | .keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) }, | ||
286 | }, | ||
287 | { | ||
288 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
289 | .evbit = { BIT_MASK(EV_KEY) }, | ||
290 | .keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) }, | ||
291 | }, | ||
292 | { | ||
293 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
294 | .evbit = { BIT_MASK(EV_KEY) }, | ||
295 | .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) }, | ||
296 | }, | ||
297 | { | ||
298 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT, | ||
299 | .evbit = { BIT(EV_SW) }, | ||
300 | .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) }, | ||
301 | }, | ||
302 | { } | ||
303 | }; | ||
304 | |||
305 | static struct input_handler rfkill_handler = { | ||
306 | .name = "rfkill", | ||
307 | .event = rfkill_event, | ||
308 | .connect = rfkill_connect, | ||
309 | .start = rfkill_start, | ||
310 | .disconnect = rfkill_disconnect, | ||
311 | .id_table = rfkill_ids, | ||
312 | }; | ||
313 | |||
314 | int __init rfkill_handler_init(void) | ||
315 | { | ||
316 | switch (rfkill_master_switch_mode) { | ||
317 | case RFKILL_INPUT_MASTER_UNBLOCKALL: | ||
318 | rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNBLOCK; | ||
319 | break; | ||
320 | case RFKILL_INPUT_MASTER_RESTORE: | ||
321 | rfkill_master_switch_op = RFKILL_GLOBAL_OP_RESTORE; | ||
322 | break; | ||
323 | case RFKILL_INPUT_MASTER_UNLOCK: | ||
324 | rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNLOCK; | ||
325 | break; | ||
326 | default: | ||
327 | return -EINVAL; | ||
328 | } | ||
329 | |||
330 | spin_lock_init(&rfkill_op_lock); | ||
331 | |||
332 | /* Avoid delay at first schedule */ | ||
333 | rfkill_last_scheduled = | ||
334 | jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1; | ||
335 | return input_register_handler(&rfkill_handler); | ||
336 | } | ||
337 | |||
338 | void __exit rfkill_handler_exit(void) | ||
339 | { | ||
340 | input_unregister_handler(&rfkill_handler); | ||
341 | cancel_delayed_work_sync(&rfkill_op_work); | ||
342 | } | ||
diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c deleted file mode 100644 index 60a34f3b5f65..000000000000 --- a/net/rfkill/rfkill-input.c +++ /dev/null | |||
@@ -1,390 +0,0 @@ | |||
1 | /* | ||
2 | * Input layer to RF Kill interface connector | ||
3 | * | ||
4 | * Copyright (c) 2007 Dmitry Torokhov | ||
5 | */ | ||
6 | |||
7 | /* | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License version 2 as published | ||
10 | * by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/input.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/workqueue.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/rfkill.h> | ||
19 | #include <linux/sched.h> | ||
20 | |||
21 | #include "rfkill-input.h" | ||
22 | |||
23 | MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>"); | ||
24 | MODULE_DESCRIPTION("Input layer to RF switch connector"); | ||
25 | MODULE_LICENSE("GPL"); | ||
26 | |||
27 | enum rfkill_input_master_mode { | ||
28 | RFKILL_INPUT_MASTER_DONOTHING = 0, | ||
29 | RFKILL_INPUT_MASTER_RESTORE = 1, | ||
30 | RFKILL_INPUT_MASTER_UNBLOCKALL = 2, | ||
31 | RFKILL_INPUT_MASTER_MAX, /* marker */ | ||
32 | }; | ||
33 | |||
34 | /* Delay (in ms) between consecutive switch ops */ | ||
35 | #define RFKILL_OPS_DELAY 200 | ||
36 | |||
37 | static enum rfkill_input_master_mode rfkill_master_switch_mode = | ||
38 | RFKILL_INPUT_MASTER_UNBLOCKALL; | ||
39 | module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0); | ||
40 | MODULE_PARM_DESC(master_switch_mode, | ||
41 | "SW_RFKILL_ALL ON should: 0=do nothing; 1=restore; 2=unblock all"); | ||
42 | |||
43 | enum rfkill_global_sched_op { | ||
44 | RFKILL_GLOBAL_OP_EPO = 0, | ||
45 | RFKILL_GLOBAL_OP_RESTORE, | ||
46 | RFKILL_GLOBAL_OP_UNLOCK, | ||
47 | RFKILL_GLOBAL_OP_UNBLOCK, | ||
48 | }; | ||
49 | |||
50 | struct rfkill_task { | ||
51 | struct delayed_work dwork; | ||
52 | |||
53 | /* ensures that task is serialized */ | ||
54 | struct mutex mutex; | ||
55 | |||
56 | /* protects everything below */ | ||
57 | spinlock_t lock; | ||
58 | |||
59 | /* pending regular switch operations (1=pending) */ | ||
60 | unsigned long sw_pending[BITS_TO_LONGS(RFKILL_TYPE_MAX)]; | ||
61 | |||
62 | /* should the state be complemented (1=yes) */ | ||
63 | unsigned long sw_togglestate[BITS_TO_LONGS(RFKILL_TYPE_MAX)]; | ||
64 | |||
65 | bool global_op_pending; | ||
66 | enum rfkill_global_sched_op op; | ||
67 | |||
68 | /* last time it was scheduled */ | ||
69 | unsigned long last_scheduled; | ||
70 | }; | ||
71 | |||
72 | static void __rfkill_handle_global_op(enum rfkill_global_sched_op op) | ||
73 | { | ||
74 | unsigned int i; | ||
75 | |||
76 | switch (op) { | ||
77 | case RFKILL_GLOBAL_OP_EPO: | ||
78 | rfkill_epo(); | ||
79 | break; | ||
80 | case RFKILL_GLOBAL_OP_RESTORE: | ||
81 | rfkill_restore_states(); | ||
82 | break; | ||
83 | case RFKILL_GLOBAL_OP_UNLOCK: | ||
84 | rfkill_remove_epo_lock(); | ||
85 | break; | ||
86 | case RFKILL_GLOBAL_OP_UNBLOCK: | ||
87 | rfkill_remove_epo_lock(); | ||
88 | for (i = 0; i < RFKILL_TYPE_MAX; i++) | ||
89 | rfkill_switch_all(i, RFKILL_STATE_UNBLOCKED); | ||
90 | break; | ||
91 | default: | ||
92 | /* memory corruption or bug, fail safely */ | ||
93 | rfkill_epo(); | ||
94 | WARN(1, "Unknown requested operation %d! " | ||
95 | "rfkill Emergency Power Off activated\n", | ||
96 | op); | ||
97 | } | ||
98 | } | ||
99 | |||
100 | static void __rfkill_handle_normal_op(const enum rfkill_type type, | ||
101 | const bool c) | ||
102 | { | ||
103 | enum rfkill_state state; | ||
104 | |||
105 | state = rfkill_get_global_state(type); | ||
106 | if (c) | ||
107 | state = rfkill_state_complement(state); | ||
108 | |||
109 | rfkill_switch_all(type, state); | ||
110 | } | ||
111 | |||
112 | static void rfkill_task_handler(struct work_struct *work) | ||
113 | { | ||
114 | struct rfkill_task *task = container_of(work, | ||
115 | struct rfkill_task, dwork.work); | ||
116 | bool doit = true; | ||
117 | |||
118 | mutex_lock(&task->mutex); | ||
119 | |||
120 | spin_lock_irq(&task->lock); | ||
121 | while (doit) { | ||
122 | if (task->global_op_pending) { | ||
123 | enum rfkill_global_sched_op op = task->op; | ||
124 | task->global_op_pending = false; | ||
125 | memset(task->sw_pending, 0, sizeof(task->sw_pending)); | ||
126 | spin_unlock_irq(&task->lock); | ||
127 | |||
128 | __rfkill_handle_global_op(op); | ||
129 | |||
130 | /* make sure we do at least one pass with | ||
131 | * !task->global_op_pending */ | ||
132 | spin_lock_irq(&task->lock); | ||
133 | continue; | ||
134 | } else if (!rfkill_is_epo_lock_active()) { | ||
135 | unsigned int i = 0; | ||
136 | |||
137 | while (!task->global_op_pending && | ||
138 | i < RFKILL_TYPE_MAX) { | ||
139 | if (test_and_clear_bit(i, task->sw_pending)) { | ||
140 | bool c; | ||
141 | c = test_and_clear_bit(i, | ||
142 | task->sw_togglestate); | ||
143 | spin_unlock_irq(&task->lock); | ||
144 | |||
145 | __rfkill_handle_normal_op(i, c); | ||
146 | |||
147 | spin_lock_irq(&task->lock); | ||
148 | } | ||
149 | i++; | ||
150 | } | ||
151 | } | ||
152 | doit = task->global_op_pending; | ||
153 | } | ||
154 | spin_unlock_irq(&task->lock); | ||
155 | |||
156 | mutex_unlock(&task->mutex); | ||
157 | } | ||
158 | |||
159 | static struct rfkill_task rfkill_task = { | ||
160 | .dwork = __DELAYED_WORK_INITIALIZER(rfkill_task.dwork, | ||
161 | rfkill_task_handler), | ||
162 | .mutex = __MUTEX_INITIALIZER(rfkill_task.mutex), | ||
163 | .lock = __SPIN_LOCK_UNLOCKED(rfkill_task.lock), | ||
164 | }; | ||
165 | |||
166 | static unsigned long rfkill_ratelimit(const unsigned long last) | ||
167 | { | ||
168 | const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY); | ||
169 | return (time_after(jiffies, last + delay)) ? 0 : delay; | ||
170 | } | ||
171 | |||
172 | static void rfkill_schedule_ratelimited(void) | ||
173 | { | ||
174 | if (!delayed_work_pending(&rfkill_task.dwork)) { | ||
175 | schedule_delayed_work(&rfkill_task.dwork, | ||
176 | rfkill_ratelimit(rfkill_task.last_scheduled)); | ||
177 | rfkill_task.last_scheduled = jiffies; | ||
178 | } | ||
179 | } | ||
180 | |||
181 | static void rfkill_schedule_global_op(enum rfkill_global_sched_op op) | ||
182 | { | ||
183 | unsigned long flags; | ||
184 | |||
185 | spin_lock_irqsave(&rfkill_task.lock, flags); | ||
186 | rfkill_task.op = op; | ||
187 | rfkill_task.global_op_pending = true; | ||
188 | if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) { | ||
189 | /* bypass the limiter for EPO */ | ||
190 | cancel_delayed_work(&rfkill_task.dwork); | ||
191 | schedule_delayed_work(&rfkill_task.dwork, 0); | ||
192 | rfkill_task.last_scheduled = jiffies; | ||
193 | } else | ||
194 | rfkill_schedule_ratelimited(); | ||
195 | spin_unlock_irqrestore(&rfkill_task.lock, flags); | ||
196 | } | ||
197 | |||
198 | static void rfkill_schedule_toggle(enum rfkill_type type) | ||
199 | { | ||
200 | unsigned long flags; | ||
201 | |||
202 | if (rfkill_is_epo_lock_active()) | ||
203 | return; | ||
204 | |||
205 | spin_lock_irqsave(&rfkill_task.lock, flags); | ||
206 | if (!rfkill_task.global_op_pending) { | ||
207 | set_bit(type, rfkill_task.sw_pending); | ||
208 | change_bit(type, rfkill_task.sw_togglestate); | ||
209 | rfkill_schedule_ratelimited(); | ||
210 | } | ||
211 | spin_unlock_irqrestore(&rfkill_task.lock, flags); | ||
212 | } | ||
213 | |||
214 | static void rfkill_schedule_evsw_rfkillall(int state) | ||
215 | { | ||
216 | if (state) { | ||
217 | switch (rfkill_master_switch_mode) { | ||
218 | case RFKILL_INPUT_MASTER_UNBLOCKALL: | ||
219 | rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNBLOCK); | ||
220 | break; | ||
221 | case RFKILL_INPUT_MASTER_RESTORE: | ||
222 | rfkill_schedule_global_op(RFKILL_GLOBAL_OP_RESTORE); | ||
223 | break; | ||
224 | case RFKILL_INPUT_MASTER_DONOTHING: | ||
225 | rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNLOCK); | ||
226 | break; | ||
227 | default: | ||
228 | /* memory corruption or driver bug! fail safely */ | ||
229 | rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO); | ||
230 | WARN(1, "Unknown rfkill_master_switch_mode (%d), " | ||
231 | "driver bug or memory corruption detected!\n", | ||
232 | rfkill_master_switch_mode); | ||
233 | break; | ||
234 | } | ||
235 | } else | ||
236 | rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO); | ||
237 | } | ||
238 | |||
239 | static void rfkill_event(struct input_handle *handle, unsigned int type, | ||
240 | unsigned int code, int data) | ||
241 | { | ||
242 | if (type == EV_KEY && data == 1) { | ||
243 | enum rfkill_type t; | ||
244 | |||
245 | switch (code) { | ||
246 | case KEY_WLAN: | ||
247 | t = RFKILL_TYPE_WLAN; | ||
248 | break; | ||
249 | case KEY_BLUETOOTH: | ||
250 | t = RFKILL_TYPE_BLUETOOTH; | ||
251 | break; | ||
252 | case KEY_UWB: | ||
253 | t = RFKILL_TYPE_UWB; | ||
254 | break; | ||
255 | case KEY_WIMAX: | ||
256 | t = RFKILL_TYPE_WIMAX; | ||
257 | break; | ||
258 | default: | ||
259 | return; | ||
260 | } | ||
261 | rfkill_schedule_toggle(t); | ||
262 | return; | ||
263 | } else if (type == EV_SW) { | ||
264 | switch (code) { | ||
265 | case SW_RFKILL_ALL: | ||
266 | rfkill_schedule_evsw_rfkillall(data); | ||
267 | return; | ||
268 | default: | ||
269 | return; | ||
270 | } | ||
271 | } | ||
272 | } | ||
273 | |||
274 | static int rfkill_connect(struct input_handler *handler, struct input_dev *dev, | ||
275 | const struct input_device_id *id) | ||
276 | { | ||
277 | struct input_handle *handle; | ||
278 | int error; | ||
279 | |||
280 | handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); | ||
281 | if (!handle) | ||
282 | return -ENOMEM; | ||
283 | |||
284 | handle->dev = dev; | ||
285 | handle->handler = handler; | ||
286 | handle->name = "rfkill"; | ||
287 | |||
288 | /* causes rfkill_start() to be called */ | ||
289 | error = input_register_handle(handle); | ||
290 | if (error) | ||
291 | goto err_free_handle; | ||
292 | |||
293 | error = input_open_device(handle); | ||
294 | if (error) | ||
295 | goto err_unregister_handle; | ||
296 | |||
297 | return 0; | ||
298 | |||
299 | err_unregister_handle: | ||
300 | input_unregister_handle(handle); | ||
301 | err_free_handle: | ||
302 | kfree(handle); | ||
303 | return error; | ||
304 | } | ||
305 | |||
306 | static void rfkill_start(struct input_handle *handle) | ||
307 | { | ||
308 | /* Take event_lock to guard against configuration changes, we | ||
309 | * should be able to deal with concurrency with rfkill_event() | ||
310 | * just fine (which event_lock will also avoid). */ | ||
311 | spin_lock_irq(&handle->dev->event_lock); | ||
312 | |||
313 | if (test_bit(EV_SW, handle->dev->evbit)) { | ||
314 | if (test_bit(SW_RFKILL_ALL, handle->dev->swbit)) | ||
315 | rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL, | ||
316 | handle->dev->sw)); | ||
317 | /* add resync for further EV_SW events here */ | ||
318 | } | ||
319 | |||
320 | spin_unlock_irq(&handle->dev->event_lock); | ||
321 | } | ||
322 | |||
323 | static void rfkill_disconnect(struct input_handle *handle) | ||
324 | { | ||
325 | input_close_device(handle); | ||
326 | input_unregister_handle(handle); | ||
327 | kfree(handle); | ||
328 | } | ||
329 | |||
330 | static const struct input_device_id rfkill_ids[] = { | ||
331 | { | ||
332 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
333 | .evbit = { BIT_MASK(EV_KEY) }, | ||
334 | .keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) }, | ||
335 | }, | ||
336 | { | ||
337 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
338 | .evbit = { BIT_MASK(EV_KEY) }, | ||
339 | .keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) }, | ||
340 | }, | ||
341 | { | ||
342 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
343 | .evbit = { BIT_MASK(EV_KEY) }, | ||
344 | .keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) }, | ||
345 | }, | ||
346 | { | ||
347 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
348 | .evbit = { BIT_MASK(EV_KEY) }, | ||
349 | .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) }, | ||
350 | }, | ||
351 | { | ||
352 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT, | ||
353 | .evbit = { BIT(EV_SW) }, | ||
354 | .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) }, | ||
355 | }, | ||
356 | { } | ||
357 | }; | ||
358 | |||
359 | static struct input_handler rfkill_handler = { | ||
360 | .event = rfkill_event, | ||
361 | .connect = rfkill_connect, | ||
362 | .disconnect = rfkill_disconnect, | ||
363 | .start = rfkill_start, | ||
364 | .name = "rfkill", | ||
365 | .id_table = rfkill_ids, | ||
366 | }; | ||
367 | |||
368 | static int __init rfkill_handler_init(void) | ||
369 | { | ||
370 | if (rfkill_master_switch_mode >= RFKILL_INPUT_MASTER_MAX) | ||
371 | return -EINVAL; | ||
372 | |||
373 | /* | ||
374 | * The penalty to not doing this is a possible RFKILL_OPS_DELAY delay | ||
375 | * at the first use. Acceptable, but if we can avoid it, why not? | ||
376 | */ | ||
377 | rfkill_task.last_scheduled = | ||
378 | jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1; | ||
379 | return input_register_handler(&rfkill_handler); | ||
380 | } | ||
381 | |||
382 | static void __exit rfkill_handler_exit(void) | ||
383 | { | ||
384 | input_unregister_handler(&rfkill_handler); | ||
385 | cancel_delayed_work_sync(&rfkill_task.dwork); | ||
386 | rfkill_remove_epo_lock(); | ||
387 | } | ||
388 | |||
389 | module_init(rfkill_handler_init); | ||
390 | module_exit(rfkill_handler_exit); | ||
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c deleted file mode 100644 index 4f5a83183c95..000000000000 --- a/net/rfkill/rfkill.c +++ /dev/null | |||
@@ -1,855 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 - 2007 Ivo van Doorn | ||
3 | * Copyright (C) 2007 Dmitry Torokhov | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the | ||
17 | * Free Software Foundation, Inc., | ||
18 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | */ | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/workqueue.h> | ||
25 | #include <linux/capability.h> | ||
26 | #include <linux/list.h> | ||
27 | #include <linux/mutex.h> | ||
28 | #include <linux/rfkill.h> | ||
29 | |||
30 | /* Get declaration of rfkill_switch_all() to shut up sparse. */ | ||
31 | #include "rfkill-input.h" | ||
32 | |||
33 | |||
34 | MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>"); | ||
35 | MODULE_VERSION("1.0"); | ||
36 | MODULE_DESCRIPTION("RF switch support"); | ||
37 | MODULE_LICENSE("GPL"); | ||
38 | |||
39 | static LIST_HEAD(rfkill_list); /* list of registered rf switches */ | ||
40 | static DEFINE_MUTEX(rfkill_global_mutex); | ||
41 | |||
42 | static unsigned int rfkill_default_state = RFKILL_STATE_UNBLOCKED; | ||
43 | module_param_named(default_state, rfkill_default_state, uint, 0444); | ||
44 | MODULE_PARM_DESC(default_state, | ||
45 | "Default initial state for all radio types, 0 = radio off"); | ||
46 | |||
47 | struct rfkill_gsw_state { | ||
48 | enum rfkill_state current_state; | ||
49 | enum rfkill_state default_state; | ||
50 | }; | ||
51 | |||
52 | static struct rfkill_gsw_state rfkill_global_states[RFKILL_TYPE_MAX]; | ||
53 | static unsigned long rfkill_states_lockdflt[BITS_TO_LONGS(RFKILL_TYPE_MAX)]; | ||
54 | static bool rfkill_epo_lock_active; | ||
55 | |||
56 | |||
57 | #ifdef CONFIG_RFKILL_LEDS | ||
58 | static void rfkill_led_trigger(struct rfkill *rfkill, | ||
59 | enum rfkill_state state) | ||
60 | { | ||
61 | struct led_trigger *led = &rfkill->led_trigger; | ||
62 | |||
63 | if (!led->name) | ||
64 | return; | ||
65 | if (state != RFKILL_STATE_UNBLOCKED) | ||
66 | led_trigger_event(led, LED_OFF); | ||
67 | else | ||
68 | led_trigger_event(led, LED_FULL); | ||
69 | } | ||
70 | |||
71 | static void rfkill_led_trigger_activate(struct led_classdev *led) | ||
72 | { | ||
73 | struct rfkill *rfkill = container_of(led->trigger, | ||
74 | struct rfkill, led_trigger); | ||
75 | |||
76 | rfkill_led_trigger(rfkill, rfkill->state); | ||
77 | } | ||
78 | #else | ||
79 | static inline void rfkill_led_trigger(struct rfkill *rfkill, | ||
80 | enum rfkill_state state) | ||
81 | { | ||
82 | } | ||
83 | #endif /* CONFIG_RFKILL_LEDS */ | ||
84 | |||
85 | static void rfkill_uevent(struct rfkill *rfkill) | ||
86 | { | ||
87 | kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE); | ||
88 | } | ||
89 | |||
90 | static void update_rfkill_state(struct rfkill *rfkill) | ||
91 | { | ||
92 | enum rfkill_state newstate, oldstate; | ||
93 | |||
94 | if (rfkill->get_state) { | ||
95 | mutex_lock(&rfkill->mutex); | ||
96 | if (!rfkill->get_state(rfkill->data, &newstate)) { | ||
97 | oldstate = rfkill->state; | ||
98 | rfkill->state = newstate; | ||
99 | if (oldstate != newstate) | ||
100 | rfkill_uevent(rfkill); | ||
101 | } | ||
102 | mutex_unlock(&rfkill->mutex); | ||
103 | } | ||
104 | rfkill_led_trigger(rfkill, rfkill->state); | ||
105 | } | ||
106 | |||
107 | /** | ||
108 | * rfkill_toggle_radio - wrapper for toggle_radio hook | ||
109 | * @rfkill: the rfkill struct to use | ||
110 | * @force: calls toggle_radio even if cache says it is not needed, | ||
111 | * and also makes sure notifications of the state will be | ||
112 | * sent even if it didn't change | ||
113 | * @state: the new state to call toggle_radio() with | ||
114 | * | ||
115 | * Calls rfkill->toggle_radio, enforcing the API for toggle_radio | ||
116 | * calls and handling all the red tape such as issuing notifications | ||
117 | * if the call is successful. | ||
118 | * | ||
119 | * Suspended devices are not touched at all, and -EAGAIN is returned. | ||
120 | * | ||
121 | * Note that the @force parameter cannot override a (possibly cached) | ||
122 | * state of RFKILL_STATE_HARD_BLOCKED. Any device making use of | ||
123 | * RFKILL_STATE_HARD_BLOCKED implements either get_state() or | ||
124 | * rfkill_force_state(), so the cache either is bypassed or valid. | ||
125 | * | ||
126 | * Note that we do call toggle_radio for RFKILL_STATE_SOFT_BLOCKED | ||
127 | * even if the radio is in RFKILL_STATE_HARD_BLOCKED state, so as to | ||
128 | * give the driver a hint that it should double-BLOCK the transmitter. | ||
129 | * | ||
130 | * Caller must have acquired rfkill->mutex. | ||
131 | */ | ||
132 | static int rfkill_toggle_radio(struct rfkill *rfkill, | ||
133 | enum rfkill_state state, | ||
134 | int force) | ||
135 | { | ||
136 | int retval = 0; | ||
137 | enum rfkill_state oldstate, newstate; | ||
138 | |||
139 | if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP)) | ||
140 | return -EBUSY; | ||
141 | |||
142 | oldstate = rfkill->state; | ||
143 | |||
144 | if (rfkill->get_state && !force && | ||
145 | !rfkill->get_state(rfkill->data, &newstate)) { | ||
146 | rfkill->state = newstate; | ||
147 | } | ||
148 | |||
149 | switch (state) { | ||
150 | case RFKILL_STATE_HARD_BLOCKED: | ||
151 | /* typically happens when refreshing hardware state, | ||
152 | * such as on resume */ | ||
153 | state = RFKILL_STATE_SOFT_BLOCKED; | ||
154 | break; | ||
155 | case RFKILL_STATE_UNBLOCKED: | ||
156 | /* force can't override this, only rfkill_force_state() can */ | ||
157 | if (rfkill->state == RFKILL_STATE_HARD_BLOCKED) | ||
158 | return -EPERM; | ||
159 | break; | ||
160 | case RFKILL_STATE_SOFT_BLOCKED: | ||
161 | /* nothing to do, we want to give drivers the hint to double | ||
162 | * BLOCK even a transmitter that is already in state | ||
163 | * RFKILL_STATE_HARD_BLOCKED */ | ||
164 | break; | ||
165 | default: | ||
166 | WARN(1, KERN_WARNING | ||
167 | "rfkill: illegal state %d passed as parameter " | ||
168 | "to rfkill_toggle_radio\n", state); | ||
169 | return -EINVAL; | ||
170 | } | ||
171 | |||
172 | if (force || state != rfkill->state) { | ||
173 | retval = rfkill->toggle_radio(rfkill->data, state); | ||
174 | /* never allow a HARD->SOFT downgrade! */ | ||
175 | if (!retval && rfkill->state != RFKILL_STATE_HARD_BLOCKED) | ||
176 | rfkill->state = state; | ||
177 | } | ||
178 | |||
179 | if (force || rfkill->state != oldstate) | ||
180 | rfkill_uevent(rfkill); | ||
181 | |||
182 | rfkill_led_trigger(rfkill, rfkill->state); | ||
183 | return retval; | ||
184 | } | ||
185 | |||
186 | /** | ||
187 | * __rfkill_switch_all - Toggle state of all switches of given type | ||
188 | * @type: type of interfaces to be affected | ||
189 | * @state: the new state | ||
190 | * | ||
191 | * This function toggles the state of all switches of given type, | ||
192 | * unless a specific switch is claimed by userspace (in which case, | ||
193 | * that switch is left alone) or suspended. | ||
194 | * | ||
195 | * Caller must have acquired rfkill_global_mutex. | ||
196 | */ | ||
197 | static void __rfkill_switch_all(const enum rfkill_type type, | ||
198 | const enum rfkill_state state) | ||
199 | { | ||
200 | struct rfkill *rfkill; | ||
201 | |||
202 | if (WARN((state >= RFKILL_STATE_MAX || type >= RFKILL_TYPE_MAX), | ||
203 | KERN_WARNING | ||
204 | "rfkill: illegal state %d or type %d " | ||
205 | "passed as parameter to __rfkill_switch_all\n", | ||
206 | state, type)) | ||
207 | return; | ||
208 | |||
209 | rfkill_global_states[type].current_state = state; | ||
210 | list_for_each_entry(rfkill, &rfkill_list, node) { | ||
211 | if (rfkill->type == type) { | ||
212 | mutex_lock(&rfkill->mutex); | ||
213 | rfkill_toggle_radio(rfkill, state, 0); | ||
214 | mutex_unlock(&rfkill->mutex); | ||
215 | rfkill_led_trigger(rfkill, rfkill->state); | ||
216 | } | ||
217 | } | ||
218 | } | ||
219 | |||
220 | /** | ||
221 | * rfkill_switch_all - Toggle state of all switches of given type | ||
222 | * @type: type of interfaces to be affected | ||
223 | * @state: the new state | ||
224 | * | ||
225 | * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state). | ||
226 | * Please refer to __rfkill_switch_all() for details. | ||
227 | * | ||
228 | * Does nothing if the EPO lock is active. | ||
229 | */ | ||
230 | void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state) | ||
231 | { | ||
232 | mutex_lock(&rfkill_global_mutex); | ||
233 | if (!rfkill_epo_lock_active) | ||
234 | __rfkill_switch_all(type, state); | ||
235 | mutex_unlock(&rfkill_global_mutex); | ||
236 | } | ||
237 | EXPORT_SYMBOL(rfkill_switch_all); | ||
238 | |||
239 | /** | ||
240 | * rfkill_epo - emergency power off all transmitters | ||
241 | * | ||
242 | * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED, | ||
243 | * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex. | ||
244 | * | ||
245 | * The global state before the EPO is saved and can be restored later | ||
246 | * using rfkill_restore_states(). | ||
247 | */ | ||
248 | void rfkill_epo(void) | ||
249 | { | ||
250 | struct rfkill *rfkill; | ||
251 | int i; | ||
252 | |||
253 | mutex_lock(&rfkill_global_mutex); | ||
254 | |||
255 | rfkill_epo_lock_active = true; | ||
256 | list_for_each_entry(rfkill, &rfkill_list, node) { | ||
257 | mutex_lock(&rfkill->mutex); | ||
258 | rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1); | ||
259 | mutex_unlock(&rfkill->mutex); | ||
260 | } | ||
261 | for (i = 0; i < RFKILL_TYPE_MAX; i++) { | ||
262 | rfkill_global_states[i].default_state = | ||
263 | rfkill_global_states[i].current_state; | ||
264 | rfkill_global_states[i].current_state = | ||
265 | RFKILL_STATE_SOFT_BLOCKED; | ||
266 | } | ||
267 | mutex_unlock(&rfkill_global_mutex); | ||
268 | rfkill_led_trigger(rfkill, rfkill->state); | ||
269 | } | ||
270 | EXPORT_SYMBOL_GPL(rfkill_epo); | ||
271 | |||
272 | /** | ||
273 | * rfkill_restore_states - restore global states | ||
274 | * | ||
275 | * Restore (and sync switches to) the global state from the | ||
276 | * states in rfkill_default_states. This can undo the effects of | ||
277 | * a call to rfkill_epo(). | ||
278 | */ | ||
279 | void rfkill_restore_states(void) | ||
280 | { | ||
281 | int i; | ||
282 | |||
283 | mutex_lock(&rfkill_global_mutex); | ||
284 | |||
285 | rfkill_epo_lock_active = false; | ||
286 | for (i = 0; i < RFKILL_TYPE_MAX; i++) | ||
287 | __rfkill_switch_all(i, rfkill_global_states[i].default_state); | ||
288 | mutex_unlock(&rfkill_global_mutex); | ||
289 | } | ||
290 | EXPORT_SYMBOL_GPL(rfkill_restore_states); | ||
291 | |||
292 | /** | ||
293 | * rfkill_remove_epo_lock - unlock state changes | ||
294 | * | ||
295 | * Used by rfkill-input to manually unlock state changes, when | ||
296 | * the EPO switch is deactivated. | ||
297 | */ | ||
298 | void rfkill_remove_epo_lock(void) | ||
299 | { | ||
300 | mutex_lock(&rfkill_global_mutex); | ||
301 | rfkill_epo_lock_active = false; | ||
302 | mutex_unlock(&rfkill_global_mutex); | ||
303 | } | ||
304 | EXPORT_SYMBOL_GPL(rfkill_remove_epo_lock); | ||
305 | |||
306 | /** | ||
307 | * rfkill_is_epo_lock_active - returns true if EPO is active | ||
308 | * | ||
309 | * Returns 0 (false) if there is NOT an active EPO condition, | ||
310 | * and 1 (true) if there is an active EPO condition, which | ||
311 | * locks all radios in one of the BLOCKED states. | ||
312 | * | ||
313 | * Can be called in atomic context. | ||
314 | */ | ||
315 | bool rfkill_is_epo_lock_active(void) | ||
316 | { | ||
317 | return rfkill_epo_lock_active; | ||
318 | } | ||
319 | EXPORT_SYMBOL_GPL(rfkill_is_epo_lock_active); | ||
320 | |||
321 | /** | ||
322 | * rfkill_get_global_state - returns global state for a type | ||
323 | * @type: the type to get the global state of | ||
324 | * | ||
325 | * Returns the current global state for a given wireless | ||
326 | * device type. | ||
327 | */ | ||
328 | enum rfkill_state rfkill_get_global_state(const enum rfkill_type type) | ||
329 | { | ||
330 | return rfkill_global_states[type].current_state; | ||
331 | } | ||
332 | EXPORT_SYMBOL_GPL(rfkill_get_global_state); | ||
333 | |||
334 | /** | ||
335 | * rfkill_force_state - Force the internal rfkill radio state | ||
336 | * @rfkill: pointer to the rfkill class to modify. | ||
337 | * @state: the current radio state the class should be forced to. | ||
338 | * | ||
339 | * This function updates the internal state of the radio cached | ||
340 | * by the rfkill class. It should be used when the driver gets | ||
341 | * a notification by the firmware/hardware of the current *real* | ||
342 | * state of the radio rfkill switch. | ||
343 | * | ||
344 | * Devices which are subject to external changes on their rfkill | ||
345 | * state (such as those caused by a hardware rfkill line) MUST | ||
346 | * have their driver arrange to call rfkill_force_state() as soon | ||
347 | * as possible after such a change. | ||
348 | * | ||
349 | * This function may not be called from an atomic context. | ||
350 | */ | ||
351 | int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state) | ||
352 | { | ||
353 | enum rfkill_state oldstate; | ||
354 | |||
355 | BUG_ON(!rfkill); | ||
356 | if (WARN((state >= RFKILL_STATE_MAX), | ||
357 | KERN_WARNING | ||
358 | "rfkill: illegal state %d passed as parameter " | ||
359 | "to rfkill_force_state\n", state)) | ||
360 | return -EINVAL; | ||
361 | |||
362 | mutex_lock(&rfkill->mutex); | ||
363 | |||
364 | oldstate = rfkill->state; | ||
365 | rfkill->state = state; | ||
366 | |||
367 | if (state != oldstate) | ||
368 | rfkill_uevent(rfkill); | ||
369 | |||
370 | mutex_unlock(&rfkill->mutex); | ||
371 | rfkill_led_trigger(rfkill, rfkill->state); | ||
372 | |||
373 | return 0; | ||
374 | } | ||
375 | EXPORT_SYMBOL(rfkill_force_state); | ||
376 | |||
377 | static ssize_t rfkill_name_show(struct device *dev, | ||
378 | struct device_attribute *attr, | ||
379 | char *buf) | ||
380 | { | ||
381 | struct rfkill *rfkill = to_rfkill(dev); | ||
382 | |||
383 | return sprintf(buf, "%s\n", rfkill->name); | ||
384 | } | ||
385 | |||
386 | static const char *rfkill_get_type_str(enum rfkill_type type) | ||
387 | { | ||
388 | switch (type) { | ||
389 | case RFKILL_TYPE_WLAN: | ||
390 | return "wlan"; | ||
391 | case RFKILL_TYPE_BLUETOOTH: | ||
392 | return "bluetooth"; | ||
393 | case RFKILL_TYPE_UWB: | ||
394 | return "ultrawideband"; | ||
395 | case RFKILL_TYPE_WIMAX: | ||
396 | return "wimax"; | ||
397 | case RFKILL_TYPE_WWAN: | ||
398 | return "wwan"; | ||
399 | default: | ||
400 | BUG(); | ||
401 | } | ||
402 | } | ||
403 | |||
404 | static ssize_t rfkill_type_show(struct device *dev, | ||
405 | struct device_attribute *attr, | ||
406 | char *buf) | ||
407 | { | ||
408 | struct rfkill *rfkill = to_rfkill(dev); | ||
409 | |||
410 | return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type)); | ||
411 | } | ||
412 | |||
413 | static ssize_t rfkill_state_show(struct device *dev, | ||
414 | struct device_attribute *attr, | ||
415 | char *buf) | ||
416 | { | ||
417 | struct rfkill *rfkill = to_rfkill(dev); | ||
418 | |||
419 | update_rfkill_state(rfkill); | ||
420 | return sprintf(buf, "%d\n", rfkill->state); | ||
421 | } | ||
422 | |||
423 | static ssize_t rfkill_state_store(struct device *dev, | ||
424 | struct device_attribute *attr, | ||
425 | const char *buf, size_t count) | ||
426 | { | ||
427 | struct rfkill *rfkill = to_rfkill(dev); | ||
428 | unsigned long state; | ||
429 | int error; | ||
430 | |||
431 | if (!capable(CAP_NET_ADMIN)) | ||
432 | return -EPERM; | ||
433 | |||
434 | error = strict_strtoul(buf, 0, &state); | ||
435 | if (error) | ||
436 | return error; | ||
437 | |||
438 | /* RFKILL_STATE_HARD_BLOCKED is illegal here... */ | ||
439 | if (state != RFKILL_STATE_UNBLOCKED && | ||
440 | state != RFKILL_STATE_SOFT_BLOCKED) | ||
441 | return -EINVAL; | ||
442 | |||
443 | error = mutex_lock_killable(&rfkill->mutex); | ||
444 | if (error) | ||
445 | return error; | ||
446 | |||
447 | if (!rfkill_epo_lock_active) | ||
448 | error = rfkill_toggle_radio(rfkill, state, 0); | ||
449 | else | ||
450 | error = -EPERM; | ||
451 | |||
452 | mutex_unlock(&rfkill->mutex); | ||
453 | |||
454 | return error ? error : count; | ||
455 | } | ||
456 | |||
457 | static ssize_t rfkill_claim_show(struct device *dev, | ||
458 | struct device_attribute *attr, | ||
459 | char *buf) | ||
460 | { | ||
461 | return sprintf(buf, "%d\n", 0); | ||
462 | } | ||
463 | |||
464 | static ssize_t rfkill_claim_store(struct device *dev, | ||
465 | struct device_attribute *attr, | ||
466 | const char *buf, size_t count) | ||
467 | { | ||
468 | return -EOPNOTSUPP; | ||
469 | } | ||
470 | |||
471 | static struct device_attribute rfkill_dev_attrs[] = { | ||
472 | __ATTR(name, S_IRUGO, rfkill_name_show, NULL), | ||
473 | __ATTR(type, S_IRUGO, rfkill_type_show, NULL), | ||
474 | __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store), | ||
475 | __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store), | ||
476 | __ATTR_NULL | ||
477 | }; | ||
478 | |||
479 | static void rfkill_release(struct device *dev) | ||
480 | { | ||
481 | struct rfkill *rfkill = to_rfkill(dev); | ||
482 | |||
483 | kfree(rfkill); | ||
484 | module_put(THIS_MODULE); | ||
485 | } | ||
486 | |||
487 | #ifdef CONFIG_PM | ||
488 | static int rfkill_suspend(struct device *dev, pm_message_t state) | ||
489 | { | ||
490 | struct rfkill *rfkill = to_rfkill(dev); | ||
491 | |||
492 | /* mark class device as suspended */ | ||
493 | if (dev->power.power_state.event != state.event) | ||
494 | dev->power.power_state = state; | ||
495 | |||
496 | /* store state for the resume handler */ | ||
497 | rfkill->state_for_resume = rfkill->state; | ||
498 | |||
499 | return 0; | ||
500 | } | ||
501 | |||
502 | static int rfkill_resume(struct device *dev) | ||
503 | { | ||
504 | struct rfkill *rfkill = to_rfkill(dev); | ||
505 | enum rfkill_state newstate; | ||
506 | |||
507 | if (dev->power.power_state.event != PM_EVENT_ON) { | ||
508 | mutex_lock(&rfkill->mutex); | ||
509 | |||
510 | dev->power.power_state.event = PM_EVENT_ON; | ||
511 | |||
512 | /* | ||
513 | * rfkill->state could have been modified before we got | ||
514 | * called, and won't be updated by rfkill_toggle_radio() | ||
515 | * in force mode. Sync it FIRST. | ||
516 | */ | ||
517 | if (rfkill->get_state && | ||
518 | !rfkill->get_state(rfkill->data, &newstate)) | ||
519 | rfkill->state = newstate; | ||
520 | |||
521 | /* | ||
522 | * If we are under EPO, kick transmitter offline, | ||
523 | * otherwise restore to pre-suspend state. | ||
524 | * | ||
525 | * Issue a notification in any case | ||
526 | */ | ||
527 | rfkill_toggle_radio(rfkill, | ||
528 | rfkill_epo_lock_active ? | ||
529 | RFKILL_STATE_SOFT_BLOCKED : | ||
530 | rfkill->state_for_resume, | ||
531 | 1); | ||
532 | |||
533 | mutex_unlock(&rfkill->mutex); | ||
534 | rfkill_led_trigger(rfkill, rfkill->state); | ||
535 | } | ||
536 | |||
537 | return 0; | ||
538 | } | ||
539 | #else | ||
540 | #define rfkill_suspend NULL | ||
541 | #define rfkill_resume NULL | ||
542 | #endif | ||
543 | |||
544 | static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env) | ||
545 | { | ||
546 | struct rfkill *rfkill = to_rfkill(dev); | ||
547 | int error; | ||
548 | |||
549 | error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name); | ||
550 | if (error) | ||
551 | return error; | ||
552 | error = add_uevent_var(env, "RFKILL_TYPE=%s", | ||
553 | rfkill_get_type_str(rfkill->type)); | ||
554 | if (error) | ||
555 | return error; | ||
556 | error = add_uevent_var(env, "RFKILL_STATE=%d", rfkill->state); | ||
557 | return error; | ||
558 | } | ||
559 | |||
560 | static struct class rfkill_class = { | ||
561 | .name = "rfkill", | ||
562 | .dev_release = rfkill_release, | ||
563 | .dev_attrs = rfkill_dev_attrs, | ||
564 | .suspend = rfkill_suspend, | ||
565 | .resume = rfkill_resume, | ||
566 | .dev_uevent = rfkill_dev_uevent, | ||
567 | }; | ||
568 | |||
569 | static int rfkill_check_duplicity(const struct rfkill *rfkill) | ||
570 | { | ||
571 | struct rfkill *p; | ||
572 | unsigned long seen[BITS_TO_LONGS(RFKILL_TYPE_MAX)]; | ||
573 | |||
574 | memset(seen, 0, sizeof(seen)); | ||
575 | |||
576 | list_for_each_entry(p, &rfkill_list, node) { | ||
577 | if (WARN((p == rfkill), KERN_WARNING | ||
578 | "rfkill: illegal attempt to register " | ||
579 | "an already registered rfkill struct\n")) | ||
580 | return -EEXIST; | ||
581 | set_bit(p->type, seen); | ||
582 | } | ||
583 | |||
584 | /* 0: first switch of its kind */ | ||
585 | return (test_bit(rfkill->type, seen)) ? 1 : 0; | ||
586 | } | ||
587 | |||
588 | static int rfkill_add_switch(struct rfkill *rfkill) | ||
589 | { | ||
590 | int error; | ||
591 | |||
592 | mutex_lock(&rfkill_global_mutex); | ||
593 | |||
594 | error = rfkill_check_duplicity(rfkill); | ||
595 | if (error < 0) | ||
596 | goto unlock_out; | ||
597 | |||
598 | if (!error) { | ||
599 | /* lock default after first use */ | ||
600 | set_bit(rfkill->type, rfkill_states_lockdflt); | ||
601 | rfkill_global_states[rfkill->type].current_state = | ||
602 | rfkill_global_states[rfkill->type].default_state; | ||
603 | } | ||
604 | |||
605 | rfkill_toggle_radio(rfkill, | ||
606 | rfkill_global_states[rfkill->type].current_state, | ||
607 | 0); | ||
608 | |||
609 | list_add_tail(&rfkill->node, &rfkill_list); | ||
610 | |||
611 | error = 0; | ||
612 | unlock_out: | ||
613 | mutex_unlock(&rfkill_global_mutex); | ||
614 | |||
615 | return error; | ||
616 | } | ||
617 | |||
618 | static void rfkill_remove_switch(struct rfkill *rfkill) | ||
619 | { | ||
620 | mutex_lock(&rfkill_global_mutex); | ||
621 | list_del_init(&rfkill->node); | ||
622 | mutex_unlock(&rfkill_global_mutex); | ||
623 | |||
624 | mutex_lock(&rfkill->mutex); | ||
625 | rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1); | ||
626 | mutex_unlock(&rfkill->mutex); | ||
627 | } | ||
628 | |||
629 | /** | ||
630 | * rfkill_allocate - allocate memory for rfkill structure. | ||
631 | * @parent: device that has rf switch on it | ||
632 | * @type: type of the switch (RFKILL_TYPE_*) | ||
633 | * | ||
634 | * This function should be called by the network driver when it needs | ||
635 | * rfkill structure. Once the structure is allocated the driver should | ||
636 | * finish its initialization by setting the name, private data, enable_radio | ||
637 | * and disable_radio methods and then register it with rfkill_register(). | ||
638 | * | ||
639 | * NOTE: If registration fails the structure should be freed by calling | ||
640 | * rfkill_free() otherwise rfkill_unregister() should be used. | ||
641 | */ | ||
642 | struct rfkill * __must_check rfkill_allocate(struct device *parent, | ||
643 | enum rfkill_type type) | ||
644 | { | ||
645 | struct rfkill *rfkill; | ||
646 | struct device *dev; | ||
647 | |||
648 | if (WARN((type >= RFKILL_TYPE_MAX), | ||
649 | KERN_WARNING | ||
650 | "rfkill: illegal type %d passed as parameter " | ||
651 | "to rfkill_allocate\n", type)) | ||
652 | return NULL; | ||
653 | |||
654 | rfkill = kzalloc(sizeof(struct rfkill), GFP_KERNEL); | ||
655 | if (!rfkill) | ||
656 | return NULL; | ||
657 | |||
658 | mutex_init(&rfkill->mutex); | ||
659 | INIT_LIST_HEAD(&rfkill->node); | ||
660 | rfkill->type = type; | ||
661 | |||
662 | dev = &rfkill->dev; | ||
663 | dev->class = &rfkill_class; | ||
664 | dev->parent = parent; | ||
665 | device_initialize(dev); | ||
666 | |||
667 | __module_get(THIS_MODULE); | ||
668 | |||
669 | return rfkill; | ||
670 | } | ||
671 | EXPORT_SYMBOL(rfkill_allocate); | ||
672 | |||
673 | /** | ||
674 | * rfkill_free - Mark rfkill structure for deletion | ||
675 | * @rfkill: rfkill structure to be destroyed | ||
676 | * | ||
677 | * Decrements reference count of the rfkill structure so it is destroyed. | ||
678 | * Note that rfkill_free() should _not_ be called after rfkill_unregister(). | ||
679 | */ | ||
680 | void rfkill_free(struct rfkill *rfkill) | ||
681 | { | ||
682 | if (rfkill) | ||
683 | put_device(&rfkill->dev); | ||
684 | } | ||
685 | EXPORT_SYMBOL(rfkill_free); | ||
686 | |||
687 | static void rfkill_led_trigger_register(struct rfkill *rfkill) | ||
688 | { | ||
689 | #ifdef CONFIG_RFKILL_LEDS | ||
690 | int error; | ||
691 | |||
692 | if (!rfkill->led_trigger.name) | ||
693 | rfkill->led_trigger.name = dev_name(&rfkill->dev); | ||
694 | if (!rfkill->led_trigger.activate) | ||
695 | rfkill->led_trigger.activate = rfkill_led_trigger_activate; | ||
696 | error = led_trigger_register(&rfkill->led_trigger); | ||
697 | if (error) | ||
698 | rfkill->led_trigger.name = NULL; | ||
699 | #endif /* CONFIG_RFKILL_LEDS */ | ||
700 | } | ||
701 | |||
702 | static void rfkill_led_trigger_unregister(struct rfkill *rfkill) | ||
703 | { | ||
704 | #ifdef CONFIG_RFKILL_LEDS | ||
705 | if (rfkill->led_trigger.name) { | ||
706 | led_trigger_unregister(&rfkill->led_trigger); | ||
707 | rfkill->led_trigger.name = NULL; | ||
708 | } | ||
709 | #endif | ||
710 | } | ||
711 | |||
712 | /** | ||
713 | * rfkill_register - Register a rfkill structure. | ||
714 | * @rfkill: rfkill structure to be registered | ||
715 | * | ||
716 | * This function should be called by the network driver when the rfkill | ||
717 | * structure needs to be registered. Immediately from registration the | ||
718 | * switch driver should be able to service calls to toggle_radio. | ||
719 | */ | ||
720 | int __must_check rfkill_register(struct rfkill *rfkill) | ||
721 | { | ||
722 | static atomic_t rfkill_no = ATOMIC_INIT(0); | ||
723 | struct device *dev = &rfkill->dev; | ||
724 | int error; | ||
725 | |||
726 | if (WARN((!rfkill || !rfkill->toggle_radio || | ||
727 | rfkill->type >= RFKILL_TYPE_MAX || | ||
728 | rfkill->state >= RFKILL_STATE_MAX), | ||
729 | KERN_WARNING | ||
730 | "rfkill: attempt to register a " | ||
731 | "badly initialized rfkill struct\n")) | ||
732 | return -EINVAL; | ||
733 | |||
734 | dev_set_name(dev, "rfkill%ld", (long)atomic_inc_return(&rfkill_no) - 1); | ||
735 | |||
736 | rfkill_led_trigger_register(rfkill); | ||
737 | |||
738 | error = rfkill_add_switch(rfkill); | ||
739 | if (error) { | ||
740 | rfkill_led_trigger_unregister(rfkill); | ||
741 | return error; | ||
742 | } | ||
743 | |||
744 | error = device_add(dev); | ||
745 | if (error) { | ||
746 | rfkill_remove_switch(rfkill); | ||
747 | rfkill_led_trigger_unregister(rfkill); | ||
748 | return error; | ||
749 | } | ||
750 | |||
751 | return 0; | ||
752 | } | ||
753 | EXPORT_SYMBOL(rfkill_register); | ||
754 | |||
755 | /** | ||
756 | * rfkill_unregister - Unregister a rfkill structure. | ||
757 | * @rfkill: rfkill structure to be unregistered | ||
758 | * | ||
759 | * This function should be called by the network driver during device | ||
760 | * teardown to destroy rfkill structure. Note that rfkill_free() should | ||
761 | * _not_ be called after rfkill_unregister(). | ||
762 | */ | ||
763 | void rfkill_unregister(struct rfkill *rfkill) | ||
764 | { | ||
765 | BUG_ON(!rfkill); | ||
766 | device_del(&rfkill->dev); | ||
767 | rfkill_remove_switch(rfkill); | ||
768 | rfkill_led_trigger_unregister(rfkill); | ||
769 | put_device(&rfkill->dev); | ||
770 | } | ||
771 | EXPORT_SYMBOL(rfkill_unregister); | ||
772 | |||
773 | /** | ||
774 | * rfkill_set_default - set initial value for a switch type | ||
775 | * @type - the type of switch to set the default state of | ||
776 | * @state - the new default state for that group of switches | ||
777 | * | ||
778 | * Sets the initial state rfkill should use for a given type. | ||
779 | * The following initial states are allowed: RFKILL_STATE_SOFT_BLOCKED | ||
780 | * and RFKILL_STATE_UNBLOCKED. | ||
781 | * | ||
782 | * This function is meant to be used by platform drivers for platforms | ||
783 | * that can save switch state across power down/reboot. | ||
784 | * | ||
785 | * The default state for each switch type can be changed exactly once. | ||
786 | * After a switch of that type is registered, the default state cannot | ||
787 | * be changed anymore. This guards against multiple drivers in the | ||
788 | * same platform trying to set the initial switch default state, which | ||
789 | * is not allowed. | ||
790 | * | ||
791 | * Returns -EPERM if the state has already been set once or is in use, | ||
792 | * so drivers likely want to either ignore or at most printk(KERN_NOTICE) | ||
793 | * if this function returns -EPERM. | ||
794 | * | ||
795 | * Returns 0 if the new default state was set, or an error if it | ||
796 | * could not be set. | ||
797 | */ | ||
798 | int rfkill_set_default(enum rfkill_type type, enum rfkill_state state) | ||
799 | { | ||
800 | int error; | ||
801 | |||
802 | if (WARN((type >= RFKILL_TYPE_MAX || | ||
803 | (state != RFKILL_STATE_SOFT_BLOCKED && | ||
804 | state != RFKILL_STATE_UNBLOCKED)), | ||
805 | KERN_WARNING | ||
806 | "rfkill: illegal state %d or type %d passed as " | ||
807 | "parameter to rfkill_set_default\n", state, type)) | ||
808 | return -EINVAL; | ||
809 | |||
810 | mutex_lock(&rfkill_global_mutex); | ||
811 | |||
812 | if (!test_and_set_bit(type, rfkill_states_lockdflt)) { | ||
813 | rfkill_global_states[type].default_state = state; | ||
814 | rfkill_global_states[type].current_state = state; | ||
815 | error = 0; | ||
816 | } else | ||
817 | error = -EPERM; | ||
818 | |||
819 | mutex_unlock(&rfkill_global_mutex); | ||
820 | return error; | ||
821 | } | ||
822 | EXPORT_SYMBOL_GPL(rfkill_set_default); | ||
823 | |||
824 | /* | ||
825 | * Rfkill module initialization/deinitialization. | ||
826 | */ | ||
827 | static int __init rfkill_init(void) | ||
828 | { | ||
829 | int error; | ||
830 | int i; | ||
831 | |||
832 | /* RFKILL_STATE_HARD_BLOCKED is illegal here... */ | ||
833 | if (rfkill_default_state != RFKILL_STATE_SOFT_BLOCKED && | ||
834 | rfkill_default_state != RFKILL_STATE_UNBLOCKED) | ||
835 | return -EINVAL; | ||
836 | |||
837 | for (i = 0; i < RFKILL_TYPE_MAX; i++) | ||
838 | rfkill_global_states[i].default_state = rfkill_default_state; | ||
839 | |||
840 | error = class_register(&rfkill_class); | ||
841 | if (error) { | ||
842 | printk(KERN_ERR "rfkill: unable to register rfkill class\n"); | ||
843 | return error; | ||
844 | } | ||
845 | |||
846 | return 0; | ||
847 | } | ||
848 | |||
849 | static void __exit rfkill_exit(void) | ||
850 | { | ||
851 | class_unregister(&rfkill_class); | ||
852 | } | ||
853 | |||
854 | subsys_initcall(rfkill_init); | ||
855 | module_exit(rfkill_exit); | ||
diff --git a/net/rfkill/rfkill-input.h b/net/rfkill/rfkill.h index fe8df6b5b935..d1117cb6e4de 100644 --- a/net/rfkill/rfkill-input.h +++ b/net/rfkill/rfkill.h | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007 Ivo van Doorn | 2 | * Copyright (C) 2007 Ivo van Doorn |
3 | * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> | ||
3 | */ | 4 | */ |
4 | 5 | ||
5 | /* | 6 | /* |
@@ -11,11 +12,16 @@ | |||
11 | #ifndef __RFKILL_INPUT_H | 12 | #ifndef __RFKILL_INPUT_H |
12 | #define __RFKILL_INPUT_H | 13 | #define __RFKILL_INPUT_H |
13 | 14 | ||
14 | void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state); | 15 | /* core code */ |
16 | void rfkill_switch_all(const enum rfkill_type type, bool blocked); | ||
15 | void rfkill_epo(void); | 17 | void rfkill_epo(void); |
16 | void rfkill_restore_states(void); | 18 | void rfkill_restore_states(void); |
17 | void rfkill_remove_epo_lock(void); | 19 | void rfkill_remove_epo_lock(void); |
18 | bool rfkill_is_epo_lock_active(void); | 20 | bool rfkill_is_epo_lock_active(void); |
19 | enum rfkill_state rfkill_get_global_state(const enum rfkill_type type); | 21 | bool rfkill_get_global_sw_state(const enum rfkill_type type); |
22 | |||
23 | /* input handler */ | ||
24 | int rfkill_handler_init(void); | ||
25 | void rfkill_handler_exit(void); | ||
20 | 26 | ||
21 | #endif /* __RFKILL_INPUT_H */ | 27 | #endif /* __RFKILL_INPUT_H */ |