diff options
Diffstat (limited to 'net/rfkill')
-rw-r--r-- | net/rfkill/Kconfig | 21 | ||||
-rw-r--r-- | net/rfkill/Makefile | 5 | ||||
-rw-r--r-- | net/rfkill/core.c | 1205 | ||||
-rw-r--r-- | net/rfkill/input.c | 342 | ||||
-rw-r--r-- | net/rfkill/rfkill-input.c | 459 | ||||
-rw-r--r-- | net/rfkill/rfkill.c | 882 | ||||
-rw-r--r-- | net/rfkill/rfkill.h (renamed from net/rfkill/rfkill-input.h) | 10 |
7 files changed, 1565 insertions, 1359 deletions
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig index 7f807b30cfbb..eaf765876458 100644 --- a/net/rfkill/Kconfig +++ b/net/rfkill/Kconfig | |||
@@ -10,22 +10,15 @@ menuconfig RFKILL | |||
10 | To compile this driver as a module, choose M here: the | 10 | To compile this driver as a module, choose M here: the |
11 | module will be called rfkill. | 11 | module will be called rfkill. |
12 | 12 | ||
13 | config RFKILL_INPUT | ||
14 | tristate "Input layer to RF switch connector" | ||
15 | depends on RFKILL && INPUT | ||
16 | help | ||
17 | Say Y here if you want kernel automatically toggle state | ||
18 | of RF switches on and off when user presses appropriate | ||
19 | button or a key on the keyboard. Without this module you | ||
20 | need a some kind of userspace application to control | ||
21 | state of the switches. | ||
22 | |||
23 | To compile this driver as a module, choose M here: the | ||
24 | module will be called rfkill-input. | ||
25 | |||
26 | # LED trigger support | 13 | # LED trigger support |
27 | config RFKILL_LEDS | 14 | config RFKILL_LEDS |
28 | bool | 15 | bool |
29 | depends on RFKILL && LEDS_TRIGGERS | 16 | depends on RFKILL |
17 | depends on LEDS_TRIGGERS = y || RFKILL = LEDS_TRIGGERS | ||
30 | default y | 18 | default y |
31 | 19 | ||
20 | config RFKILL_INPUT | ||
21 | bool "RF switch input support" if EMBEDDED | ||
22 | depends on RFKILL | ||
23 | depends on INPUT = y || RFKILL = INPUT | ||
24 | default y if !EMBEDDED | ||
diff --git a/net/rfkill/Makefile b/net/rfkill/Makefile index b38c430be057..662105352691 100644 --- a/net/rfkill/Makefile +++ b/net/rfkill/Makefile | |||
@@ -2,5 +2,6 @@ | |||
2 | # Makefile for the RF switch subsystem. | 2 | # Makefile for the RF switch subsystem. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_RFKILL) += rfkill.o | 5 | rfkill-y += core.o |
6 | obj-$(CONFIG_RFKILL_INPUT) += rfkill-input.o | 6 | rfkill-$(CONFIG_RFKILL_INPUT) += input.o |
7 | obj-$(CONFIG_RFKILL) += rfkill.o | ||
diff --git a/net/rfkill/core.c b/net/rfkill/core.c new file mode 100644 index 000000000000..4e68ab439d5d --- /dev/null +++ b/net/rfkill/core.c | |||
@@ -0,0 +1,1205 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 - 2007 Ivo van Doorn | ||
3 | * Copyright (C) 2007 Dmitry Torokhov | ||
4 | * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the | ||
18 | * Free Software Foundation, Inc., | ||
19 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | */ | ||
21 | |||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/workqueue.h> | ||
26 | #include <linux/capability.h> | ||
27 | #include <linux/list.h> | ||
28 | #include <linux/mutex.h> | ||
29 | #include <linux/rfkill.h> | ||
30 | #include <linux/spinlock.h> | ||
31 | #include <linux/miscdevice.h> | ||
32 | #include <linux/wait.h> | ||
33 | #include <linux/poll.h> | ||
34 | #include <linux/fs.h> | ||
35 | |||
36 | #include "rfkill.h" | ||
37 | |||
38 | #define POLL_INTERVAL (5 * HZ) | ||
39 | |||
40 | #define RFKILL_BLOCK_HW BIT(0) | ||
41 | #define RFKILL_BLOCK_SW BIT(1) | ||
42 | #define RFKILL_BLOCK_SW_PREV BIT(2) | ||
43 | #define RFKILL_BLOCK_ANY (RFKILL_BLOCK_HW |\ | ||
44 | RFKILL_BLOCK_SW |\ | ||
45 | RFKILL_BLOCK_SW_PREV) | ||
46 | #define RFKILL_BLOCK_SW_SETCALL BIT(31) | ||
47 | |||
/*
 * struct rfkill - one registered radio switch.
 *
 * Lifetime is tied to the embedded struct device refcount: the
 * structure is freed from rfkill_release() when the last reference
 * is dropped (see rfkill_destroy()).
 */
struct rfkill {
	spinlock_t lock;		/* protects state bits below */

	const char *name;		/* driver-supplied switch name */
	enum rfkill_type type;		/* radio type (wlan, bluetooth, ...) */

	unsigned long state;		/* RFKILL_BLOCK_* bit mask */

	u32 idx;			/* unique index, also used in dev name */

	bool registered;		/* rfkill_register() completed */
	bool suspended;			/* set between class suspend/resume */
	bool persistent;		/* driver set state before registering */

	const struct rfkill_ops *ops;	/* driver callbacks */
	void *data;			/* opaque cookie handed to ops */

#ifdef CONFIG_RFKILL_LEDS
	struct led_trigger led_trigger;	/* per-switch LED trigger */
	const char *ledtrigname;	/* optional trigger name override */
#endif

	struct device dev;		/* class device (sysfs, uevents) */
	struct list_head node;		/* entry in global rfkill_list */

	struct delayed_work poll_work;	/* periodic ops->poll scheduling */
	struct work_struct uevent_work;	/* deferred CHANGE notification */
	struct work_struct sync_work;	/* sync to global state after register */
};
#define to_rfkill(d)	container_of(d, struct rfkill, dev)
78 | |||
/* One queued event for a /dev/rfkill reader. */
struct rfkill_int_event {
	struct list_head list;	/* entry in rfkill_data.events */
	struct rfkill_event ev;	/* payload copied out to userspace */
};

/* Per-open-file state for /dev/rfkill. */
struct rfkill_data {
	struct list_head list;		/* entry in global rfkill_fds */
	struct list_head events;	/* pending rfkill_int_event queue */
	struct mutex mtx;		/* protects the events list */
	wait_queue_head_t read_wait;	/* readers sleep here for events */
	/*
	 * NOTE(review): presumably set when this fd takes over input
	 * handling from the kernel -- confirm against the fops/ioctl
	 * code (not visible in this chunk).
	 */
	bool input_handler;
};
91 | |||
92 | |||
93 | MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>"); | ||
94 | MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>"); | ||
95 | MODULE_DESCRIPTION("RF switch support"); | ||
96 | MODULE_LICENSE("GPL"); | ||
97 | |||
98 | |||
99 | /* | ||
100 | * The locking here should be made much smarter, we currently have | ||
101 | * a bit of a stupid situation because drivers might want to register | ||
102 | * the rfkill struct under their own lock, and take this lock during | ||
103 | * rfkill method calls -- which will cause an AB-BA deadlock situation. | ||
104 | * | ||
105 | * To fix that, we need to rework this code here to be mostly lock-free | ||
106 | * and only use the mutex for list manipulations, not to protect the | ||
107 | * various other global variables. Then we can avoid holding the mutex | ||
108 | * around driver operations, and all is happy. | ||
109 | */ | ||
110 | static LIST_HEAD(rfkill_list); /* list of registered rf switches */ | ||
111 | static DEFINE_MUTEX(rfkill_global_mutex); | ||
112 | static LIST_HEAD(rfkill_fds); /* list of open fds of /dev/rfkill */ | ||
113 | |||
114 | static unsigned int rfkill_default_state = 1; | ||
115 | module_param_named(default_state, rfkill_default_state, uint, 0444); | ||
116 | MODULE_PARM_DESC(default_state, | ||
117 | "Default initial state for all radio types, 0 = radio off"); | ||
118 | |||
119 | static struct { | ||
120 | bool cur, sav; | ||
121 | } rfkill_global_states[NUM_RFKILL_TYPES]; | ||
122 | |||
123 | static bool rfkill_epo_lock_active; | ||
124 | |||
125 | |||
#ifdef CONFIG_RFKILL_LEDS
/* Drive the per-switch LED trigger: LED off while any block bit is set. */
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
	struct led_trigger *trigger;

	if (!rfkill->registered)
		return;

	trigger = &rfkill->led_trigger;

	if (rfkill->state & RFKILL_BLOCK_ANY)
		led_trigger_event(trigger, LED_OFF);
	else
		led_trigger_event(trigger, LED_FULL);
}

/* Trigger activation hook: sync the newly bound LED to the current state. */
static void rfkill_led_trigger_activate(struct led_classdev *led)
{
	struct rfkill *rfkill;

	rfkill = container_of(led->trigger, struct rfkill, led_trigger);

	rfkill_led_trigger_event(rfkill);
}

const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
	return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);

void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
	BUG_ON(!rfkill);

	/*
	 * Only takes effect if set before rfkill_register(), since the
	 * trigger name is latched in rfkill_led_trigger_register().
	 */
	rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);

static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	/* fall back to the device name if the driver didn't set one */
	rfkill->led_trigger.name = rfkill->ledtrigname
					? : dev_name(&rfkill->dev);
	rfkill->led_trigger.activate = rfkill_led_trigger_activate;
	return led_trigger_register(&rfkill->led_trigger);
}

static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
	led_trigger_unregister(&rfkill->led_trigger);
}
#else
/* LED triggers disabled in Kconfig: provide no-op stubs. */
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}
#endif /* CONFIG_RFKILL_LEDS */
191 | |||
/*
 * Fill a userspace-visible rfkill_event for @rfkill and operation @op.
 * The state bits are snapshotted under rfkill->lock; a software block
 * still in flight (SW_PREV set during ops->set_block) is reported as
 * soft-blocked too.
 */
static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
			      enum rfkill_operation op)
{
	unsigned long flags;

	ev->idx = rfkill->idx;
	ev->type = rfkill->type;
	ev->op = op;

	spin_lock_irqsave(&rfkill->lock, flags);
	ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
	ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
					RFKILL_BLOCK_SW_PREV));
	spin_unlock_irqrestore(&rfkill->lock, flags);
}
207 | |||
/*
 * Queue an event of type @op on every open /dev/rfkill fd and wake any
 * readers.  The rfkill_fds walk is unlocked, so this relies on the
 * callers holding rfkill_global_mutex (register/unregister/uevent work
 * all do; the resume path never gets here because rfkill_event() bails
 * out while rfkill->suspended is set).  An allocation failure silently
 * drops the event for that one reader.
 */
static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
	struct rfkill_data *data;
	struct rfkill_int_event *ev;

	list_for_each_entry(data, &rfkill_fds, list) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			continue;
		rfkill_fill_event(&ev->ev, rfkill, op);
		mutex_lock(&data->mtx);
		list_add_tail(&ev->list, &data->events);
		mutex_unlock(&data->mtx);
		wake_up_interruptible(&data->read_wait);
	}
}

/*
 * Notify userspace of a state change: emit a sysfs CHANGE uevent and a
 * /dev/rfkill event.  Suppressed until registration completes and while
 * the device is suspended.
 */
static void rfkill_event(struct rfkill *rfkill)
{
	if (!rfkill->registered || rfkill->suspended)
		return;

	kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

	/* also send event to /dev/rfkill */
	rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}
235 | |||
/*
 * Update the hardware block bit under rfkill->lock.
 *
 * @change is set to true iff the HW bit actually flipped.
 * Returns true while any block bit (HW or SW, including a pending
 * SW_PREV) remains set, i.e. the combined blocked state.
 */
static bool __rfkill_set_hw_state(struct rfkill *rfkill,
				  bool blocked, bool *change)
{
	unsigned long flags;
	bool prev, any;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_HW);
	if (blocked)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;
	*change = prev != blocked;
	any = rfkill->state & RFKILL_BLOCK_ANY;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	/* reflect the new combined state on the LED */
	rfkill_led_trigger_event(rfkill);

	return any;
}
258 | |||
/**
 * rfkill_set_block - wrapper for set_block method
 *
 * @rfkill: the rfkill struct to use
 * @blocked: the new software state
 *
 * Calls the set_block method (when applicable) and handles notifications
 * etc. as well.
 *
 * Locking protocol: SW_PREV saves the pre-call SW state and SETCALL
 * marks that ops->set_block is in flight, so a concurrent
 * rfkill_set_sw_state() can record its result in SW_PREV instead of
 * racing with the bits this function owns (see __rfkill_set_sw_state()).
 */
static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	int err;

	/*
	 * Some platforms (...!) generate input events which affect the
	 * _hard_ kill state -- whenever something tries to change the
	 * current software state query the hardware state too.
	 */
	if (rfkill->ops->query)
		rfkill->ops->query(rfkill, rfkill->data);

	spin_lock_irqsave(&rfkill->lock, flags);
	/* remember the current SW state in case the driver call fails */
	if (rfkill->state & RFKILL_BLOCK_SW)
		rfkill->state |= RFKILL_BLOCK_SW_PREV;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= RFKILL_BLOCK_SW;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW;

	rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	/* don't poke the hardware while it is asleep */
	if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
		return;

	err = rfkill->ops->set_block(rfkill->data, blocked);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (err) {
		/*
		 * Failed -- reset status to _prev, this may be different
		 * from what we set _PREV to earlier in this function
		 * if rfkill_set_sw_state was invoked.
		 */
		if (rfkill->state & RFKILL_BLOCK_SW_PREV)
			rfkill->state |= RFKILL_BLOCK_SW;
		else
			rfkill->state &= ~RFKILL_BLOCK_SW;
	}
	rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
	rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);
	rfkill_event(rfkill);
}
319 | |||
#ifdef CONFIG_RFKILL_INPUT
/*
 * NOTE(review): presumably raised while a /dev/rfkill user has taken
 * over input handling (all entry points below bail out while non-zero)
 * -- confirm against the fops code, which is not visible here.
 */
static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);

/**
 * __rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * This function sets the state of all switches of given type,
 * unless a specific switch is claimed by userspace (in which case,
 * that switch is left alone) or suspended.
 *
 * Caller must have acquired rfkill_global_mutex.
 */
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
	struct rfkill *rfkill;

	rfkill_global_states[type].cur = blocked;
	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->type != type)
			continue;

		rfkill_set_block(rfkill, blocked);
	}
}

/**
 * rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
 * Please refer to __rfkill_switch_all() for details.
 *
 * Does nothing if the EPO lock is active.
 */
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	if (!rfkill_epo_lock_active)
		__rfkill_switch_all(type, blocked);

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_epo - emergency power off all transmitters
 *
 * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
 * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
 *
 * The global state before the EPO is saved and can be restored later
 * using rfkill_restore_states().
 */
void rfkill_epo(void)
{
	struct rfkill *rfkill;
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = true;
	list_for_each_entry(rfkill, &rfkill_list, node)
		rfkill_set_block(rfkill, true);

	/* save the pre-EPO state so it can be restored later */
	for (i = 0; i < NUM_RFKILL_TYPES; i++) {
		rfkill_global_states[i].sav = rfkill_global_states[i].cur;
		rfkill_global_states[i].cur = true;
	}

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_restore_states - restore global states
 *
 * Restore (and sync switches to) the global state from the states
 * saved at EPO time (rfkill_global_states[].sav).  This can undo the
 * effects of a call to rfkill_epo().
 */
void rfkill_restore_states(void)
{
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = false;
	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		__rfkill_switch_all(i, rfkill_global_states[i].sav);
	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_remove_epo_lock - unlock state changes
 *
 * Used by rfkill-input to manually unlock state changes, when
 * the EPO switch is deactivated.
 */
void rfkill_remove_epo_lock(void)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);
	rfkill_epo_lock_active = false;
	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_is_epo_lock_active - returns true when EPO is active
 *
 * Returns 0 (false) if there is NOT an active EPO condition,
 * and 1 (true) if there is an active EPO condition, which
 * locks all radios in one of the BLOCKED states.
 *
 * Can be called in atomic context.
 */
bool rfkill_is_epo_lock_active(void)
{
	return rfkill_epo_lock_active;
}

/**
 * rfkill_get_global_sw_state - returns global state for a type
 * @type: the type to get the global state of
 *
 * Returns the current global state for a given wireless
 * device type.
 */
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
	return rfkill_global_states[type].cur;
}
#endif
465 | |||
466 | |||
/*
 * rfkill_set_hw_state - drivers report a hardware block state change.
 *
 * Defers the uevent to a workqueue (drivers may call this from atomic
 * context) and only when the HW bit actually changed.  Returns the
 * combined blocked state after the update.
 */
bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
	bool ret, change;

	ret = __rfkill_set_hw_state(rfkill, blocked, &change);

	if (!rfkill->registered)
		return ret;

	if (change)
		schedule_work(&rfkill->uevent_work);

	return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state);
482 | |||
/*
 * Set or clear the software block bit.  Caller must hold rfkill->lock.
 * If an ops->set_block call is in flight (SETCALL set), the result is
 * recorded in SW_PREV instead, so rfkill_set_block() will adopt it as
 * the authoritative state if the driver call fails.
 */
static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	u32 bit = RFKILL_BLOCK_SW;

	/* if in a ops->set_block right now, use other bit */
	if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
		bit = RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= bit;
	else
		rfkill->state &= ~bit;
}
496 | |||
/*
 * rfkill_set_sw_state - drivers report a software block state change.
 *
 * Before registration this only records the state and marks the switch
 * persistent (the driver knows its state); afterwards it notifies
 * userspace when the effective state changed and is not masked by a
 * hardware block.  Returns the combined (sw || hw) blocked state.
 */
bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, hwblock;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_SW);
	__rfkill_set_sw_state(rfkill, blocked);
	hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
	blocked = blocked || hwblock;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered) {
		rfkill->persistent = true;
	} else {
		if (prev != blocked && !hwblock)
			schedule_work(&rfkill->uevent_work);

		rfkill_led_trigger_event(rfkill);
	}

	return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);
523 | |||
524 | void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) | ||
525 | { | ||
526 | unsigned long flags; | ||
527 | bool swprev, hwprev; | ||
528 | |||
529 | BUG_ON(!rfkill); | ||
530 | |||
531 | spin_lock_irqsave(&rfkill->lock, flags); | ||
532 | |||
533 | /* | ||
534 | * No need to care about prev/setblock ... this is for uevent only | ||
535 | * and that will get triggered by rfkill_set_block anyway. | ||
536 | */ | ||
537 | swprev = !!(rfkill->state & RFKILL_BLOCK_SW); | ||
538 | hwprev = !!(rfkill->state & RFKILL_BLOCK_HW); | ||
539 | __rfkill_set_sw_state(rfkill, sw); | ||
540 | |||
541 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
542 | |||
543 | if (!rfkill->registered) { | ||
544 | rfkill->persistent = true; | ||
545 | } else { | ||
546 | if (swprev != sw || hwprev != hw) | ||
547 | schedule_work(&rfkill->uevent_work); | ||
548 | |||
549 | rfkill_led_trigger_event(rfkill); | ||
550 | } | ||
551 | } | ||
552 | EXPORT_SYMBOL(rfkill_set_states); | ||
553 | |||
/* sysfs "name" attribute: the driver-supplied switch name. */
static ssize_t rfkill_name_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill->name);
}

/*
 * Map an rfkill type to its userspace-visible string.  BUG()s on an
 * invalid type; the BUILD_BUG_ON is compile-time only (never executed,
 * being after BUG()) and forces this list to be revisited whenever a
 * new type is added to the enum.
 */
static const char *rfkill_get_type_str(enum rfkill_type type)
{
	switch (type) {
	case RFKILL_TYPE_WLAN:
		return "wlan";
	case RFKILL_TYPE_BLUETOOTH:
		return "bluetooth";
	case RFKILL_TYPE_UWB:
		return "ultrawideband";
	case RFKILL_TYPE_WIMAX:
		return "wimax";
	case RFKILL_TYPE_WWAN:
		return "wwan";
	default:
		BUG();
	}

	BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_WWAN + 1);
}

/* sysfs "type" attribute: the switch's radio type as a string. */
static ssize_t rfkill_type_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
}
591 | |||
592 | static ssize_t rfkill_idx_show(struct device *dev, | ||
593 | struct device_attribute *attr, | ||
594 | char *buf) | ||
595 | { | ||
596 | struct rfkill *rfkill = to_rfkill(dev); | ||
597 | |||
598 | return sprintf(buf, "%d\n", rfkill->idx); | ||
599 | } | ||
600 | |||
601 | static u8 user_state_from_blocked(unsigned long state) | ||
602 | { | ||
603 | if (state & RFKILL_BLOCK_HW) | ||
604 | return RFKILL_USER_STATE_HARD_BLOCKED; | ||
605 | if (state & RFKILL_BLOCK_SW) | ||
606 | return RFKILL_USER_STATE_SOFT_BLOCKED; | ||
607 | |||
608 | return RFKILL_USER_STATE_UNBLOCKED; | ||
609 | } | ||
610 | |||
/* sysfs "state" attribute: legacy numeric state (RFKILL_USER_STATE_*). */
static ssize_t rfkill_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long flags;
	u32 state;

	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	return sprintf(buf, "%d\n", user_state_from_blocked(state));
}

/* Writing "state" is deliberately rejected -- see comment below. */
static ssize_t rfkill_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	/*
	 * The intention was that userspace can only take control over
	 * a given device when/if rfkill-input doesn't control it due
	 * to user_claim. Since user_claim is currently unsupported,
	 * we never support changing the state from userspace -- this
	 * can be implemented again later.
	 */

	return -EPERM;
}

/* Legacy "claim" attribute: always 0, claiming is no longer supported. */
static ssize_t rfkill_claim_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return sprintf(buf, "%d\n", 0);
}

static ssize_t rfkill_claim_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	return -EOPNOTSUPP;
}

/* Default attributes created for every rfkill class device. */
static struct device_attribute rfkill_dev_attrs[] = {
	__ATTR(name, S_IRUGO, rfkill_name_show, NULL),
	__ATTR(type, S_IRUGO, rfkill_type_show, NULL),
	__ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
	__ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
	__ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
	__ATTR_NULL
};
663 | |||
/* Class device release: frees the rfkill_alloc()'d structure. */
static void rfkill_release(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);

	kfree(rfkill);
}

/*
 * Populate uevent environment with RFKILL_NAME, RFKILL_TYPE and
 * RFKILL_STATE so userspace can classify the event without reading
 * sysfs.  State is snapshotted under rfkill->lock.
 */
static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long flags;
	u32 state;
	int error;

	error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
	if (error)
		return error;
	error = add_uevent_var(env, "RFKILL_TYPE=%s",
			       rfkill_get_type_str(rfkill->type));
	if (error)
		return error;
	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);
	error = add_uevent_var(env, "RFKILL_STATE=%d",
			       user_state_from_blocked(state));
	return error;
}
692 | |||
/*
 * rfkill_pause_polling - stop the periodic ops->poll calls.
 * Synchronously cancels any in-flight poll work; no-op for switches
 * without a poll callback.
 */
void rfkill_pause_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);

/*
 * rfkill_resume_polling - restart periodic polling.
 * Runs the poll work immediately; it reschedules itself every
 * POLL_INTERVAL from rfkill_poll().
 */
void rfkill_resume_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	schedule_work(&rfkill->poll_work.work);
}
EXPORT_SYMBOL(rfkill_resume_polling);
714 | |||
/* Class suspend: stop polling and mute notifications via ->suspended. */
static int rfkill_suspend(struct device *dev, pm_message_t state)
{
	struct rfkill *rfkill = to_rfkill(dev);

	rfkill_pause_polling(rfkill);

	rfkill->suspended = true;

	return 0;
}

/*
 * Class resume: re-apply the software block state to the hardware
 * (the driver may have lost it over suspend), then resume polling.
 * NOTE(review): rfkill->state is read here without rfkill->lock --
 * apparently relying on nothing else running during resume; confirm.
 */
static int rfkill_resume(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);
	bool cur;

	cur = !!(rfkill->state & RFKILL_BLOCK_SW);
	rfkill_set_block(rfkill, cur);

	rfkill->suspended = false;

	rfkill_resume_polling(rfkill);

	return 0;
}

/* The "rfkill" device class: sysfs attributes, uevents and PM hooks. */
static struct class rfkill_class = {
	.name		= "rfkill",
	.dev_release	= rfkill_release,
	.dev_attrs	= rfkill_dev_attrs,
	.dev_uevent	= rfkill_dev_uevent,
	.suspend	= rfkill_suspend,
	.resume		= rfkill_resume,
};
749 | |||
/*
 * rfkill_blocked - query the combined block state.
 * Returns true if the switch is blocked for any reason (hardware,
 * software, or a software change still in flight).
 */
bool rfkill_blocked(struct rfkill *rfkill)
{
	unsigned long flags;
	u32 state;

	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	return !!(state & RFKILL_BLOCK_ANY);
}
EXPORT_SYMBOL(rfkill_blocked);
762 | |||
763 | |||
764 | struct rfkill * __must_check rfkill_alloc(const char *name, | ||
765 | struct device *parent, | ||
766 | const enum rfkill_type type, | ||
767 | const struct rfkill_ops *ops, | ||
768 | void *ops_data) | ||
769 | { | ||
770 | struct rfkill *rfkill; | ||
771 | struct device *dev; | ||
772 | |||
773 | if (WARN_ON(!ops)) | ||
774 | return NULL; | ||
775 | |||
776 | if (WARN_ON(!ops->set_block)) | ||
777 | return NULL; | ||
778 | |||
779 | if (WARN_ON(!name)) | ||
780 | return NULL; | ||
781 | |||
782 | if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES)) | ||
783 | return NULL; | ||
784 | |||
785 | rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL); | ||
786 | if (!rfkill) | ||
787 | return NULL; | ||
788 | |||
789 | spin_lock_init(&rfkill->lock); | ||
790 | INIT_LIST_HEAD(&rfkill->node); | ||
791 | rfkill->type = type; | ||
792 | rfkill->name = name; | ||
793 | rfkill->ops = ops; | ||
794 | rfkill->data = ops_data; | ||
795 | |||
796 | dev = &rfkill->dev; | ||
797 | dev->class = &rfkill_class; | ||
798 | dev->parent = parent; | ||
799 | device_initialize(dev); | ||
800 | |||
801 | return rfkill; | ||
802 | } | ||
803 | EXPORT_SYMBOL(rfkill_alloc); | ||
804 | |||
/* Periodic poll work: ask the driver for hardware state, reschedule. */
static void rfkill_poll(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, poll_work.work);

	/*
	 * Poll hardware state -- driver will use one of the
	 * rfkill_set{,_hw,_sw}_state functions and use its
	 * return value to update the current status.
	 */
	rfkill->ops->poll(rfkill, rfkill->data);

	schedule_delayed_work(&rfkill->poll_work,
		round_jiffies_relative(POLL_INTERVAL));
}

/*
 * Deferred notification work: emits the uevent + /dev/rfkill event
 * under rfkill_global_mutex (state-change paths may run in atomic
 * context and cannot do this inline).
 */
static void rfkill_uevent_work(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, uevent_work);

	mutex_lock(&rfkill_global_mutex);
	rfkill_event(rfkill);
	mutex_unlock(&rfkill_global_mutex);
}

/* Registration-time work: sync this switch to the global state of its type. */
static void rfkill_sync_work(struct work_struct *work)
{
	struct rfkill *rfkill;
	bool cur;

	rfkill = container_of(work, struct rfkill, sync_work);

	mutex_lock(&rfkill_global_mutex);
	cur = rfkill_global_states[rfkill->type].cur;
	rfkill_set_block(rfkill, cur);
	mutex_unlock(&rfkill_global_mutex);
}
845 | |||
/*
 * rfkill_register - make an allocated rfkill switch visible.
 *
 * Assigns the unique index/device name, adds the class device,
 * registers the LED trigger, starts polling (if the driver polls),
 * and announces the switch on /dev/rfkill (RFKILL_OP_ADD).
 *
 * Non-persistent switches are synced to the current global state via
 * sync_work; persistent ones instead push their own pre-set state out
 * to the rest of their type (unless EPO is active or input handling
 * is disabled).
 *
 * Returns 0 or a negative error (-EALREADY if already registered).
 */
int __must_check rfkill_register(struct rfkill *rfkill)
{
	static unsigned long rfkill_no;
	struct device *dev = &rfkill->dev;
	int error;

	BUG_ON(!rfkill);

	mutex_lock(&rfkill_global_mutex);

	if (rfkill->registered) {
		error = -EALREADY;
		goto unlock;
	}

	rfkill->idx = rfkill_no;
	dev_set_name(dev, "rfkill%lu", rfkill_no);
	rfkill_no++;

	list_add_tail(&rfkill->node, &rfkill_list);

	error = device_add(dev);
	if (error)
		goto remove;

	error = rfkill_led_trigger_register(rfkill);
	if (error)
		goto devdel;

	rfkill->registered = true;

	INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
	INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
	INIT_WORK(&rfkill->sync_work, rfkill_sync_work);

	if (rfkill->ops->poll)
		schedule_delayed_work(&rfkill->poll_work,
			round_jiffies_relative(POLL_INTERVAL));

	if (!rfkill->persistent || rfkill_epo_lock_active) {
		schedule_work(&rfkill->sync_work);
	} else {
#ifdef CONFIG_RFKILL_INPUT
		/* state was set pre-registration, no concurrent writers yet */
		bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);

		if (!atomic_read(&rfkill_input_disabled))
			__rfkill_switch_all(rfkill->type, soft_blocked);
#endif
	}

	rfkill_send_events(rfkill, RFKILL_OP_ADD);

	mutex_unlock(&rfkill_global_mutex);
	return 0;

 devdel:
	device_del(&rfkill->dev);
 remove:
	list_del_init(&rfkill->node);
 unlock:
	mutex_unlock(&rfkill_global_mutex);
	return error;
}
EXPORT_SYMBOL(rfkill_register);
910 | |||
911 | void rfkill_unregister(struct rfkill *rfkill) | ||
912 | { | ||
913 | BUG_ON(!rfkill); | ||
914 | |||
915 | if (rfkill->ops->poll) | ||
916 | cancel_delayed_work_sync(&rfkill->poll_work); | ||
917 | |||
918 | cancel_work_sync(&rfkill->uevent_work); | ||
919 | cancel_work_sync(&rfkill->sync_work); | ||
920 | |||
921 | rfkill->registered = false; | ||
922 | |||
923 | device_del(&rfkill->dev); | ||
924 | |||
925 | mutex_lock(&rfkill_global_mutex); | ||
926 | rfkill_send_events(rfkill, RFKILL_OP_DEL); | ||
927 | list_del_init(&rfkill->node); | ||
928 | mutex_unlock(&rfkill_global_mutex); | ||
929 | |||
930 | rfkill_led_trigger_unregister(rfkill); | ||
931 | } | ||
932 | EXPORT_SYMBOL(rfkill_unregister); | ||
933 | |||
934 | void rfkill_destroy(struct rfkill *rfkill) | ||
935 | { | ||
936 | if (rfkill) | ||
937 | put_device(&rfkill->dev); | ||
938 | } | ||
939 | EXPORT_SYMBOL(rfkill_destroy); | ||
940 | |||
941 | static int rfkill_fop_open(struct inode *inode, struct file *file) | ||
942 | { | ||
943 | struct rfkill_data *data; | ||
944 | struct rfkill *rfkill; | ||
945 | struct rfkill_int_event *ev, *tmp; | ||
946 | |||
947 | data = kzalloc(sizeof(*data), GFP_KERNEL); | ||
948 | if (!data) | ||
949 | return -ENOMEM; | ||
950 | |||
951 | INIT_LIST_HEAD(&data->events); | ||
952 | mutex_init(&data->mtx); | ||
953 | init_waitqueue_head(&data->read_wait); | ||
954 | |||
955 | mutex_lock(&rfkill_global_mutex); | ||
956 | mutex_lock(&data->mtx); | ||
957 | /* | ||
958 | * start getting events from elsewhere but hold mtx to get | ||
959 | * startup events added first | ||
960 | */ | ||
961 | list_add(&data->list, &rfkill_fds); | ||
962 | |||
963 | list_for_each_entry(rfkill, &rfkill_list, node) { | ||
964 | ev = kzalloc(sizeof(*ev), GFP_KERNEL); | ||
965 | if (!ev) | ||
966 | goto free; | ||
967 | rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD); | ||
968 | list_add_tail(&ev->list, &data->events); | ||
969 | } | ||
970 | mutex_unlock(&data->mtx); | ||
971 | mutex_unlock(&rfkill_global_mutex); | ||
972 | |||
973 | file->private_data = data; | ||
974 | |||
975 | return nonseekable_open(inode, file); | ||
976 | |||
977 | free: | ||
978 | mutex_unlock(&data->mtx); | ||
979 | mutex_unlock(&rfkill_global_mutex); | ||
980 | mutex_destroy(&data->mtx); | ||
981 | list_for_each_entry_safe(ev, tmp, &data->events, list) | ||
982 | kfree(ev); | ||
983 | kfree(data); | ||
984 | return -ENOMEM; | ||
985 | } | ||
986 | |||
987 | static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait) | ||
988 | { | ||
989 | struct rfkill_data *data = file->private_data; | ||
990 | unsigned int res = POLLOUT | POLLWRNORM; | ||
991 | |||
992 | poll_wait(file, &data->read_wait, wait); | ||
993 | |||
994 | mutex_lock(&data->mtx); | ||
995 | if (!list_empty(&data->events)) | ||
996 | res = POLLIN | POLLRDNORM; | ||
997 | mutex_unlock(&data->mtx); | ||
998 | |||
999 | return res; | ||
1000 | } | ||
1001 | |||
1002 | static bool rfkill_readable(struct rfkill_data *data) | ||
1003 | { | ||
1004 | bool r; | ||
1005 | |||
1006 | mutex_lock(&data->mtx); | ||
1007 | r = !list_empty(&data->events); | ||
1008 | mutex_unlock(&data->mtx); | ||
1009 | |||
1010 | return r; | ||
1011 | } | ||
1012 | |||
1013 | static ssize_t rfkill_fop_read(struct file *file, char __user *buf, | ||
1014 | size_t count, loff_t *pos) | ||
1015 | { | ||
1016 | struct rfkill_data *data = file->private_data; | ||
1017 | struct rfkill_int_event *ev; | ||
1018 | unsigned long sz; | ||
1019 | int ret; | ||
1020 | |||
1021 | mutex_lock(&data->mtx); | ||
1022 | |||
1023 | while (list_empty(&data->events)) { | ||
1024 | if (file->f_flags & O_NONBLOCK) { | ||
1025 | ret = -EAGAIN; | ||
1026 | goto out; | ||
1027 | } | ||
1028 | mutex_unlock(&data->mtx); | ||
1029 | ret = wait_event_interruptible(data->read_wait, | ||
1030 | rfkill_readable(data)); | ||
1031 | mutex_lock(&data->mtx); | ||
1032 | |||
1033 | if (ret) | ||
1034 | goto out; | ||
1035 | } | ||
1036 | |||
1037 | ev = list_first_entry(&data->events, struct rfkill_int_event, | ||
1038 | list); | ||
1039 | |||
1040 | sz = min_t(unsigned long, sizeof(ev->ev), count); | ||
1041 | ret = sz; | ||
1042 | if (copy_to_user(buf, &ev->ev, sz)) | ||
1043 | ret = -EFAULT; | ||
1044 | |||
1045 | list_del(&ev->list); | ||
1046 | kfree(ev); | ||
1047 | out: | ||
1048 | mutex_unlock(&data->mtx); | ||
1049 | return ret; | ||
1050 | } | ||
1051 | |||
1052 | static ssize_t rfkill_fop_write(struct file *file, const char __user *buf, | ||
1053 | size_t count, loff_t *pos) | ||
1054 | { | ||
1055 | struct rfkill *rfkill; | ||
1056 | struct rfkill_event ev; | ||
1057 | |||
1058 | /* we don't need the 'hard' variable but accept it */ | ||
1059 | if (count < sizeof(ev) - 1) | ||
1060 | return -EINVAL; | ||
1061 | |||
1062 | if (copy_from_user(&ev, buf, sizeof(ev) - 1)) | ||
1063 | return -EFAULT; | ||
1064 | |||
1065 | if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL) | ||
1066 | return -EINVAL; | ||
1067 | |||
1068 | if (ev.type >= NUM_RFKILL_TYPES) | ||
1069 | return -EINVAL; | ||
1070 | |||
1071 | mutex_lock(&rfkill_global_mutex); | ||
1072 | |||
1073 | if (ev.op == RFKILL_OP_CHANGE_ALL) { | ||
1074 | if (ev.type == RFKILL_TYPE_ALL) { | ||
1075 | enum rfkill_type i; | ||
1076 | for (i = 0; i < NUM_RFKILL_TYPES; i++) | ||
1077 | rfkill_global_states[i].cur = ev.soft; | ||
1078 | } else { | ||
1079 | rfkill_global_states[ev.type].cur = ev.soft; | ||
1080 | } | ||
1081 | } | ||
1082 | |||
1083 | list_for_each_entry(rfkill, &rfkill_list, node) { | ||
1084 | if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL) | ||
1085 | continue; | ||
1086 | |||
1087 | if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL) | ||
1088 | continue; | ||
1089 | |||
1090 | rfkill_set_block(rfkill, ev.soft); | ||
1091 | } | ||
1092 | mutex_unlock(&rfkill_global_mutex); | ||
1093 | |||
1094 | return count; | ||
1095 | } | ||
1096 | |||
1097 | static int rfkill_fop_release(struct inode *inode, struct file *file) | ||
1098 | { | ||
1099 | struct rfkill_data *data = file->private_data; | ||
1100 | struct rfkill_int_event *ev, *tmp; | ||
1101 | |||
1102 | mutex_lock(&rfkill_global_mutex); | ||
1103 | list_del(&data->list); | ||
1104 | mutex_unlock(&rfkill_global_mutex); | ||
1105 | |||
1106 | mutex_destroy(&data->mtx); | ||
1107 | list_for_each_entry_safe(ev, tmp, &data->events, list) | ||
1108 | kfree(ev); | ||
1109 | |||
1110 | #ifdef CONFIG_RFKILL_INPUT | ||
1111 | if (data->input_handler) | ||
1112 | if (atomic_dec_return(&rfkill_input_disabled) == 0) | ||
1113 | printk(KERN_DEBUG "rfkill: input handler enabled\n"); | ||
1114 | #endif | ||
1115 | |||
1116 | kfree(data); | ||
1117 | |||
1118 | return 0; | ||
1119 | } | ||
1120 | |||
1121 | #ifdef CONFIG_RFKILL_INPUT | ||
1122 | static long rfkill_fop_ioctl(struct file *file, unsigned int cmd, | ||
1123 | unsigned long arg) | ||
1124 | { | ||
1125 | struct rfkill_data *data = file->private_data; | ||
1126 | |||
1127 | if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC) | ||
1128 | return -ENOSYS; | ||
1129 | |||
1130 | if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT) | ||
1131 | return -ENOSYS; | ||
1132 | |||
1133 | mutex_lock(&data->mtx); | ||
1134 | |||
1135 | if (!data->input_handler) { | ||
1136 | if (atomic_inc_return(&rfkill_input_disabled) == 1) | ||
1137 | printk(KERN_DEBUG "rfkill: input handler disabled\n"); | ||
1138 | data->input_handler = true; | ||
1139 | } | ||
1140 | |||
1141 | mutex_unlock(&data->mtx); | ||
1142 | |||
1143 | return 0; | ||
1144 | } | ||
1145 | #endif | ||
1146 | |||
1147 | static const struct file_operations rfkill_fops = { | ||
1148 | .open = rfkill_fop_open, | ||
1149 | .read = rfkill_fop_read, | ||
1150 | .write = rfkill_fop_write, | ||
1151 | .poll = rfkill_fop_poll, | ||
1152 | .release = rfkill_fop_release, | ||
1153 | #ifdef CONFIG_RFKILL_INPUT | ||
1154 | .unlocked_ioctl = rfkill_fop_ioctl, | ||
1155 | .compat_ioctl = rfkill_fop_ioctl, | ||
1156 | #endif | ||
1157 | }; | ||
1158 | |||
1159 | static struct miscdevice rfkill_miscdev = { | ||
1160 | .name = "rfkill", | ||
1161 | .fops = &rfkill_fops, | ||
1162 | .minor = MISC_DYNAMIC_MINOR, | ||
1163 | }; | ||
1164 | |||
1165 | static int __init rfkill_init(void) | ||
1166 | { | ||
1167 | int error; | ||
1168 | int i; | ||
1169 | |||
1170 | for (i = 0; i < NUM_RFKILL_TYPES; i++) | ||
1171 | rfkill_global_states[i].cur = !rfkill_default_state; | ||
1172 | |||
1173 | error = class_register(&rfkill_class); | ||
1174 | if (error) | ||
1175 | goto out; | ||
1176 | |||
1177 | error = misc_register(&rfkill_miscdev); | ||
1178 | if (error) { | ||
1179 | class_unregister(&rfkill_class); | ||
1180 | goto out; | ||
1181 | } | ||
1182 | |||
1183 | #ifdef CONFIG_RFKILL_INPUT | ||
1184 | error = rfkill_handler_init(); | ||
1185 | if (error) { | ||
1186 | misc_deregister(&rfkill_miscdev); | ||
1187 | class_unregister(&rfkill_class); | ||
1188 | goto out; | ||
1189 | } | ||
1190 | #endif | ||
1191 | |||
1192 | out: | ||
1193 | return error; | ||
1194 | } | ||
1195 | subsys_initcall(rfkill_init); | ||
1196 | |||
1197 | static void __exit rfkill_exit(void) | ||
1198 | { | ||
1199 | #ifdef CONFIG_RFKILL_INPUT | ||
1200 | rfkill_handler_exit(); | ||
1201 | #endif | ||
1202 | misc_deregister(&rfkill_miscdev); | ||
1203 | class_unregister(&rfkill_class); | ||
1204 | } | ||
1205 | module_exit(rfkill_exit); | ||
diff --git a/net/rfkill/input.c b/net/rfkill/input.c new file mode 100644 index 000000000000..a7295ad5f9cb --- /dev/null +++ b/net/rfkill/input.c | |||
@@ -0,0 +1,342 @@ | |||
1 | /* | ||
2 | * Input layer to RF Kill interface connector | ||
3 | * | ||
4 | * Copyright (c) 2007 Dmitry Torokhov | ||
5 | * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License version 2 as published | ||
9 | * by the Free Software Foundation. | ||
10 | * | ||
11 | * If you ever run into a situation in which you have a SW_ type rfkill | ||
12 | * input device, then you can revive code that was removed in the patch | ||
13 | * "rfkill-input: remove unused code". | ||
14 | */ | ||
15 | |||
16 | #include <linux/input.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/workqueue.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/rfkill.h> | ||
21 | #include <linux/sched.h> | ||
22 | |||
23 | #include "rfkill.h" | ||
24 | |||
25 | enum rfkill_input_master_mode { | ||
26 | RFKILL_INPUT_MASTER_UNLOCK = 0, | ||
27 | RFKILL_INPUT_MASTER_RESTORE = 1, | ||
28 | RFKILL_INPUT_MASTER_UNBLOCKALL = 2, | ||
29 | NUM_RFKILL_INPUT_MASTER_MODES | ||
30 | }; | ||
31 | |||
32 | /* Delay (in ms) between consecutive switch ops */ | ||
33 | #define RFKILL_OPS_DELAY 200 | ||
34 | |||
35 | static enum rfkill_input_master_mode rfkill_master_switch_mode = | ||
36 | RFKILL_INPUT_MASTER_UNBLOCKALL; | ||
37 | module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0); | ||
38 | MODULE_PARM_DESC(master_switch_mode, | ||
39 | "SW_RFKILL_ALL ON should: 0=do nothing (only unlock); 1=restore; 2=unblock all"); | ||
40 | |||
41 | static spinlock_t rfkill_op_lock; | ||
42 | static bool rfkill_op_pending; | ||
43 | static unsigned long rfkill_sw_pending[BITS_TO_LONGS(NUM_RFKILL_TYPES)]; | ||
44 | static unsigned long rfkill_sw_state[BITS_TO_LONGS(NUM_RFKILL_TYPES)]; | ||
45 | |||
46 | enum rfkill_sched_op { | ||
47 | RFKILL_GLOBAL_OP_EPO = 0, | ||
48 | RFKILL_GLOBAL_OP_RESTORE, | ||
49 | RFKILL_GLOBAL_OP_UNLOCK, | ||
50 | RFKILL_GLOBAL_OP_UNBLOCK, | ||
51 | }; | ||
52 | |||
53 | static enum rfkill_sched_op rfkill_master_switch_op; | ||
54 | static enum rfkill_sched_op rfkill_op; | ||
55 | |||
56 | static void __rfkill_handle_global_op(enum rfkill_sched_op op) | ||
57 | { | ||
58 | unsigned int i; | ||
59 | |||
60 | switch (op) { | ||
61 | case RFKILL_GLOBAL_OP_EPO: | ||
62 | rfkill_epo(); | ||
63 | break; | ||
64 | case RFKILL_GLOBAL_OP_RESTORE: | ||
65 | rfkill_restore_states(); | ||
66 | break; | ||
67 | case RFKILL_GLOBAL_OP_UNLOCK: | ||
68 | rfkill_remove_epo_lock(); | ||
69 | break; | ||
70 | case RFKILL_GLOBAL_OP_UNBLOCK: | ||
71 | rfkill_remove_epo_lock(); | ||
72 | for (i = 0; i < NUM_RFKILL_TYPES; i++) | ||
73 | rfkill_switch_all(i, false); | ||
74 | break; | ||
75 | default: | ||
76 | /* memory corruption or bug, fail safely */ | ||
77 | rfkill_epo(); | ||
78 | WARN(1, "Unknown requested operation %d! " | ||
79 | "rfkill Emergency Power Off activated\n", | ||
80 | op); | ||
81 | } | ||
82 | } | ||
83 | |||
84 | static void __rfkill_handle_normal_op(const enum rfkill_type type, | ||
85 | const bool complement) | ||
86 | { | ||
87 | bool blocked; | ||
88 | |||
89 | blocked = rfkill_get_global_sw_state(type); | ||
90 | if (complement) | ||
91 | blocked = !blocked; | ||
92 | |||
93 | rfkill_switch_all(type, blocked); | ||
94 | } | ||
95 | |||
96 | static void rfkill_op_handler(struct work_struct *work) | ||
97 | { | ||
98 | unsigned int i; | ||
99 | bool c; | ||
100 | |||
101 | spin_lock_irq(&rfkill_op_lock); | ||
102 | do { | ||
103 | if (rfkill_op_pending) { | ||
104 | enum rfkill_sched_op op = rfkill_op; | ||
105 | rfkill_op_pending = false; | ||
106 | memset(rfkill_sw_pending, 0, | ||
107 | sizeof(rfkill_sw_pending)); | ||
108 | spin_unlock_irq(&rfkill_op_lock); | ||
109 | |||
110 | __rfkill_handle_global_op(op); | ||
111 | |||
112 | spin_lock_irq(&rfkill_op_lock); | ||
113 | |||
114 | /* | ||
115 | * handle global ops first -- during unlocked period | ||
116 | * we might have gotten a new global op. | ||
117 | */ | ||
118 | if (rfkill_op_pending) | ||
119 | continue; | ||
120 | } | ||
121 | |||
122 | if (rfkill_is_epo_lock_active()) | ||
123 | continue; | ||
124 | |||
125 | for (i = 0; i < NUM_RFKILL_TYPES; i++) { | ||
126 | if (__test_and_clear_bit(i, rfkill_sw_pending)) { | ||
127 | c = __test_and_clear_bit(i, rfkill_sw_state); | ||
128 | spin_unlock_irq(&rfkill_op_lock); | ||
129 | |||
130 | __rfkill_handle_normal_op(i, c); | ||
131 | |||
132 | spin_lock_irq(&rfkill_op_lock); | ||
133 | } | ||
134 | } | ||
135 | } while (rfkill_op_pending); | ||
136 | spin_unlock_irq(&rfkill_op_lock); | ||
137 | } | ||
138 | |||
139 | static DECLARE_DELAYED_WORK(rfkill_op_work, rfkill_op_handler); | ||
140 | static unsigned long rfkill_last_scheduled; | ||
141 | |||
142 | static unsigned long rfkill_ratelimit(const unsigned long last) | ||
143 | { | ||
144 | const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY); | ||
145 | return (time_after(jiffies, last + delay)) ? 0 : delay; | ||
146 | } | ||
147 | |||
148 | static void rfkill_schedule_ratelimited(void) | ||
149 | { | ||
150 | if (delayed_work_pending(&rfkill_op_work)) | ||
151 | return; | ||
152 | schedule_delayed_work(&rfkill_op_work, | ||
153 | rfkill_ratelimit(rfkill_last_scheduled)); | ||
154 | rfkill_last_scheduled = jiffies; | ||
155 | } | ||
156 | |||
157 | static void rfkill_schedule_global_op(enum rfkill_sched_op op) | ||
158 | { | ||
159 | unsigned long flags; | ||
160 | |||
161 | spin_lock_irqsave(&rfkill_op_lock, flags); | ||
162 | rfkill_op = op; | ||
163 | rfkill_op_pending = true; | ||
164 | if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) { | ||
165 | /* bypass the limiter for EPO */ | ||
166 | cancel_delayed_work(&rfkill_op_work); | ||
167 | schedule_delayed_work(&rfkill_op_work, 0); | ||
168 | rfkill_last_scheduled = jiffies; | ||
169 | } else | ||
170 | rfkill_schedule_ratelimited(); | ||
171 | spin_unlock_irqrestore(&rfkill_op_lock, flags); | ||
172 | } | ||
173 | |||
174 | static void rfkill_schedule_toggle(enum rfkill_type type) | ||
175 | { | ||
176 | unsigned long flags; | ||
177 | |||
178 | if (rfkill_is_epo_lock_active()) | ||
179 | return; | ||
180 | |||
181 | spin_lock_irqsave(&rfkill_op_lock, flags); | ||
182 | if (!rfkill_op_pending) { | ||
183 | __set_bit(type, rfkill_sw_pending); | ||
184 | __change_bit(type, rfkill_sw_state); | ||
185 | rfkill_schedule_ratelimited(); | ||
186 | } | ||
187 | spin_unlock_irqrestore(&rfkill_op_lock, flags); | ||
188 | } | ||
189 | |||
190 | static void rfkill_schedule_evsw_rfkillall(int state) | ||
191 | { | ||
192 | if (state) | ||
193 | rfkill_schedule_global_op(rfkill_master_switch_op); | ||
194 | else | ||
195 | rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO); | ||
196 | } | ||
197 | |||
198 | static void rfkill_event(struct input_handle *handle, unsigned int type, | ||
199 | unsigned int code, int data) | ||
200 | { | ||
201 | if (type == EV_KEY && data == 1) { | ||
202 | switch (code) { | ||
203 | case KEY_WLAN: | ||
204 | rfkill_schedule_toggle(RFKILL_TYPE_WLAN); | ||
205 | break; | ||
206 | case KEY_BLUETOOTH: | ||
207 | rfkill_schedule_toggle(RFKILL_TYPE_BLUETOOTH); | ||
208 | break; | ||
209 | case KEY_UWB: | ||
210 | rfkill_schedule_toggle(RFKILL_TYPE_UWB); | ||
211 | break; | ||
212 | case KEY_WIMAX: | ||
213 | rfkill_schedule_toggle(RFKILL_TYPE_WIMAX); | ||
214 | break; | ||
215 | } | ||
216 | } else if (type == EV_SW && code == SW_RFKILL_ALL) | ||
217 | rfkill_schedule_evsw_rfkillall(data); | ||
218 | } | ||
219 | |||
220 | static int rfkill_connect(struct input_handler *handler, struct input_dev *dev, | ||
221 | const struct input_device_id *id) | ||
222 | { | ||
223 | struct input_handle *handle; | ||
224 | int error; | ||
225 | |||
226 | handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); | ||
227 | if (!handle) | ||
228 | return -ENOMEM; | ||
229 | |||
230 | handle->dev = dev; | ||
231 | handle->handler = handler; | ||
232 | handle->name = "rfkill"; | ||
233 | |||
234 | /* causes rfkill_start() to be called */ | ||
235 | error = input_register_handle(handle); | ||
236 | if (error) | ||
237 | goto err_free_handle; | ||
238 | |||
239 | error = input_open_device(handle); | ||
240 | if (error) | ||
241 | goto err_unregister_handle; | ||
242 | |||
243 | return 0; | ||
244 | |||
245 | err_unregister_handle: | ||
246 | input_unregister_handle(handle); | ||
247 | err_free_handle: | ||
248 | kfree(handle); | ||
249 | return error; | ||
250 | } | ||
251 | |||
252 | static void rfkill_start(struct input_handle *handle) | ||
253 | { | ||
254 | /* | ||
255 | * Take event_lock to guard against configuration changes, we | ||
256 | * should be able to deal with concurrency with rfkill_event() | ||
257 | * just fine (which event_lock will also avoid). | ||
258 | */ | ||
259 | spin_lock_irq(&handle->dev->event_lock); | ||
260 | |||
261 | if (test_bit(EV_SW, handle->dev->evbit) && | ||
262 | test_bit(SW_RFKILL_ALL, handle->dev->swbit)) | ||
263 | rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL, | ||
264 | handle->dev->sw)); | ||
265 | |||
266 | spin_unlock_irq(&handle->dev->event_lock); | ||
267 | } | ||
268 | |||
269 | static void rfkill_disconnect(struct input_handle *handle) | ||
270 | { | ||
271 | input_close_device(handle); | ||
272 | input_unregister_handle(handle); | ||
273 | kfree(handle); | ||
274 | } | ||
275 | |||
276 | static const struct input_device_id rfkill_ids[] = { | ||
277 | { | ||
278 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
279 | .evbit = { BIT_MASK(EV_KEY) }, | ||
280 | .keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) }, | ||
281 | }, | ||
282 | { | ||
283 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
284 | .evbit = { BIT_MASK(EV_KEY) }, | ||
285 | .keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) }, | ||
286 | }, | ||
287 | { | ||
288 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
289 | .evbit = { BIT_MASK(EV_KEY) }, | ||
290 | .keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) }, | ||
291 | }, | ||
292 | { | ||
293 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
294 | .evbit = { BIT_MASK(EV_KEY) }, | ||
295 | .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) }, | ||
296 | }, | ||
297 | { | ||
298 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT, | ||
299 | .evbit = { BIT(EV_SW) }, | ||
300 | .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) }, | ||
301 | }, | ||
302 | { } | ||
303 | }; | ||
304 | |||
305 | static struct input_handler rfkill_handler = { | ||
306 | .name = "rfkill", | ||
307 | .event = rfkill_event, | ||
308 | .connect = rfkill_connect, | ||
309 | .start = rfkill_start, | ||
310 | .disconnect = rfkill_disconnect, | ||
311 | .id_table = rfkill_ids, | ||
312 | }; | ||
313 | |||
314 | int __init rfkill_handler_init(void) | ||
315 | { | ||
316 | switch (rfkill_master_switch_mode) { | ||
317 | case RFKILL_INPUT_MASTER_UNBLOCKALL: | ||
318 | rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNBLOCK; | ||
319 | break; | ||
320 | case RFKILL_INPUT_MASTER_RESTORE: | ||
321 | rfkill_master_switch_op = RFKILL_GLOBAL_OP_RESTORE; | ||
322 | break; | ||
323 | case RFKILL_INPUT_MASTER_UNLOCK: | ||
324 | rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNLOCK; | ||
325 | break; | ||
326 | default: | ||
327 | return -EINVAL; | ||
328 | } | ||
329 | |||
330 | spin_lock_init(&rfkill_op_lock); | ||
331 | |||
332 | /* Avoid delay at first schedule */ | ||
333 | rfkill_last_scheduled = | ||
334 | jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1; | ||
335 | return input_register_handler(&rfkill_handler); | ||
336 | } | ||
337 | |||
338 | void __exit rfkill_handler_exit(void) | ||
339 | { | ||
340 | input_unregister_handler(&rfkill_handler); | ||
341 | cancel_delayed_work_sync(&rfkill_op_work); | ||
342 | } | ||
diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c deleted file mode 100644 index 84efde97c5a7..000000000000 --- a/net/rfkill/rfkill-input.c +++ /dev/null | |||
@@ -1,459 +0,0 @@ | |||
1 | /* | ||
2 | * Input layer to RF Kill interface connector | ||
3 | * | ||
4 | * Copyright (c) 2007 Dmitry Torokhov | ||
5 | */ | ||
6 | |||
7 | /* | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License version 2 as published | ||
10 | * by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/input.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/workqueue.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/rfkill.h> | ||
19 | #include <linux/sched.h> | ||
20 | |||
21 | #include "rfkill-input.h" | ||
22 | |||
23 | MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>"); | ||
24 | MODULE_DESCRIPTION("Input layer to RF switch connector"); | ||
25 | MODULE_LICENSE("GPL"); | ||
26 | |||
27 | enum rfkill_input_master_mode { | ||
28 | RFKILL_INPUT_MASTER_DONOTHING = 0, | ||
29 | RFKILL_INPUT_MASTER_RESTORE = 1, | ||
30 | RFKILL_INPUT_MASTER_UNBLOCKALL = 2, | ||
31 | RFKILL_INPUT_MASTER_MAX, /* marker */ | ||
32 | }; | ||
33 | |||
34 | /* Delay (in ms) between consecutive switch ops */ | ||
35 | #define RFKILL_OPS_DELAY 200 | ||
36 | |||
37 | static enum rfkill_input_master_mode rfkill_master_switch_mode = | ||
38 | RFKILL_INPUT_MASTER_UNBLOCKALL; | ||
39 | module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0); | ||
40 | MODULE_PARM_DESC(master_switch_mode, | ||
41 | "SW_RFKILL_ALL ON should: 0=do nothing; 1=restore; 2=unblock all"); | ||
42 | |||
43 | enum rfkill_global_sched_op { | ||
44 | RFKILL_GLOBAL_OP_EPO = 0, | ||
45 | RFKILL_GLOBAL_OP_RESTORE, | ||
46 | RFKILL_GLOBAL_OP_UNLOCK, | ||
47 | RFKILL_GLOBAL_OP_UNBLOCK, | ||
48 | }; | ||
49 | |||
50 | /* | ||
51 | * Currently, the code marked with RFKILL_NEED_SWSET is inactive. | ||
52 | * If handling of EV_SW SW_WLAN/WWAN/BLUETOOTH/etc is needed in the | ||
53 | * future, when such events are added, that code will be necessary. | ||
54 | */ | ||
55 | |||
56 | struct rfkill_task { | ||
57 | struct delayed_work dwork; | ||
58 | |||
59 | /* ensures that task is serialized */ | ||
60 | struct mutex mutex; | ||
61 | |||
62 | /* protects everything below */ | ||
63 | spinlock_t lock; | ||
64 | |||
65 | /* pending regular switch operations (1=pending) */ | ||
66 | unsigned long sw_pending[BITS_TO_LONGS(RFKILL_TYPE_MAX)]; | ||
67 | |||
68 | #ifdef RFKILL_NEED_SWSET | ||
69 | /* set operation pending (1=pending) */ | ||
70 | unsigned long sw_setpending[BITS_TO_LONGS(RFKILL_TYPE_MAX)]; | ||
71 | |||
72 | /* desired state for pending set operation (1=unblock) */ | ||
73 | unsigned long sw_newstate[BITS_TO_LONGS(RFKILL_TYPE_MAX)]; | ||
74 | #endif | ||
75 | |||
76 | /* should the state be complemented (1=yes) */ | ||
77 | unsigned long sw_togglestate[BITS_TO_LONGS(RFKILL_TYPE_MAX)]; | ||
78 | |||
79 | bool global_op_pending; | ||
80 | enum rfkill_global_sched_op op; | ||
81 | |||
82 | /* last time it was scheduled */ | ||
83 | unsigned long last_scheduled; | ||
84 | }; | ||
85 | |||
86 | static void __rfkill_handle_global_op(enum rfkill_global_sched_op op) | ||
87 | { | ||
88 | unsigned int i; | ||
89 | |||
90 | switch (op) { | ||
91 | case RFKILL_GLOBAL_OP_EPO: | ||
92 | rfkill_epo(); | ||
93 | break; | ||
94 | case RFKILL_GLOBAL_OP_RESTORE: | ||
95 | rfkill_restore_states(); | ||
96 | break; | ||
97 | case RFKILL_GLOBAL_OP_UNLOCK: | ||
98 | rfkill_remove_epo_lock(); | ||
99 | break; | ||
100 | case RFKILL_GLOBAL_OP_UNBLOCK: | ||
101 | rfkill_remove_epo_lock(); | ||
102 | for (i = 0; i < RFKILL_TYPE_MAX; i++) | ||
103 | rfkill_switch_all(i, RFKILL_STATE_UNBLOCKED); | ||
104 | break; | ||
105 | default: | ||
106 | /* memory corruption or bug, fail safely */ | ||
107 | rfkill_epo(); | ||
108 | WARN(1, "Unknown requested operation %d! " | ||
109 | "rfkill Emergency Power Off activated\n", | ||
110 | op); | ||
111 | } | ||
112 | } | ||
113 | |||
114 | #ifdef RFKILL_NEED_SWSET | ||
115 | static void __rfkill_handle_normal_op(const enum rfkill_type type, | ||
116 | const bool sp, const bool s, const bool c) | ||
117 | { | ||
118 | enum rfkill_state state; | ||
119 | |||
120 | if (sp) | ||
121 | state = (s) ? RFKILL_STATE_UNBLOCKED : | ||
122 | RFKILL_STATE_SOFT_BLOCKED; | ||
123 | else | ||
124 | state = rfkill_get_global_state(type); | ||
125 | |||
126 | if (c) | ||
127 | state = rfkill_state_complement(state); | ||
128 | |||
129 | rfkill_switch_all(type, state); | ||
130 | } | ||
131 | #else | ||
132 | static void __rfkill_handle_normal_op(const enum rfkill_type type, | ||
133 | const bool c) | ||
134 | { | ||
135 | enum rfkill_state state; | ||
136 | |||
137 | state = rfkill_get_global_state(type); | ||
138 | if (c) | ||
139 | state = rfkill_state_complement(state); | ||
140 | |||
141 | rfkill_switch_all(type, state); | ||
142 | } | ||
143 | #endif | ||
144 | |||
145 | static void rfkill_task_handler(struct work_struct *work) | ||
146 | { | ||
147 | struct rfkill_task *task = container_of(work, | ||
148 | struct rfkill_task, dwork.work); | ||
149 | bool doit = true; | ||
150 | |||
151 | mutex_lock(&task->mutex); | ||
152 | |||
153 | spin_lock_irq(&task->lock); | ||
154 | while (doit) { | ||
155 | if (task->global_op_pending) { | ||
156 | enum rfkill_global_sched_op op = task->op; | ||
157 | task->global_op_pending = false; | ||
158 | memset(task->sw_pending, 0, sizeof(task->sw_pending)); | ||
159 | spin_unlock_irq(&task->lock); | ||
160 | |||
161 | __rfkill_handle_global_op(op); | ||
162 | |||
163 | /* make sure we do at least one pass with | ||
164 | * !task->global_op_pending */ | ||
165 | spin_lock_irq(&task->lock); | ||
166 | continue; | ||
167 | } else if (!rfkill_is_epo_lock_active()) { | ||
168 | unsigned int i = 0; | ||
169 | |||
170 | while (!task->global_op_pending && | ||
171 | i < RFKILL_TYPE_MAX) { | ||
172 | if (test_and_clear_bit(i, task->sw_pending)) { | ||
173 | bool c; | ||
174 | #ifdef RFKILL_NEED_SWSET | ||
175 | bool sp, s; | ||
176 | sp = test_and_clear_bit(i, | ||
177 | task->sw_setpending); | ||
178 | s = test_bit(i, task->sw_newstate); | ||
179 | #endif | ||
180 | c = test_and_clear_bit(i, | ||
181 | task->sw_togglestate); | ||
182 | spin_unlock_irq(&task->lock); | ||
183 | |||
184 | #ifdef RFKILL_NEED_SWSET | ||
185 | __rfkill_handle_normal_op(i, sp, s, c); | ||
186 | #else | ||
187 | __rfkill_handle_normal_op(i, c); | ||
188 | #endif | ||
189 | |||
190 | spin_lock_irq(&task->lock); | ||
191 | } | ||
192 | i++; | ||
193 | } | ||
194 | } | ||
195 | doit = task->global_op_pending; | ||
196 | } | ||
197 | spin_unlock_irq(&task->lock); | ||
198 | |||
199 | mutex_unlock(&task->mutex); | ||
200 | } | ||
201 | |||
202 | static struct rfkill_task rfkill_task = { | ||
203 | .dwork = __DELAYED_WORK_INITIALIZER(rfkill_task.dwork, | ||
204 | rfkill_task_handler), | ||
205 | .mutex = __MUTEX_INITIALIZER(rfkill_task.mutex), | ||
206 | .lock = __SPIN_LOCK_UNLOCKED(rfkill_task.lock), | ||
207 | }; | ||
208 | |||
209 | static unsigned long rfkill_ratelimit(const unsigned long last) | ||
210 | { | ||
211 | const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY); | ||
212 | return (time_after(jiffies, last + delay)) ? 0 : delay; | ||
213 | } | ||
214 | |||
215 | static void rfkill_schedule_ratelimited(void) | ||
216 | { | ||
217 | if (!delayed_work_pending(&rfkill_task.dwork)) { | ||
218 | schedule_delayed_work(&rfkill_task.dwork, | ||
219 | rfkill_ratelimit(rfkill_task.last_scheduled)); | ||
220 | rfkill_task.last_scheduled = jiffies; | ||
221 | } | ||
222 | } | ||
223 | |||
224 | static void rfkill_schedule_global_op(enum rfkill_global_sched_op op) | ||
225 | { | ||
226 | unsigned long flags; | ||
227 | |||
228 | spin_lock_irqsave(&rfkill_task.lock, flags); | ||
229 | rfkill_task.op = op; | ||
230 | rfkill_task.global_op_pending = true; | ||
231 | if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) { | ||
232 | /* bypass the limiter for EPO */ | ||
233 | cancel_delayed_work(&rfkill_task.dwork); | ||
234 | schedule_delayed_work(&rfkill_task.dwork, 0); | ||
235 | rfkill_task.last_scheduled = jiffies; | ||
236 | } else | ||
237 | rfkill_schedule_ratelimited(); | ||
238 | spin_unlock_irqrestore(&rfkill_task.lock, flags); | ||
239 | } | ||
240 | |||
241 | #ifdef RFKILL_NEED_SWSET | ||
242 | /* Use this if you need to add EV_SW SW_WLAN/WWAN/BLUETOOTH/etc handling */ | ||
243 | |||
244 | static void rfkill_schedule_set(enum rfkill_type type, | ||
245 | enum rfkill_state desired_state) | ||
246 | { | ||
247 | unsigned long flags; | ||
248 | |||
249 | if (rfkill_is_epo_lock_active()) | ||
250 | return; | ||
251 | |||
252 | spin_lock_irqsave(&rfkill_task.lock, flags); | ||
253 | if (!rfkill_task.global_op_pending) { | ||
254 | set_bit(type, rfkill_task.sw_pending); | ||
255 | set_bit(type, rfkill_task.sw_setpending); | ||
256 | clear_bit(type, rfkill_task.sw_togglestate); | ||
257 | if (desired_state) | ||
258 | set_bit(type, rfkill_task.sw_newstate); | ||
259 | else | ||
260 | clear_bit(type, rfkill_task.sw_newstate); | ||
261 | rfkill_schedule_ratelimited(); | ||
262 | } | ||
263 | spin_unlock_irqrestore(&rfkill_task.lock, flags); | ||
264 | } | ||
265 | #endif | ||
266 | |||
267 | static void rfkill_schedule_toggle(enum rfkill_type type) | ||
268 | { | ||
269 | unsigned long flags; | ||
270 | |||
271 | if (rfkill_is_epo_lock_active()) | ||
272 | return; | ||
273 | |||
274 | spin_lock_irqsave(&rfkill_task.lock, flags); | ||
275 | if (!rfkill_task.global_op_pending) { | ||
276 | set_bit(type, rfkill_task.sw_pending); | ||
277 | change_bit(type, rfkill_task.sw_togglestate); | ||
278 | rfkill_schedule_ratelimited(); | ||
279 | } | ||
280 | spin_unlock_irqrestore(&rfkill_task.lock, flags); | ||
281 | } | ||
282 | |||
283 | static void rfkill_schedule_evsw_rfkillall(int state) | ||
284 | { | ||
285 | if (state) { | ||
286 | switch (rfkill_master_switch_mode) { | ||
287 | case RFKILL_INPUT_MASTER_UNBLOCKALL: | ||
288 | rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNBLOCK); | ||
289 | break; | ||
290 | case RFKILL_INPUT_MASTER_RESTORE: | ||
291 | rfkill_schedule_global_op(RFKILL_GLOBAL_OP_RESTORE); | ||
292 | break; | ||
293 | case RFKILL_INPUT_MASTER_DONOTHING: | ||
294 | rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNLOCK); | ||
295 | break; | ||
296 | default: | ||
297 | /* memory corruption or driver bug! fail safely */ | ||
298 | rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO); | ||
299 | WARN(1, "Unknown rfkill_master_switch_mode (%d), " | ||
300 | "driver bug or memory corruption detected!\n", | ||
301 | rfkill_master_switch_mode); | ||
302 | break; | ||
303 | } | ||
304 | } else | ||
305 | rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO); | ||
306 | } | ||
307 | |||
308 | static void rfkill_event(struct input_handle *handle, unsigned int type, | ||
309 | unsigned int code, int data) | ||
310 | { | ||
311 | if (type == EV_KEY && data == 1) { | ||
312 | enum rfkill_type t; | ||
313 | |||
314 | switch (code) { | ||
315 | case KEY_WLAN: | ||
316 | t = RFKILL_TYPE_WLAN; | ||
317 | break; | ||
318 | case KEY_BLUETOOTH: | ||
319 | t = RFKILL_TYPE_BLUETOOTH; | ||
320 | break; | ||
321 | case KEY_UWB: | ||
322 | t = RFKILL_TYPE_UWB; | ||
323 | break; | ||
324 | case KEY_WIMAX: | ||
325 | t = RFKILL_TYPE_WIMAX; | ||
326 | break; | ||
327 | default: | ||
328 | return; | ||
329 | } | ||
330 | rfkill_schedule_toggle(t); | ||
331 | return; | ||
332 | } else if (type == EV_SW) { | ||
333 | switch (code) { | ||
334 | case SW_RFKILL_ALL: | ||
335 | rfkill_schedule_evsw_rfkillall(data); | ||
336 | return; | ||
337 | default: | ||
338 | return; | ||
339 | } | ||
340 | } | ||
341 | } | ||
342 | |||
343 | static int rfkill_connect(struct input_handler *handler, struct input_dev *dev, | ||
344 | const struct input_device_id *id) | ||
345 | { | ||
346 | struct input_handle *handle; | ||
347 | int error; | ||
348 | |||
349 | handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); | ||
350 | if (!handle) | ||
351 | return -ENOMEM; | ||
352 | |||
353 | handle->dev = dev; | ||
354 | handle->handler = handler; | ||
355 | handle->name = "rfkill"; | ||
356 | |||
357 | /* causes rfkill_start() to be called */ | ||
358 | error = input_register_handle(handle); | ||
359 | if (error) | ||
360 | goto err_free_handle; | ||
361 | |||
362 | error = input_open_device(handle); | ||
363 | if (error) | ||
364 | goto err_unregister_handle; | ||
365 | |||
366 | return 0; | ||
367 | |||
368 | err_unregister_handle: | ||
369 | input_unregister_handle(handle); | ||
370 | err_free_handle: | ||
371 | kfree(handle); | ||
372 | return error; | ||
373 | } | ||
374 | |||
375 | static void rfkill_start(struct input_handle *handle) | ||
376 | { | ||
377 | /* Take event_lock to guard against configuration changes, we | ||
378 | * should be able to deal with concurrency with rfkill_event() | ||
379 | * just fine (which event_lock will also avoid). */ | ||
380 | spin_lock_irq(&handle->dev->event_lock); | ||
381 | |||
382 | if (test_bit(EV_SW, handle->dev->evbit)) { | ||
383 | if (test_bit(SW_RFKILL_ALL, handle->dev->swbit)) | ||
384 | rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL, | ||
385 | handle->dev->sw)); | ||
386 | /* add resync for further EV_SW events here */ | ||
387 | } | ||
388 | |||
389 | spin_unlock_irq(&handle->dev->event_lock); | ||
390 | } | ||
391 | |||
392 | static void rfkill_disconnect(struct input_handle *handle) | ||
393 | { | ||
394 | input_close_device(handle); | ||
395 | input_unregister_handle(handle); | ||
396 | kfree(handle); | ||
397 | } | ||
398 | |||
399 | static const struct input_device_id rfkill_ids[] = { | ||
400 | { | ||
401 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
402 | .evbit = { BIT_MASK(EV_KEY) }, | ||
403 | .keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) }, | ||
404 | }, | ||
405 | { | ||
406 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
407 | .evbit = { BIT_MASK(EV_KEY) }, | ||
408 | .keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) }, | ||
409 | }, | ||
410 | { | ||
411 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
412 | .evbit = { BIT_MASK(EV_KEY) }, | ||
413 | .keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) }, | ||
414 | }, | ||
415 | { | ||
416 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
417 | .evbit = { BIT_MASK(EV_KEY) }, | ||
418 | .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) }, | ||
419 | }, | ||
420 | { | ||
421 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT, | ||
422 | .evbit = { BIT(EV_SW) }, | ||
423 | .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) }, | ||
424 | }, | ||
425 | { } | ||
426 | }; | ||
427 | |||
428 | static struct input_handler rfkill_handler = { | ||
429 | .event = rfkill_event, | ||
430 | .connect = rfkill_connect, | ||
431 | .disconnect = rfkill_disconnect, | ||
432 | .start = rfkill_start, | ||
433 | .name = "rfkill", | ||
434 | .id_table = rfkill_ids, | ||
435 | }; | ||
436 | |||
437 | static int __init rfkill_handler_init(void) | ||
438 | { | ||
439 | if (rfkill_master_switch_mode >= RFKILL_INPUT_MASTER_MAX) | ||
440 | return -EINVAL; | ||
441 | |||
442 | /* | ||
443 | * The penalty to not doing this is a possible RFKILL_OPS_DELAY delay | ||
444 | * at the first use. Acceptable, but if we can avoid it, why not? | ||
445 | */ | ||
446 | rfkill_task.last_scheduled = | ||
447 | jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1; | ||
448 | return input_register_handler(&rfkill_handler); | ||
449 | } | ||
450 | |||
451 | static void __exit rfkill_handler_exit(void) | ||
452 | { | ||
453 | input_unregister_handler(&rfkill_handler); | ||
454 | cancel_delayed_work_sync(&rfkill_task.dwork); | ||
455 | rfkill_remove_epo_lock(); | ||
456 | } | ||
457 | |||
458 | module_init(rfkill_handler_init); | ||
459 | module_exit(rfkill_handler_exit); | ||
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c deleted file mode 100644 index 3eaa39403c13..000000000000 --- a/net/rfkill/rfkill.c +++ /dev/null | |||
@@ -1,882 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 - 2007 Ivo van Doorn | ||
3 | * Copyright (C) 2007 Dmitry Torokhov | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the | ||
17 | * Free Software Foundation, Inc., | ||
18 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | */ | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/workqueue.h> | ||
25 | #include <linux/capability.h> | ||
26 | #include <linux/list.h> | ||
27 | #include <linux/mutex.h> | ||
28 | #include <linux/rfkill.h> | ||
29 | |||
30 | /* Get declaration of rfkill_switch_all() to shut up sparse. */ | ||
31 | #include "rfkill-input.h" | ||
32 | |||
33 | |||
34 | MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>"); | ||
35 | MODULE_VERSION("1.0"); | ||
36 | MODULE_DESCRIPTION("RF switch support"); | ||
37 | MODULE_LICENSE("GPL"); | ||
38 | |||
39 | static LIST_HEAD(rfkill_list); /* list of registered rf switches */ | ||
40 | static DEFINE_MUTEX(rfkill_global_mutex); | ||
41 | |||
42 | static unsigned int rfkill_default_state = RFKILL_STATE_UNBLOCKED; | ||
43 | module_param_named(default_state, rfkill_default_state, uint, 0444); | ||
44 | MODULE_PARM_DESC(default_state, | ||
45 | "Default initial state for all radio types, 0 = radio off"); | ||
46 | |||
47 | struct rfkill_gsw_state { | ||
48 | enum rfkill_state current_state; | ||
49 | enum rfkill_state default_state; | ||
50 | }; | ||
51 | |||
52 | static struct rfkill_gsw_state rfkill_global_states[RFKILL_TYPE_MAX]; | ||
53 | static unsigned long rfkill_states_lockdflt[BITS_TO_LONGS(RFKILL_TYPE_MAX)]; | ||
54 | static bool rfkill_epo_lock_active; | ||
55 | |||
56 | |||
57 | #ifdef CONFIG_RFKILL_LEDS | ||
58 | static void rfkill_led_trigger(struct rfkill *rfkill, | ||
59 | enum rfkill_state state) | ||
60 | { | ||
61 | struct led_trigger *led = &rfkill->led_trigger; | ||
62 | |||
63 | if (!led->name) | ||
64 | return; | ||
65 | if (state != RFKILL_STATE_UNBLOCKED) | ||
66 | led_trigger_event(led, LED_OFF); | ||
67 | else | ||
68 | led_trigger_event(led, LED_FULL); | ||
69 | } | ||
70 | |||
71 | static void rfkill_led_trigger_activate(struct led_classdev *led) | ||
72 | { | ||
73 | struct rfkill *rfkill = container_of(led->trigger, | ||
74 | struct rfkill, led_trigger); | ||
75 | |||
76 | rfkill_led_trigger(rfkill, rfkill->state); | ||
77 | } | ||
78 | #endif /* CONFIG_RFKILL_LEDS */ | ||
79 | |||
80 | static void rfkill_uevent(struct rfkill *rfkill) | ||
81 | { | ||
82 | kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE); | ||
83 | } | ||
84 | |||
85 | static void update_rfkill_state(struct rfkill *rfkill) | ||
86 | { | ||
87 | enum rfkill_state newstate, oldstate; | ||
88 | |||
89 | if (rfkill->get_state) { | ||
90 | mutex_lock(&rfkill->mutex); | ||
91 | if (!rfkill->get_state(rfkill->data, &newstate)) { | ||
92 | oldstate = rfkill->state; | ||
93 | rfkill->state = newstate; | ||
94 | if (oldstate != newstate) | ||
95 | rfkill_uevent(rfkill); | ||
96 | } | ||
97 | mutex_unlock(&rfkill->mutex); | ||
98 | } | ||
99 | } | ||
100 | |||
101 | /** | ||
102 | * rfkill_toggle_radio - wrapper for toggle_radio hook | ||
103 | * @rfkill: the rfkill struct to use | ||
104 | * @force: calls toggle_radio even if cache says it is not needed, | ||
105 | * and also makes sure notifications of the state will be | ||
106 | * sent even if it didn't change | ||
107 | * @state: the new state to call toggle_radio() with | ||
108 | * | ||
109 | * Calls rfkill->toggle_radio, enforcing the API for toggle_radio | ||
110 | * calls and handling all the red tape such as issuing notifications | ||
111 | * if the call is successful. | ||
112 | * | ||
113 | * Suspended devices are not touched at all, and -EAGAIN is returned. | ||
114 | * | ||
115 | * Note that the @force parameter cannot override a (possibly cached) | ||
116 | * state of RFKILL_STATE_HARD_BLOCKED. Any device making use of | ||
117 | * RFKILL_STATE_HARD_BLOCKED implements either get_state() or | ||
118 | * rfkill_force_state(), so the cache either is bypassed or valid. | ||
119 | * | ||
120 | * Note that we do call toggle_radio for RFKILL_STATE_SOFT_BLOCKED | ||
121 | * even if the radio is in RFKILL_STATE_HARD_BLOCKED state, so as to | ||
122 | * give the driver a hint that it should double-BLOCK the transmitter. | ||
123 | * | ||
124 | * Caller must have acquired rfkill->mutex. | ||
125 | */ | ||
126 | static int rfkill_toggle_radio(struct rfkill *rfkill, | ||
127 | enum rfkill_state state, | ||
128 | int force) | ||
129 | { | ||
130 | int retval = 0; | ||
131 | enum rfkill_state oldstate, newstate; | ||
132 | |||
133 | if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP)) | ||
134 | return -EBUSY; | ||
135 | |||
136 | oldstate = rfkill->state; | ||
137 | |||
138 | if (rfkill->get_state && !force && | ||
139 | !rfkill->get_state(rfkill->data, &newstate)) | ||
140 | rfkill->state = newstate; | ||
141 | |||
142 | switch (state) { | ||
143 | case RFKILL_STATE_HARD_BLOCKED: | ||
144 | /* typically happens when refreshing hardware state, | ||
145 | * such as on resume */ | ||
146 | state = RFKILL_STATE_SOFT_BLOCKED; | ||
147 | break; | ||
148 | case RFKILL_STATE_UNBLOCKED: | ||
149 | /* force can't override this, only rfkill_force_state() can */ | ||
150 | if (rfkill->state == RFKILL_STATE_HARD_BLOCKED) | ||
151 | return -EPERM; | ||
152 | break; | ||
153 | case RFKILL_STATE_SOFT_BLOCKED: | ||
154 | /* nothing to do, we want to give drivers the hint to double | ||
155 | * BLOCK even a transmitter that is already in state | ||
156 | * RFKILL_STATE_HARD_BLOCKED */ | ||
157 | break; | ||
158 | default: | ||
159 | WARN(1, KERN_WARNING | ||
160 | "rfkill: illegal state %d passed as parameter " | ||
161 | "to rfkill_toggle_radio\n", state); | ||
162 | return -EINVAL; | ||
163 | } | ||
164 | |||
165 | if (force || state != rfkill->state) { | ||
166 | retval = rfkill->toggle_radio(rfkill->data, state); | ||
167 | /* never allow a HARD->SOFT downgrade! */ | ||
168 | if (!retval && rfkill->state != RFKILL_STATE_HARD_BLOCKED) | ||
169 | rfkill->state = state; | ||
170 | } | ||
171 | |||
172 | if (force || rfkill->state != oldstate) | ||
173 | rfkill_uevent(rfkill); | ||
174 | |||
175 | return retval; | ||
176 | } | ||
177 | |||
178 | /** | ||
179 | * __rfkill_switch_all - Toggle state of all switches of given type | ||
180 | * @type: type of interfaces to be affected | ||
181 | * @state: the new state | ||
182 | * | ||
183 | * This function toggles the state of all switches of given type, | ||
184 | * unless a specific switch is claimed by userspace (in which case, | ||
185 | * that switch is left alone) or suspended. | ||
186 | * | ||
187 | * Caller must have acquired rfkill_global_mutex. | ||
188 | */ | ||
189 | static void __rfkill_switch_all(const enum rfkill_type type, | ||
190 | const enum rfkill_state state) | ||
191 | { | ||
192 | struct rfkill *rfkill; | ||
193 | |||
194 | if (WARN((state >= RFKILL_STATE_MAX || type >= RFKILL_TYPE_MAX), | ||
195 | KERN_WARNING | ||
196 | "rfkill: illegal state %d or type %d " | ||
197 | "passed as parameter to __rfkill_switch_all\n", | ||
198 | state, type)) | ||
199 | return; | ||
200 | |||
201 | rfkill_global_states[type].current_state = state; | ||
202 | list_for_each_entry(rfkill, &rfkill_list, node) { | ||
203 | if ((!rfkill->user_claim) && (rfkill->type == type)) { | ||
204 | mutex_lock(&rfkill->mutex); | ||
205 | rfkill_toggle_radio(rfkill, state, 0); | ||
206 | mutex_unlock(&rfkill->mutex); | ||
207 | } | ||
208 | } | ||
209 | } | ||
210 | |||
211 | /** | ||
212 | * rfkill_switch_all - Toggle state of all switches of given type | ||
213 | * @type: type of interfaces to be affected | ||
214 | * @state: the new state | ||
215 | * | ||
216 | * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state). | ||
217 | * Please refer to __rfkill_switch_all() for details. | ||
218 | * | ||
219 | * Does nothing if the EPO lock is active. | ||
220 | */ | ||
221 | void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state) | ||
222 | { | ||
223 | mutex_lock(&rfkill_global_mutex); | ||
224 | if (!rfkill_epo_lock_active) | ||
225 | __rfkill_switch_all(type, state); | ||
226 | mutex_unlock(&rfkill_global_mutex); | ||
227 | } | ||
228 | EXPORT_SYMBOL(rfkill_switch_all); | ||
229 | |||
230 | /** | ||
231 | * rfkill_epo - emergency power off all transmitters | ||
232 | * | ||
233 | * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED, | ||
234 | * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex. | ||
235 | * | ||
236 | * The global state before the EPO is saved and can be restored later | ||
237 | * using rfkill_restore_states(). | ||
238 | */ | ||
239 | void rfkill_epo(void) | ||
240 | { | ||
241 | struct rfkill *rfkill; | ||
242 | int i; | ||
243 | |||
244 | mutex_lock(&rfkill_global_mutex); | ||
245 | |||
246 | rfkill_epo_lock_active = true; | ||
247 | list_for_each_entry(rfkill, &rfkill_list, node) { | ||
248 | mutex_lock(&rfkill->mutex); | ||
249 | rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1); | ||
250 | mutex_unlock(&rfkill->mutex); | ||
251 | } | ||
252 | for (i = 0; i < RFKILL_TYPE_MAX; i++) { | ||
253 | rfkill_global_states[i].default_state = | ||
254 | rfkill_global_states[i].current_state; | ||
255 | rfkill_global_states[i].current_state = | ||
256 | RFKILL_STATE_SOFT_BLOCKED; | ||
257 | } | ||
258 | mutex_unlock(&rfkill_global_mutex); | ||
259 | } | ||
260 | EXPORT_SYMBOL_GPL(rfkill_epo); | ||
261 | |||
262 | /** | ||
263 | * rfkill_restore_states - restore global states | ||
264 | * | ||
265 | * Restore (and sync switches to) the global state from the | ||
266 | * states in rfkill_default_states. This can undo the effects of | ||
267 | * a call to rfkill_epo(). | ||
268 | */ | ||
269 | void rfkill_restore_states(void) | ||
270 | { | ||
271 | int i; | ||
272 | |||
273 | mutex_lock(&rfkill_global_mutex); | ||
274 | |||
275 | rfkill_epo_lock_active = false; | ||
276 | for (i = 0; i < RFKILL_TYPE_MAX; i++) | ||
277 | __rfkill_switch_all(i, rfkill_global_states[i].default_state); | ||
278 | mutex_unlock(&rfkill_global_mutex); | ||
279 | } | ||
280 | EXPORT_SYMBOL_GPL(rfkill_restore_states); | ||
281 | |||
282 | /** | ||
283 | * rfkill_remove_epo_lock - unlock state changes | ||
284 | * | ||
285 | * Used by rfkill-input manually unlock state changes, when | ||
286 | * the EPO switch is deactivated. | ||
287 | */ | ||
288 | void rfkill_remove_epo_lock(void) | ||
289 | { | ||
290 | mutex_lock(&rfkill_global_mutex); | ||
291 | rfkill_epo_lock_active = false; | ||
292 | mutex_unlock(&rfkill_global_mutex); | ||
293 | } | ||
294 | EXPORT_SYMBOL_GPL(rfkill_remove_epo_lock); | ||
295 | |||
296 | /** | ||
297 | * rfkill_is_epo_lock_active - returns true EPO is active | ||
298 | * | ||
299 | * Returns 0 (false) if there is NOT an active EPO contidion, | ||
300 | * and 1 (true) if there is an active EPO contition, which | ||
301 | * locks all radios in one of the BLOCKED states. | ||
302 | * | ||
303 | * Can be called in atomic context. | ||
304 | */ | ||
305 | bool rfkill_is_epo_lock_active(void) | ||
306 | { | ||
307 | return rfkill_epo_lock_active; | ||
308 | } | ||
309 | EXPORT_SYMBOL_GPL(rfkill_is_epo_lock_active); | ||
310 | |||
311 | /** | ||
312 | * rfkill_get_global_state - returns global state for a type | ||
313 | * @type: the type to get the global state of | ||
314 | * | ||
315 | * Returns the current global state for a given wireless | ||
316 | * device type. | ||
317 | */ | ||
318 | enum rfkill_state rfkill_get_global_state(const enum rfkill_type type) | ||
319 | { | ||
320 | return rfkill_global_states[type].current_state; | ||
321 | } | ||
322 | EXPORT_SYMBOL_GPL(rfkill_get_global_state); | ||
323 | |||
324 | /** | ||
325 | * rfkill_force_state - Force the internal rfkill radio state | ||
326 | * @rfkill: pointer to the rfkill class to modify. | ||
327 | * @state: the current radio state the class should be forced to. | ||
328 | * | ||
329 | * This function updates the internal state of the radio cached | ||
330 | * by the rfkill class. It should be used when the driver gets | ||
331 | * a notification by the firmware/hardware of the current *real* | ||
332 | * state of the radio rfkill switch. | ||
333 | * | ||
334 | * Devices which are subject to external changes on their rfkill | ||
335 | * state (such as those caused by a hardware rfkill line) MUST | ||
336 | * have their driver arrange to call rfkill_force_state() as soon | ||
337 | * as possible after such a change. | ||
338 | * | ||
339 | * This function may not be called from an atomic context. | ||
340 | */ | ||
341 | int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state) | ||
342 | { | ||
343 | enum rfkill_state oldstate; | ||
344 | |||
345 | BUG_ON(!rfkill); | ||
346 | if (WARN((state >= RFKILL_STATE_MAX), | ||
347 | KERN_WARNING | ||
348 | "rfkill: illegal state %d passed as parameter " | ||
349 | "to rfkill_force_state\n", state)) | ||
350 | return -EINVAL; | ||
351 | |||
352 | mutex_lock(&rfkill->mutex); | ||
353 | |||
354 | oldstate = rfkill->state; | ||
355 | rfkill->state = state; | ||
356 | |||
357 | if (state != oldstate) | ||
358 | rfkill_uevent(rfkill); | ||
359 | |||
360 | mutex_unlock(&rfkill->mutex); | ||
361 | |||
362 | return 0; | ||
363 | } | ||
364 | EXPORT_SYMBOL(rfkill_force_state); | ||
365 | |||
366 | static ssize_t rfkill_name_show(struct device *dev, | ||
367 | struct device_attribute *attr, | ||
368 | char *buf) | ||
369 | { | ||
370 | struct rfkill *rfkill = to_rfkill(dev); | ||
371 | |||
372 | return sprintf(buf, "%s\n", rfkill->name); | ||
373 | } | ||
374 | |||
375 | static const char *rfkill_get_type_str(enum rfkill_type type) | ||
376 | { | ||
377 | switch (type) { | ||
378 | case RFKILL_TYPE_WLAN: | ||
379 | return "wlan"; | ||
380 | case RFKILL_TYPE_BLUETOOTH: | ||
381 | return "bluetooth"; | ||
382 | case RFKILL_TYPE_UWB: | ||
383 | return "ultrawideband"; | ||
384 | case RFKILL_TYPE_WIMAX: | ||
385 | return "wimax"; | ||
386 | case RFKILL_TYPE_WWAN: | ||
387 | return "wwan"; | ||
388 | default: | ||
389 | BUG(); | ||
390 | } | ||
391 | } | ||
392 | |||
393 | static ssize_t rfkill_type_show(struct device *dev, | ||
394 | struct device_attribute *attr, | ||
395 | char *buf) | ||
396 | { | ||
397 | struct rfkill *rfkill = to_rfkill(dev); | ||
398 | |||
399 | return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type)); | ||
400 | } | ||
401 | |||
402 | static ssize_t rfkill_state_show(struct device *dev, | ||
403 | struct device_attribute *attr, | ||
404 | char *buf) | ||
405 | { | ||
406 | struct rfkill *rfkill = to_rfkill(dev); | ||
407 | |||
408 | update_rfkill_state(rfkill); | ||
409 | return sprintf(buf, "%d\n", rfkill->state); | ||
410 | } | ||
411 | |||
412 | static ssize_t rfkill_state_store(struct device *dev, | ||
413 | struct device_attribute *attr, | ||
414 | const char *buf, size_t count) | ||
415 | { | ||
416 | struct rfkill *rfkill = to_rfkill(dev); | ||
417 | unsigned long state; | ||
418 | int error; | ||
419 | |||
420 | if (!capable(CAP_NET_ADMIN)) | ||
421 | return -EPERM; | ||
422 | |||
423 | error = strict_strtoul(buf, 0, &state); | ||
424 | if (error) | ||
425 | return error; | ||
426 | |||
427 | /* RFKILL_STATE_HARD_BLOCKED is illegal here... */ | ||
428 | if (state != RFKILL_STATE_UNBLOCKED && | ||
429 | state != RFKILL_STATE_SOFT_BLOCKED) | ||
430 | return -EINVAL; | ||
431 | |||
432 | error = mutex_lock_killable(&rfkill->mutex); | ||
433 | if (error) | ||
434 | return error; | ||
435 | |||
436 | if (!rfkill_epo_lock_active) | ||
437 | error = rfkill_toggle_radio(rfkill, state, 0); | ||
438 | else | ||
439 | error = -EPERM; | ||
440 | |||
441 | mutex_unlock(&rfkill->mutex); | ||
442 | |||
443 | return error ? error : count; | ||
444 | } | ||
445 | |||
446 | static ssize_t rfkill_claim_show(struct device *dev, | ||
447 | struct device_attribute *attr, | ||
448 | char *buf) | ||
449 | { | ||
450 | struct rfkill *rfkill = to_rfkill(dev); | ||
451 | |||
452 | return sprintf(buf, "%d\n", rfkill->user_claim); | ||
453 | } | ||
454 | |||
455 | static ssize_t rfkill_claim_store(struct device *dev, | ||
456 | struct device_attribute *attr, | ||
457 | const char *buf, size_t count) | ||
458 | { | ||
459 | struct rfkill *rfkill = to_rfkill(dev); | ||
460 | unsigned long claim_tmp; | ||
461 | bool claim; | ||
462 | int error; | ||
463 | |||
464 | if (!capable(CAP_NET_ADMIN)) | ||
465 | return -EPERM; | ||
466 | |||
467 | if (rfkill->user_claim_unsupported) | ||
468 | return -EOPNOTSUPP; | ||
469 | |||
470 | error = strict_strtoul(buf, 0, &claim_tmp); | ||
471 | if (error) | ||
472 | return error; | ||
473 | claim = !!claim_tmp; | ||
474 | |||
475 | /* | ||
476 | * Take the global lock to make sure the kernel is not in | ||
477 | * the middle of rfkill_switch_all | ||
478 | */ | ||
479 | error = mutex_lock_killable(&rfkill_global_mutex); | ||
480 | if (error) | ||
481 | return error; | ||
482 | |||
483 | if (rfkill->user_claim != claim) { | ||
484 | if (!claim && !rfkill_epo_lock_active) { | ||
485 | mutex_lock(&rfkill->mutex); | ||
486 | rfkill_toggle_radio(rfkill, | ||
487 | rfkill_global_states[rfkill->type].current_state, | ||
488 | 0); | ||
489 | mutex_unlock(&rfkill->mutex); | ||
490 | } | ||
491 | rfkill->user_claim = claim; | ||
492 | } | ||
493 | |||
494 | mutex_unlock(&rfkill_global_mutex); | ||
495 | |||
496 | return error ? error : count; | ||
497 | } | ||
498 | |||
499 | static struct device_attribute rfkill_dev_attrs[] = { | ||
500 | __ATTR(name, S_IRUGO, rfkill_name_show, NULL), | ||
501 | __ATTR(type, S_IRUGO, rfkill_type_show, NULL), | ||
502 | __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store), | ||
503 | __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store), | ||
504 | __ATTR_NULL | ||
505 | }; | ||
506 | |||
507 | static void rfkill_release(struct device *dev) | ||
508 | { | ||
509 | struct rfkill *rfkill = to_rfkill(dev); | ||
510 | |||
511 | kfree(rfkill); | ||
512 | module_put(THIS_MODULE); | ||
513 | } | ||
514 | |||
515 | #ifdef CONFIG_PM | ||
516 | static int rfkill_suspend(struct device *dev, pm_message_t state) | ||
517 | { | ||
518 | struct rfkill *rfkill = to_rfkill(dev); | ||
519 | |||
520 | /* mark class device as suspended */ | ||
521 | if (dev->power.power_state.event != state.event) | ||
522 | dev->power.power_state = state; | ||
523 | |||
524 | /* store state for the resume handler */ | ||
525 | rfkill->state_for_resume = rfkill->state; | ||
526 | |||
527 | return 0; | ||
528 | } | ||
529 | |||
530 | static int rfkill_resume(struct device *dev) | ||
531 | { | ||
532 | struct rfkill *rfkill = to_rfkill(dev); | ||
533 | enum rfkill_state newstate; | ||
534 | |||
535 | if (dev->power.power_state.event != PM_EVENT_ON) { | ||
536 | mutex_lock(&rfkill->mutex); | ||
537 | |||
538 | dev->power.power_state.event = PM_EVENT_ON; | ||
539 | |||
540 | /* | ||
541 | * rfkill->state could have been modified before we got | ||
542 | * called, and won't be updated by rfkill_toggle_radio() | ||
543 | * in force mode. Sync it FIRST. | ||
544 | */ | ||
545 | if (rfkill->get_state && | ||
546 | !rfkill->get_state(rfkill->data, &newstate)) | ||
547 | rfkill->state = newstate; | ||
548 | |||
549 | /* | ||
550 | * If we are under EPO, kick transmitter offline, | ||
551 | * otherwise restore to pre-suspend state. | ||
552 | * | ||
553 | * Issue a notification in any case | ||
554 | */ | ||
555 | rfkill_toggle_radio(rfkill, | ||
556 | rfkill_epo_lock_active ? | ||
557 | RFKILL_STATE_SOFT_BLOCKED : | ||
558 | rfkill->state_for_resume, | ||
559 | 1); | ||
560 | |||
561 | mutex_unlock(&rfkill->mutex); | ||
562 | } | ||
563 | |||
564 | return 0; | ||
565 | } | ||
566 | #else | ||
567 | #define rfkill_suspend NULL | ||
568 | #define rfkill_resume NULL | ||
569 | #endif | ||
570 | |||
571 | static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env) | ||
572 | { | ||
573 | struct rfkill *rfkill = to_rfkill(dev); | ||
574 | int error; | ||
575 | |||
576 | error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name); | ||
577 | if (error) | ||
578 | return error; | ||
579 | error = add_uevent_var(env, "RFKILL_TYPE=%s", | ||
580 | rfkill_get_type_str(rfkill->type)); | ||
581 | if (error) | ||
582 | return error; | ||
583 | error = add_uevent_var(env, "RFKILL_STATE=%d", rfkill->state); | ||
584 | return error; | ||
585 | } | ||
586 | |||
587 | static struct class rfkill_class = { | ||
588 | .name = "rfkill", | ||
589 | .dev_release = rfkill_release, | ||
590 | .dev_attrs = rfkill_dev_attrs, | ||
591 | .suspend = rfkill_suspend, | ||
592 | .resume = rfkill_resume, | ||
593 | .dev_uevent = rfkill_dev_uevent, | ||
594 | }; | ||
595 | |||
596 | static int rfkill_check_duplicity(const struct rfkill *rfkill) | ||
597 | { | ||
598 | struct rfkill *p; | ||
599 | unsigned long seen[BITS_TO_LONGS(RFKILL_TYPE_MAX)]; | ||
600 | |||
601 | memset(seen, 0, sizeof(seen)); | ||
602 | |||
603 | list_for_each_entry(p, &rfkill_list, node) { | ||
604 | if (WARN((p == rfkill), KERN_WARNING | ||
605 | "rfkill: illegal attempt to register " | ||
606 | "an already registered rfkill struct\n")) | ||
607 | return -EEXIST; | ||
608 | set_bit(p->type, seen); | ||
609 | } | ||
610 | |||
611 | /* 0: first switch of its kind */ | ||
612 | return (test_bit(rfkill->type, seen)) ? 1 : 0; | ||
613 | } | ||
614 | |||
615 | static int rfkill_add_switch(struct rfkill *rfkill) | ||
616 | { | ||
617 | int error; | ||
618 | |||
619 | mutex_lock(&rfkill_global_mutex); | ||
620 | |||
621 | error = rfkill_check_duplicity(rfkill); | ||
622 | if (error < 0) | ||
623 | goto unlock_out; | ||
624 | |||
625 | if (!error) { | ||
626 | /* lock default after first use */ | ||
627 | set_bit(rfkill->type, rfkill_states_lockdflt); | ||
628 | rfkill_global_states[rfkill->type].current_state = | ||
629 | rfkill_global_states[rfkill->type].default_state; | ||
630 | } | ||
631 | |||
632 | rfkill_toggle_radio(rfkill, | ||
633 | rfkill_global_states[rfkill->type].current_state, | ||
634 | 0); | ||
635 | |||
636 | list_add_tail(&rfkill->node, &rfkill_list); | ||
637 | |||
638 | error = 0; | ||
639 | unlock_out: | ||
640 | mutex_unlock(&rfkill_global_mutex); | ||
641 | |||
642 | return error; | ||
643 | } | ||
644 | |||
645 | static void rfkill_remove_switch(struct rfkill *rfkill) | ||
646 | { | ||
647 | mutex_lock(&rfkill_global_mutex); | ||
648 | list_del_init(&rfkill->node); | ||
649 | mutex_unlock(&rfkill_global_mutex); | ||
650 | |||
651 | mutex_lock(&rfkill->mutex); | ||
652 | rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1); | ||
653 | mutex_unlock(&rfkill->mutex); | ||
654 | } | ||
655 | |||
656 | /** | ||
657 | * rfkill_allocate - allocate memory for rfkill structure. | ||
658 | * @parent: device that has rf switch on it | ||
659 | * @type: type of the switch (RFKILL_TYPE_*) | ||
660 | * | ||
661 | * This function should be called by the network driver when it needs | ||
662 | * rfkill structure. Once the structure is allocated the driver should | ||
663 | * finish its initialization by setting the name, private data, enable_radio | ||
664 | * and disable_radio methods and then register it with rfkill_register(). | ||
665 | * | ||
666 | * NOTE: If registration fails the structure shoudl be freed by calling | ||
667 | * rfkill_free() otherwise rfkill_unregister() should be used. | ||
668 | */ | ||
669 | struct rfkill * __must_check rfkill_allocate(struct device *parent, | ||
670 | enum rfkill_type type) | ||
671 | { | ||
672 | struct rfkill *rfkill; | ||
673 | struct device *dev; | ||
674 | |||
675 | if (WARN((type >= RFKILL_TYPE_MAX), | ||
676 | KERN_WARNING | ||
677 | "rfkill: illegal type %d passed as parameter " | ||
678 | "to rfkill_allocate\n", type)) | ||
679 | return NULL; | ||
680 | |||
681 | rfkill = kzalloc(sizeof(struct rfkill), GFP_KERNEL); | ||
682 | if (!rfkill) | ||
683 | return NULL; | ||
684 | |||
685 | mutex_init(&rfkill->mutex); | ||
686 | INIT_LIST_HEAD(&rfkill->node); | ||
687 | rfkill->type = type; | ||
688 | |||
689 | dev = &rfkill->dev; | ||
690 | dev->class = &rfkill_class; | ||
691 | dev->parent = parent; | ||
692 | device_initialize(dev); | ||
693 | |||
694 | __module_get(THIS_MODULE); | ||
695 | |||
696 | return rfkill; | ||
697 | } | ||
698 | EXPORT_SYMBOL(rfkill_allocate); | ||
699 | |||
700 | /** | ||
701 | * rfkill_free - Mark rfkill structure for deletion | ||
702 | * @rfkill: rfkill structure to be destroyed | ||
703 | * | ||
704 | * Decrements reference count of the rfkill structure so it is destroyed. | ||
705 | * Note that rfkill_free() should _not_ be called after rfkill_unregister(). | ||
706 | */ | ||
707 | void rfkill_free(struct rfkill *rfkill) | ||
708 | { | ||
709 | if (rfkill) | ||
710 | put_device(&rfkill->dev); | ||
711 | } | ||
712 | EXPORT_SYMBOL(rfkill_free); | ||
713 | |||
/* Register the LED trigger for this switch; compiled to a no-op body
 * when CONFIG_RFKILL_LEDS is not enabled. */
static void rfkill_led_trigger_register(struct rfkill *rfkill)
{
#ifdef CONFIG_RFKILL_LEDS
	int error;

	/* Supply defaults for anything the driver did not fill in:
	 * trigger name defaults to the device name, activate hook to
	 * the generic rfkill one. */
	if (!rfkill->led_trigger.name)
		rfkill->led_trigger.name = dev_name(&rfkill->dev);
	if (!rfkill->led_trigger.activate)
		rfkill->led_trigger.activate = rfkill_led_trigger_activate;
	error = led_trigger_register(&rfkill->led_trigger);
	if (error)
		/* NULL name marks "not registered" so the unregister
		 * path can skip led_trigger_unregister(). */
		rfkill->led_trigger.name = NULL;
#endif /* CONFIG_RFKILL_LEDS */
}
728 | |||
/* Undo rfkill_led_trigger_register(); a NULL trigger name means the
 * trigger was never successfully registered, so nothing to do. */
static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
#ifdef CONFIG_RFKILL_LEDS
	if (!rfkill->led_trigger.name)
		return;

	led_trigger_unregister(&rfkill->led_trigger);
	rfkill->led_trigger.name = NULL;
#endif
}
738 | |||
739 | /** | ||
740 | * rfkill_register - Register a rfkill structure. | ||
741 | * @rfkill: rfkill structure to be registered | ||
742 | * | ||
743 | * This function should be called by the network driver when the rfkill | ||
744 | * structure needs to be registered. Immediately from registration the | ||
745 | * switch driver should be able to service calls to toggle_radio. | ||
746 | */ | ||
747 | int __must_check rfkill_register(struct rfkill *rfkill) | ||
748 | { | ||
749 | static atomic_t rfkill_no = ATOMIC_INIT(0); | ||
750 | struct device *dev = &rfkill->dev; | ||
751 | int error; | ||
752 | |||
753 | if (WARN((!rfkill || !rfkill->toggle_radio || | ||
754 | rfkill->type >= RFKILL_TYPE_MAX || | ||
755 | rfkill->state >= RFKILL_STATE_MAX), | ||
756 | KERN_WARNING | ||
757 | "rfkill: attempt to register a " | ||
758 | "badly initialized rfkill struct\n")) | ||
759 | return -EINVAL; | ||
760 | |||
761 | dev_set_name(dev, "rfkill%ld", (long)atomic_inc_return(&rfkill_no) - 1); | ||
762 | |||
763 | rfkill_led_trigger_register(rfkill); | ||
764 | |||
765 | error = rfkill_add_switch(rfkill); | ||
766 | if (error) { | ||
767 | rfkill_led_trigger_unregister(rfkill); | ||
768 | return error; | ||
769 | } | ||
770 | |||
771 | error = device_add(dev); | ||
772 | if (error) { | ||
773 | rfkill_remove_switch(rfkill); | ||
774 | rfkill_led_trigger_unregister(rfkill); | ||
775 | return error; | ||
776 | } | ||
777 | |||
778 | return 0; | ||
779 | } | ||
780 | EXPORT_SYMBOL(rfkill_register); | ||
781 | |||
/**
 * rfkill_unregister - Unregister a rfkill structure.
 * @rfkill: rfkill structure to be unregistered
 *
 * This function should be called by the network driver during device
 * teardown to destroy rfkill structure. Note that rfkill_free() should
 * _not_ be called after rfkill_unregister().
 */
void rfkill_unregister(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);
	/* Tear down in reverse order of rfkill_register(): remove the
	 * sysfs device first so userspace can no longer reach it, then
	 * detach the switch and its LED trigger. */
	device_del(&rfkill->dev);
	rfkill_remove_switch(rfkill);
	rfkill_led_trigger_unregister(rfkill);
	/* Drop the reference taken when the structure was allocated;
	 * this is why rfkill_free() must not be called afterwards. */
	put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_unregister);
799 | |||
800 | /** | ||
801 | * rfkill_set_default - set initial value for a switch type | ||
802 | * @type - the type of switch to set the default state of | ||
803 | * @state - the new default state for that group of switches | ||
804 | * | ||
805 | * Sets the initial state rfkill should use for a given type. | ||
806 | * The following initial states are allowed: RFKILL_STATE_SOFT_BLOCKED | ||
807 | * and RFKILL_STATE_UNBLOCKED. | ||
808 | * | ||
809 | * This function is meant to be used by platform drivers for platforms | ||
810 | * that can save switch state across power down/reboot. | ||
811 | * | ||
812 | * The default state for each switch type can be changed exactly once. | ||
813 | * After a switch of that type is registered, the default state cannot | ||
814 | * be changed anymore. This guards against multiple drivers it the | ||
815 | * same platform trying to set the initial switch default state, which | ||
816 | * is not allowed. | ||
817 | * | ||
818 | * Returns -EPERM if the state has already been set once or is in use, | ||
819 | * so drivers likely want to either ignore or at most printk(KERN_NOTICE) | ||
820 | * if this function returns -EPERM. | ||
821 | * | ||
822 | * Returns 0 if the new default state was set, or an error if it | ||
823 | * could not be set. | ||
824 | */ | ||
825 | int rfkill_set_default(enum rfkill_type type, enum rfkill_state state) | ||
826 | { | ||
827 | int error; | ||
828 | |||
829 | if (WARN((type >= RFKILL_TYPE_MAX || | ||
830 | (state != RFKILL_STATE_SOFT_BLOCKED && | ||
831 | state != RFKILL_STATE_UNBLOCKED)), | ||
832 | KERN_WARNING | ||
833 | "rfkill: illegal state %d or type %d passed as " | ||
834 | "parameter to rfkill_set_default\n", state, type)) | ||
835 | return -EINVAL; | ||
836 | |||
837 | mutex_lock(&rfkill_global_mutex); | ||
838 | |||
839 | if (!test_and_set_bit(type, rfkill_states_lockdflt)) { | ||
840 | rfkill_global_states[type].default_state = state; | ||
841 | rfkill_global_states[type].current_state = state; | ||
842 | error = 0; | ||
843 | } else | ||
844 | error = -EPERM; | ||
845 | |||
846 | mutex_unlock(&rfkill_global_mutex); | ||
847 | return error; | ||
848 | } | ||
849 | EXPORT_SYMBOL_GPL(rfkill_set_default); | ||
850 | |||
851 | /* | ||
852 | * Rfkill module initialization/deinitialization. | ||
853 | */ | ||
854 | static int __init rfkill_init(void) | ||
855 | { | ||
856 | int error; | ||
857 | int i; | ||
858 | |||
859 | /* RFKILL_STATE_HARD_BLOCKED is illegal here... */ | ||
860 | if (rfkill_default_state != RFKILL_STATE_SOFT_BLOCKED && | ||
861 | rfkill_default_state != RFKILL_STATE_UNBLOCKED) | ||
862 | return -EINVAL; | ||
863 | |||
864 | for (i = 0; i < RFKILL_TYPE_MAX; i++) | ||
865 | rfkill_global_states[i].default_state = rfkill_default_state; | ||
866 | |||
867 | error = class_register(&rfkill_class); | ||
868 | if (error) { | ||
869 | printk(KERN_ERR "rfkill: unable to register rfkill class\n"); | ||
870 | return error; | ||
871 | } | ||
872 | |||
873 | return 0; | ||
874 | } | ||
875 | |||
/* Module teardown: drop the sysfs class registered in rfkill_init(). */
static void __exit rfkill_exit(void)
{
	class_unregister(&rfkill_class);
}

/* subsys_initcall: initialize early so switch drivers that register
 * rfkill structures from their own initcalls find the class ready. */
subsys_initcall(rfkill_init);
module_exit(rfkill_exit);
diff --git a/net/rfkill/rfkill-input.h b/net/rfkill/rfkill.h index fe8df6b5b935..d1117cb6e4de 100644 --- a/net/rfkill/rfkill-input.h +++ b/net/rfkill/rfkill.h | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007 Ivo van Doorn | 2 | * Copyright (C) 2007 Ivo van Doorn |
3 | * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> | ||
3 | */ | 4 | */ |
4 | 5 | ||
5 | /* | 6 | /* |
@@ -11,11 +12,16 @@ | |||
11 | #ifndef __RFKILL_INPUT_H | 12 | #ifndef __RFKILL_INPUT_H |
12 | #define __RFKILL_INPUT_H | 13 | #define __RFKILL_INPUT_H |
13 | 14 | ||
14 | void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state); | 15 | /* core code */ |
16 | void rfkill_switch_all(const enum rfkill_type type, bool blocked); | ||
15 | void rfkill_epo(void); | 17 | void rfkill_epo(void); |
16 | void rfkill_restore_states(void); | 18 | void rfkill_restore_states(void); |
17 | void rfkill_remove_epo_lock(void); | 19 | void rfkill_remove_epo_lock(void); |
18 | bool rfkill_is_epo_lock_active(void); | 20 | bool rfkill_is_epo_lock_active(void); |
19 | enum rfkill_state rfkill_get_global_state(const enum rfkill_type type); | 21 | bool rfkill_get_global_sw_state(const enum rfkill_type type); |
22 | |||
23 | /* input handler */ | ||
24 | int rfkill_handler_init(void); | ||
25 | void rfkill_handler_exit(void); | ||
20 | 26 | ||
21 | #endif /* __RFKILL_INPUT_H */ | 27 | #endif /* __RFKILL_INPUT_H */ |