Diffstat (limited to 'net/rfkill/core.c')
-rw-r--r--	net/rfkill/core.c	1225
1 file changed, 1225 insertions, 0 deletions
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
new file mode 100644
index 000000000000..79693fe2001e
--- /dev/null
+++ b/net/rfkill/core.c
@@ -0,0 +1,1225 @@
/*
 * Copyright (C) 2006 - 2007 Ivo van Doorn
 * Copyright (C) 2007 Dmitry Torokhov
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
21 | |||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/workqueue.h> | ||
26 | #include <linux/capability.h> | ||
27 | #include <linux/list.h> | ||
28 | #include <linux/mutex.h> | ||
29 | #include <linux/rfkill.h> | ||
30 | #include <linux/spinlock.h> | ||
31 | #include <linux/miscdevice.h> | ||
32 | #include <linux/wait.h> | ||
33 | #include <linux/poll.h> | ||
34 | #include <linux/fs.h> | ||
35 | |||
36 | #include "rfkill.h" | ||
37 | |||
38 | #define POLL_INTERVAL (5 * HZ) | ||
39 | |||
40 | #define RFKILL_BLOCK_HW BIT(0) | ||
41 | #define RFKILL_BLOCK_SW BIT(1) | ||
42 | #define RFKILL_BLOCK_SW_PREV BIT(2) | ||
43 | #define RFKILL_BLOCK_ANY (RFKILL_BLOCK_HW |\ | ||
44 | RFKILL_BLOCK_SW |\ | ||
45 | RFKILL_BLOCK_SW_PREV) | ||
46 | #define RFKILL_BLOCK_SW_SETCALL BIT(31) | ||
47 | |||
struct rfkill {
	spinlock_t		lock;

	const char		*name;
	enum rfkill_type	type;

	unsigned long		state;

	u32			idx;

	bool			registered;
	bool			persistent;

	const struct rfkill_ops	*ops;
	void			*data;

#ifdef CONFIG_RFKILL_LEDS
	struct led_trigger	led_trigger;
	const char		*ledtrigname;
#endif

	struct device		dev;
	struct list_head	node;

	struct delayed_work	poll_work;
	struct work_struct	uevent_work;
	struct work_struct	sync_work;
};
#define to_rfkill(d)	container_of(d, struct rfkill, dev)

struct rfkill_int_event {
	struct list_head	list;
	struct rfkill_event	ev;
};

struct rfkill_data {
	struct list_head	list;
	struct list_head	events;
	struct mutex		mtx;
	wait_queue_head_t	read_wait;
	bool			input_handler;
};


MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_DESCRIPTION("RF switch support");
MODULE_LICENSE("GPL");


/*
 * The locking here should be made much smarter, we currently have
 * a bit of a stupid situation because drivers might want to register
 * the rfkill struct under their own lock, and take this lock during
 * rfkill method calls -- which will cause an AB-BA deadlock situation.
 *
 * To fix that, we need to rework this code here to be mostly lock-free
 * and only use the mutex for list manipulations, not to protect the
 * various other global variables. Then we can avoid holding the mutex
 * around driver operations, and all is happy.
 */
static LIST_HEAD(rfkill_list);	/* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static LIST_HEAD(rfkill_fds);	/* list of open fds of /dev/rfkill */

static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
		 "Default initial state for all radio types, 0 = radio off");

static struct {
	bool cur, sav;
} rfkill_global_states[NUM_RFKILL_TYPES];

static bool rfkill_epo_lock_active;


#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
	struct led_trigger *trigger;

	if (!rfkill->registered)
		return;

	trigger = &rfkill->led_trigger;

	if (rfkill->state & RFKILL_BLOCK_ANY)
		led_trigger_event(trigger, LED_OFF);
	else
		led_trigger_event(trigger, LED_FULL);
}

static void rfkill_led_trigger_activate(struct led_classdev *led)
{
	struct rfkill *rfkill;

	rfkill = container_of(led->trigger, struct rfkill, led_trigger);

	rfkill_led_trigger_event(rfkill);
}

const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
	return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);

void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
	BUG_ON(!rfkill);

	rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);

static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	rfkill->led_trigger.name = rfkill->ledtrigname
					? : dev_name(&rfkill->dev);
	rfkill->led_trigger.activate = rfkill_led_trigger_activate;
	return led_trigger_register(&rfkill->led_trigger);
}

static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
	led_trigger_unregister(&rfkill->led_trigger);
}
#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}
#endif /* CONFIG_RFKILL_LEDS */

static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
			      enum rfkill_operation op)
{
	unsigned long flags;

	ev->idx = rfkill->idx;
	ev->type = rfkill->type;
	ev->op = op;

	spin_lock_irqsave(&rfkill->lock, flags);
	ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
	ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
					RFKILL_BLOCK_SW_PREV));
	spin_unlock_irqrestore(&rfkill->lock, flags);
}

static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
	struct rfkill_data *data;
	struct rfkill_int_event *ev;

	list_for_each_entry(data, &rfkill_fds, list) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			continue;
		rfkill_fill_event(&ev->ev, rfkill, op);
		mutex_lock(&data->mtx);
		list_add_tail(&ev->list, &data->events);
		mutex_unlock(&data->mtx);
		wake_up_interruptible(&data->read_wait);
	}
}

static void rfkill_event(struct rfkill *rfkill)
{
	if (!rfkill->registered)
		return;

	kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

	/* also send event to /dev/rfkill */
	rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}

static bool __rfkill_set_hw_state(struct rfkill *rfkill,
				  bool blocked, bool *change)
{
	unsigned long flags;
	bool prev, any;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_HW);
	if (blocked)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;
	*change = prev != blocked;
	any = rfkill->state & RFKILL_BLOCK_ANY;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);

	return any;
}
257 | |||
258 | /** | ||
259 | * rfkill_set_block - wrapper for set_block method | ||
260 | * | ||
261 | * @rfkill: the rfkill struct to use | ||
262 | * @blocked: the new software state | ||
263 | * | ||
264 | * Calls the set_block method (when applicable) and handles notifications | ||
265 | * etc. as well. | ||
266 | */ | ||
267 | static void rfkill_set_block(struct rfkill *rfkill, bool blocked) | ||
268 | { | ||
269 | unsigned long flags; | ||
270 | int err; | ||
271 | |||
272 | if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP)) | ||
273 | return; | ||
274 | |||
275 | /* | ||
276 | * Some platforms (...!) generate input events which affect the | ||
277 | * _hard_ kill state -- whenever something tries to change the | ||
278 | * current software state query the hardware state too. | ||
279 | */ | ||
280 | if (rfkill->ops->query) | ||
281 | rfkill->ops->query(rfkill, rfkill->data); | ||
282 | |||
283 | spin_lock_irqsave(&rfkill->lock, flags); | ||
284 | if (rfkill->state & RFKILL_BLOCK_SW) | ||
285 | rfkill->state |= RFKILL_BLOCK_SW_PREV; | ||
286 | else | ||
287 | rfkill->state &= ~RFKILL_BLOCK_SW_PREV; | ||
288 | |||
289 | if (blocked) | ||
290 | rfkill->state |= RFKILL_BLOCK_SW; | ||
291 | else | ||
292 | rfkill->state &= ~RFKILL_BLOCK_SW; | ||
293 | |||
294 | rfkill->state |= RFKILL_BLOCK_SW_SETCALL; | ||
295 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
296 | |||
297 | err = rfkill->ops->set_block(rfkill->data, blocked); | ||
298 | |||
299 | spin_lock_irqsave(&rfkill->lock, flags); | ||
300 | if (err) { | ||
301 | /* | ||
302 | * Failed -- reset status to _prev, this may be different | ||
303 | * from what set set _PREV to earlier in this function | ||
304 | * if rfkill_set_sw_state was invoked. | ||
305 | */ | ||
306 | if (rfkill->state & RFKILL_BLOCK_SW_PREV) | ||
307 | rfkill->state |= RFKILL_BLOCK_SW; | ||
308 | else | ||
309 | rfkill->state &= ~RFKILL_BLOCK_SW; | ||
310 | } | ||
311 | rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL; | ||
312 | rfkill->state &= ~RFKILL_BLOCK_SW_PREV; | ||
313 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
314 | |||
315 | rfkill_led_trigger_event(rfkill); | ||
316 | rfkill_event(rfkill); | ||
317 | } | ||
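
/*
 * Illustrative sketch (not part of the original file): what a driver's
 * set_block op, the method rfkill_set_block() above wraps, might look
 * like. struct mydev and hw_radio_set_power() are hypothetical
 * stand-ins for real device state and I/O.
 */
#if 0	/* example only */
static int mydev_rfkill_set_block(void *data, bool blocked)
{
	struct mydev *dev = data;

	/* Write the requested soft-block state to the hardware; a
	 * non-zero return makes the core revert to the _PREV state. */
	return hw_radio_set_power(dev, !blocked);
}
#endif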
318 | |||
319 | #ifdef CONFIG_RFKILL_INPUT | ||
320 | static atomic_t rfkill_input_disabled = ATOMIC_INIT(0); | ||
321 | |||
322 | /** | ||
323 | * __rfkill_switch_all - Toggle state of all switches of given type | ||
324 | * @type: type of interfaces to be affected | ||
325 | * @state: the new state | ||
326 | * | ||
327 | * This function sets the state of all switches of given type, | ||
328 | * unless a specific switch is claimed by userspace (in which case, | ||
329 | * that switch is left alone) or suspended. | ||
330 | * | ||
331 | * Caller must have acquired rfkill_global_mutex. | ||
332 | */ | ||
333 | static void __rfkill_switch_all(const enum rfkill_type type, bool blocked) | ||
334 | { | ||
335 | struct rfkill *rfkill; | ||
336 | |||
337 | rfkill_global_states[type].cur = blocked; | ||
338 | list_for_each_entry(rfkill, &rfkill_list, node) { | ||
339 | if (rfkill->type != type) | ||
340 | continue; | ||
341 | |||
342 | rfkill_set_block(rfkill, blocked); | ||
343 | } | ||
344 | } | ||
345 | |||
346 | /** | ||
347 | * rfkill_switch_all - Toggle state of all switches of given type | ||
348 | * @type: type of interfaces to be affected | ||
349 | * @state: the new state | ||
350 | * | ||
351 | * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state). | ||
352 | * Please refer to __rfkill_switch_all() for details. | ||
353 | * | ||
354 | * Does nothing if the EPO lock is active. | ||
355 | */ | ||
356 | void rfkill_switch_all(enum rfkill_type type, bool blocked) | ||
357 | { | ||
358 | if (atomic_read(&rfkill_input_disabled)) | ||
359 | return; | ||
360 | |||
361 | mutex_lock(&rfkill_global_mutex); | ||
362 | |||
363 | if (!rfkill_epo_lock_active) | ||
364 | __rfkill_switch_all(type, blocked); | ||
365 | |||
366 | mutex_unlock(&rfkill_global_mutex); | ||
367 | } | ||
368 | |||
/**
 * rfkill_epo - emergency power off all transmitters
 *
 * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
 * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
 *
 * The global state before the EPO is saved and can be restored later
 * using rfkill_restore_states().
 */
void rfkill_epo(void)
{
	struct rfkill *rfkill;
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = true;
	list_for_each_entry(rfkill, &rfkill_list, node)
		rfkill_set_block(rfkill, true);

	for (i = 0; i < NUM_RFKILL_TYPES; i++) {
		rfkill_global_states[i].sav = rfkill_global_states[i].cur;
		rfkill_global_states[i].cur = true;
	}

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_restore_states - restore global states
 *
 * Restore (and sync switches to) the global state from the
 * states saved in rfkill_global_states[].sav. This can undo
 * the effects of a call to rfkill_epo().
 */
void rfkill_restore_states(void)
{
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = false;
	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		__rfkill_switch_all(i, rfkill_global_states[i].sav);
	mutex_unlock(&rfkill_global_mutex);
}
421 | |||
422 | /** | ||
423 | * rfkill_remove_epo_lock - unlock state changes | ||
424 | * | ||
425 | * Used by rfkill-input manually unlock state changes, when | ||
426 | * the EPO switch is deactivated. | ||
427 | */ | ||
428 | void rfkill_remove_epo_lock(void) | ||
429 | { | ||
430 | if (atomic_read(&rfkill_input_disabled)) | ||
431 | return; | ||
432 | |||
433 | mutex_lock(&rfkill_global_mutex); | ||
434 | rfkill_epo_lock_active = false; | ||
435 | mutex_unlock(&rfkill_global_mutex); | ||
436 | } | ||
437 | |||
438 | /** | ||
439 | * rfkill_is_epo_lock_active - returns true EPO is active | ||
440 | * | ||
441 | * Returns 0 (false) if there is NOT an active EPO contidion, | ||
442 | * and 1 (true) if there is an active EPO contition, which | ||
443 | * locks all radios in one of the BLOCKED states. | ||
444 | * | ||
445 | * Can be called in atomic context. | ||
446 | */ | ||
447 | bool rfkill_is_epo_lock_active(void) | ||
448 | { | ||
449 | return rfkill_epo_lock_active; | ||
450 | } | ||
451 | |||
452 | /** | ||
453 | * rfkill_get_global_sw_state - returns global state for a type | ||
454 | * @type: the type to get the global state of | ||
455 | * | ||
456 | * Returns the current global state for a given wireless | ||
457 | * device type. | ||
458 | */ | ||
459 | bool rfkill_get_global_sw_state(const enum rfkill_type type) | ||
460 | { | ||
461 | return rfkill_global_states[type].cur; | ||
462 | } | ||
463 | #endif | ||
464 | |||
465 | |||
466 | bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) | ||
467 | { | ||
468 | bool ret, change; | ||
469 | |||
470 | ret = __rfkill_set_hw_state(rfkill, blocked, &change); | ||
471 | |||
472 | if (!rfkill->registered) | ||
473 | return ret; | ||
474 | |||
475 | if (change) | ||
476 | schedule_work(&rfkill->uevent_work); | ||
477 | |||
478 | return ret; | ||
479 | } | ||
480 | EXPORT_SYMBOL(rfkill_set_hw_state); | ||
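
/*
 * Illustrative sketch (not part of the original file): a driver would
 * typically call rfkill_set_hw_state() from its interrupt handler or
 * poll routine when the physical kill switch changes. struct mydev and
 * hw_radio_killswitch_on() are hypothetical.
 */
#if 0	/* example only */
static irqreturn_t mydev_killswitch_irq(int irq, void *data)
{
	struct mydev *dev = data;

	/* Report the new hard-block state; the core schedules the
	 * uevent and returns whether the radio is blocked overall. */
	rfkill_set_hw_state(dev->rfkill, hw_radio_killswitch_on(dev));
	return IRQ_HANDLED;
}
#endif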
481 | |||
482 | static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) | ||
483 | { | ||
484 | u32 bit = RFKILL_BLOCK_SW; | ||
485 | |||
486 | /* if in a ops->set_block right now, use other bit */ | ||
487 | if (rfkill->state & RFKILL_BLOCK_SW_SETCALL) | ||
488 | bit = RFKILL_BLOCK_SW_PREV; | ||
489 | |||
490 | if (blocked) | ||
491 | rfkill->state |= bit; | ||
492 | else | ||
493 | rfkill->state &= ~bit; | ||
494 | } | ||
495 | |||
496 | bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) | ||
497 | { | ||
498 | unsigned long flags; | ||
499 | bool prev, hwblock; | ||
500 | |||
501 | BUG_ON(!rfkill); | ||
502 | |||
503 | spin_lock_irqsave(&rfkill->lock, flags); | ||
504 | prev = !!(rfkill->state & RFKILL_BLOCK_SW); | ||
505 | __rfkill_set_sw_state(rfkill, blocked); | ||
506 | hwblock = !!(rfkill->state & RFKILL_BLOCK_HW); | ||
507 | blocked = blocked || hwblock; | ||
508 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
509 | |||
510 | if (!rfkill->registered) | ||
511 | return blocked; | ||
512 | |||
513 | if (prev != blocked && !hwblock) | ||
514 | schedule_work(&rfkill->uevent_work); | ||
515 | |||
516 | rfkill_led_trigger_event(rfkill); | ||
517 | |||
518 | return blocked; | ||
519 | } | ||
520 | EXPORT_SYMBOL(rfkill_set_sw_state); | ||
521 | |||
522 | void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked) | ||
523 | { | ||
524 | unsigned long flags; | ||
525 | |||
526 | BUG_ON(!rfkill); | ||
527 | BUG_ON(rfkill->registered); | ||
528 | |||
529 | spin_lock_irqsave(&rfkill->lock, flags); | ||
530 | __rfkill_set_sw_state(rfkill, blocked); | ||
531 | rfkill->persistent = true; | ||
532 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
533 | } | ||
534 | EXPORT_SYMBOL(rfkill_init_sw_state); | ||
535 | |||
536 | void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) | ||
537 | { | ||
538 | unsigned long flags; | ||
539 | bool swprev, hwprev; | ||
540 | |||
541 | BUG_ON(!rfkill); | ||
542 | |||
543 | spin_lock_irqsave(&rfkill->lock, flags); | ||
544 | |||
545 | /* | ||
546 | * No need to care about prev/setblock ... this is for uevent only | ||
547 | * and that will get triggered by rfkill_set_block anyway. | ||
548 | */ | ||
549 | swprev = !!(rfkill->state & RFKILL_BLOCK_SW); | ||
550 | hwprev = !!(rfkill->state & RFKILL_BLOCK_HW); | ||
551 | __rfkill_set_sw_state(rfkill, sw); | ||
552 | |||
553 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
554 | |||
555 | if (!rfkill->registered) { | ||
556 | rfkill->persistent = true; | ||
557 | } else { | ||
558 | if (swprev != sw || hwprev != hw) | ||
559 | schedule_work(&rfkill->uevent_work); | ||
560 | |||
561 | rfkill_led_trigger_event(rfkill); | ||
562 | } | ||
563 | } | ||
564 | EXPORT_SYMBOL(rfkill_set_states); | ||
565 | |||
566 | static ssize_t rfkill_name_show(struct device *dev, | ||
567 | struct device_attribute *attr, | ||
568 | char *buf) | ||
569 | { | ||
570 | struct rfkill *rfkill = to_rfkill(dev); | ||
571 | |||
572 | return sprintf(buf, "%s\n", rfkill->name); | ||
573 | } | ||
574 | |||
575 | static const char *rfkill_get_type_str(enum rfkill_type type) | ||
576 | { | ||
577 | switch (type) { | ||
578 | case RFKILL_TYPE_WLAN: | ||
579 | return "wlan"; | ||
580 | case RFKILL_TYPE_BLUETOOTH: | ||
581 | return "bluetooth"; | ||
582 | case RFKILL_TYPE_UWB: | ||
583 | return "ultrawideband"; | ||
584 | case RFKILL_TYPE_WIMAX: | ||
585 | return "wimax"; | ||
586 | case RFKILL_TYPE_WWAN: | ||
587 | return "wwan"; | ||
588 | default: | ||
589 | BUG(); | ||
590 | } | ||
591 | |||
592 | BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_WWAN + 1); | ||
593 | } | ||
594 | |||
595 | static ssize_t rfkill_type_show(struct device *dev, | ||
596 | struct device_attribute *attr, | ||
597 | char *buf) | ||
598 | { | ||
599 | struct rfkill *rfkill = to_rfkill(dev); | ||
600 | |||
601 | return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type)); | ||
602 | } | ||
603 | |||
604 | static ssize_t rfkill_idx_show(struct device *dev, | ||
605 | struct device_attribute *attr, | ||
606 | char *buf) | ||
607 | { | ||
608 | struct rfkill *rfkill = to_rfkill(dev); | ||
609 | |||
610 | return sprintf(buf, "%d\n", rfkill->idx); | ||
611 | } | ||
612 | |||
613 | static ssize_t rfkill_persistent_show(struct device *dev, | ||
614 | struct device_attribute *attr, | ||
615 | char *buf) | ||
616 | { | ||
617 | struct rfkill *rfkill = to_rfkill(dev); | ||
618 | |||
619 | return sprintf(buf, "%d\n", rfkill->persistent); | ||
620 | } | ||
621 | |||
622 | static u8 user_state_from_blocked(unsigned long state) | ||
623 | { | ||
624 | if (state & RFKILL_BLOCK_HW) | ||
625 | return RFKILL_USER_STATE_HARD_BLOCKED; | ||
626 | if (state & RFKILL_BLOCK_SW) | ||
627 | return RFKILL_USER_STATE_SOFT_BLOCKED; | ||
628 | |||
629 | return RFKILL_USER_STATE_UNBLOCKED; | ||
630 | } | ||
631 | |||
632 | static ssize_t rfkill_state_show(struct device *dev, | ||
633 | struct device_attribute *attr, | ||
634 | char *buf) | ||
635 | { | ||
636 | struct rfkill *rfkill = to_rfkill(dev); | ||
637 | unsigned long flags; | ||
638 | u32 state; | ||
639 | |||
640 | spin_lock_irqsave(&rfkill->lock, flags); | ||
641 | state = rfkill->state; | ||
642 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
643 | |||
644 | return sprintf(buf, "%d\n", user_state_from_blocked(state)); | ||
645 | } | ||
646 | |||
647 | static ssize_t rfkill_state_store(struct device *dev, | ||
648 | struct device_attribute *attr, | ||
649 | const char *buf, size_t count) | ||
650 | { | ||
651 | /* | ||
652 | * The intention was that userspace can only take control over | ||
653 | * a given device when/if rfkill-input doesn't control it due | ||
654 | * to user_claim. Since user_claim is currently unsupported, | ||
655 | * we never support changing the state from userspace -- this | ||
656 | * can be implemented again later. | ||
657 | */ | ||
658 | |||
659 | return -EPERM; | ||
660 | } | ||
661 | |||
662 | static ssize_t rfkill_claim_show(struct device *dev, | ||
663 | struct device_attribute *attr, | ||
664 | char *buf) | ||
665 | { | ||
666 | return sprintf(buf, "%d\n", 0); | ||
667 | } | ||
668 | |||
669 | static ssize_t rfkill_claim_store(struct device *dev, | ||
670 | struct device_attribute *attr, | ||
671 | const char *buf, size_t count) | ||
672 | { | ||
673 | return -EOPNOTSUPP; | ||
674 | } | ||
675 | |||
676 | static struct device_attribute rfkill_dev_attrs[] = { | ||
677 | __ATTR(name, S_IRUGO, rfkill_name_show, NULL), | ||
678 | __ATTR(type, S_IRUGO, rfkill_type_show, NULL), | ||
679 | __ATTR(index, S_IRUGO, rfkill_idx_show, NULL), | ||
680 | __ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL), | ||
681 | __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store), | ||
682 | __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store), | ||
683 | __ATTR_NULL | ||
684 | }; | ||
685 | |||
686 | static void rfkill_release(struct device *dev) | ||
687 | { | ||
688 | struct rfkill *rfkill = to_rfkill(dev); | ||
689 | |||
690 | kfree(rfkill); | ||
691 | } | ||
692 | |||
693 | static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env) | ||
694 | { | ||
695 | struct rfkill *rfkill = to_rfkill(dev); | ||
696 | unsigned long flags; | ||
697 | u32 state; | ||
698 | int error; | ||
699 | |||
700 | error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name); | ||
701 | if (error) | ||
702 | return error; | ||
703 | error = add_uevent_var(env, "RFKILL_TYPE=%s", | ||
704 | rfkill_get_type_str(rfkill->type)); | ||
705 | if (error) | ||
706 | return error; | ||
707 | spin_lock_irqsave(&rfkill->lock, flags); | ||
708 | state = rfkill->state; | ||
709 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
710 | error = add_uevent_var(env, "RFKILL_STATE=%d", | ||
711 | user_state_from_blocked(state)); | ||
712 | return error; | ||
713 | } | ||
714 | |||
715 | void rfkill_pause_polling(struct rfkill *rfkill) | ||
716 | { | ||
717 | BUG_ON(!rfkill); | ||
718 | |||
719 | if (!rfkill->ops->poll) | ||
720 | return; | ||
721 | |||
722 | cancel_delayed_work_sync(&rfkill->poll_work); | ||
723 | } | ||
724 | EXPORT_SYMBOL(rfkill_pause_polling); | ||
725 | |||
726 | void rfkill_resume_polling(struct rfkill *rfkill) | ||
727 | { | ||
728 | BUG_ON(!rfkill); | ||
729 | |||
730 | if (!rfkill->ops->poll) | ||
731 | return; | ||
732 | |||
733 | schedule_work(&rfkill->poll_work.work); | ||
734 | } | ||
735 | EXPORT_SYMBOL(rfkill_resume_polling); | ||
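
/*
 * Illustrative sketch (not part of the original file): a driver that
 * gets no interrupt for its kill switch supplies a poll op, which the
 * core calls every POLL_INTERVAL; rfkill_pause_polling() and
 * rfkill_resume_polling() bracket phases where the hardware cannot be
 * queried, e.g. a firmware reload. The mydev helpers (including
 * mydev_rfkill_set_block from the earlier sketch) are hypothetical.
 */
#if 0	/* example only */
static void mydev_rfkill_poll(struct rfkill *rfkill, void *data)
{
	struct mydev *dev = data;

	/* re-read the switch and let the core sort out state changes */
	rfkill_set_hw_state(rfkill, hw_radio_killswitch_on(dev));
}

static const struct rfkill_ops mydev_rfkill_ops = {
	.poll		= mydev_rfkill_poll,
	.set_block	= mydev_rfkill_set_block,
};
#endif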
736 | |||
737 | static int rfkill_suspend(struct device *dev, pm_message_t state) | ||
738 | { | ||
739 | struct rfkill *rfkill = to_rfkill(dev); | ||
740 | |||
741 | rfkill_pause_polling(rfkill); | ||
742 | |||
743 | return 0; | ||
744 | } | ||
745 | |||
746 | static int rfkill_resume(struct device *dev) | ||
747 | { | ||
748 | struct rfkill *rfkill = to_rfkill(dev); | ||
749 | bool cur; | ||
750 | |||
751 | if (!rfkill->persistent) { | ||
752 | cur = !!(rfkill->state & RFKILL_BLOCK_SW); | ||
753 | rfkill_set_block(rfkill, cur); | ||
754 | } | ||
755 | |||
756 | rfkill_resume_polling(rfkill); | ||
757 | |||
758 | return 0; | ||
759 | } | ||
760 | |||
761 | static struct class rfkill_class = { | ||
762 | .name = "rfkill", | ||
763 | .dev_release = rfkill_release, | ||
764 | .dev_attrs = rfkill_dev_attrs, | ||
765 | .dev_uevent = rfkill_dev_uevent, | ||
766 | .suspend = rfkill_suspend, | ||
767 | .resume = rfkill_resume, | ||
768 | }; | ||
769 | |||
770 | bool rfkill_blocked(struct rfkill *rfkill) | ||
771 | { | ||
772 | unsigned long flags; | ||
773 | u32 state; | ||
774 | |||
775 | spin_lock_irqsave(&rfkill->lock, flags); | ||
776 | state = rfkill->state; | ||
777 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
778 | |||
779 | return !!(state & RFKILL_BLOCK_ANY); | ||
780 | } | ||
781 | EXPORT_SYMBOL(rfkill_blocked); | ||
782 | |||
783 | |||
784 | struct rfkill * __must_check rfkill_alloc(const char *name, | ||
785 | struct device *parent, | ||
786 | const enum rfkill_type type, | ||
787 | const struct rfkill_ops *ops, | ||
788 | void *ops_data) | ||
789 | { | ||
790 | struct rfkill *rfkill; | ||
791 | struct device *dev; | ||
792 | |||
793 | if (WARN_ON(!ops)) | ||
794 | return NULL; | ||
795 | |||
796 | if (WARN_ON(!ops->set_block)) | ||
797 | return NULL; | ||
798 | |||
799 | if (WARN_ON(!name)) | ||
800 | return NULL; | ||
801 | |||
802 | if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES)) | ||
803 | return NULL; | ||
804 | |||
805 | rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL); | ||
806 | if (!rfkill) | ||
807 | return NULL; | ||
808 | |||
809 | spin_lock_init(&rfkill->lock); | ||
810 | INIT_LIST_HEAD(&rfkill->node); | ||
811 | rfkill->type = type; | ||
812 | rfkill->name = name; | ||
813 | rfkill->ops = ops; | ||
814 | rfkill->data = ops_data; | ||
815 | |||
816 | dev = &rfkill->dev; | ||
817 | dev->class = &rfkill_class; | ||
818 | dev->parent = parent; | ||
819 | device_initialize(dev); | ||
820 | |||
821 | return rfkill; | ||
822 | } | ||
823 | EXPORT_SYMBOL(rfkill_alloc); | ||
824 | |||
825 | static void rfkill_poll(struct work_struct *work) | ||
826 | { | ||
827 | struct rfkill *rfkill; | ||
828 | |||
829 | rfkill = container_of(work, struct rfkill, poll_work.work); | ||
830 | |||
831 | /* | ||
832 | * Poll hardware state -- driver will use one of the | ||
833 | * rfkill_set{,_hw,_sw}_state functions and use its | ||
834 | * return value to update the current status. | ||
835 | */ | ||
836 | rfkill->ops->poll(rfkill, rfkill->data); | ||
837 | |||
838 | schedule_delayed_work(&rfkill->poll_work, | ||
839 | round_jiffies_relative(POLL_INTERVAL)); | ||
840 | } | ||
841 | |||
842 | static void rfkill_uevent_work(struct work_struct *work) | ||
843 | { | ||
844 | struct rfkill *rfkill; | ||
845 | |||
846 | rfkill = container_of(work, struct rfkill, uevent_work); | ||
847 | |||
848 | mutex_lock(&rfkill_global_mutex); | ||
849 | rfkill_event(rfkill); | ||
850 | mutex_unlock(&rfkill_global_mutex); | ||
851 | } | ||
852 | |||
853 | static void rfkill_sync_work(struct work_struct *work) | ||
854 | { | ||
855 | struct rfkill *rfkill; | ||
856 | bool cur; | ||
857 | |||
858 | rfkill = container_of(work, struct rfkill, sync_work); | ||
859 | |||
860 | mutex_lock(&rfkill_global_mutex); | ||
861 | cur = rfkill_global_states[rfkill->type].cur; | ||
862 | rfkill_set_block(rfkill, cur); | ||
863 | mutex_unlock(&rfkill_global_mutex); | ||
864 | } | ||
865 | |||
866 | int __must_check rfkill_register(struct rfkill *rfkill) | ||
867 | { | ||
868 | static unsigned long rfkill_no; | ||
869 | struct device *dev = &rfkill->dev; | ||
870 | int error; | ||
871 | |||
872 | BUG_ON(!rfkill); | ||
873 | |||
874 | mutex_lock(&rfkill_global_mutex); | ||
875 | |||
876 | if (rfkill->registered) { | ||
877 | error = -EALREADY; | ||
878 | goto unlock; | ||
879 | } | ||
880 | |||
881 | rfkill->idx = rfkill_no; | ||
882 | dev_set_name(dev, "rfkill%lu", rfkill_no); | ||
883 | rfkill_no++; | ||
884 | |||
885 | list_add_tail(&rfkill->node, &rfkill_list); | ||
886 | |||
887 | error = device_add(dev); | ||
888 | if (error) | ||
889 | goto remove; | ||
890 | |||
891 | error = rfkill_led_trigger_register(rfkill); | ||
892 | if (error) | ||
893 | goto devdel; | ||
894 | |||
895 | rfkill->registered = true; | ||
896 | |||
897 | INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll); | ||
898 | INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work); | ||
899 | INIT_WORK(&rfkill->sync_work, rfkill_sync_work); | ||
900 | |||
901 | if (rfkill->ops->poll) | ||
902 | schedule_delayed_work(&rfkill->poll_work, | ||
903 | round_jiffies_relative(POLL_INTERVAL)); | ||
904 | |||
905 | if (!rfkill->persistent || rfkill_epo_lock_active) { | ||
906 | schedule_work(&rfkill->sync_work); | ||
907 | } else { | ||
908 | #ifdef CONFIG_RFKILL_INPUT | ||
909 | bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW); | ||
910 | |||
911 | if (!atomic_read(&rfkill_input_disabled)) | ||
912 | __rfkill_switch_all(rfkill->type, soft_blocked); | ||
913 | #endif | ||
914 | } | ||
915 | |||
916 | rfkill_send_events(rfkill, RFKILL_OP_ADD); | ||
917 | |||
918 | mutex_unlock(&rfkill_global_mutex); | ||
919 | return 0; | ||
920 | |||
921 | devdel: | ||
922 | device_del(&rfkill->dev); | ||
923 | remove: | ||
924 | list_del_init(&rfkill->node); | ||
925 | unlock: | ||
926 | mutex_unlock(&rfkill_global_mutex); | ||
927 | return error; | ||
928 | } | ||
929 | EXPORT_SYMBOL(rfkill_register); | ||
930 | |||
931 | void rfkill_unregister(struct rfkill *rfkill) | ||
932 | { | ||
933 | BUG_ON(!rfkill); | ||
934 | |||
935 | if (rfkill->ops->poll) | ||
936 | cancel_delayed_work_sync(&rfkill->poll_work); | ||
937 | |||
938 | cancel_work_sync(&rfkill->uevent_work); | ||
939 | cancel_work_sync(&rfkill->sync_work); | ||
940 | |||
941 | rfkill->registered = false; | ||
942 | |||
943 | device_del(&rfkill->dev); | ||
944 | |||
945 | mutex_lock(&rfkill_global_mutex); | ||
946 | rfkill_send_events(rfkill, RFKILL_OP_DEL); | ||
947 | list_del_init(&rfkill->node); | ||
948 | mutex_unlock(&rfkill_global_mutex); | ||
949 | |||
950 | rfkill_led_trigger_unregister(rfkill); | ||
951 | } | ||
952 | EXPORT_SYMBOL(rfkill_unregister); | ||
953 | |||
954 | void rfkill_destroy(struct rfkill *rfkill) | ||
955 | { | ||
956 | if (rfkill) | ||
957 | put_device(&rfkill->dev); | ||
958 | } | ||
959 | EXPORT_SYMBOL(rfkill_destroy); | ||
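
/*
 * Illustrative sketch (not part of the original file): the full driver
 * lifecycle against the API above -- allocate, optionally seed a
 * persistent soft-block state, register, and tear down in reverse.
 * struct mydev and its fields are hypothetical.
 */
#if 0	/* example only */
static int mydev_probe(struct mydev *dev)
{
	int err;

	dev->rfkill = rfkill_alloc("mydev-wlan", dev->parent,
				   RFKILL_TYPE_WLAN,
				   &mydev_rfkill_ops, dev);
	if (!dev->rfkill)
		return -ENOMEM;

	/* the radio state survives suspend, so tell the core about it */
	rfkill_init_sw_state(dev->rfkill, dev->radio_disabled_in_nvram);

	err = rfkill_register(dev->rfkill);
	if (err)
		rfkill_destroy(dev->rfkill);
	return err;
}

static void mydev_remove(struct mydev *dev)
{
	rfkill_unregister(dev->rfkill);
	rfkill_destroy(dev->rfkill);
}
#endif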
960 | |||
961 | static int rfkill_fop_open(struct inode *inode, struct file *file) | ||
962 | { | ||
963 | struct rfkill_data *data; | ||
964 | struct rfkill *rfkill; | ||
965 | struct rfkill_int_event *ev, *tmp; | ||
966 | |||
967 | data = kzalloc(sizeof(*data), GFP_KERNEL); | ||
968 | if (!data) | ||
969 | return -ENOMEM; | ||
970 | |||
971 | INIT_LIST_HEAD(&data->events); | ||
972 | mutex_init(&data->mtx); | ||
973 | init_waitqueue_head(&data->read_wait); | ||
974 | |||
975 | mutex_lock(&rfkill_global_mutex); | ||
976 | mutex_lock(&data->mtx); | ||
977 | /* | ||
978 | * start getting events from elsewhere but hold mtx to get | ||
979 | * startup events added first | ||
980 | */ | ||
981 | list_add(&data->list, &rfkill_fds); | ||
982 | |||
983 | list_for_each_entry(rfkill, &rfkill_list, node) { | ||
984 | ev = kzalloc(sizeof(*ev), GFP_KERNEL); | ||
985 | if (!ev) | ||
986 | goto free; | ||
987 | rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD); | ||
988 | list_add_tail(&ev->list, &data->events); | ||
989 | } | ||
990 | mutex_unlock(&data->mtx); | ||
991 | mutex_unlock(&rfkill_global_mutex); | ||
992 | |||
993 | file->private_data = data; | ||
994 | |||
995 | return nonseekable_open(inode, file); | ||
996 | |||
997 | free: | ||
998 | mutex_unlock(&data->mtx); | ||
999 | mutex_unlock(&rfkill_global_mutex); | ||
1000 | mutex_destroy(&data->mtx); | ||
1001 | list_for_each_entry_safe(ev, tmp, &data->events, list) | ||
1002 | kfree(ev); | ||
1003 | kfree(data); | ||
1004 | return -ENOMEM; | ||
1005 | } | ||
1006 | |||
1007 | static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait) | ||
1008 | { | ||
1009 | struct rfkill_data *data = file->private_data; | ||
1010 | unsigned int res = POLLOUT | POLLWRNORM; | ||
1011 | |||
1012 | poll_wait(file, &data->read_wait, wait); | ||
1013 | |||
1014 | mutex_lock(&data->mtx); | ||
1015 | if (!list_empty(&data->events)) | ||
1016 | res = POLLIN | POLLRDNORM; | ||
1017 | mutex_unlock(&data->mtx); | ||
1018 | |||
1019 | return res; | ||
1020 | } | ||
1021 | |||
1022 | static bool rfkill_readable(struct rfkill_data *data) | ||
1023 | { | ||
1024 | bool r; | ||
1025 | |||
1026 | mutex_lock(&data->mtx); | ||
1027 | r = !list_empty(&data->events); | ||
1028 | mutex_unlock(&data->mtx); | ||
1029 | |||
1030 | return r; | ||
1031 | } | ||
1032 | |||
1033 | static ssize_t rfkill_fop_read(struct file *file, char __user *buf, | ||
1034 | size_t count, loff_t *pos) | ||
1035 | { | ||
1036 | struct rfkill_data *data = file->private_data; | ||
1037 | struct rfkill_int_event *ev; | ||
1038 | unsigned long sz; | ||
1039 | int ret; | ||
1040 | |||
1041 | mutex_lock(&data->mtx); | ||
1042 | |||
1043 | while (list_empty(&data->events)) { | ||
1044 | if (file->f_flags & O_NONBLOCK) { | ||
1045 | ret = -EAGAIN; | ||
1046 | goto out; | ||
1047 | } | ||
1048 | mutex_unlock(&data->mtx); | ||
1049 | ret = wait_event_interruptible(data->read_wait, | ||
1050 | rfkill_readable(data)); | ||
1051 | mutex_lock(&data->mtx); | ||
1052 | |||
1053 | if (ret) | ||
1054 | goto out; | ||
1055 | } | ||
1056 | |||
1057 | ev = list_first_entry(&data->events, struct rfkill_int_event, | ||
1058 | list); | ||
1059 | |||
1060 | sz = min_t(unsigned long, sizeof(ev->ev), count); | ||
1061 | ret = sz; | ||
1062 | if (copy_to_user(buf, &ev->ev, sz)) | ||
1063 | ret = -EFAULT; | ||
1064 | |||
1065 | list_del(&ev->list); | ||
1066 | kfree(ev); | ||
1067 | out: | ||
1068 | mutex_unlock(&data->mtx); | ||
1069 | return ret; | ||
1070 | } | ||
1071 | |||
1072 | static ssize_t rfkill_fop_write(struct file *file, const char __user *buf, | ||
1073 | size_t count, loff_t *pos) | ||
1074 | { | ||
1075 | struct rfkill *rfkill; | ||
1076 | struct rfkill_event ev; | ||
1077 | |||
1078 | /* we don't need the 'hard' variable but accept it */ | ||
1079 | if (count < sizeof(ev) - 1) | ||
1080 | return -EINVAL; | ||
1081 | |||
1082 | if (copy_from_user(&ev, buf, sizeof(ev) - 1)) | ||
1083 | return -EFAULT; | ||
1084 | |||
1085 | if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL) | ||
1086 | return -EINVAL; | ||
1087 | |||
1088 | if (ev.type >= NUM_RFKILL_TYPES) | ||
1089 | return -EINVAL; | ||
1090 | |||
1091 | mutex_lock(&rfkill_global_mutex); | ||
1092 | |||
1093 | if (ev.op == RFKILL_OP_CHANGE_ALL) { | ||
1094 | if (ev.type == RFKILL_TYPE_ALL) { | ||
1095 | enum rfkill_type i; | ||
1096 | for (i = 0; i < NUM_RFKILL_TYPES; i++) | ||
1097 | rfkill_global_states[i].cur = ev.soft; | ||
1098 | } else { | ||
1099 | rfkill_global_states[ev.type].cur = ev.soft; | ||
1100 | } | ||
1101 | } | ||
1102 | |||
1103 | list_for_each_entry(rfkill, &rfkill_list, node) { | ||
1104 | if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL) | ||
1105 | continue; | ||
1106 | |||
1107 | if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL) | ||
1108 | continue; | ||
1109 | |||
1110 | rfkill_set_block(rfkill, ev.soft); | ||
1111 | } | ||
1112 | mutex_unlock(&rfkill_global_mutex); | ||
1113 | |||
1114 | return count; | ||
1115 | } | ||
1116 | |||
1117 | static int rfkill_fop_release(struct inode *inode, struct file *file) | ||
1118 | { | ||
1119 | struct rfkill_data *data = file->private_data; | ||
1120 | struct rfkill_int_event *ev, *tmp; | ||
1121 | |||
1122 | mutex_lock(&rfkill_global_mutex); | ||
1123 | list_del(&data->list); | ||
1124 | mutex_unlock(&rfkill_global_mutex); | ||
1125 | |||
1126 | mutex_destroy(&data->mtx); | ||
1127 | list_for_each_entry_safe(ev, tmp, &data->events, list) | ||
1128 | kfree(ev); | ||
1129 | |||
1130 | #ifdef CONFIG_RFKILL_INPUT | ||
1131 | if (data->input_handler) | ||
1132 | if (atomic_dec_return(&rfkill_input_disabled) == 0) | ||
1133 | printk(KERN_DEBUG "rfkill: input handler enabled\n"); | ||
1134 | #endif | ||
1135 | |||
1136 | kfree(data); | ||
1137 | |||
1138 | return 0; | ||
1139 | } | ||
1140 | |||
1141 | #ifdef CONFIG_RFKILL_INPUT | ||
1142 | static long rfkill_fop_ioctl(struct file *file, unsigned int cmd, | ||
1143 | unsigned long arg) | ||
1144 | { | ||
1145 | struct rfkill_data *data = file->private_data; | ||
1146 | |||
1147 | if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC) | ||
1148 | return -ENOSYS; | ||
1149 | |||
1150 | if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT) | ||
1151 | return -ENOSYS; | ||
1152 | |||
1153 | mutex_lock(&data->mtx); | ||
1154 | |||
1155 | if (!data->input_handler) { | ||
1156 | if (atomic_inc_return(&rfkill_input_disabled) == 1) | ||
1157 | printk(KERN_DEBUG "rfkill: input handler disabled\n"); | ||
1158 | data->input_handler = true; | ||
1159 | } | ||
1160 | |||
1161 | mutex_unlock(&data->mtx); | ||
1162 | |||
1163 | return 0; | ||
1164 | } | ||
1165 | #endif | ||
1166 | |||
1167 | static const struct file_operations rfkill_fops = { | ||
1168 | .open = rfkill_fop_open, | ||
1169 | .read = rfkill_fop_read, | ||
1170 | .write = rfkill_fop_write, | ||
1171 | .poll = rfkill_fop_poll, | ||
1172 | .release = rfkill_fop_release, | ||
1173 | #ifdef CONFIG_RFKILL_INPUT | ||
1174 | .unlocked_ioctl = rfkill_fop_ioctl, | ||
1175 | .compat_ioctl = rfkill_fop_ioctl, | ||
1176 | #endif | ||
1177 | }; | ||
1178 | |||
1179 | static struct miscdevice rfkill_miscdev = { | ||
1180 | .name = "rfkill", | ||
1181 | .fops = &rfkill_fops, | ||
1182 | .minor = MISC_DYNAMIC_MINOR, | ||
1183 | }; | ||
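
/*
 * Illustrative sketch (not part of the original file): the userspace
 * view of the /dev/rfkill protocol implemented by the fops above.
 * Each read() returns one struct rfkill_event; a write() of an event
 * with RFKILL_OP_CHANGE_ALL soft-blocks or unblocks whole radio types.
 */
#if 0	/* example only -- userspace code, built against <linux/rfkill.h> */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/rfkill.h>

int main(void)
{
	struct rfkill_event ev = {
		.op	= RFKILL_OP_CHANGE_ALL,
		.type	= RFKILL_TYPE_WLAN,
		.soft	= 1,	/* soft-block all wlan radios */
	};
	int fd = open("/dev/rfkill", O_RDWR | O_NONBLOCK);

	if (fd < 0)
		return 1;
	if (write(fd, &ev, sizeof(ev)) < 0)
		perror("write");
	/* drain the queued events (initial RFKILL_OP_ADD snapshot plus
	 * any changes); O_NONBLOCK makes read() stop with -EAGAIN */
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("idx %u type %u op %u soft %u hard %u\n",
		       ev.idx, ev.type, ev.op, ev.soft, ev.hard);
	close(fd);
	return 0;
}
#endif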
1184 | |||
1185 | static int __init rfkill_init(void) | ||
1186 | { | ||
1187 | int error; | ||
1188 | int i; | ||
1189 | |||
1190 | for (i = 0; i < NUM_RFKILL_TYPES; i++) | ||
1191 | rfkill_global_states[i].cur = !rfkill_default_state; | ||
1192 | |||
1193 | error = class_register(&rfkill_class); | ||
1194 | if (error) | ||
1195 | goto out; | ||
1196 | |||
1197 | error = misc_register(&rfkill_miscdev); | ||
1198 | if (error) { | ||
1199 | class_unregister(&rfkill_class); | ||
1200 | goto out; | ||
1201 | } | ||
1202 | |||
1203 | #ifdef CONFIG_RFKILL_INPUT | ||
1204 | error = rfkill_handler_init(); | ||
1205 | if (error) { | ||
1206 | misc_deregister(&rfkill_miscdev); | ||
1207 | class_unregister(&rfkill_class); | ||
1208 | goto out; | ||
1209 | } | ||
1210 | #endif | ||
1211 | |||
1212 | out: | ||
1213 | return error; | ||
1214 | } | ||
1215 | subsys_initcall(rfkill_init); | ||
1216 | |||
1217 | static void __exit rfkill_exit(void) | ||
1218 | { | ||
1219 | #ifdef CONFIG_RFKILL_INPUT | ||
1220 | rfkill_handler_exit(); | ||
1221 | #endif | ||
1222 | misc_deregister(&rfkill_miscdev); | ||
1223 | class_unregister(&rfkill_class); | ||
1224 | } | ||
1225 | module_exit(rfkill_exit); | ||