Diffstat (limited to 'net/rfkill/core.c')
-rw-r--r--  net/rfkill/core.c | 1205
1 file changed, 1205 insertions(+), 0 deletions(-)
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
new file mode 100644
index 000000000000..4e68ab439d5d
--- /dev/null
+++ b/net/rfkill/core.c
@@ -0,0 +1,1205 @@
1 | /* | ||
2 | * Copyright (C) 2006 - 2007 Ivo van Doorn | ||
3 | * Copyright (C) 2007 Dmitry Torokhov | ||
4 | * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the | ||
18 | * Free Software Foundation, Inc., | ||
19 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | */ | ||
21 | |||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/workqueue.h> | ||
26 | #include <linux/capability.h> | ||
27 | #include <linux/list.h> | ||
28 | #include <linux/mutex.h> | ||
29 | #include <linux/rfkill.h> | ||
30 | #include <linux/spinlock.h> | ||
31 | #include <linux/miscdevice.h> | ||
32 | #include <linux/wait.h> | ||
33 | #include <linux/poll.h> | ||
34 | #include <linux/fs.h> | ||
35 | |||
36 | #include "rfkill.h" | ||
37 | |||
38 | #define POLL_INTERVAL (5 * HZ) | ||
39 | |||
40 | #define RFKILL_BLOCK_HW BIT(0) | ||
41 | #define RFKILL_BLOCK_SW BIT(1) | ||
42 | #define RFKILL_BLOCK_SW_PREV BIT(2) | ||
43 | #define RFKILL_BLOCK_ANY (RFKILL_BLOCK_HW |\ | ||
44 | RFKILL_BLOCK_SW |\ | ||
45 | RFKILL_BLOCK_SW_PREV) | ||
46 | #define RFKILL_BLOCK_SW_SETCALL BIT(31) | ||
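/*
 * Editor's note -- a minimal sketch (not part of this patch) of how the
 * bits above combine: the HW and SW blocks are independent, _SW_PREV and
 * _SW_SETCALL only track an in-flight ops->set_block() call, and a radio
 * counts as blocked whenever any bit in RFKILL_BLOCK_ANY is set:
 *
 *	static bool state_blocked(unsigned long state)
 *	{
 *		return state & RFKILL_BLOCK_ANY;
 *	}
 */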
47 | |||
48 | struct rfkill { | ||
49 | spinlock_t lock; | ||
50 | |||
51 | const char *name; | ||
52 | enum rfkill_type type; | ||
53 | |||
54 | unsigned long state; | ||
55 | |||
56 | u32 idx; | ||
57 | |||
58 | bool registered; | ||
59 | bool suspended; | ||
60 | bool persistent; | ||
61 | |||
62 | const struct rfkill_ops *ops; | ||
63 | void *data; | ||
64 | |||
65 | #ifdef CONFIG_RFKILL_LEDS | ||
66 | struct led_trigger led_trigger; | ||
67 | const char *ledtrigname; | ||
68 | #endif | ||
69 | |||
70 | struct device dev; | ||
71 | struct list_head node; | ||
72 | |||
73 | struct delayed_work poll_work; | ||
74 | struct work_struct uevent_work; | ||
75 | struct work_struct sync_work; | ||
76 | }; | ||
77 | #define to_rfkill(d) container_of(d, struct rfkill, dev) | ||
78 | |||
79 | struct rfkill_int_event { | ||
80 | struct list_head list; | ||
81 | struct rfkill_event ev; | ||
82 | }; | ||
83 | |||
84 | struct rfkill_data { | ||
85 | struct list_head list; | ||
86 | struct list_head events; | ||
87 | struct mutex mtx; | ||
88 | wait_queue_head_t read_wait; | ||
89 | bool input_handler; | ||
90 | }; | ||
91 | |||
92 | |||
93 | MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>"); | ||
94 | MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>"); | ||
95 | MODULE_DESCRIPTION("RF switch support"); | ||
96 | MODULE_LICENSE("GPL"); | ||
97 | |||
98 | |||
99 | /* | ||
100 | * The locking here should be made much smarter, we currently have | ||
101 | * a bit of a stupid situation because drivers might want to register | ||
102 | * the rfkill struct under their own lock, and take this lock during | ||
103 | * rfkill method calls -- which will cause an AB-BA deadlock situation. | ||
104 | * | ||
105 | * To fix that, we need to rework this code here to be mostly lock-free | ||
106 | * and only use the mutex for list manipulations, not to protect the | ||
107 | * various other global variables. Then we can avoid holding the mutex | ||
108 | * around driver operations, and all is happy. | ||
109 | */ | ||
110 | static LIST_HEAD(rfkill_list); /* list of registered rf switches */ | ||
111 | static DEFINE_MUTEX(rfkill_global_mutex); | ||
112 | static LIST_HEAD(rfkill_fds); /* list of open fds of /dev/rfkill */ | ||
113 | |||
114 | static unsigned int rfkill_default_state = 1; | ||
115 | module_param_named(default_state, rfkill_default_state, uint, 0444); | ||
116 | MODULE_PARM_DESC(default_state, | ||
117 | "Default initial state for all radio types, 0 = radio off"); | ||
118 | |||
119 | static struct { | ||
120 | bool cur, sav; | ||
121 | } rfkill_global_states[NUM_RFKILL_TYPES]; | ||
122 | |||
123 | static bool rfkill_epo_lock_active; | ||
124 | |||
125 | |||
126 | #ifdef CONFIG_RFKILL_LEDS | ||
127 | static void rfkill_led_trigger_event(struct rfkill *rfkill) | ||
128 | { | ||
129 | struct led_trigger *trigger; | ||
130 | |||
131 | if (!rfkill->registered) | ||
132 | return; | ||
133 | |||
134 | trigger = &rfkill->led_trigger; | ||
135 | |||
136 | if (rfkill->state & RFKILL_BLOCK_ANY) | ||
137 | led_trigger_event(trigger, LED_OFF); | ||
138 | else | ||
139 | led_trigger_event(trigger, LED_FULL); | ||
140 | } | ||
141 | |||
142 | static void rfkill_led_trigger_activate(struct led_classdev *led) | ||
143 | { | ||
144 | struct rfkill *rfkill; | ||
145 | |||
146 | rfkill = container_of(led->trigger, struct rfkill, led_trigger); | ||
147 | |||
148 | rfkill_led_trigger_event(rfkill); | ||
149 | } | ||
150 | |||
151 | const char *rfkill_get_led_trigger_name(struct rfkill *rfkill) | ||
152 | { | ||
153 | return rfkill->led_trigger.name; | ||
154 | } | ||
155 | EXPORT_SYMBOL(rfkill_get_led_trigger_name); | ||
156 | |||
157 | void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name) | ||
158 | { | ||
159 | BUG_ON(!rfkill); | ||
160 | |||
161 | rfkill->ledtrigname = name; | ||
162 | } | ||
163 | EXPORT_SYMBOL(rfkill_set_led_trigger_name); | ||
164 | |||
165 | static int rfkill_led_trigger_register(struct rfkill *rfkill) | ||
166 | { | ||
167 | rfkill->led_trigger.name = rfkill->ledtrigname | ||
168 | ? : dev_name(&rfkill->dev); | ||
169 | rfkill->led_trigger.activate = rfkill_led_trigger_activate; | ||
170 | return led_trigger_register(&rfkill->led_trigger); | ||
171 | } | ||
172 | |||
173 | static void rfkill_led_trigger_unregister(struct rfkill *rfkill) | ||
174 | { | ||
175 | led_trigger_unregister(&rfkill->led_trigger); | ||
176 | } | ||
177 | #else | ||
178 | static void rfkill_led_trigger_event(struct rfkill *rfkill) | ||
179 | { | ||
180 | } | ||
181 | |||
182 | static inline int rfkill_led_trigger_register(struct rfkill *rfkill) | ||
183 | { | ||
184 | return 0; | ||
185 | } | ||
186 | |||
187 | static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill) | ||
188 | { | ||
189 | } | ||
190 | #endif /* CONFIG_RFKILL_LEDS */ | ||
191 | |||
192 | static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill, | ||
193 | enum rfkill_operation op) | ||
194 | { | ||
195 | unsigned long flags; | ||
196 | |||
197 | ev->idx = rfkill->idx; | ||
198 | ev->type = rfkill->type; | ||
199 | ev->op = op; | ||
200 | |||
201 | spin_lock_irqsave(&rfkill->lock, flags); | ||
202 | ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW); | ||
203 | ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW | | ||
204 | RFKILL_BLOCK_SW_PREV)); | ||
205 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
206 | } | ||
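/*
 * Editor's note -- the event filled in above is the userspace ABI from
 * <linux/rfkill.h>; at the time of this patch it looks like the sketch
 * below (reproduced for convenience, the header is authoritative):
 *
 *	struct rfkill_event {
 *		__u32 idx;
 *		__u8  type;
 *		__u8  op;
 *		__u8  soft, hard;
 *	} __packed;
 */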
207 | |||
208 | static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op) | ||
209 | { | ||
210 | struct rfkill_data *data; | ||
211 | struct rfkill_int_event *ev; | ||
212 | |||
213 | list_for_each_entry(data, &rfkill_fds, list) { | ||
214 | ev = kzalloc(sizeof(*ev), GFP_KERNEL); | ||
215 | if (!ev) | ||
216 | continue; | ||
217 | rfkill_fill_event(&ev->ev, rfkill, op); | ||
218 | mutex_lock(&data->mtx); | ||
219 | list_add_tail(&ev->list, &data->events); | ||
220 | mutex_unlock(&data->mtx); | ||
221 | wake_up_interruptible(&data->read_wait); | ||
222 | } | ||
223 | } | ||
224 | |||
225 | static void rfkill_event(struct rfkill *rfkill) | ||
226 | { | ||
227 | if (!rfkill->registered || rfkill->suspended) | ||
228 | return; | ||
229 | |||
230 | kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE); | ||
231 | |||
232 | /* also send event to /dev/rfkill */ | ||
233 | rfkill_send_events(rfkill, RFKILL_OP_CHANGE); | ||
234 | } | ||
235 | |||
236 | static bool __rfkill_set_hw_state(struct rfkill *rfkill, | ||
237 | bool blocked, bool *change) | ||
238 | { | ||
239 | unsigned long flags; | ||
240 | bool prev, any; | ||
241 | |||
242 | BUG_ON(!rfkill); | ||
243 | |||
244 | spin_lock_irqsave(&rfkill->lock, flags); | ||
245 | prev = !!(rfkill->state & RFKILL_BLOCK_HW); | ||
246 | if (blocked) | ||
247 | rfkill->state |= RFKILL_BLOCK_HW; | ||
248 | else | ||
249 | rfkill->state &= ~RFKILL_BLOCK_HW; | ||
250 | *change = prev != blocked; | ||
251 | any = rfkill->state & RFKILL_BLOCK_ANY; | ||
252 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
253 | |||
254 | rfkill_led_trigger_event(rfkill); | ||
255 | |||
256 | return any; | ||
257 | } | ||
258 | |||
259 | /** | ||
260 | * rfkill_set_block - wrapper for set_block method | ||
261 | * | ||
262 | * @rfkill: the rfkill struct to use | ||
263 | * @blocked: the new software state | ||
264 | * | ||
265 | * Calls the set_block method (when applicable) and handles notifications | ||
266 | * etc. as well. | ||
267 | */ | ||
268 | static void rfkill_set_block(struct rfkill *rfkill, bool blocked) | ||
269 | { | ||
270 | unsigned long flags; | ||
271 | int err; | ||
272 | |||
273 | /* | ||
274 | * Some platforms (...!) generate input events which affect the | ||
275 | * _hard_ kill state -- whenever something tries to change the | ||
276 | * current software state, query the hardware state too. | ||
277 | */ | ||
278 | if (rfkill->ops->query) | ||
279 | rfkill->ops->query(rfkill, rfkill->data); | ||
280 | |||
281 | spin_lock_irqsave(&rfkill->lock, flags); | ||
282 | if (rfkill->state & RFKILL_BLOCK_SW) | ||
283 | rfkill->state |= RFKILL_BLOCK_SW_PREV; | ||
284 | else | ||
285 | rfkill->state &= ~RFKILL_BLOCK_SW_PREV; | ||
286 | |||
287 | if (blocked) | ||
288 | rfkill->state |= RFKILL_BLOCK_SW; | ||
289 | else | ||
290 | rfkill->state &= ~RFKILL_BLOCK_SW; | ||
291 | |||
292 | rfkill->state |= RFKILL_BLOCK_SW_SETCALL; | ||
293 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
294 | |||
295 | if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP)) | ||
296 | return; | ||
297 | |||
298 | err = rfkill->ops->set_block(rfkill->data, blocked); | ||
299 | |||
300 | spin_lock_irqsave(&rfkill->lock, flags); | ||
301 | if (err) { | ||
302 | /* | ||
303 | * Failed -- reset status to _prev, this may be different | ||
304 | * from what we set _PREV to earlier in this function | ||
305 | * if rfkill_set_sw_state was invoked. | ||
306 | */ | ||
307 | if (rfkill->state & RFKILL_BLOCK_SW_PREV) | ||
308 | rfkill->state |= RFKILL_BLOCK_SW; | ||
309 | else | ||
310 | rfkill->state &= ~RFKILL_BLOCK_SW; | ||
311 | } | ||
312 | rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL; | ||
313 | rfkill->state &= ~RFKILL_BLOCK_SW_PREV; | ||
314 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
315 | |||
316 | rfkill_led_trigger_event(rfkill); | ||
317 | rfkill_event(rfkill); | ||
318 | } | ||
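/*
 * Editor's note -- a minimal driver-side sketch of the ops used above
 * (hypothetical "foo" names, not part of this patch). set_block() gets
 * the opaque pointer that was passed to rfkill_alloc() and returns 0 on
 * success; a non-zero return makes the core roll the SW bit back:
 *
 *	static int foo_set_block(void *data, bool blocked)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		return foo_write_radio_enable(priv, !blocked);
 *	}
 *
 *	static const struct rfkill_ops foo_rfkill_ops = {
 *		.set_block = foo_set_block,
 *	};
 */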
319 | |||
320 | #ifdef CONFIG_RFKILL_INPUT | ||
321 | static atomic_t rfkill_input_disabled = ATOMIC_INIT(0); | ||
322 | |||
323 | /** | ||
324 | * __rfkill_switch_all - Toggle state of all switches of given type | ||
325 | * @type: type of interfaces to be affected | ||
326 | * @blocked: the new state | ||
327 | * | ||
328 | * This function sets the state of all switches of given type, | ||
329 | * unless a specific switch is claimed by userspace (in which case, | ||
330 | * that switch is left alone) or suspended. | ||
331 | * | ||
332 | * Caller must have acquired rfkill_global_mutex. | ||
333 | */ | ||
334 | static void __rfkill_switch_all(const enum rfkill_type type, bool blocked) | ||
335 | { | ||
336 | struct rfkill *rfkill; | ||
337 | |||
338 | rfkill_global_states[type].cur = blocked; | ||
339 | list_for_each_entry(rfkill, &rfkill_list, node) { | ||
340 | if (rfkill->type != type) | ||
341 | continue; | ||
342 | |||
343 | rfkill_set_block(rfkill, blocked); | ||
344 | } | ||
345 | } | ||
346 | |||
347 | /** | ||
348 | * rfkill_switch_all - Toggle state of all switches of given type | ||
349 | * @type: type of interfaces to be affected | ||
350 | * @blocked: the new state | ||
351 | * | ||
352 | * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked). | ||
353 | * Please refer to __rfkill_switch_all() for details. | ||
354 | * | ||
355 | * Does nothing if the EPO lock is active. | ||
356 | */ | ||
357 | void rfkill_switch_all(enum rfkill_type type, bool blocked) | ||
358 | { | ||
359 | if (atomic_read(&rfkill_input_disabled)) | ||
360 | return; | ||
361 | |||
362 | mutex_lock(&rfkill_global_mutex); | ||
363 | |||
364 | if (!rfkill_epo_lock_active) | ||
365 | __rfkill_switch_all(type, blocked); | ||
366 | |||
367 | mutex_unlock(&rfkill_global_mutex); | ||
368 | } | ||
369 | |||
370 | /** | ||
371 | * rfkill_epo - emergency power off all transmitters | ||
372 | * | ||
373 | * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED, | ||
374 | * ignoring everything in its path but rfkill_global_mutex. | ||
375 | * | ||
376 | * The global state before the EPO is saved and can be restored later | ||
377 | * using rfkill_restore_states(). | ||
378 | */ | ||
379 | void rfkill_epo(void) | ||
380 | { | ||
381 | struct rfkill *rfkill; | ||
382 | int i; | ||
383 | |||
384 | if (atomic_read(&rfkill_input_disabled)) | ||
385 | return; | ||
386 | |||
387 | mutex_lock(&rfkill_global_mutex); | ||
388 | |||
389 | rfkill_epo_lock_active = true; | ||
390 | list_for_each_entry(rfkill, &rfkill_list, node) | ||
391 | rfkill_set_block(rfkill, true); | ||
392 | |||
393 | for (i = 0; i < NUM_RFKILL_TYPES; i++) { | ||
394 | rfkill_global_states[i].sav = rfkill_global_states[i].cur; | ||
395 | rfkill_global_states[i].cur = true; | ||
396 | } | ||
397 | |||
398 | mutex_unlock(&rfkill_global_mutex); | ||
399 | } | ||
400 | |||
401 | /** | ||
402 | * rfkill_restore_states - restore global states | ||
403 | * | ||
404 | * Restore (and sync switches to) the global state from the | ||
405 | * states saved in rfkill_global_states. This can undo the effects of | ||
406 | * a call to rfkill_epo(). | ||
407 | */ | ||
408 | void rfkill_restore_states(void) | ||
409 | { | ||
410 | int i; | ||
411 | |||
412 | if (atomic_read(&rfkill_input_disabled)) | ||
413 | return; | ||
414 | |||
415 | mutex_lock(&rfkill_global_mutex); | ||
416 | |||
417 | rfkill_epo_lock_active = false; | ||
418 | for (i = 0; i < NUM_RFKILL_TYPES; i++) | ||
419 | __rfkill_switch_all(i, rfkill_global_states[i].sav); | ||
420 | mutex_unlock(&rfkill_global_mutex); | ||
421 | } | ||
422 | |||
423 | /** | ||
424 | * rfkill_remove_epo_lock - unlock state changes | ||
425 | * | ||
426 | * Used by rfkill-input to manually unlock state changes, when | ||
427 | * the EPO switch is deactivated. | ||
428 | */ | ||
429 | void rfkill_remove_epo_lock(void) | ||
430 | { | ||
431 | if (atomic_read(&rfkill_input_disabled)) | ||
432 | return; | ||
433 | |||
434 | mutex_lock(&rfkill_global_mutex); | ||
435 | rfkill_epo_lock_active = false; | ||
436 | mutex_unlock(&rfkill_global_mutex); | ||
437 | } | ||
438 | |||
439 | /** | ||
440 | * rfkill_is_epo_lock_active - returns true if EPO is active | ||
441 | * | ||
442 | * Returns 0 (false) if there is NOT an active EPO condition, | ||
443 | * and 1 (true) if there is an active EPO condition, which | ||
444 | * locks all radios in one of the BLOCKED states. | ||
445 | * | ||
446 | * Can be called in atomic context. | ||
447 | */ | ||
448 | bool rfkill_is_epo_lock_active(void) | ||
449 | { | ||
450 | return rfkill_epo_lock_active; | ||
451 | } | ||
452 | |||
453 | /** | ||
454 | * rfkill_get_global_sw_state - returns global state for a type | ||
455 | * @type: the type to get the global state of | ||
456 | * | ||
457 | * Returns the current global state for a given wireless | ||
458 | * device type. | ||
459 | */ | ||
460 | bool rfkill_get_global_sw_state(const enum rfkill_type type) | ||
461 | { | ||
462 | return rfkill_global_states[type].cur; | ||
463 | } | ||
464 | #endif | ||
465 | |||
466 | |||
467 | bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) | ||
468 | { | ||
469 | bool ret, change; | ||
470 | |||
471 | ret = __rfkill_set_hw_state(rfkill, blocked, &change); | ||
472 | |||
473 | if (!rfkill->registered) | ||
474 | return ret; | ||
475 | |||
476 | if (change) | ||
477 | schedule_work(&rfkill->uevent_work); | ||
478 | |||
479 | return ret; | ||
480 | } | ||
481 | EXPORT_SYMBOL(rfkill_set_hw_state); | ||
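/*
 * Editor's note -- a typical (hypothetical) caller is a driver reporting
 * a hardware kill-switch change, e.g. from its interrupt handler; the
 * return value says whether the transmitter is now blocked for any
 * reason, so the driver can gate TX on it:
 *
 *	bool blocked = gpio_get_value(priv->killswitch_gpio);
 *
 *	if (rfkill_set_hw_state(priv->rfkill, blocked))
 *		foo_stop_tx(priv);
 */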
482 | |||
483 | static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) | ||
484 | { | ||
485 | u32 bit = RFKILL_BLOCK_SW; | ||
486 | |||
487 | /* if in an ops->set_block right now, use the other bit */ | ||
488 | if (rfkill->state & RFKILL_BLOCK_SW_SETCALL) | ||
489 | bit = RFKILL_BLOCK_SW_PREV; | ||
490 | |||
491 | if (blocked) | ||
492 | rfkill->state |= bit; | ||
493 | else | ||
494 | rfkill->state &= ~bit; | ||
495 | } | ||
496 | |||
497 | bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) | ||
498 | { | ||
499 | unsigned long flags; | ||
500 | bool prev, hwblock; | ||
501 | |||
502 | BUG_ON(!rfkill); | ||
503 | |||
504 | spin_lock_irqsave(&rfkill->lock, flags); | ||
505 | prev = !!(rfkill->state & RFKILL_BLOCK_SW); | ||
506 | __rfkill_set_sw_state(rfkill, blocked); | ||
507 | hwblock = !!(rfkill->state & RFKILL_BLOCK_HW); | ||
508 | blocked = blocked || hwblock; | ||
509 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
510 | |||
511 | if (!rfkill->registered) { | ||
512 | rfkill->persistent = true; | ||
513 | } else { | ||
514 | if (prev != blocked && !hwblock) | ||
515 | schedule_work(&rfkill->uevent_work); | ||
516 | |||
517 | rfkill_led_trigger_event(rfkill); | ||
518 | } | ||
519 | |||
520 | return blocked; | ||
521 | } | ||
522 | EXPORT_SYMBOL(rfkill_set_sw_state); | ||
523 | |||
524 | void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) | ||
525 | { | ||
526 | unsigned long flags; | ||
527 | bool swprev, hwprev; | ||
528 | |||
529 | BUG_ON(!rfkill); | ||
530 | |||
531 | spin_lock_irqsave(&rfkill->lock, flags); | ||
532 | |||
533 | /* | ||
534 | * No need to care about prev/setblock ... this is for uevent only | ||
535 | * and that will get triggered by rfkill_set_block anyway. | ||
536 | */ | ||
537 | swprev = !!(rfkill->state & RFKILL_BLOCK_SW); | ||
538 | hwprev = !!(rfkill->state & RFKILL_BLOCK_HW); | ||
539 | __rfkill_set_sw_state(rfkill, sw); | ||
540 | |||
541 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
542 | |||
543 | if (!rfkill->registered) { | ||
544 | rfkill->persistent = true; | ||
545 | } else { | ||
546 | if (swprev != sw || hwprev != hw) | ||
547 | schedule_work(&rfkill->uevent_work); | ||
548 | |||
549 | rfkill_led_trigger_event(rfkill); | ||
550 | } | ||
551 | } | ||
552 | EXPORT_SYMBOL(rfkill_set_states); | ||
553 | |||
554 | static ssize_t rfkill_name_show(struct device *dev, | ||
555 | struct device_attribute *attr, | ||
556 | char *buf) | ||
557 | { | ||
558 | struct rfkill *rfkill = to_rfkill(dev); | ||
559 | |||
560 | return sprintf(buf, "%s\n", rfkill->name); | ||
561 | } | ||
562 | |||
563 | static const char *rfkill_get_type_str(enum rfkill_type type) | ||
564 | { | ||
565 | switch (type) { | ||
566 | case RFKILL_TYPE_WLAN: | ||
567 | return "wlan"; | ||
568 | case RFKILL_TYPE_BLUETOOTH: | ||
569 | return "bluetooth"; | ||
570 | case RFKILL_TYPE_UWB: | ||
571 | return "ultrawideband"; | ||
572 | case RFKILL_TYPE_WIMAX: | ||
573 | return "wimax"; | ||
574 | case RFKILL_TYPE_WWAN: | ||
575 | return "wwan"; | ||
576 | default: | ||
577 | BUG(); | ||
578 | } | ||
579 | |||
580 | BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_WWAN + 1); | ||
581 | } | ||
582 | |||
583 | static ssize_t rfkill_type_show(struct device *dev, | ||
584 | struct device_attribute *attr, | ||
585 | char *buf) | ||
586 | { | ||
587 | struct rfkill *rfkill = to_rfkill(dev); | ||
588 | |||
589 | return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type)); | ||
590 | } | ||
591 | |||
592 | static ssize_t rfkill_idx_show(struct device *dev, | ||
593 | struct device_attribute *attr, | ||
594 | char *buf) | ||
595 | { | ||
596 | struct rfkill *rfkill = to_rfkill(dev); | ||
597 | |||
598 | return sprintf(buf, "%d\n", rfkill->idx); | ||
599 | } | ||
600 | |||
601 | static u8 user_state_from_blocked(unsigned long state) | ||
602 | { | ||
603 | if (state & RFKILL_BLOCK_HW) | ||
604 | return RFKILL_USER_STATE_HARD_BLOCKED; | ||
605 | if (state & RFKILL_BLOCK_SW) | ||
606 | return RFKILL_USER_STATE_SOFT_BLOCKED; | ||
607 | |||
608 | return RFKILL_USER_STATE_UNBLOCKED; | ||
609 | } | ||
610 | |||
611 | static ssize_t rfkill_state_show(struct device *dev, | ||
612 | struct device_attribute *attr, | ||
613 | char *buf) | ||
614 | { | ||
615 | struct rfkill *rfkill = to_rfkill(dev); | ||
616 | unsigned long flags; | ||
617 | u32 state; | ||
618 | |||
619 | spin_lock_irqsave(&rfkill->lock, flags); | ||
620 | state = rfkill->state; | ||
621 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
622 | |||
623 | return sprintf(buf, "%d\n", user_state_from_blocked(state)); | ||
624 | } | ||
625 | |||
626 | static ssize_t rfkill_state_store(struct device *dev, | ||
627 | struct device_attribute *attr, | ||
628 | const char *buf, size_t count) | ||
629 | { | ||
630 | /* | ||
631 | * The intention was that userspace can only take control over | ||
632 | * a given device when/if rfkill-input doesn't control it due | ||
633 | * to user_claim. Since user_claim is currently unsupported, | ||
634 | * we never support changing the state from userspace -- this | ||
635 | * can be implemented again later. | ||
636 | */ | ||
637 | |||
638 | return -EPERM; | ||
639 | } | ||
640 | |||
641 | static ssize_t rfkill_claim_show(struct device *dev, | ||
642 | struct device_attribute *attr, | ||
643 | char *buf) | ||
644 | { | ||
645 | return sprintf(buf, "%d\n", 0); | ||
646 | } | ||
647 | |||
648 | static ssize_t rfkill_claim_store(struct device *dev, | ||
649 | struct device_attribute *attr, | ||
650 | const char *buf, size_t count) | ||
651 | { | ||
652 | return -EOPNOTSUPP; | ||
653 | } | ||
654 | |||
655 | static struct device_attribute rfkill_dev_attrs[] = { | ||
656 | __ATTR(name, S_IRUGO, rfkill_name_show, NULL), | ||
657 | __ATTR(type, S_IRUGO, rfkill_type_show, NULL), | ||
658 | __ATTR(index, S_IRUGO, rfkill_idx_show, NULL), | ||
659 | __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store), | ||
660 | __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store), | ||
661 | __ATTR_NULL | ||
662 | }; | ||
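/*
 * Editor's note: these attributes appear as
 * /sys/class/rfkill/rfkill<N>/{name,type,index,state,claim}. As
 * implemented above, "state" reports the RFKILL_USER_STATE_* value but
 * rejects writes (-EPERM), and "claim" always reads 0 and rejects
 * writes (-EOPNOTSUPP).
 */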
663 | |||
664 | static void rfkill_release(struct device *dev) | ||
665 | { | ||
666 | struct rfkill *rfkill = to_rfkill(dev); | ||
667 | |||
668 | kfree(rfkill); | ||
669 | } | ||
670 | |||
671 | static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env) | ||
672 | { | ||
673 | struct rfkill *rfkill = to_rfkill(dev); | ||
674 | unsigned long flags; | ||
675 | u32 state; | ||
676 | int error; | ||
677 | |||
678 | error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name); | ||
679 | if (error) | ||
680 | return error; | ||
681 | error = add_uevent_var(env, "RFKILL_TYPE=%s", | ||
682 | rfkill_get_type_str(rfkill->type)); | ||
683 | if (error) | ||
684 | return error; | ||
685 | spin_lock_irqsave(&rfkill->lock, flags); | ||
686 | state = rfkill->state; | ||
687 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
688 | error = add_uevent_var(env, "RFKILL_STATE=%d", | ||
689 | user_state_from_blocked(state)); | ||
690 | return error; | ||
691 | } | ||
692 | |||
693 | void rfkill_pause_polling(struct rfkill *rfkill) | ||
694 | { | ||
695 | BUG_ON(!rfkill); | ||
696 | |||
697 | if (!rfkill->ops->poll) | ||
698 | return; | ||
699 | |||
700 | cancel_delayed_work_sync(&rfkill->poll_work); | ||
701 | } | ||
702 | EXPORT_SYMBOL(rfkill_pause_polling); | ||
703 | |||
704 | void rfkill_resume_polling(struct rfkill *rfkill) | ||
705 | { | ||
706 | BUG_ON(!rfkill); | ||
707 | |||
708 | if (!rfkill->ops->poll) | ||
709 | return; | ||
710 | |||
711 | schedule_work(&rfkill->poll_work.work); | ||
712 | } | ||
713 | EXPORT_SYMBOL(rfkill_resume_polling); | ||
714 | |||
715 | static int rfkill_suspend(struct device *dev, pm_message_t state) | ||
716 | { | ||
717 | struct rfkill *rfkill = to_rfkill(dev); | ||
718 | |||
719 | rfkill_pause_polling(rfkill); | ||
720 | |||
721 | rfkill->suspended = true; | ||
722 | |||
723 | return 0; | ||
724 | } | ||
725 | |||
726 | static int rfkill_resume(struct device *dev) | ||
727 | { | ||
728 | struct rfkill *rfkill = to_rfkill(dev); | ||
729 | bool cur; | ||
730 | |||
731 | cur = !!(rfkill->state & RFKILL_BLOCK_SW); | ||
732 | rfkill_set_block(rfkill, cur); | ||
733 | |||
734 | rfkill->suspended = false; | ||
735 | |||
736 | rfkill_resume_polling(rfkill); | ||
737 | |||
738 | return 0; | ||
739 | } | ||
740 | |||
741 | static struct class rfkill_class = { | ||
742 | .name = "rfkill", | ||
743 | .dev_release = rfkill_release, | ||
744 | .dev_attrs = rfkill_dev_attrs, | ||
745 | .dev_uevent = rfkill_dev_uevent, | ||
746 | .suspend = rfkill_suspend, | ||
747 | .resume = rfkill_resume, | ||
748 | }; | ||
749 | |||
750 | bool rfkill_blocked(struct rfkill *rfkill) | ||
751 | { | ||
752 | unsigned long flags; | ||
753 | u32 state; | ||
754 | |||
755 | spin_lock_irqsave(&rfkill->lock, flags); | ||
756 | state = rfkill->state; | ||
757 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
758 | |||
759 | return !!(state & RFKILL_BLOCK_ANY); | ||
760 | } | ||
761 | EXPORT_SYMBOL(rfkill_blocked); | ||
762 | |||
763 | |||
764 | struct rfkill * __must_check rfkill_alloc(const char *name, | ||
765 | struct device *parent, | ||
766 | const enum rfkill_type type, | ||
767 | const struct rfkill_ops *ops, | ||
768 | void *ops_data) | ||
769 | { | ||
770 | struct rfkill *rfkill; | ||
771 | struct device *dev; | ||
772 | |||
773 | if (WARN_ON(!ops)) | ||
774 | return NULL; | ||
775 | |||
776 | if (WARN_ON(!ops->set_block)) | ||
777 | return NULL; | ||
778 | |||
779 | if (WARN_ON(!name)) | ||
780 | return NULL; | ||
781 | |||
782 | if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES)) | ||
783 | return NULL; | ||
784 | |||
785 | rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL); | ||
786 | if (!rfkill) | ||
787 | return NULL; | ||
788 | |||
789 | spin_lock_init(&rfkill->lock); | ||
790 | INIT_LIST_HEAD(&rfkill->node); | ||
791 | rfkill->type = type; | ||
792 | rfkill->name = name; | ||
793 | rfkill->ops = ops; | ||
794 | rfkill->data = ops_data; | ||
795 | |||
796 | dev = &rfkill->dev; | ||
797 | dev->class = &rfkill_class; | ||
798 | dev->parent = parent; | ||
799 | device_initialize(dev); | ||
800 | |||
801 | return rfkill; | ||
802 | } | ||
803 | EXPORT_SYMBOL(rfkill_alloc); | ||
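/*
 * Editor's note -- the expected allocation/registration lifecycle, as a
 * sketch with hypothetical "foo" driver names and trimmed error paths:
 *
 *	priv->rfkill = rfkill_alloc("foo-wlan", &pdev->dev,
 *				    RFKILL_TYPE_WLAN,
 *				    &foo_rfkill_ops, priv);
 *	if (!priv->rfkill)
 *		return -ENOMEM;
 *
 *	err = rfkill_register(priv->rfkill);
 *	if (err) {
 *		rfkill_destroy(priv->rfkill);
 *		return err;
 *	}
 *
 *	// ... and teardown, in reverse:
 *	rfkill_unregister(priv->rfkill);
 *	rfkill_destroy(priv->rfkill);
 */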
804 | |||
805 | static void rfkill_poll(struct work_struct *work) | ||
806 | { | ||
807 | struct rfkill *rfkill; | ||
808 | |||
809 | rfkill = container_of(work, struct rfkill, poll_work.work); | ||
810 | |||
811 | /* | ||
812 | * Poll hardware state -- driver will use one of the | ||
813 | * rfkill_set{,_hw,_sw}_state functions and use its | ||
814 | * return value to update the current status. | ||
815 | */ | ||
816 | rfkill->ops->poll(rfkill, rfkill->data); | ||
817 | |||
818 | schedule_delayed_work(&rfkill->poll_work, | ||
819 | round_jiffies_relative(POLL_INTERVAL)); | ||
820 | } | ||
821 | |||
822 | static void rfkill_uevent_work(struct work_struct *work) | ||
823 | { | ||
824 | struct rfkill *rfkill; | ||
825 | |||
826 | rfkill = container_of(work, struct rfkill, uevent_work); | ||
827 | |||
828 | mutex_lock(&rfkill_global_mutex); | ||
829 | rfkill_event(rfkill); | ||
830 | mutex_unlock(&rfkill_global_mutex); | ||
831 | } | ||
832 | |||
833 | static void rfkill_sync_work(struct work_struct *work) | ||
834 | { | ||
835 | struct rfkill *rfkill; | ||
836 | bool cur; | ||
837 | |||
838 | rfkill = container_of(work, struct rfkill, sync_work); | ||
839 | |||
840 | mutex_lock(&rfkill_global_mutex); | ||
841 | cur = rfkill_global_states[rfkill->type].cur; | ||
842 | rfkill_set_block(rfkill, cur); | ||
843 | mutex_unlock(&rfkill_global_mutex); | ||
844 | } | ||
845 | |||
846 | int __must_check rfkill_register(struct rfkill *rfkill) | ||
847 | { | ||
848 | static unsigned long rfkill_no; | ||
849 | struct device *dev = &rfkill->dev; | ||
850 | int error; | ||
851 | |||
852 | BUG_ON(!rfkill); | ||
853 | |||
854 | mutex_lock(&rfkill_global_mutex); | ||
855 | |||
856 | if (rfkill->registered) { | ||
857 | error = -EALREADY; | ||
858 | goto unlock; | ||
859 | } | ||
860 | |||
861 | rfkill->idx = rfkill_no; | ||
862 | dev_set_name(dev, "rfkill%lu", rfkill_no); | ||
863 | rfkill_no++; | ||
864 | |||
865 | list_add_tail(&rfkill->node, &rfkill_list); | ||
866 | |||
867 | error = device_add(dev); | ||
868 | if (error) | ||
869 | goto remove; | ||
870 | |||
871 | error = rfkill_led_trigger_register(rfkill); | ||
872 | if (error) | ||
873 | goto devdel; | ||
874 | |||
875 | rfkill->registered = true; | ||
876 | |||
877 | INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll); | ||
878 | INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work); | ||
879 | INIT_WORK(&rfkill->sync_work, rfkill_sync_work); | ||
880 | |||
881 | if (rfkill->ops->poll) | ||
882 | schedule_delayed_work(&rfkill->poll_work, | ||
883 | round_jiffies_relative(POLL_INTERVAL)); | ||
884 | |||
885 | if (!rfkill->persistent || rfkill_epo_lock_active) { | ||
886 | schedule_work(&rfkill->sync_work); | ||
887 | } else { | ||
888 | #ifdef CONFIG_RFKILL_INPUT | ||
889 | bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW); | ||
890 | |||
891 | if (!atomic_read(&rfkill_input_disabled)) | ||
892 | __rfkill_switch_all(rfkill->type, soft_blocked); | ||
893 | #endif | ||
894 | } | ||
895 | |||
896 | rfkill_send_events(rfkill, RFKILL_OP_ADD); | ||
897 | |||
898 | mutex_unlock(&rfkill_global_mutex); | ||
899 | return 0; | ||
900 | |||
901 | devdel: | ||
902 | device_del(&rfkill->dev); | ||
903 | remove: | ||
904 | list_del_init(&rfkill->node); | ||
905 | unlock: | ||
906 | mutex_unlock(&rfkill_global_mutex); | ||
907 | return error; | ||
908 | } | ||
909 | EXPORT_SYMBOL(rfkill_register); | ||
910 | |||
911 | void rfkill_unregister(struct rfkill *rfkill) | ||
912 | { | ||
913 | BUG_ON(!rfkill); | ||
914 | |||
915 | if (rfkill->ops->poll) | ||
916 | cancel_delayed_work_sync(&rfkill->poll_work); | ||
917 | |||
918 | cancel_work_sync(&rfkill->uevent_work); | ||
919 | cancel_work_sync(&rfkill->sync_work); | ||
920 | |||
921 | rfkill->registered = false; | ||
922 | |||
923 | device_del(&rfkill->dev); | ||
924 | |||
925 | mutex_lock(&rfkill_global_mutex); | ||
926 | rfkill_send_events(rfkill, RFKILL_OP_DEL); | ||
927 | list_del_init(&rfkill->node); | ||
928 | mutex_unlock(&rfkill_global_mutex); | ||
929 | |||
930 | rfkill_led_trigger_unregister(rfkill); | ||
931 | } | ||
932 | EXPORT_SYMBOL(rfkill_unregister); | ||
933 | |||
934 | void rfkill_destroy(struct rfkill *rfkill) | ||
935 | { | ||
936 | if (rfkill) | ||
937 | put_device(&rfkill->dev); | ||
938 | } | ||
939 | EXPORT_SYMBOL(rfkill_destroy); | ||
940 | |||
941 | static int rfkill_fop_open(struct inode *inode, struct file *file) | ||
942 | { | ||
943 | struct rfkill_data *data; | ||
944 | struct rfkill *rfkill; | ||
945 | struct rfkill_int_event *ev, *tmp; | ||
946 | |||
947 | data = kzalloc(sizeof(*data), GFP_KERNEL); | ||
948 | if (!data) | ||
949 | return -ENOMEM; | ||
950 | |||
951 | INIT_LIST_HEAD(&data->events); | ||
952 | mutex_init(&data->mtx); | ||
953 | init_waitqueue_head(&data->read_wait); | ||
954 | |||
955 | mutex_lock(&rfkill_global_mutex); | ||
956 | mutex_lock(&data->mtx); | ||
957 | /* | ||
958 | * start getting events from elsewhere but hold mtx to get | ||
959 | * startup events added first | ||
960 | */ | ||
961 | list_add(&data->list, &rfkill_fds); | ||
962 | |||
963 | list_for_each_entry(rfkill, &rfkill_list, node) { | ||
964 | ev = kzalloc(sizeof(*ev), GFP_KERNEL); | ||
965 | if (!ev) | ||
966 | goto free; | ||
967 | rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD); | ||
968 | list_add_tail(&ev->list, &data->events); | ||
969 | } | ||
970 | mutex_unlock(&data->mtx); | ||
971 | mutex_unlock(&rfkill_global_mutex); | ||
972 | |||
973 | file->private_data = data; | ||
974 | |||
975 | return nonseekable_open(inode, file); | ||
976 | |||
977 | free: | ||
978 | mutex_unlock(&data->mtx); | ||
979 | mutex_unlock(&rfkill_global_mutex); | ||
980 | mutex_destroy(&data->mtx); | ||
981 | list_for_each_entry_safe(ev, tmp, &data->events, list) | ||
982 | kfree(ev); | ||
983 | kfree(data); | ||
984 | return -ENOMEM; | ||
985 | } | ||
986 | |||
987 | static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait) | ||
988 | { | ||
989 | struct rfkill_data *data = file->private_data; | ||
990 | unsigned int res = POLLOUT | POLLWRNORM; | ||
991 | |||
992 | poll_wait(file, &data->read_wait, wait); | ||
993 | |||
994 | mutex_lock(&data->mtx); | ||
995 | if (!list_empty(&data->events)) | ||
996 | res = POLLIN | POLLRDNORM; | ||
997 | mutex_unlock(&data->mtx); | ||
998 | |||
999 | return res; | ||
1000 | } | ||
1001 | |||
1002 | static bool rfkill_readable(struct rfkill_data *data) | ||
1003 | { | ||
1004 | bool r; | ||
1005 | |||
1006 | mutex_lock(&data->mtx); | ||
1007 | r = !list_empty(&data->events); | ||
1008 | mutex_unlock(&data->mtx); | ||
1009 | |||
1010 | return r; | ||
1011 | } | ||
1012 | |||
1013 | static ssize_t rfkill_fop_read(struct file *file, char __user *buf, | ||
1014 | size_t count, loff_t *pos) | ||
1015 | { | ||
1016 | struct rfkill_data *data = file->private_data; | ||
1017 | struct rfkill_int_event *ev; | ||
1018 | unsigned long sz; | ||
1019 | int ret; | ||
1020 | |||
1021 | mutex_lock(&data->mtx); | ||
1022 | |||
1023 | while (list_empty(&data->events)) { | ||
1024 | if (file->f_flags & O_NONBLOCK) { | ||
1025 | ret = -EAGAIN; | ||
1026 | goto out; | ||
1027 | } | ||
1028 | mutex_unlock(&data->mtx); | ||
1029 | ret = wait_event_interruptible(data->read_wait, | ||
1030 | rfkill_readable(data)); | ||
1031 | mutex_lock(&data->mtx); | ||
1032 | |||
1033 | if (ret) | ||
1034 | goto out; | ||
1035 | } | ||
1036 | |||
1037 | ev = list_first_entry(&data->events, struct rfkill_int_event, | ||
1038 | list); | ||
1039 | |||
1040 | sz = min_t(unsigned long, sizeof(ev->ev), count); | ||
1041 | ret = sz; | ||
1042 | if (copy_to_user(buf, &ev->ev, sz)) | ||
1043 | ret = -EFAULT; | ||
1044 | |||
1045 | list_del(&ev->list); | ||
1046 | kfree(ev); | ||
1047 | out: | ||
1048 | mutex_unlock(&data->mtx); | ||
1049 | return ret; | ||
1050 | } | ||
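/*
 * Editor's note -- a minimal userspace consumer of the read() side (an
 * illustrative sketch; error handling trimmed):
 *
 *	#include <linux/rfkill.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct rfkill_event ev;
 *		int fd = open("/dev/rfkill", O_RDONLY);
 *
 *		while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
 *			printf("idx %u type %u op %u soft %u hard %u\n",
 *			       ev.idx, ev.type, ev.op, ev.soft, ev.hard);
 *		return 0;
 *	}
 */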
1051 | |||
1052 | static ssize_t rfkill_fop_write(struct file *file, const char __user *buf, | ||
1053 | size_t count, loff_t *pos) | ||
1054 | { | ||
1055 | struct rfkill *rfkill; | ||
1056 | struct rfkill_event ev; | ||
1057 | |||
1058 | /* we don't need the 'hard' variable but accept it */ | ||
1059 | if (count < sizeof(ev) - 1) | ||
1060 | return -EINVAL; | ||
1061 | |||
1062 | if (copy_from_user(&ev, buf, sizeof(ev) - 1)) | ||
1063 | return -EFAULT; | ||
1064 | |||
1065 | if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL) | ||
1066 | return -EINVAL; | ||
1067 | |||
1068 | if (ev.type >= NUM_RFKILL_TYPES) | ||
1069 | return -EINVAL; | ||
1070 | |||
1071 | mutex_lock(&rfkill_global_mutex); | ||
1072 | |||
1073 | if (ev.op == RFKILL_OP_CHANGE_ALL) { | ||
1074 | if (ev.type == RFKILL_TYPE_ALL) { | ||
1075 | enum rfkill_type i; | ||
1076 | for (i = 0; i < NUM_RFKILL_TYPES; i++) | ||
1077 | rfkill_global_states[i].cur = ev.soft; | ||
1078 | } else { | ||
1079 | rfkill_global_states[ev.type].cur = ev.soft; | ||
1080 | } | ||
1081 | } | ||
1082 | |||
1083 | list_for_each_entry(rfkill, &rfkill_list, node) { | ||
1084 | if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL) | ||
1085 | continue; | ||
1086 | |||
1087 | if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL) | ||
1088 | continue; | ||
1089 | |||
1090 | rfkill_set_block(rfkill, ev.soft); | ||
1091 | } | ||
1092 | mutex_unlock(&rfkill_global_mutex); | ||
1093 | |||
1094 | return count; | ||
1095 | } | ||
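/*
 * Editor's note -- the sizeof(ev) - 1 checks above deliberately let
 * writers omit the trailing 'hard' byte. A sketch that soft-blocks all
 * radios at once:
 *
 *	struct rfkill_event ev = {
 *		.op   = RFKILL_OP_CHANGE_ALL,
 *		.type = RFKILL_TYPE_ALL,
 *		.soft = 1,
 *	};
 *	int fd = open("/dev/rfkill", O_RDWR);
 *
 *	if (write(fd, &ev, sizeof(ev)) < 0)
 *		perror("write");
 */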
1096 | |||
1097 | static int rfkill_fop_release(struct inode *inode, struct file *file) | ||
1098 | { | ||
1099 | struct rfkill_data *data = file->private_data; | ||
1100 | struct rfkill_int_event *ev, *tmp; | ||
1101 | |||
1102 | mutex_lock(&rfkill_global_mutex); | ||
1103 | list_del(&data->list); | ||
1104 | mutex_unlock(&rfkill_global_mutex); | ||
1105 | |||
1106 | mutex_destroy(&data->mtx); | ||
1107 | list_for_each_entry_safe(ev, tmp, &data->events, list) | ||
1108 | kfree(ev); | ||
1109 | |||
1110 | #ifdef CONFIG_RFKILL_INPUT | ||
1111 | if (data->input_handler) | ||
1112 | if (atomic_dec_return(&rfkill_input_disabled) == 0) | ||
1113 | printk(KERN_DEBUG "rfkill: input handler enabled\n"); | ||
1114 | #endif | ||
1115 | |||
1116 | kfree(data); | ||
1117 | |||
1118 | return 0; | ||
1119 | } | ||
1120 | |||
1121 | #ifdef CONFIG_RFKILL_INPUT | ||
1122 | static long rfkill_fop_ioctl(struct file *file, unsigned int cmd, | ||
1123 | unsigned long arg) | ||
1124 | { | ||
1125 | struct rfkill_data *data = file->private_data; | ||
1126 | |||
1127 | if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC) | ||
1128 | return -ENOSYS; | ||
1129 | |||
1130 | if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT) | ||
1131 | return -ENOSYS; | ||
1132 | |||
1133 | mutex_lock(&data->mtx); | ||
1134 | |||
1135 | if (!data->input_handler) { | ||
1136 | if (atomic_inc_return(&rfkill_input_disabled) == 1) | ||
1137 | printk(KERN_DEBUG "rfkill: input handler disabled\n"); | ||
1138 | data->input_handler = true; | ||
1139 | } | ||
1140 | |||
1141 | mutex_unlock(&data->mtx); | ||
1142 | |||
1143 | return 0; | ||
1144 | } | ||
1145 | #endif | ||
1146 | |||
1147 | static const struct file_operations rfkill_fops = { | ||
1148 | .open = rfkill_fop_open, | ||
1149 | .read = rfkill_fop_read, | ||
1150 | .write = rfkill_fop_write, | ||
1151 | .poll = rfkill_fop_poll, | ||
1152 | .release = rfkill_fop_release, | ||
1153 | #ifdef CONFIG_RFKILL_INPUT | ||
1154 | .unlocked_ioctl = rfkill_fop_ioctl, | ||
1155 | .compat_ioctl = rfkill_fop_ioctl, | ||
1156 | #endif | ||
1157 | }; | ||
1158 | |||
1159 | static struct miscdevice rfkill_miscdev = { | ||
1160 | .name = "rfkill", | ||
1161 | .fops = &rfkill_fops, | ||
1162 | .minor = MISC_DYNAMIC_MINOR, | ||
1163 | }; | ||
1164 | |||
1165 | static int __init rfkill_init(void) | ||
1166 | { | ||
1167 | int error; | ||
1168 | int i; | ||
1169 | |||
1170 | for (i = 0; i < NUM_RFKILL_TYPES; i++) | ||
1171 | rfkill_global_states[i].cur = !rfkill_default_state; | ||
1172 | |||
1173 | error = class_register(&rfkill_class); | ||
1174 | if (error) | ||
1175 | goto out; | ||
1176 | |||
1177 | error = misc_register(&rfkill_miscdev); | ||
1178 | if (error) { | ||
1179 | class_unregister(&rfkill_class); | ||
1180 | goto out; | ||
1181 | } | ||
1182 | |||
1183 | #ifdef CONFIG_RFKILL_INPUT | ||
1184 | error = rfkill_handler_init(); | ||
1185 | if (error) { | ||
1186 | misc_deregister(&rfkill_miscdev); | ||
1187 | class_unregister(&rfkill_class); | ||
1188 | goto out; | ||
1189 | } | ||
1190 | #endif | ||
1191 | |||
1192 | out: | ||
1193 | return error; | ||
1194 | } | ||
1195 | subsys_initcall(rfkill_init); | ||
1196 | |||
1197 | static void __exit rfkill_exit(void) | ||
1198 | { | ||
1199 | #ifdef CONFIG_RFKILL_INPUT | ||
1200 | rfkill_handler_exit(); | ||
1201 | #endif | ||
1202 | misc_deregister(&rfkill_miscdev); | ||
1203 | class_unregister(&rfkill_class); | ||
1204 | } | ||
1205 | module_exit(rfkill_exit); | ||