aboutsummaryrefslogtreecommitdiffstats
path: root/net/rfkill
diff options
context:
space:
mode:
authorJohannes Berg <johannes@sipsolutions.net>2009-06-02 07:01:38 -0400
committerJohn W. Linville <linville@tuxdriver.com>2009-06-03 14:06:14 -0400
commitc64fb01627e24725d1f9d535e4426475a4415753 (patch)
tree24b7e5caef5b1ddeaf479c98d56b2c38e25fca57 /net/rfkill
parent19d337dff95cbf76edd3ad95c0cee2732c3e1ec5 (diff)
rfkill: create useful userspace interface
The new code added by this patch will make rfkill create a misc character device /dev/rfkill that userspace can use to control rfkill soft blocks and get status of devices as well as events when the status changes. Using it is very simple -- when you open it you can read a number of times to get the initial state, and every further read blocks (you can poll) on getting the next event from the kernel. The same structure you read is also used when writing to it to change the soft block of a given device, all devices of a given type, or all devices. This also makes CONFIG_RFKILL_INPUT selectable again in order to be able to test without it present since its functionality can now be replaced by userspace entirely and distros and users may not want the input part of rfkill interfering with their userspace code. We will also write a userspace daemon to handle all that and consequently add the input code to the feature removal schedule. In order to have rfkilld support both kernels with and without CONFIG_RFKILL_INPUT (or new kernels after its eventual removal) we also add an ioctl (that only exists if rfkill-input is present) to disable rfkill-input. It is not very efficient, but at least gives the correct behaviour in all cases. Signed-off-by: Johannes Berg <johannes@sipsolutions.net> Acked-by: Marcel Holtmann <marcel@holtmann.org> Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'net/rfkill')
-rw-r--r--net/rfkill/Kconfig4
-rw-r--r--net/rfkill/core.c330
2 files changed, 327 insertions, 7 deletions
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index b47f72fae05d..fd7600d8ab14 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -18,7 +18,7 @@ config RFKILL_LEDS
18 default y 18 default y
19 19
20config RFKILL_INPUT 20config RFKILL_INPUT
21 bool 21 bool "RF switch input support"
22 depends on RFKILL 22 depends on RFKILL
23 depends on INPUT = y || RFKILL = INPUT 23 depends on INPUT = y || RFKILL = INPUT
24 default y 24 default y if !EMBEDDED
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 30a6f8d819b2..2230aa6b14f3 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -28,6 +28,10 @@
28#include <linux/mutex.h> 28#include <linux/mutex.h>
29#include <linux/rfkill.h> 29#include <linux/rfkill.h>
30#include <linux/spinlock.h> 30#include <linux/spinlock.h>
31#include <linux/miscdevice.h>
32#include <linux/wait.h>
33#include <linux/poll.h>
34#include <linux/fs.h>
31 35
32#include "rfkill.h" 36#include "rfkill.h"
33 37
@@ -49,6 +53,8 @@ struct rfkill {
49 53
50 unsigned long state; 54 unsigned long state;
51 55
56 u32 idx;
57
52 bool registered; 58 bool registered;
53 bool suspended; 59 bool suspended;
54 60
@@ -69,6 +75,18 @@ struct rfkill {
69}; 75};
70#define to_rfkill(d) container_of(d, struct rfkill, dev) 76#define to_rfkill(d) container_of(d, struct rfkill, dev)
71 77
78struct rfkill_int_event {
79 struct list_head list;
80 struct rfkill_event ev;
81};
82
83struct rfkill_data {
84 struct list_head list;
85 struct list_head events;
86 struct mutex mtx;
87 wait_queue_head_t read_wait;
88 bool input_handler;
89};
72 90
73 91
74MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>"); 92MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
@@ -90,6 +108,7 @@ MODULE_LICENSE("GPL");
90 */ 108 */
91static LIST_HEAD(rfkill_list); /* list of registered rf switches */ 109static LIST_HEAD(rfkill_list); /* list of registered rf switches */
92static DEFINE_MUTEX(rfkill_global_mutex); 110static DEFINE_MUTEX(rfkill_global_mutex);
111static LIST_HEAD(rfkill_fds); /* list of open fds of /dev/rfkill */
93 112
94static unsigned int rfkill_default_state = 1; 113static unsigned int rfkill_default_state = 1;
95module_param_named(default_state, rfkill_default_state, uint, 0444); 114module_param_named(default_state, rfkill_default_state, uint, 0444);
@@ -171,12 +190,48 @@ static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
171} 190}
172#endif /* CONFIG_RFKILL_LEDS */ 191#endif /* CONFIG_RFKILL_LEDS */
173 192
174static void rfkill_uevent(struct rfkill *rfkill) 193static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
194 enum rfkill_operation op)
195{
196 unsigned long flags;
197
198 ev->idx = rfkill->idx;
199 ev->type = rfkill->type;
200 ev->op = op;
201
202 spin_lock_irqsave(&rfkill->lock, flags);
203 ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
204 ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
205 RFKILL_BLOCK_SW_PREV));
206 spin_unlock_irqrestore(&rfkill->lock, flags);
207}
208
209static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
210{
211 struct rfkill_data *data;
212 struct rfkill_int_event *ev;
213
214 list_for_each_entry(data, &rfkill_fds, list) {
215 ev = kzalloc(sizeof(*ev), GFP_KERNEL);
216 if (!ev)
217 continue;
218 rfkill_fill_event(&ev->ev, rfkill, op);
219 mutex_lock(&data->mtx);
220 list_add_tail(&ev->list, &data->events);
221 mutex_unlock(&data->mtx);
222 wake_up_interruptible(&data->read_wait);
223 }
224}
225
226static void rfkill_event(struct rfkill *rfkill)
175{ 227{
176 if (!rfkill->registered || rfkill->suspended) 228 if (!rfkill->registered || rfkill->suspended)
177 return; 229 return;
178 230
179 kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE); 231 kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
232
233 /* also send event to /dev/rfkill */
234 rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
180} 235}
181 236
182static bool __rfkill_set_hw_state(struct rfkill *rfkill, 237static bool __rfkill_set_hw_state(struct rfkill *rfkill,
@@ -260,9 +315,12 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
260 spin_unlock_irqrestore(&rfkill->lock, flags); 315 spin_unlock_irqrestore(&rfkill->lock, flags);
261 316
262 rfkill_led_trigger_event(rfkill); 317 rfkill_led_trigger_event(rfkill);
263 rfkill_uevent(rfkill); 318 rfkill_event(rfkill);
264} 319}
265 320
321#ifdef CONFIG_RFKILL_INPUT
322static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);
323
266/** 324/**
267 * __rfkill_switch_all - Toggle state of all switches of given type 325 * __rfkill_switch_all - Toggle state of all switches of given type
268 * @type: type of interfaces to be affected 326 * @type: type of interfaces to be affected
@@ -299,6 +357,9 @@ static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
299 */ 357 */
300void rfkill_switch_all(enum rfkill_type type, bool blocked) 358void rfkill_switch_all(enum rfkill_type type, bool blocked)
301{ 359{
360 if (atomic_read(&rfkill_input_disabled))
361 return;
362
302 mutex_lock(&rfkill_global_mutex); 363 mutex_lock(&rfkill_global_mutex);
303 364
304 if (!rfkill_epo_lock_active) 365 if (!rfkill_epo_lock_active)
@@ -321,6 +382,9 @@ void rfkill_epo(void)
321 struct rfkill *rfkill; 382 struct rfkill *rfkill;
322 int i; 383 int i;
323 384
385 if (atomic_read(&rfkill_input_disabled))
386 return;
387
324 mutex_lock(&rfkill_global_mutex); 388 mutex_lock(&rfkill_global_mutex);
325 389
326 rfkill_epo_lock_active = true; 390 rfkill_epo_lock_active = true;
@@ -331,6 +395,7 @@ void rfkill_epo(void)
331 rfkill_global_states[i].def = rfkill_global_states[i].cur; 395 rfkill_global_states[i].def = rfkill_global_states[i].cur;
332 rfkill_global_states[i].cur = true; 396 rfkill_global_states[i].cur = true;
333 } 397 }
398
334 mutex_unlock(&rfkill_global_mutex); 399 mutex_unlock(&rfkill_global_mutex);
335} 400}
336 401
@@ -345,6 +410,9 @@ void rfkill_restore_states(void)
345{ 410{
346 int i; 411 int i;
347 412
413 if (atomic_read(&rfkill_input_disabled))
414 return;
415
348 mutex_lock(&rfkill_global_mutex); 416 mutex_lock(&rfkill_global_mutex);
349 417
350 rfkill_epo_lock_active = false; 418 rfkill_epo_lock_active = false;
@@ -361,6 +429,9 @@ void rfkill_restore_states(void)
361 */ 429 */
362void rfkill_remove_epo_lock(void) 430void rfkill_remove_epo_lock(void)
363{ 431{
432 if (atomic_read(&rfkill_input_disabled))
433 return;
434
364 mutex_lock(&rfkill_global_mutex); 435 mutex_lock(&rfkill_global_mutex);
365 rfkill_epo_lock_active = false; 436 rfkill_epo_lock_active = false;
366 mutex_unlock(&rfkill_global_mutex); 437 mutex_unlock(&rfkill_global_mutex);
@@ -391,9 +462,12 @@ bool rfkill_get_global_sw_state(const enum rfkill_type type)
391{ 462{
392 return rfkill_global_states[type].cur; 463 return rfkill_global_states[type].cur;
393} 464}
465#endif
394 466
395void rfkill_set_global_sw_state(const enum rfkill_type type, bool blocked) 467void rfkill_set_global_sw_state(const enum rfkill_type type, bool blocked)
396{ 468{
469 BUG_ON(type == RFKILL_TYPE_ALL);
470
397 mutex_lock(&rfkill_global_mutex); 471 mutex_lock(&rfkill_global_mutex);
398 472
399 /* don't allow unblock when epo */ 473 /* don't allow unblock when epo */
@@ -537,6 +611,15 @@ static ssize_t rfkill_type_show(struct device *dev,
537 return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type)); 611 return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
538} 612}
539 613
614static ssize_t rfkill_idx_show(struct device *dev,
615 struct device_attribute *attr,
616 char *buf)
617{
618 struct rfkill *rfkill = to_rfkill(dev);
619
620 return sprintf(buf, "%d\n", rfkill->idx);
621}
622
540static u8 user_state_from_blocked(unsigned long state) 623static u8 user_state_from_blocked(unsigned long state)
541{ 624{
542 if (state & RFKILL_BLOCK_HW) 625 if (state & RFKILL_BLOCK_HW)
@@ -594,6 +677,7 @@ static ssize_t rfkill_claim_store(struct device *dev,
594static struct device_attribute rfkill_dev_attrs[] = { 677static struct device_attribute rfkill_dev_attrs[] = {
595 __ATTR(name, S_IRUGO, rfkill_name_show, NULL), 678 __ATTR(name, S_IRUGO, rfkill_name_show, NULL),
596 __ATTR(type, S_IRUGO, rfkill_type_show, NULL), 679 __ATTR(type, S_IRUGO, rfkill_type_show, NULL),
680 __ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
597 __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store), 681 __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
598 __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store), 682 __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
599 __ATTR_NULL 683 __ATTR_NULL
@@ -708,7 +792,7 @@ struct rfkill * __must_check rfkill_alloc(const char *name,
708 if (WARN_ON(!name)) 792 if (WARN_ON(!name))
709 return NULL; 793 return NULL;
710 794
711 if (WARN_ON(type >= NUM_RFKILL_TYPES)) 795 if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
712 return NULL; 796 return NULL;
713 797
714 rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL); 798 rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
@@ -754,7 +838,9 @@ static void rfkill_uevent_work(struct work_struct *work)
754 838
755 rfkill = container_of(work, struct rfkill, uevent_work); 839 rfkill = container_of(work, struct rfkill, uevent_work);
756 840
757 rfkill_uevent(rfkill); 841 mutex_lock(&rfkill_global_mutex);
842 rfkill_event(rfkill);
843 mutex_unlock(&rfkill_global_mutex);
758} 844}
759 845
760static void rfkill_sync_work(struct work_struct *work) 846static void rfkill_sync_work(struct work_struct *work)
@@ -785,6 +871,7 @@ int __must_check rfkill_register(struct rfkill *rfkill)
785 goto unlock; 871 goto unlock;
786 } 872 }
787 873
874 rfkill->idx = rfkill_no;
788 dev_set_name(dev, "rfkill%lu", rfkill_no); 875 dev_set_name(dev, "rfkill%lu", rfkill_no);
789 rfkill_no++; 876 rfkill_no++;
790 877
@@ -819,6 +906,7 @@ int __must_check rfkill_register(struct rfkill *rfkill)
819 906
820 INIT_WORK(&rfkill->sync_work, rfkill_sync_work); 907 INIT_WORK(&rfkill->sync_work, rfkill_sync_work);
821 schedule_work(&rfkill->sync_work); 908 schedule_work(&rfkill->sync_work);
909 rfkill_send_events(rfkill, RFKILL_OP_ADD);
822 910
823 mutex_unlock(&rfkill_global_mutex); 911 mutex_unlock(&rfkill_global_mutex);
824 return 0; 912 return 0;
@@ -848,6 +936,7 @@ void rfkill_unregister(struct rfkill *rfkill)
848 device_del(&rfkill->dev); 936 device_del(&rfkill->dev);
849 937
850 mutex_lock(&rfkill_global_mutex); 938 mutex_lock(&rfkill_global_mutex);
939 rfkill_send_events(rfkill, RFKILL_OP_DEL);
851 list_del_init(&rfkill->node); 940 list_del_init(&rfkill->node);
852 mutex_unlock(&rfkill_global_mutex); 941 mutex_unlock(&rfkill_global_mutex);
853 942
@@ -862,6 +951,227 @@ void rfkill_destroy(struct rfkill *rfkill)
862} 951}
863EXPORT_SYMBOL(rfkill_destroy); 952EXPORT_SYMBOL(rfkill_destroy);
864 953
954static int rfkill_fop_open(struct inode *inode, struct file *file)
955{
956 struct rfkill_data *data;
957 struct rfkill *rfkill;
958 struct rfkill_int_event *ev, *tmp;
959
960 data = kzalloc(sizeof(*data), GFP_KERNEL);
961 if (!data)
962 return -ENOMEM;
963
964 INIT_LIST_HEAD(&data->events);
965 mutex_init(&data->mtx);
966 init_waitqueue_head(&data->read_wait);
967
968 mutex_lock(&rfkill_global_mutex);
969 mutex_lock(&data->mtx);
970 /*
971 * start getting events from elsewhere but hold mtx to get
972 * startup events added first
973 */
974 list_add(&data->list, &rfkill_fds);
975
976 list_for_each_entry(rfkill, &rfkill_list, node) {
977 ev = kzalloc(sizeof(*ev), GFP_KERNEL);
978 if (!ev)
979 goto free;
980 rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
981 list_add_tail(&ev->list, &data->events);
982 }
983 mutex_unlock(&data->mtx);
984 mutex_unlock(&rfkill_global_mutex);
985
986 file->private_data = data;
987
988 return nonseekable_open(inode, file);
989
990 free:
991 mutex_unlock(&data->mtx);
992 mutex_unlock(&rfkill_global_mutex);
993 mutex_destroy(&data->mtx);
994 list_for_each_entry_safe(ev, tmp, &data->events, list)
995 kfree(ev);
996 kfree(data);
997 return -ENOMEM;
998}
999
1000static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
1001{
1002 struct rfkill_data *data = file->private_data;
1003 unsigned int res = POLLOUT | POLLWRNORM;
1004
1005 poll_wait(file, &data->read_wait, wait);
1006
1007 mutex_lock(&data->mtx);
1008 if (!list_empty(&data->events))
1009 res = POLLIN | POLLRDNORM;
1010 mutex_unlock(&data->mtx);
1011
1012 return res;
1013}
1014
1015static bool rfkill_readable(struct rfkill_data *data)
1016{
1017 bool r;
1018
1019 mutex_lock(&data->mtx);
1020 r = !list_empty(&data->events);
1021 mutex_unlock(&data->mtx);
1022
1023 return r;
1024}
1025
1026static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
1027 size_t count, loff_t *pos)
1028{
1029 struct rfkill_data *data = file->private_data;
1030 struct rfkill_int_event *ev;
1031 unsigned long sz;
1032 int ret;
1033
1034 mutex_lock(&data->mtx);
1035
1036 while (list_empty(&data->events)) {
1037 if (file->f_flags & O_NONBLOCK) {
1038 ret = -EAGAIN;
1039 goto out;
1040 }
1041 mutex_unlock(&data->mtx);
1042 ret = wait_event_interruptible(data->read_wait,
1043 rfkill_readable(data));
1044 mutex_lock(&data->mtx);
1045
1046 if (ret)
1047 goto out;
1048 }
1049
1050 ev = list_first_entry(&data->events, struct rfkill_int_event,
1051 list);
1052
1053 sz = min_t(unsigned long, sizeof(ev->ev), count);
1054 ret = sz;
1055 if (copy_to_user(buf, &ev->ev, sz))
1056 ret = -EFAULT;
1057
1058 list_del(&ev->list);
1059 kfree(ev);
1060 out:
1061 mutex_unlock(&data->mtx);
1062 return ret;
1063}
1064
1065static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
1066 size_t count, loff_t *pos)
1067{
1068 struct rfkill *rfkill;
1069 struct rfkill_event ev;
1070
1071 /* we don't need the 'hard' variable but accept it */
1072 if (count < sizeof(ev) - 1)
1073 return -EINVAL;
1074
1075 if (copy_from_user(&ev, buf, sizeof(ev) - 1))
1076 return -EFAULT;
1077
1078 if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL)
1079 return -EINVAL;
1080
1081 if (ev.type >= NUM_RFKILL_TYPES)
1082 return -EINVAL;
1083
1084 mutex_lock(&rfkill_global_mutex);
1085
1086 if (ev.op == RFKILL_OP_CHANGE_ALL) {
1087 if (ev.type == RFKILL_TYPE_ALL) {
1088 enum rfkill_type i;
1089 for (i = 0; i < NUM_RFKILL_TYPES; i++)
1090 rfkill_global_states[i].cur = ev.soft;
1091 } else {
1092 rfkill_global_states[ev.type].cur = ev.soft;
1093 }
1094 }
1095
1096 list_for_each_entry(rfkill, &rfkill_list, node) {
1097 if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL)
1098 continue;
1099
1100 if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL)
1101 continue;
1102
1103 rfkill_set_block(rfkill, ev.soft);
1104 }
1105 mutex_unlock(&rfkill_global_mutex);
1106
1107 return count;
1108}
1109
1110static int rfkill_fop_release(struct inode *inode, struct file *file)
1111{
1112 struct rfkill_data *data = file->private_data;
1113 struct rfkill_int_event *ev, *tmp;
1114
1115 mutex_lock(&rfkill_global_mutex);
1116 list_del(&data->list);
1117 mutex_unlock(&rfkill_global_mutex);
1118
1119 mutex_destroy(&data->mtx);
1120 list_for_each_entry_safe(ev, tmp, &data->events, list)
1121 kfree(ev);
1122
1123#ifdef CONFIG_RFKILL_INPUT
1124 if (data->input_handler)
1125 atomic_dec(&rfkill_input_disabled);
1126#endif
1127
1128 kfree(data);
1129
1130 return 0;
1131}
1132
#ifdef CONFIG_RFKILL_INPUT
/*
 * The only ioctl is RFKILL_IOCTL_NOINPUT: disable the in-kernel
 * rfkill-input handler for as long as this fd stays open.
 */
static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct rfkill_data *data = file->private_data;

	if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC ||
	    _IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
		return -ENOSYS;

	mutex_lock(&data->mtx);
	if (!data->input_handler) {
		/* count the disable at most once per open fd */
		atomic_inc(&rfkill_input_disabled);
		data->input_handler = true;
	}
	mutex_unlock(&data->mtx);

	return 0;
}
#endif
1157
1158static const struct file_operations rfkill_fops = {
1159 .open = rfkill_fop_open,
1160 .read = rfkill_fop_read,
1161 .write = rfkill_fop_write,
1162 .poll = rfkill_fop_poll,
1163 .release = rfkill_fop_release,
1164#ifdef CONFIG_RFKILL_INPUT
1165 .unlocked_ioctl = rfkill_fop_ioctl,
1166 .compat_ioctl = rfkill_fop_ioctl,
1167#endif
1168};
1169
1170static struct miscdevice rfkill_miscdev = {
1171 .name = "rfkill",
1172 .fops = &rfkill_fops,
1173 .minor = MISC_DYNAMIC_MINOR,
1174};
865 1175
866static int __init rfkill_init(void) 1176static int __init rfkill_init(void)
867{ 1177{
@@ -875,10 +1185,19 @@ static int __init rfkill_init(void)
875 if (error) 1185 if (error)
876 goto out; 1186 goto out;
877 1187
1188 error = misc_register(&rfkill_miscdev);
1189 if (error) {
1190 class_unregister(&rfkill_class);
1191 goto out;
1192 }
1193
878#ifdef CONFIG_RFKILL_INPUT 1194#ifdef CONFIG_RFKILL_INPUT
879 error = rfkill_handler_init(); 1195 error = rfkill_handler_init();
880 if (error) 1196 if (error) {
1197 misc_deregister(&rfkill_miscdev);
881 class_unregister(&rfkill_class); 1198 class_unregister(&rfkill_class);
1199 goto out;
1200 }
882#endif 1201#endif
883 1202
884 out: 1203 out:
@@ -891,6 +1210,7 @@ static void __exit rfkill_exit(void)
891#ifdef CONFIG_RFKILL_INPUT 1210#ifdef CONFIG_RFKILL_INPUT
892 rfkill_handler_exit(); 1211 rfkill_handler_exit();
893#endif 1212#endif
1213 misc_deregister(&rfkill_miscdev);
894 class_unregister(&rfkill_class); 1214 class_unregister(&rfkill_class);
895} 1215}
896module_exit(rfkill_exit); 1216module_exit(rfkill_exit);