path: root/net/rfkill
Diffstat (limited to 'net/rfkill')
-rw-r--r--  net/rfkill/rfkill-input.c  306
-rw-r--r--  net/rfkill/rfkill-input.h    2
-rw-r--r--  net/rfkill/rfkill.c         46
3 files changed, 274 insertions, 80 deletions
diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c
index bfdade72e066..7706541f9f79 100644
--- a/net/rfkill/rfkill-input.c
+++ b/net/rfkill/rfkill-input.c
@@ -24,138 +24,291 @@ MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>");
 MODULE_DESCRIPTION("Input layer to RF switch connector");
 MODULE_LICENSE("GPL");
 
+enum rfkill_input_master_mode {
+	RFKILL_INPUT_MASTER_DONOTHING = 0,
+	RFKILL_INPUT_MASTER_RESTORE = 1,
+	RFKILL_INPUT_MASTER_UNBLOCKALL = 2,
+	RFKILL_INPUT_MASTER_MAX,	/* marker */
+};
+
+static enum rfkill_input_master_mode rfkill_master_switch_mode =
+					RFKILL_INPUT_MASTER_UNBLOCKALL;
+module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0);
+MODULE_PARM_DESC(master_switch_mode,
+	"SW_RFKILL_ALL ON should: 0=do nothing; 1=restore; 2=unblock all");
+
+enum rfkill_global_sched_op {
+	RFKILL_GLOBAL_OP_EPO = 0,
+	RFKILL_GLOBAL_OP_RESTORE,
+	RFKILL_GLOBAL_OP_UNLOCK,
+	RFKILL_GLOBAL_OP_UNBLOCK,
+};
+
+/*
+ * Currently, the code marked with RFKILL_NEED_SWSET is inactive.
+ * If handling of EV_SW SW_WLAN/WWAN/BLUETOOTH/etc is needed in the
+ * future, when such events are added, that code will be necessary.
+ */
+
 struct rfkill_task {
 	struct work_struct work;
-	enum rfkill_type type;
-	struct mutex mutex;	/* ensures that task is serialized */
-	spinlock_t lock;	/* for accessing last and desired state */
-	unsigned long last;	/* last schedule */
-	enum rfkill_state desired_state; /* on/off */
+
+	/* ensures that task is serialized */
+	struct mutex mutex;
+
+	/* protects everything below */
+	spinlock_t lock;
+
+	/* pending regular switch operations (1=pending) */
+	unsigned long sw_pending[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
+
+#ifdef RFKILL_NEED_SWSET
+	/* set operation pending (1=pending) */
+	unsigned long sw_setpending[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
+
+	/* desired state for pending set operation (1=unblock) */
+	unsigned long sw_newstate[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
+#endif
+
+	/* should the state be complemented (1=yes) */
+	unsigned long sw_togglestate[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
+
+	bool global_op_pending;
+	enum rfkill_global_sched_op op;
 };
 
-static void rfkill_task_handler(struct work_struct *work)
+static void __rfkill_handle_global_op(enum rfkill_global_sched_op op)
 {
-	struct rfkill_task *task = container_of(work, struct rfkill_task, work);
+	unsigned int i;
+
+	switch (op) {
+	case RFKILL_GLOBAL_OP_EPO:
+		rfkill_epo();
+		break;
+	case RFKILL_GLOBAL_OP_RESTORE:
+		rfkill_restore_states();
+		break;
+	case RFKILL_GLOBAL_OP_UNLOCK:
+		rfkill_remove_epo_lock();
+		break;
+	case RFKILL_GLOBAL_OP_UNBLOCK:
+		rfkill_remove_epo_lock();
+		for (i = 0; i < RFKILL_TYPE_MAX; i++)
+			rfkill_switch_all(i, RFKILL_STATE_UNBLOCKED);
+		break;
+	default:
+		/* memory corruption or bug, fail safely */
+		rfkill_epo();
+		WARN(1, "Unknown requested operation %d! "
+			"rfkill Emergency Power Off activated\n",
+			op);
+	}
+}
 
-	mutex_lock(&task->mutex);
+#ifdef RFKILL_NEED_SWSET
+static void __rfkill_handle_normal_op(const enum rfkill_type type,
+			const bool sp, const bool s, const bool c)
+{
+	enum rfkill_state state;
 
-	rfkill_switch_all(task->type, task->desired_state);
+	if (sp)
+		state = (s) ? RFKILL_STATE_UNBLOCKED :
+			      RFKILL_STATE_SOFT_BLOCKED;
+	else
+		state = rfkill_get_global_state(type);
 
-	mutex_unlock(&task->mutex);
+	if (c)
+		state = rfkill_state_complement(state);
+
+	rfkill_switch_all(type, state);
 }
+#else
+static void __rfkill_handle_normal_op(const enum rfkill_type type,
+			const bool c)
+{
+	enum rfkill_state state;
+
+	state = rfkill_get_global_state(type);
+	if (c)
+		state = rfkill_state_complement(state);
+
+	rfkill_switch_all(type, state);
+}
+#endif
 
-static void rfkill_task_epo_handler(struct work_struct *work)
+static void rfkill_task_handler(struct work_struct *work)
 {
-	rfkill_epo();
+	struct rfkill_task *task =
+			container_of(work, struct rfkill_task, work);
+	bool doit = true;
+
+	mutex_lock(&task->mutex);
+
+	spin_lock_irq(&task->lock);
+	while (doit) {
+		if (task->global_op_pending) {
+			enum rfkill_global_sched_op op = task->op;
+			task->global_op_pending = false;
+			memset(task->sw_pending, 0, sizeof(task->sw_pending));
+			spin_unlock_irq(&task->lock);
+
+			__rfkill_handle_global_op(op);
+
+			/* make sure we do at least one pass with
+			 * !task->global_op_pending */
+			spin_lock_irq(&task->lock);
+			continue;
+		} else if (!rfkill_is_epo_lock_active()) {
+			unsigned int i = 0;
+
+			while (!task->global_op_pending &&
+						i < RFKILL_TYPE_MAX) {
+				if (test_and_clear_bit(i, task->sw_pending)) {
+					bool c;
+#ifdef RFKILL_NEED_SWSET
+					bool sp, s;
+					sp = test_and_clear_bit(i,
+							task->sw_setpending);
+					s = test_bit(i, task->sw_newstate);
+#endif
+					c = test_and_clear_bit(i,
+							task->sw_togglestate);
+					spin_unlock_irq(&task->lock);
+
+#ifdef RFKILL_NEED_SWSET
+					__rfkill_handle_normal_op(i, sp, s, c);
+#else
+					__rfkill_handle_normal_op(i, c);
+#endif
+
+					spin_lock_irq(&task->lock);
+				}
+				i++;
+			}
+		}
+		doit = task->global_op_pending;
+	}
+	spin_unlock_irq(&task->lock);
+
+	mutex_unlock(&task->mutex);
 }
 
-static DECLARE_WORK(epo_work, rfkill_task_epo_handler);
+static struct rfkill_task rfkill_task = {
+	.work = __WORK_INITIALIZER(rfkill_task.work,
+				rfkill_task_handler),
+	.mutex = __MUTEX_INITIALIZER(rfkill_task.mutex),
+	.lock = __SPIN_LOCK_UNLOCKED(rfkill_task.lock),
+};
 
-static void rfkill_schedule_epo(void)
+static void rfkill_schedule_global_op(enum rfkill_global_sched_op op)
 {
-	schedule_work(&epo_work);
+	unsigned long flags;
+
+	spin_lock_irqsave(&rfkill_task.lock, flags);
+	rfkill_task.op = op;
+	rfkill_task.global_op_pending = true;
+	schedule_work(&rfkill_task.work);
+	spin_unlock_irqrestore(&rfkill_task.lock, flags);
 }
 
-static void rfkill_schedule_set(struct rfkill_task *task,
+#ifdef RFKILL_NEED_SWSET
+/* Use this if you need to add EV_SW SW_WLAN/WWAN/BLUETOOTH/etc handling */
+
+static void rfkill_schedule_set(enum rfkill_type type,
 			enum rfkill_state desired_state)
 {
 	unsigned long flags;
 
-	if (unlikely(work_pending(&epo_work)))
+	if (rfkill_is_epo_lock_active())
 		return;
 
-	spin_lock_irqsave(&task->lock, flags);
-
-	if (time_after(jiffies, task->last + msecs_to_jiffies(200))) {
-		task->desired_state = desired_state;
-		task->last = jiffies;
-		schedule_work(&task->work);
+	spin_lock_irqsave(&rfkill_task.lock, flags);
+	if (!rfkill_task.global_op_pending) {
+		set_bit(type, rfkill_task.sw_pending);
+		set_bit(type, rfkill_task.sw_setpending);
+		clear_bit(type, rfkill_task.sw_togglestate);
+		if (desired_state)
+			set_bit(type, rfkill_task.sw_newstate);
+		else
+			clear_bit(type, rfkill_task.sw_newstate);
+		schedule_work(&rfkill_task.work);
 	}
-
-	spin_unlock_irqrestore(&task->lock, flags);
+	spin_unlock_irqrestore(&rfkill_task.lock, flags);
 }
+#endif
 
-static void rfkill_schedule_toggle(struct rfkill_task *task)
+static void rfkill_schedule_toggle(enum rfkill_type type)
 {
 	unsigned long flags;
 
-	if (unlikely(work_pending(&epo_work)))
+	if (rfkill_is_epo_lock_active())
 		return;
 
-	spin_lock_irqsave(&task->lock, flags);
-
-	if (time_after(jiffies, task->last + msecs_to_jiffies(200))) {
-		task->desired_state =
-			rfkill_state_complement(task->desired_state);
-		task->last = jiffies;
-		schedule_work(&task->work);
+	spin_lock_irqsave(&rfkill_task.lock, flags);
+	if (!rfkill_task.global_op_pending) {
+		set_bit(type, rfkill_task.sw_pending);
+		change_bit(type, rfkill_task.sw_togglestate);
+		schedule_work(&rfkill_task.work);
 	}
-
-	spin_unlock_irqrestore(&task->lock, flags);
+	spin_unlock_irqrestore(&rfkill_task.lock, flags);
 }
 
-#define DEFINE_RFKILL_TASK(n, t)			\
-	struct rfkill_task n = {			\
-		.work = __WORK_INITIALIZER(n.work,	\
-				rfkill_task_handler),	\
-		.type = t,				\
-		.mutex = __MUTEX_INITIALIZER(n.mutex),	\
-		.lock = __SPIN_LOCK_UNLOCKED(n.lock),	\
-		.desired_state = RFKILL_STATE_UNBLOCKED, \
-	}
-
-static DEFINE_RFKILL_TASK(rfkill_wlan, RFKILL_TYPE_WLAN);
-static DEFINE_RFKILL_TASK(rfkill_bt, RFKILL_TYPE_BLUETOOTH);
-static DEFINE_RFKILL_TASK(rfkill_uwb, RFKILL_TYPE_UWB);
-static DEFINE_RFKILL_TASK(rfkill_wimax, RFKILL_TYPE_WIMAX);
-static DEFINE_RFKILL_TASK(rfkill_wwan, RFKILL_TYPE_WWAN);
-
 static void rfkill_schedule_evsw_rfkillall(int state)
 {
-	/* EVERY radio type. state != 0 means radios ON */
-	/* handle EPO (emergency power off) through shortcut */
 	if (state) {
-		rfkill_schedule_set(&rfkill_wwan,
-				    RFKILL_STATE_UNBLOCKED);
-		rfkill_schedule_set(&rfkill_wimax,
-				    RFKILL_STATE_UNBLOCKED);
-		rfkill_schedule_set(&rfkill_uwb,
-				    RFKILL_STATE_UNBLOCKED);
-		rfkill_schedule_set(&rfkill_bt,
-				    RFKILL_STATE_UNBLOCKED);
-		rfkill_schedule_set(&rfkill_wlan,
-				    RFKILL_STATE_UNBLOCKED);
+		switch (rfkill_master_switch_mode) {
+		case RFKILL_INPUT_MASTER_UNBLOCKALL:
+			rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNBLOCK);
+			break;
+		case RFKILL_INPUT_MASTER_RESTORE:
+			rfkill_schedule_global_op(RFKILL_GLOBAL_OP_RESTORE);
+			break;
+		case RFKILL_INPUT_MASTER_DONOTHING:
+			rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNLOCK);
+			break;
+		default:
+			/* memory corruption or driver bug! fail safely */
+			rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
+			WARN(1, "Unknown rfkill_master_switch_mode (%d), "
+				"driver bug or memory corruption detected!\n",
+				rfkill_master_switch_mode);
+			break;
+		}
 	} else
-		rfkill_schedule_epo();
+		rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
 }
 
 static void rfkill_event(struct input_handle *handle, unsigned int type,
 			unsigned int code, int data)
 {
 	if (type == EV_KEY && data == 1) {
+		enum rfkill_type t;
+
 		switch (code) {
 		case KEY_WLAN:
-			rfkill_schedule_toggle(&rfkill_wlan);
+			t = RFKILL_TYPE_WLAN;
 			break;
 		case KEY_BLUETOOTH:
-			rfkill_schedule_toggle(&rfkill_bt);
+			t = RFKILL_TYPE_BLUETOOTH;
 			break;
 		case KEY_UWB:
-			rfkill_schedule_toggle(&rfkill_uwb);
+			t = RFKILL_TYPE_UWB;
 			break;
 		case KEY_WIMAX:
-			rfkill_schedule_toggle(&rfkill_wimax);
+			t = RFKILL_TYPE_WIMAX;
 			break;
 		default:
-			break;
+			return;
 		}
+		rfkill_schedule_toggle(t);
+		return;
 	} else if (type == EV_SW) {
 		switch (code) {
 		case SW_RFKILL_ALL:
 			rfkill_schedule_evsw_rfkillall(data);
-			break;
+			return;
 		default:
-			break;
+			return;
 		}
 	}
 }
@@ -256,11 +409,9 @@ static struct input_handler rfkill_handler = {
 
 static int __init rfkill_handler_init(void)
 {
-	unsigned long last_run = jiffies - msecs_to_jiffies(500);
-	rfkill_wlan.last = last_run;
-	rfkill_bt.last = last_run;
-	rfkill_uwb.last = last_run;
-	rfkill_wimax.last = last_run;
+	if (rfkill_master_switch_mode >= RFKILL_INPUT_MASTER_MAX)
+		return -EINVAL;
+
 	return input_register_handler(&rfkill_handler);
 }
 
@@ -268,6 +419,7 @@ static void __exit rfkill_handler_exit(void)
 {
 	input_unregister_handler(&rfkill_handler);
 	flush_scheduled_work();
+	rfkill_remove_epo_lock();
 }
 
 module_init(rfkill_handler_init);
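
The rfkill-input.c changes above replace the five per-type work structs with a single deferred task: per-type toggle requests are queued in bitmaps, and a scheduled global operation (EPO, restore, unlock, unblock) preempts and discards them. The following is a minimal userspace sketch of that decision logic only, not kernel code; the stubs apply_global_op() and apply_toggle() are invented for illustration, and the spinlock/mutex protection of the real handler is omitted.

/* Userspace model of the rfkill_task scheduling logic introduced above.
 * Hypothetical stubs stand in for rfkill_epo()/rfkill_switch_all() etc. */
#include <stdbool.h>
#include <stdio.h>

enum type { WLAN, BLUETOOTH, UWB, WIMAX, WWAN, TYPE_MAX };
enum global_op { OP_NONE, OP_EPO, OP_RESTORE, OP_UNLOCK, OP_UNBLOCK };

static bool sw_pending[TYPE_MAX];     /* per-type toggle requested */
static bool sw_togglestate[TYPE_MAX]; /* complement the current state? */
static enum global_op pending_op;     /* global op preempts per-type work */
static bool epo_lock_active;

static void apply_global_op(enum global_op op)	/* stub */
{
	printf("global op %d\n", op);
	/* only EPO leaves the lock engaged; the other ops release it */
	epo_lock_active = (op == OP_EPO);
}

static void apply_toggle(enum type t, bool complement)	/* stub */
{
	printf("toggle type %d complement=%d\n", t, complement);
}

static void task_handler(void)
{
	/* A global op clears all per-type requests, exactly like the
	 * memset(task->sw_pending, ...) in rfkill_task_handler(). */
	if (pending_op != OP_NONE) {
		enum global_op op = pending_op;
		pending_op = OP_NONE;
		for (int i = 0; i < TYPE_MAX; i++)
			sw_pending[i] = false;
		apply_global_op(op);
	}

	/* Per-type work only runs while no EPO lock is active. */
	if (epo_lock_active)
		return;
	for (int i = 0; i < TYPE_MAX; i++) {
		if (sw_pending[i]) {
			sw_pending[i] = false;
			apply_toggle(i, sw_togglestate[i]);
			sw_togglestate[i] = false;
		}
	}
}

int main(void)
{
	sw_pending[WLAN] = sw_togglestate[WLAN] = true;	/* KEY_WLAN pressed */
	pending_op = OP_EPO;				/* SW_RFKILL_ALL went off */
	task_handler();					/* EPO wins, toggle dropped */
	return 0;
}

The point of the design is that an emergency power off can never be starved by queued per-type toggles: the global operation clears the pending bitmap before anything else runs.
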
diff --git a/net/rfkill/rfkill-input.h b/net/rfkill/rfkill-input.h
index d1e03e85cbed..fe8df6b5b935 100644
--- a/net/rfkill/rfkill-input.h
+++ b/net/rfkill/rfkill-input.h
@@ -14,6 +14,8 @@
 void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state);
 void rfkill_epo(void);
 void rfkill_restore_states(void);
+void rfkill_remove_epo_lock(void);
+bool rfkill_is_epo_lock_active(void);
 enum rfkill_state rfkill_get_global_state(const enum rfkill_type type);
 
 #endif /* __RFKILL_INPUT_H */
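
The two declarations added above give rfkill-input access to the new EPO lock: rfkill_epo() engages it, rfkill_restore_states() and rfkill_remove_epo_lock() release it, and rfkill_is_epo_lock_active() lets the input handler skip scheduling while it is held. Below is a minimal single-file model of that lifecycle, with the per-radio bookkeeping reduced to prints; the boolean guard stands in for rfkill_epo_lock_active from rfkill.c further down.

/* Minimal model of the EPO lock lifecycle used by rfkill-input.
 * Everything except the boolean guard is a placeholder. */
#include <stdbool.h>
#include <stdio.h>

static bool epo_lock_active;

static void epo(void)			/* like rfkill_epo(): block all, take lock */
{
	epo_lock_active = true;
	printf("all radios soft-blocked\n");
}

static void remove_epo_lock(void)	/* like rfkill_remove_epo_lock(): drop lock only */
{
	epo_lock_active = false;
}

static void switch_all(bool unblock)	/* gated like rfkill_switch_all() */
{
	if (epo_lock_active)
		return;			/* does nothing while the lock is held */
	printf("switch all radios: %s\n", unblock ? "unblock" : "block");
}

int main(void)
{
	epo();			/* SW_RFKILL_ALL went off */
	switch_all(true);	/* ignored: EPO lock is active */
	remove_epo_lock();	/* SW_RFKILL_ALL back on, master mode "do nothing" */
	switch_all(true);	/* now takes effect */
	return 0;
}
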
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index fdf87d2ab25e..e348eab756f3 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -51,6 +51,7 @@ struct rfkill_gsw_state {
 
 static struct rfkill_gsw_state rfkill_global_states[RFKILL_TYPE_MAX];
 static unsigned long rfkill_states_lockdflt[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
+static bool rfkill_epo_lock_active;
 
 static BLOCKING_NOTIFIER_HEAD(rfkill_notifier_list);
 
@@ -264,11 +265,14 @@ static void __rfkill_switch_all(const enum rfkill_type type,
  *
  * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state).
  * Please refer to __rfkill_switch_all() for details.
+ *
+ * Does nothing if the EPO lock is active.
  */
 void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state)
 {
 	mutex_lock(&rfkill_global_mutex);
-	__rfkill_switch_all(type, state);
+	if (!rfkill_epo_lock_active)
+		__rfkill_switch_all(type, state);
 	mutex_unlock(&rfkill_global_mutex);
 }
 EXPORT_SYMBOL(rfkill_switch_all);
@@ -289,6 +293,7 @@ void rfkill_epo(void)
 
 	mutex_lock(&rfkill_global_mutex);
 
+	rfkill_epo_lock_active = true;
 	list_for_each_entry(rfkill, &rfkill_list, node) {
 		mutex_lock(&rfkill->mutex);
 		rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1);
@@ -317,6 +322,7 @@ void rfkill_restore_states(void)
 
 	mutex_lock(&rfkill_global_mutex);
 
+	rfkill_epo_lock_active = false;
 	for (i = 0; i < RFKILL_TYPE_MAX; i++)
 		__rfkill_switch_all(i, rfkill_global_states[i].default_state);
 	mutex_unlock(&rfkill_global_mutex);
@@ -324,6 +330,35 @@ void rfkill_restore_states(void)
 EXPORT_SYMBOL_GPL(rfkill_restore_states);
 
 /**
+ * rfkill_remove_epo_lock - unlock state changes
+ *
+ * Used by rfkill-input to manually unlock state changes, when
+ * the EPO switch is deactivated.
+ */
+void rfkill_remove_epo_lock(void)
+{
+	mutex_lock(&rfkill_global_mutex);
+	rfkill_epo_lock_active = false;
+	mutex_unlock(&rfkill_global_mutex);
+}
+EXPORT_SYMBOL_GPL(rfkill_remove_epo_lock);
+
+/**
+ * rfkill_is_epo_lock_active - returns true when EPO is active
+ *
+ * Returns 0 (false) if there is NOT an active EPO condition,
+ * and 1 (true) if there is an active EPO condition, which
+ * locks all radios in one of the BLOCKED states.
+ *
+ * Can be called in atomic context.
+ */
+bool rfkill_is_epo_lock_active(void)
+{
+	return rfkill_epo_lock_active;
+}
+EXPORT_SYMBOL_GPL(rfkill_is_epo_lock_active);
+
+/**
  * rfkill_get_global_state - returns global state for a type
  * @type: the type to get the global state of
  *
@@ -447,7 +482,12 @@ static ssize_t rfkill_state_store(struct device *dev,
 	error = mutex_lock_killable(&rfkill->mutex);
 	if (error)
 		return error;
-	error = rfkill_toggle_radio(rfkill, state, 0);
+
+	if (!rfkill_epo_lock_active)
+		error = rfkill_toggle_radio(rfkill, state, 0);
+	else
+		error = -EPERM;
+
 	mutex_unlock(&rfkill->mutex);
 
 	return error ? error : count;
@@ -491,7 +531,7 @@ static ssize_t rfkill_claim_store(struct device *dev,
 		return error;
 
 	if (rfkill->user_claim != claim) {
-		if (!claim) {
+		if (!claim && !rfkill_epo_lock_active) {
 			mutex_lock(&rfkill->mutex);
 			rfkill_toggle_radio(rfkill,
 				rfkill_global_states[rfkill->type].current_state,
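
The rfkill.c hunks above also gate the userspace interfaces on the same flag: writes to the sysfs state attribute fail with -EPERM while the EPO lock is held, and releasing a claim only restores the global state when the lock is not active. A compact sketch of the state_store decision, with rfkill_toggle_radio() replaced by a hypothetical stub:

/* Model of the new rfkill_state_store() gating under the EPO lock. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool epo_lock_active = true;	/* pretend EPO is in effect */

static int toggle_radio(int state)	/* stand-in for rfkill_toggle_radio() */
{
	printf("radio state -> %d\n", state);
	return 0;
}

static int state_store(int state)
{
	/* mirrors: if (!rfkill_epo_lock_active) ... else error = -EPERM; */
	if (!epo_lock_active)
		return toggle_radio(state);
	return -EPERM;
}

int main(void)
{
	printf("write while locked: %d\n", state_store(1));	/* -EPERM */
	epo_lock_active = false;
	printf("write after unlock: %d\n", state_store(1));	/* 0 */
	return 0;
}
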