Diffstat (limited to 'net/rfkill/rfkill-input.c')
 -rw-r--r--  net/rfkill/rfkill-input.c | 306
 1 file changed, 229 insertions(+), 77 deletions(-)
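The patch below replaces the per-type rfkill_task instances with one serialized task: key presses only set a bit in a per-type pending bitmap, SW_RFKILL_ALL and EPO become "global operations" that preempt any queued per-type work, and a single work handler drains everything under a mutex. As a rough orientation aid, here is a minimal, self-contained userspace sketch of that scheduling pattern. It is not the kernel code: the names, the plain bool arrays, and the single-threaded flow are simplifying assumptions (the driver uses bitops, a spinlock, a mutex, and a workqueue).

/*
 * Illustrative sketch of the pending-bitmap pattern used by the reworked
 * rfkill_task.  Userspace-only approximation, single-threaded.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum { TYPE_WLAN, TYPE_BLUETOOTH, TYPE_UWB, TYPE_WIMAX, TYPE_MAX };

static bool blocked[TYPE_MAX];     /* current per-type state */
static bool sw_pending[TYPE_MAX];  /* per-type "toggle requested" flags */
static bool global_op_pending;     /* a queued global op, e.g. EPO */

/* like rfkill_schedule_toggle(): only record the request */
static void schedule_toggle(int type)
{
	if (!global_op_pending)
		sw_pending[type] = true;
}

/* like rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO) */
static void schedule_epo(void)
{
	global_op_pending = true;
}

/* like rfkill_task_handler(): one serialized pass over queued requests */
static void process_requests(void)
{
	int i;

	if (global_op_pending) {
		/* a global op wins: drop queued per-type work, block all */
		memset(sw_pending, 0, sizeof(sw_pending));
		for (i = 0; i < TYPE_MAX; i++)
			blocked[i] = true;
		global_op_pending = false;
		return;
	}
	for (i = 0; i < TYPE_MAX; i++) {
		if (sw_pending[i]) {
			sw_pending[i] = false;
			blocked[i] = !blocked[i]; /* complement the state */
		}
	}
}

int main(void)
{
	schedule_toggle(TYPE_WLAN);          /* e.g. KEY_WLAN pressed */
	process_requests();
	printf("wlan blocked: %d\n", blocked[TYPE_WLAN]);       /* 1 */

	schedule_toggle(TYPE_BLUETOOTH);
	schedule_epo();                      /* EPO preempts the toggle */
	process_requests();
	printf("bt blocked:   %d\n", blocked[TYPE_BLUETOOTH]);  /* 1 */
	return 0;
}

The point of the pattern is that an emergency power off can never be starved by ordinary toggle requests: the worker handles a pending global operation first and discards the per-type queue.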
diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c
index bfdade72e066..7706541f9f79 100644
--- a/net/rfkill/rfkill-input.c
+++ b/net/rfkill/rfkill-input.c
@@ -24,138 +24,291 @@ MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>");
 MODULE_DESCRIPTION("Input layer to RF switch connector");
 MODULE_LICENSE("GPL");
 
+enum rfkill_input_master_mode {
+	RFKILL_INPUT_MASTER_DONOTHING = 0,
+	RFKILL_INPUT_MASTER_RESTORE = 1,
+	RFKILL_INPUT_MASTER_UNBLOCKALL = 2,
+	RFKILL_INPUT_MASTER_MAX,	/* marker */
+};
+
+static enum rfkill_input_master_mode rfkill_master_switch_mode =
+					RFKILL_INPUT_MASTER_UNBLOCKALL;
+module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0);
+MODULE_PARM_DESC(master_switch_mode,
+	"SW_RFKILL_ALL ON should: 0=do nothing; 1=restore; 2=unblock all");
+
+enum rfkill_global_sched_op {
+	RFKILL_GLOBAL_OP_EPO = 0,
+	RFKILL_GLOBAL_OP_RESTORE,
+	RFKILL_GLOBAL_OP_UNLOCK,
+	RFKILL_GLOBAL_OP_UNBLOCK,
+};
+
+/*
+ * Currently, the code marked with RFKILL_NEED_SWSET is inactive.
+ * If handling of EV_SW SW_WLAN/WWAN/BLUETOOTH/etc is needed in the
+ * future, when such events are added, that code will be necessary.
+ */
+
 struct rfkill_task {
 	struct work_struct work;
-	enum rfkill_type type;
-	struct mutex mutex; /* ensures that task is serialized */
-	spinlock_t lock; /* for accessing last and desired state */
-	unsigned long last; /* last schedule */
-	enum rfkill_state desired_state; /* on/off */
+
+	/* ensures that task is serialized */
+	struct mutex mutex;
+
+	/* protects everything below */
+	spinlock_t lock;
+
+	/* pending regular switch operations (1=pending) */
+	unsigned long sw_pending[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
+
+#ifdef RFKILL_NEED_SWSET
+	/* set operation pending (1=pending) */
+	unsigned long sw_setpending[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
+
+	/* desired state for pending set operation (1=unblock) */
+	unsigned long sw_newstate[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
+#endif
+
+	/* should the state be complemented (1=yes) */
+	unsigned long sw_togglestate[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
+
+	bool global_op_pending;
+	enum rfkill_global_sched_op op;
 };
 
-static void rfkill_task_handler(struct work_struct *work)
+static void __rfkill_handle_global_op(enum rfkill_global_sched_op op)
 {
-	struct rfkill_task *task = container_of(work, struct rfkill_task, work);
+	unsigned int i;
+
+	switch (op) {
+	case RFKILL_GLOBAL_OP_EPO:
+		rfkill_epo();
+		break;
+	case RFKILL_GLOBAL_OP_RESTORE:
+		rfkill_restore_states();
+		break;
+	case RFKILL_GLOBAL_OP_UNLOCK:
+		rfkill_remove_epo_lock();
+		break;
+	case RFKILL_GLOBAL_OP_UNBLOCK:
+		rfkill_remove_epo_lock();
+		for (i = 0; i < RFKILL_TYPE_MAX; i++)
+			rfkill_switch_all(i, RFKILL_STATE_UNBLOCKED);
+		break;
+	default:
+		/* memory corruption or bug, fail safely */
+		rfkill_epo();
+		WARN(1, "Unknown requested operation %d! "
+			"rfkill Emergency Power Off activated\n",
+			op);
+	}
+}
 
-	mutex_lock(&task->mutex);
+#ifdef RFKILL_NEED_SWSET
+static void __rfkill_handle_normal_op(const enum rfkill_type type,
+			const bool sp, const bool s, const bool c)
+{
+	enum rfkill_state state;
 
-	rfkill_switch_all(task->type, task->desired_state);
+	if (sp)
+		state = (s) ? RFKILL_STATE_UNBLOCKED :
+			      RFKILL_STATE_SOFT_BLOCKED;
+	else
+		state = rfkill_get_global_state(type);
 
-	mutex_unlock(&task->mutex);
+	if (c)
+		state = rfkill_state_complement(state);
+
+	rfkill_switch_all(type, state);
 }
+#else
+static void __rfkill_handle_normal_op(const enum rfkill_type type,
+			const bool c)
+{
+	enum rfkill_state state;
+
+	state = rfkill_get_global_state(type);
+	if (c)
+		state = rfkill_state_complement(state);
+
+	rfkill_switch_all(type, state);
+}
+#endif
 
-static void rfkill_task_epo_handler(struct work_struct *work)
+static void rfkill_task_handler(struct work_struct *work)
 {
-	rfkill_epo();
+	struct rfkill_task *task =
+			container_of(work, struct rfkill_task, work);
+	bool doit = true;
+
+	mutex_lock(&task->mutex);
+
+	spin_lock_irq(&task->lock);
+	while (doit) {
+		if (task->global_op_pending) {
+			enum rfkill_global_sched_op op = task->op;
+			task->global_op_pending = false;
+			memset(task->sw_pending, 0, sizeof(task->sw_pending));
+			spin_unlock_irq(&task->lock);
+
+			__rfkill_handle_global_op(op);
+
+			/* make sure we do at least one pass with
+			 * !task->global_op_pending */
+			spin_lock_irq(&task->lock);
+			continue;
+		} else if (!rfkill_is_epo_lock_active()) {
+			unsigned int i = 0;
+
+			while (!task->global_op_pending &&
+			       i < RFKILL_TYPE_MAX) {
+				if (test_and_clear_bit(i, task->sw_pending)) {
+					bool c;
+#ifdef RFKILL_NEED_SWSET
+					bool sp, s;
+					sp = test_and_clear_bit(i,
+						task->sw_setpending);
+					s = test_bit(i, task->sw_newstate);
+#endif
+					c = test_and_clear_bit(i,
+						task->sw_togglestate);
+					spin_unlock_irq(&task->lock);
+
+#ifdef RFKILL_NEED_SWSET
+					__rfkill_handle_normal_op(i, sp, s, c);
+#else
+					__rfkill_handle_normal_op(i, c);
+#endif
+
+					spin_lock_irq(&task->lock);
+				}
+				i++;
+			}
+		}
+		doit = task->global_op_pending;
+	}
+	spin_unlock_irq(&task->lock);
+
+	mutex_unlock(&task->mutex);
 }
 
-static DECLARE_WORK(epo_work, rfkill_task_epo_handler);
+static struct rfkill_task rfkill_task = {
+	.work = __WORK_INITIALIZER(rfkill_task.work,
+				rfkill_task_handler),
+	.mutex = __MUTEX_INITIALIZER(rfkill_task.mutex),
+	.lock = __SPIN_LOCK_UNLOCKED(rfkill_task.lock),
+};
 
-static void rfkill_schedule_epo(void)
+static void rfkill_schedule_global_op(enum rfkill_global_sched_op op)
 {
-	schedule_work(&epo_work);
+	unsigned long flags;
+
+	spin_lock_irqsave(&rfkill_task.lock, flags);
+	rfkill_task.op = op;
+	rfkill_task.global_op_pending = true;
+	schedule_work(&rfkill_task.work);
+	spin_unlock_irqrestore(&rfkill_task.lock, flags);
 }
 
-static void rfkill_schedule_set(struct rfkill_task *task,
+#ifdef RFKILL_NEED_SWSET
+/* Use this if you need to add EV_SW SW_WLAN/WWAN/BLUETOOTH/etc handling */
+
+static void rfkill_schedule_set(enum rfkill_type type,
 				enum rfkill_state desired_state)
 {
 	unsigned long flags;
 
-	if (unlikely(work_pending(&epo_work)))
+	if (rfkill_is_epo_lock_active())
 		return;
 
-	spin_lock_irqsave(&task->lock, flags);
-
-	if (time_after(jiffies, task->last + msecs_to_jiffies(200))) {
-		task->desired_state = desired_state;
-		task->last = jiffies;
-		schedule_work(&task->work);
+	spin_lock_irqsave(&rfkill_task.lock, flags);
+	if (!rfkill_task.global_op_pending) {
+		set_bit(type, rfkill_task.sw_pending);
+		set_bit(type, rfkill_task.sw_setpending);
+		clear_bit(type, rfkill_task.sw_togglestate);
+		if (desired_state)
+			set_bit(type, rfkill_task.sw_newstate);
+		else
+			clear_bit(type, rfkill_task.sw_newstate);
+		schedule_work(&rfkill_task.work);
 	}
-
-	spin_unlock_irqrestore(&task->lock, flags);
+	spin_unlock_irqrestore(&rfkill_task.lock, flags);
 }
+#endif
 
-static void rfkill_schedule_toggle(struct rfkill_task *task)
+static void rfkill_schedule_toggle(enum rfkill_type type)
 {
 	unsigned long flags;
 
-	if (unlikely(work_pending(&epo_work)))
+	if (rfkill_is_epo_lock_active())
 		return;
 
-	spin_lock_irqsave(&task->lock, flags);
-
-	if (time_after(jiffies, task->last + msecs_to_jiffies(200))) {
-		task->desired_state =
-				rfkill_state_complement(task->desired_state);
-		task->last = jiffies;
-		schedule_work(&task->work);
+	spin_lock_irqsave(&rfkill_task.lock, flags);
+	if (!rfkill_task.global_op_pending) {
+		set_bit(type, rfkill_task.sw_pending);
+		change_bit(type, rfkill_task.sw_togglestate);
+		schedule_work(&rfkill_task.work);
 	}
-
-	spin_unlock_irqrestore(&task->lock, flags);
+	spin_unlock_irqrestore(&rfkill_task.lock, flags);
 }
 
-#define DEFINE_RFKILL_TASK(n, t)			\
-	struct rfkill_task n = {			\
-		.work = __WORK_INITIALIZER(n.work,	\
-				rfkill_task_handler),	\
-		.type = t,				\
-		.mutex = __MUTEX_INITIALIZER(n.mutex),	\
-		.lock = __SPIN_LOCK_UNLOCKED(n.lock),	\
-		.desired_state = RFKILL_STATE_UNBLOCKED,	\
-	}
-
-static DEFINE_RFKILL_TASK(rfkill_wlan, RFKILL_TYPE_WLAN);
-static DEFINE_RFKILL_TASK(rfkill_bt, RFKILL_TYPE_BLUETOOTH);
-static DEFINE_RFKILL_TASK(rfkill_uwb, RFKILL_TYPE_UWB);
-static DEFINE_RFKILL_TASK(rfkill_wimax, RFKILL_TYPE_WIMAX);
-static DEFINE_RFKILL_TASK(rfkill_wwan, RFKILL_TYPE_WWAN);
-
 static void rfkill_schedule_evsw_rfkillall(int state)
 {
-	/* EVERY radio type. state != 0 means radios ON */
-	/* handle EPO (emergency power off) through shortcut */
 	if (state) {
-		rfkill_schedule_set(&rfkill_wwan,
-				    RFKILL_STATE_UNBLOCKED);
-		rfkill_schedule_set(&rfkill_wimax,
-				    RFKILL_STATE_UNBLOCKED);
-		rfkill_schedule_set(&rfkill_uwb,
-				    RFKILL_STATE_UNBLOCKED);
-		rfkill_schedule_set(&rfkill_bt,
-				    RFKILL_STATE_UNBLOCKED);
-		rfkill_schedule_set(&rfkill_wlan,
-				    RFKILL_STATE_UNBLOCKED);
+		switch (rfkill_master_switch_mode) {
+		case RFKILL_INPUT_MASTER_UNBLOCKALL:
+			rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNBLOCK);
+			break;
+		case RFKILL_INPUT_MASTER_RESTORE:
+			rfkill_schedule_global_op(RFKILL_GLOBAL_OP_RESTORE);
+			break;
+		case RFKILL_INPUT_MASTER_DONOTHING:
+			rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNLOCK);
+			break;
+		default:
+			/* memory corruption or driver bug! fail safely */
+			rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
+			WARN(1, "Unknown rfkill_master_switch_mode (%d), "
+				"driver bug or memory corruption detected!\n",
+				rfkill_master_switch_mode);
+			break;
+		}
 	} else
-		rfkill_schedule_epo();
+		rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
 }
 
 static void rfkill_event(struct input_handle *handle, unsigned int type,
 			unsigned int code, int data)
 {
 	if (type == EV_KEY && data == 1) {
+		enum rfkill_type t;
+
 		switch (code) {
 		case KEY_WLAN:
-			rfkill_schedule_toggle(&rfkill_wlan);
+			t = RFKILL_TYPE_WLAN;
 			break;
 		case KEY_BLUETOOTH:
-			rfkill_schedule_toggle(&rfkill_bt);
+			t = RFKILL_TYPE_BLUETOOTH;
 			break;
 		case KEY_UWB:
-			rfkill_schedule_toggle(&rfkill_uwb);
+			t = RFKILL_TYPE_UWB;
 			break;
 		case KEY_WIMAX:
-			rfkill_schedule_toggle(&rfkill_wimax);
+			t = RFKILL_TYPE_WIMAX;
 			break;
 		default:
-			break;
+			return;
 		}
+		rfkill_schedule_toggle(t);
+		return;
 	} else if (type == EV_SW) {
 		switch (code) {
 		case SW_RFKILL_ALL:
 			rfkill_schedule_evsw_rfkillall(data);
-			break;
+			return;
 		default:
-			break;
+			return;
 		}
 	}
 }
@@ -256,11 +409,9 @@ static struct input_handler rfkill_handler = {
 
 static int __init rfkill_handler_init(void)
 {
-	unsigned long last_run = jiffies - msecs_to_jiffies(500);
-	rfkill_wlan.last = last_run;
-	rfkill_bt.last = last_run;
-	rfkill_uwb.last = last_run;
-	rfkill_wimax.last = last_run;
+	if (rfkill_master_switch_mode >= RFKILL_INPUT_MASTER_MAX)
+		return -EINVAL;
+
 	return input_register_handler(&rfkill_handler);
 }
 
@@ -268,6 +419,7 @@ static void __exit rfkill_handler_exit(void)
 {
 	input_unregister_handler(&rfkill_handler);
 	flush_scheduled_work();
+	rfkill_remove_epo_lock();
 }
 
 module_init(rfkill_handler_init);