Diffstat (limited to 'net/rfkill')
-rw-r--r--  net/rfkill/rfkill-input.c |  98
-rw-r--r--  net/rfkill/rfkill-input.h |   1
-rw-r--r--  net/rfkill/rfkill.c       | 314
3 files changed, 346 insertions, 67 deletions
diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c
index e4b051dbed61..8aa822730145 100644
--- a/net/rfkill/rfkill-input.c
+++ b/net/rfkill/rfkill-input.c
@@ -30,39 +30,62 @@ struct rfkill_task {
         spinlock_t lock; /* for accessing last and desired state */
         unsigned long last; /* last schedule */
         enum rfkill_state desired_state; /* on/off */
-        enum rfkill_state current_state; /* on/off */
 };
 
 static void rfkill_task_handler(struct work_struct *work)
 {
         struct rfkill_task *task = container_of(work, struct rfkill_task, work);
-        enum rfkill_state state;
 
         mutex_lock(&task->mutex);
 
-        /*
-         * Use temp variable to fetch desired state to keep it
-         * consistent even if rfkill_schedule_toggle() runs in
-         * another thread or interrupts us.
-         */
-        state = task->desired_state;
+        rfkill_switch_all(task->type, task->desired_state);
 
-        if (state != task->current_state) {
-                rfkill_switch_all(task->type, state);
-                task->current_state = state;
+        mutex_unlock(&task->mutex);
+}
+
+static void rfkill_task_epo_handler(struct work_struct *work)
+{
+        rfkill_epo();
+}
+
+static DECLARE_WORK(epo_work, rfkill_task_epo_handler);
+
+static void rfkill_schedule_epo(void)
+{
+        schedule_work(&epo_work);
+}
+
+static void rfkill_schedule_set(struct rfkill_task *task,
+                                enum rfkill_state desired_state)
+{
+        unsigned long flags;
+
+        if (unlikely(work_pending(&epo_work)))
+                return;
+
+        spin_lock_irqsave(&task->lock, flags);
+
+        if (time_after(jiffies, task->last + msecs_to_jiffies(200))) {
+                task->desired_state = desired_state;
+                task->last = jiffies;
+                schedule_work(&task->work);
+        }
 
-        mutex_unlock(&task->mutex);
+        spin_unlock_irqrestore(&task->lock, flags);
 }
 
 static void rfkill_schedule_toggle(struct rfkill_task *task)
 {
         unsigned long flags;
 
+        if (unlikely(work_pending(&epo_work)))
+                return;
+
         spin_lock_irqsave(&task->lock, flags);
 
         if (time_after(jiffies, task->last + msecs_to_jiffies(200))) {
-                task->desired_state = !task->desired_state;
+                task->desired_state =
+                                rfkill_state_complement(task->desired_state);
                 task->last = jiffies;
                 schedule_work(&task->work);
         }
@@ -70,26 +93,26 @@ static void rfkill_schedule_toggle(struct rfkill_task *task)
         spin_unlock_irqrestore(&task->lock, flags);
 }
 
 #define DEFINE_RFKILL_TASK(n, t)                                \
         struct rfkill_task n = {                                \
                 .work = __WORK_INITIALIZER(n.work,              \
                                 rfkill_task_handler),           \
                 .type = t,                                      \
                 .mutex = __MUTEX_INITIALIZER(n.mutex),          \
                 .lock = __SPIN_LOCK_UNLOCKED(n.lock),           \
-                .desired_state = RFKILL_STATE_ON,               \
-                .current_state = RFKILL_STATE_ON,               \
+                .desired_state = RFKILL_STATE_UNBLOCKED,        \
         }
 
 static DEFINE_RFKILL_TASK(rfkill_wlan, RFKILL_TYPE_WLAN);
 static DEFINE_RFKILL_TASK(rfkill_bt, RFKILL_TYPE_BLUETOOTH);
 static DEFINE_RFKILL_TASK(rfkill_uwb, RFKILL_TYPE_UWB);
 static DEFINE_RFKILL_TASK(rfkill_wimax, RFKILL_TYPE_WIMAX);
+static DEFINE_RFKILL_TASK(rfkill_wwan, RFKILL_TYPE_WWAN);
 
 static void rfkill_event(struct input_handle *handle, unsigned int type,
-                        unsigned int code, int down)
+                        unsigned int code, int data)
 {
-        if (type == EV_KEY && down == 1) {
+        if (type == EV_KEY && data == 1) {
                 switch (code) {
                 case KEY_WLAN:
                         rfkill_schedule_toggle(&rfkill_wlan);
@@ -106,6 +129,28 @@ static void rfkill_event(struct input_handle *handle, unsigned int type,
                 default:
                         break;
                 }
+        } else if (type == EV_SW) {
+                switch (code) {
+                case SW_RFKILL_ALL:
+                        /* EVERY radio type. data != 0 means radios ON */
+                        /* handle EPO (emergency power off) through shortcut */
+                        if (data) {
+                                rfkill_schedule_set(&rfkill_wwan,
+                                                    RFKILL_STATE_UNBLOCKED);
+                                rfkill_schedule_set(&rfkill_wimax,
+                                                    RFKILL_STATE_UNBLOCKED);
+                                rfkill_schedule_set(&rfkill_uwb,
+                                                    RFKILL_STATE_UNBLOCKED);
+                                rfkill_schedule_set(&rfkill_bt,
+                                                    RFKILL_STATE_UNBLOCKED);
+                                rfkill_schedule_set(&rfkill_wlan,
+                                                    RFKILL_STATE_UNBLOCKED);
+                        } else
+                                rfkill_schedule_epo();
+                        break;
+                default:
+                        break;
+                }
         }
 }
 
@@ -168,6 +213,11 @@ static const struct input_device_id rfkill_ids[] = {
                 .evbit = { BIT_MASK(EV_KEY) },
                 .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
         },
+        {
+                .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
+                .evbit = { BIT(EV_SW) },
+                .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
+        },
         { }
 };
 
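The rfkill-input changes above add handling of EV_SW/SW_RFKILL_ALL events alongside the existing EV_KEY hotkeys: a non-zero switch value schedules every radio type to RFKILL_STATE_UNBLOCKED, while zero takes the emergency-power-off path. As an illustration of the producer side only (not part of this patch; the "demo" identifiers are hypothetical), an input driver with a radio-kill slider could feed this handler roughly as follows:

/* Illustrative sketch: how an input driver might emit the events that
 * rfkill-input now consumes. */
#include <linux/input.h>

static struct input_dev *demo_input; /* from input_allocate_device(), with the
                                      * EV_SW/SW_RFKILL_ALL and EV_KEY/KEY_WLAN
                                      * capability bits set before registration */

static void demo_report_rfkill_slider(int radios_on)
{
        /* a value != 0 reaches rfkill_event() as "radios ON" */
        input_report_switch(demo_input, SW_RFKILL_ALL, radios_on);
        input_sync(demo_input);
}

static void demo_report_wlan_hotkey(void)
{
        /* EV_KEY with value 1 is what triggers rfkill_schedule_toggle() */
        input_report_key(demo_input, KEY_WLAN, 1);
        input_report_key(demo_input, KEY_WLAN, 0);
        input_sync(demo_input);
}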
diff --git a/net/rfkill/rfkill-input.h b/net/rfkill/rfkill-input.h
index 4dae5006fc77..f63d05045685 100644
--- a/net/rfkill/rfkill-input.h
+++ b/net/rfkill/rfkill-input.h
@@ -12,5 +12,6 @@
 #define __RFKILL_INPUT_H
 
 void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state);
+void rfkill_epo(void);
 
 #endif /* __RFKILL_INPUT_H */
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index 4e10a95de832..7a560b785097 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -39,8 +39,56 @@ MODULE_LICENSE("GPL");
 static LIST_HEAD(rfkill_list); /* list of registered rf switches */
 static DEFINE_MUTEX(rfkill_mutex);
 
+static unsigned int rfkill_default_state = RFKILL_STATE_UNBLOCKED;
+module_param_named(default_state, rfkill_default_state, uint, 0444);
+MODULE_PARM_DESC(default_state,
+                 "Default initial state for all radio types, 0 = radio off");
+
 static enum rfkill_state rfkill_states[RFKILL_TYPE_MAX];
 
+static BLOCKING_NOTIFIER_HEAD(rfkill_notifier_list);
+
+
+/**
+ * register_rfkill_notifier - Add notifier to rfkill notifier chain
+ * @nb: pointer to the new entry to add to the chain
+ *
+ * See blocking_notifier_chain_register() for return value and further
+ * observations.
+ *
+ * Adds a notifier to the rfkill notifier chain.  The chain will be
+ * called with a pointer to the relevant rfkill structure as a parameter,
+ * refer to include/linux/rfkill.h for the possible events.
+ *
+ * Notifiers added to this chain are to always return NOTIFY_DONE.  This
+ * chain is a blocking notifier chain: notifiers can sleep.
+ *
+ * Calls to this chain may have been done through a workqueue.  One must
+ * assume unordered asynchronous behaviour, there is no way to know if
+ * actions related to the event that generated the notification have been
+ * carried out already.
+ */
+int register_rfkill_notifier(struct notifier_block *nb)
+{
+        return blocking_notifier_chain_register(&rfkill_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(register_rfkill_notifier);
+
+/**
+ * unregister_rfkill_notifier - remove notifier from rfkill notifier chain
+ * @nb: pointer to the entry to remove from the chain
+ *
+ * See blocking_notifier_chain_unregister() for return value and further
+ * observations.
+ *
+ * Removes a notifier from the rfkill notifier chain.
+ */
+int unregister_rfkill_notifier(struct notifier_block *nb)
+{
+        return blocking_notifier_chain_unregister(&rfkill_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_rfkill_notifier);
+
 
 static void rfkill_led_trigger(struct rfkill *rfkill,
                                 enum rfkill_state state)
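The hunk above introduces a blocking notifier chain so other kernel code can observe rfkill state transitions. A minimal consumer, assuming only what this hunk exports (register_rfkill_notifier(), unregister_rfkill_notifier() and the RFKILL_STATE_CHANGED event) and using hypothetical "demo" names, might look like this sketch:

/* Illustrative sketch only; not part of the patch. */
static int demo_rfkill_watcher(struct notifier_block *nb,
                               unsigned long event, void *data)
{
        struct rfkill *rfkill = data;

        if (event == RFKILL_STATE_CHANGED)
                pr_info("rfkill: %s changed to state %d\n",
                        rfkill->name, rfkill->state);

        return NOTIFY_DONE; /* as the kerneldoc above requires */
}

static struct notifier_block demo_rfkill_nb = {
        .notifier_call = demo_rfkill_watcher,
};

static int __init demo_watcher_init(void)
{
        return register_rfkill_notifier(&demo_rfkill_nb);
}

static void __exit demo_watcher_exit(void)
{
        unregister_rfkill_notifier(&demo_rfkill_nb);
}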
@@ -50,24 +98,101 @@ static void rfkill_led_trigger(struct rfkill *rfkill,
 
         if (!led->name)
                 return;
-        if (state == RFKILL_STATE_OFF)
+        if (state != RFKILL_STATE_UNBLOCKED)
                 led_trigger_event(led, LED_OFF);
         else
                 led_trigger_event(led, LED_FULL);
 #endif /* CONFIG_RFKILL_LEDS */
 }
 
+static void notify_rfkill_state_change(struct rfkill *rfkill)
+{
+        blocking_notifier_call_chain(&rfkill_notifier_list,
+                        RFKILL_STATE_CHANGED,
+                        rfkill);
+}
+
+static void update_rfkill_state(struct rfkill *rfkill)
+{
+        enum rfkill_state newstate, oldstate;
+
+        if (rfkill->get_state) {
+                mutex_lock(&rfkill->mutex);
+                if (!rfkill->get_state(rfkill->data, &newstate)) {
+                        oldstate = rfkill->state;
+                        rfkill->state = newstate;
+                        if (oldstate != newstate)
+                                notify_rfkill_state_change(rfkill);
+                }
+                mutex_unlock(&rfkill->mutex);
+        }
+}
+
+/**
+ * rfkill_toggle_radio - wrapper for toggle_radio hook
+ *
+ * @rfkill: the rfkill struct to use
+ * @force: calls toggle_radio even if cache says it is not needed,
+ *      and also makes sure notifications of the state will be
+ *      sent even if it didn't change
+ * @state: the new state to call toggle_radio() with
+ *
+ * Calls rfkill->toggle_radio, enforcing the API for toggle_radio
+ * calls and handling all the red tape such as issuing notifications
+ * if the call is successful.
+ *
+ * Note that @force cannot override a (possibly cached) state of
+ * RFKILL_STATE_HARD_BLOCKED.  Any device making use of
+ * RFKILL_STATE_HARD_BLOCKED implements either get_state() or
+ * rfkill_force_state(), so the cache either is bypassed or valid.
+ *
+ * Note that we do call toggle_radio for RFKILL_STATE_SOFT_BLOCKED
+ * even if the radio is in RFKILL_STATE_HARD_BLOCKED state, so as to
+ * give the driver a hint that it should double-BLOCK the transmitter.
+ *
+ * Caller must have aquired rfkill_mutex.
+ */
 static int rfkill_toggle_radio(struct rfkill *rfkill,
-                                enum rfkill_state state)
+                                enum rfkill_state state,
+                                int force)
 {
         int retval = 0;
+        enum rfkill_state oldstate, newstate;
+
+        oldstate = rfkill->state;
+
+        if (rfkill->get_state && !force &&
+            !rfkill->get_state(rfkill->data, &newstate))
+                rfkill->state = newstate;
+
+        switch (state) {
+        case RFKILL_STATE_HARD_BLOCKED:
+                /* typically happens when refreshing hardware state,
+                 * such as on resume */
+                state = RFKILL_STATE_SOFT_BLOCKED;
+                break;
+        case RFKILL_STATE_UNBLOCKED:
+                /* force can't override this, only rfkill_force_state() can */
+                if (rfkill->state == RFKILL_STATE_HARD_BLOCKED)
+                        return -EPERM;
+                break;
+        case RFKILL_STATE_SOFT_BLOCKED:
+                /* nothing to do, we want to give drivers the hint to double
+                 * BLOCK even a transmitter that is already in state
+                 * RFKILL_STATE_HARD_BLOCKED */
+                break;
+        }
 
-        if (state != rfkill->state) {
+        if (force || state != rfkill->state) {
                 retval = rfkill->toggle_radio(rfkill->data, state);
-                if (!retval) {
+                /* never allow a HARD->SOFT downgrade! */
+                if (!retval && rfkill->state != RFKILL_STATE_HARD_BLOCKED)
                         rfkill->state = state;
-                        rfkill_led_trigger(rfkill, state);
-                }
+        }
+
+        if (force || rfkill->state != oldstate) {
+                rfkill_led_trigger(rfkill, rfkill->state);
+                notify_rfkill_state_change(rfkill);
         }
 
         return retval;
@@ -82,7 +207,6 @@ static int rfkill_toggle_radio(struct rfkill *rfkill,
  * a specific switch is claimed by userspace in which case it is
  * left alone.
  */
-
 void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state)
 {
         struct rfkill *rfkill;
@@ -93,13 +217,66 @@ void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state)
 
         list_for_each_entry(rfkill, &rfkill_list, node) {
                 if ((!rfkill->user_claim) && (rfkill->type == type))
-                        rfkill_toggle_radio(rfkill, state);
+                        rfkill_toggle_radio(rfkill, state, 0);
         }
 
         mutex_unlock(&rfkill_mutex);
 }
 EXPORT_SYMBOL(rfkill_switch_all);
 
+/**
+ * rfkill_epo - emergency power off all transmitters
+ *
+ * This kicks all rfkill devices to RFKILL_STATE_SOFT_BLOCKED, ignoring
+ * everything in its path but rfkill_mutex.
+ */
+void rfkill_epo(void)
+{
+        struct rfkill *rfkill;
+
+        mutex_lock(&rfkill_mutex);
+        list_for_each_entry(rfkill, &rfkill_list, node) {
+                rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1);
+        }
+        mutex_unlock(&rfkill_mutex);
+}
+EXPORT_SYMBOL_GPL(rfkill_epo);
+
+/**
+ * rfkill_force_state - Force the internal rfkill radio state
+ * @rfkill: pointer to the rfkill class to modify.
+ * @state: the current radio state the class should be forced to.
+ *
+ * This function updates the internal state of the radio cached
+ * by the rfkill class.  It should be used when the driver gets
+ * a notification by the firmware/hardware of the current *real*
+ * state of the radio rfkill switch.
+ *
+ * It may not be called from an atomic context.
+ */
+int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state)
+{
+        enum rfkill_state oldstate;
+
+        if (state != RFKILL_STATE_SOFT_BLOCKED &&
+            state != RFKILL_STATE_UNBLOCKED &&
+            state != RFKILL_STATE_HARD_BLOCKED)
+                return -EINVAL;
+
+        mutex_lock(&rfkill->mutex);
+
+        oldstate = rfkill->state;
+        rfkill->state = state;
+
+        if (state != oldstate)
+                notify_rfkill_state_change(rfkill);
+
+        mutex_unlock(&rfkill->mutex);
+
+        return 0;
+}
+EXPORT_SYMBOL(rfkill_force_state);
+
 static ssize_t rfkill_name_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
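rfkill_epo() and rfkill_force_state() added above split the responsibilities: the former is the input-layer emergency path, the latter is how a driver reports the *real* switch state it learned from firmware or hardware. A hedged sketch of a driver calling rfkill_force_state() from process context (all "demo" names hypothetical; the rfkill pointer would come from the driver's earlier rfkill_allocate()/rfkill_register() setup):

/* Illustrative sketch only; not part of the patch. */
struct demo_priv {
        struct rfkill *rfkill;
};

static void demo_handle_firmware_event(struct demo_priv *priv, bool hw_killed)
{
        /* may sleep; must not be called from atomic context (see kerneldoc) */
        rfkill_force_state(priv->rfkill,
                           hw_killed ? RFKILL_STATE_HARD_BLOCKED
                                     : RFKILL_STATE_UNBLOCKED);
}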
@@ -109,31 +286,31 @@ static ssize_t rfkill_name_show(struct device *dev,
         return sprintf(buf, "%s\n", rfkill->name);
 }
 
-static ssize_t rfkill_type_show(struct device *dev,
-                                struct device_attribute *attr,
-                                char *buf)
+static const char *rfkill_get_type_str(enum rfkill_type type)
 {
-        struct rfkill *rfkill = to_rfkill(dev);
-        const char *type;
-
-        switch (rfkill->type) {
+        switch (type) {
         case RFKILL_TYPE_WLAN:
-                type = "wlan";
-                break;
+                return "wlan";
         case RFKILL_TYPE_BLUETOOTH:
-                type = "bluetooth";
-                break;
+                return "bluetooth";
         case RFKILL_TYPE_UWB:
-                type = "ultrawideband";
-                break;
+                return "ultrawideband";
         case RFKILL_TYPE_WIMAX:
-                type = "wimax";
-                break;
+                return "wimax";
+        case RFKILL_TYPE_WWAN:
+                return "wwan";
         default:
                 BUG();
         }
+}
+
+static ssize_t rfkill_type_show(struct device *dev,
+                                struct device_attribute *attr,
+                                char *buf)
+{
+        struct rfkill *rfkill = to_rfkill(dev);
 
-        return sprintf(buf, "%s\n", type);
+        return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
 }
 
 static ssize_t rfkill_state_show(struct device *dev,
@@ -142,6 +319,7 @@ static ssize_t rfkill_state_show(struct device *dev,
 {
         struct rfkill *rfkill = to_rfkill(dev);
 
+        update_rfkill_state(rfkill);
         return sprintf(buf, "%d\n", rfkill->state);
 }
 
@@ -156,10 +334,14 @@ static ssize_t rfkill_state_store(struct device *dev,
         if (!capable(CAP_NET_ADMIN))
                 return -EPERM;
 
+        /* RFKILL_STATE_HARD_BLOCKED is illegal here... */
+        if (state != RFKILL_STATE_UNBLOCKED &&
+            state != RFKILL_STATE_SOFT_BLOCKED)
+                return -EINVAL;
+
         if (mutex_lock_interruptible(&rfkill->mutex))
                 return -ERESTARTSYS;
-        error = rfkill_toggle_radio(rfkill,
-                        state ? RFKILL_STATE_ON : RFKILL_STATE_OFF);
+        error = rfkill_toggle_radio(rfkill, state, 0);
         mutex_unlock(&rfkill->mutex);
 
         return error ? error : count;
@@ -200,7 +382,8 @@ static ssize_t rfkill_claim_store(struct device *dev,
                 if (rfkill->user_claim != claim) {
                         if (!claim)
                                 rfkill_toggle_radio(rfkill,
-                                                rfkill_states[rfkill->type]);
+                                                rfkill_states[rfkill->type],
+                                                0);
                         rfkill->user_claim = claim;
                 }
 
@@ -233,12 +416,12 @@ static int rfkill_suspend(struct device *dev, pm_message_t state)
 
         if (dev->power.power_state.event != state.event) {
                 if (state.event & PM_EVENT_SLEEP) {
-                        mutex_lock(&rfkill->mutex);
-
-                        if (rfkill->state == RFKILL_STATE_ON)
-                                rfkill->toggle_radio(rfkill->data,
-                                                RFKILL_STATE_OFF);
+                        /* Stop transmitter, keep state, no notifies */
+                        update_rfkill_state(rfkill);
 
+                        mutex_lock(&rfkill->mutex);
+                        rfkill->toggle_radio(rfkill->data,
+                                        RFKILL_STATE_SOFT_BLOCKED);
                         mutex_unlock(&rfkill->mutex);
                 }
 
@@ -255,8 +438,8 @@ static int rfkill_resume(struct device *dev)
         if (dev->power.power_state.event != PM_EVENT_ON) {
                 mutex_lock(&rfkill->mutex);
 
-                if (rfkill->state == RFKILL_STATE_ON)
-                        rfkill->toggle_radio(rfkill->data, RFKILL_STATE_ON);
+                /* restore radio state AND notify everybody */
+                rfkill_toggle_radio(rfkill, rfkill->state, 1);
 
                 mutex_unlock(&rfkill->mutex);
         }
@@ -269,34 +452,71 @@ static int rfkill_resume(struct device *dev)
 #define rfkill_resume NULL
 #endif
 
+static int rfkill_blocking_uevent_notifier(struct notifier_block *nb,
+                                        unsigned long eventid,
+                                        void *data)
+{
+        struct rfkill *rfkill = (struct rfkill *)data;
+
+        switch (eventid) {
+        case RFKILL_STATE_CHANGED:
+                kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
+                break;
+        default:
+                break;
+        }
+
+        return NOTIFY_DONE;
+}
+
+static struct notifier_block rfkill_blocking_uevent_nb = {
+        .notifier_call  = rfkill_blocking_uevent_notifier,
+        .priority       = 0,
+};
+
+static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+        struct rfkill *rfkill = to_rfkill(dev);
+        int error;
+
+        error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
+        if (error)
+                return error;
+        error = add_uevent_var(env, "RFKILL_TYPE=%s",
+                                rfkill_get_type_str(rfkill->type));
+        if (error)
+                return error;
+        error = add_uevent_var(env, "RFKILL_STATE=%d", rfkill->state);
+        return error;
+}
+
 static struct class rfkill_class = {
         .name           = "rfkill",
         .dev_release    = rfkill_release,
         .dev_attrs      = rfkill_dev_attrs,
         .suspend        = rfkill_suspend,
         .resume         = rfkill_resume,
+        .dev_uevent     = rfkill_dev_uevent,
 };
 
 static int rfkill_add_switch(struct rfkill *rfkill)
 {
-        int error;
-
         mutex_lock(&rfkill_mutex);
 
-        error = rfkill_toggle_radio(rfkill, rfkill_states[rfkill->type]);
-        if (!error)
-                list_add_tail(&rfkill->node, &rfkill_list);
+        rfkill_toggle_radio(rfkill, rfkill_states[rfkill->type], 0);
+
+        list_add_tail(&rfkill->node, &rfkill_list);
 
         mutex_unlock(&rfkill_mutex);
 
-        return error;
+        return 0;
 }
 
 static void rfkill_remove_switch(struct rfkill *rfkill)
 {
         mutex_lock(&rfkill_mutex);
         list_del_init(&rfkill->node);
-        rfkill_toggle_radio(rfkill, RFKILL_STATE_OFF);
+        rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1);
         mutex_unlock(&rfkill_mutex);
 }
 
@@ -412,7 +632,7 @@ int rfkill_register(struct rfkill *rfkill)
 EXPORT_SYMBOL(rfkill_register);
 
 /**
- * rfkill_unregister - Uegister a rfkill structure.
+ * rfkill_unregister - Unregister a rfkill structure.
  * @rfkill: rfkill structure to be unregistered
  *
  * This function should be called by the network driver during device
@@ -436,8 +656,13 @@ static int __init rfkill_init(void)
         int error;
         int i;
 
+        /* RFKILL_STATE_HARD_BLOCKED is illegal here... */
+        if (rfkill_default_state != RFKILL_STATE_SOFT_BLOCKED &&
+            rfkill_default_state != RFKILL_STATE_UNBLOCKED)
+                return -EINVAL;
+
         for (i = 0; i < ARRAY_SIZE(rfkill_states); i++)
-                rfkill_states[i] = RFKILL_STATE_ON;
+                rfkill_states[i] = rfkill_default_state;
 
         error = class_register(&rfkill_class);
         if (error) {
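With the check above, initialization now fails for any default_state other than the two soft states; per the parameter description earlier in this diff, 0 means "radio off" (soft-blocked) and the compiled-in default is RFKILL_STATE_UNBLOCKED. Assuming the usual module-parameter conventions, the value would be supplied as, for example:

        rfkill.default_state=0              (kernel command line, rfkill built in)
        modprobe rfkill default_state=0     (rfkill built as a module)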
@@ -445,11 +670,14 @@ static int __init rfkill_init(void)
                 return error;
         }
 
+        register_rfkill_notifier(&rfkill_blocking_uevent_nb);
+
         return 0;
 }
 
 static void __exit rfkill_exit(void)
 {
+        unregister_rfkill_notifier(&rfkill_blocking_uevent_nb);
         class_unregister(&rfkill_class);
 }
 
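Taken together, the rfkill.c changes move the core from a boolean ON/OFF model to UNBLOCKED / SOFT_BLOCKED / HARD_BLOCKED, with toggle_radio() still called for SOFT_BLOCKED even while hard-blocked (as a hint to keep the transmitter down) and get_state() used to refresh the cached state. A rough driver-side sketch of that contract, using only the callback signatures visible in this diff (rfkill->toggle_radio(data, state) and rfkill->get_state(data, &state)) and hypothetical "demo" names:

/* Illustrative sketch only; not part of the patch. */
struct demo_radio {
        bool tx_enabled;        /* software (soft-block) state */
        bool hw_switch_killed;  /* as reported by hardware/firmware */
};

static int demo_toggle_radio(void *data, enum rfkill_state state)
{
        struct demo_radio *radio = data;

        /* the core may pass SOFT_BLOCKED while we are hard-blocked:
         * treat it as a hint to keep the transmitter down anyway */
        radio->tx_enabled = (state == RFKILL_STATE_UNBLOCKED) &&
                            !radio->hw_switch_killed;
        return 0;
}

static int demo_get_state(void *data, enum rfkill_state *state)
{
        struct demo_radio *radio = data;

        if (radio->hw_switch_killed)
                *state = RFKILL_STATE_HARD_BLOCKED;
        else
                *state = radio->tx_enabled ? RFKILL_STATE_UNBLOCKED
                                           : RFKILL_STATE_SOFT_BLOCKED;
        return 0;
}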