author		David S. Miller <davem@davemloft.net>	2009-06-07 07:24:21 -0400
committer	David S. Miller <davem@davemloft.net>	2009-06-07 07:24:21 -0400
commit		b1bc81a0ef86b86fa410dd303d84c8c7bd09a64d (patch)
tree		a0d2e6dd179e5d057776edd0ed865bc744dfa54d /net
parent		a93958ac980f0ce594ad90657ecbc595ff157a40 (diff)
parent		0c0c9e7076b69f93678e4ec711e2bf237398e623 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6
Diffstat (limited to 'net')
-rw-r--r--	net/core/dev.c	7
-rw-r--r--	net/mac80211/Kconfig	5
-rw-r--r--	net/mac80211/cfg.c	65
-rw-r--r--	net/mac80211/driver-ops.h	7
-rw-r--r--	net/mac80211/iface.c	4
-rw-r--r--	net/mac80211/main.c	12
-rw-r--r--	net/mac80211/sta_info.c	9
-rw-r--r--	net/mac80211/tx.c	2
-rw-r--r--	net/mac80211/util.c	10
-rw-r--r--	net/mac80211/wext.c	80
-rw-r--r--	net/rfkill/Kconfig	21
-rw-r--r--	net/rfkill/Makefile	5
-rw-r--r--	net/rfkill/core.c	1228
-rw-r--r--	net/rfkill/input.c	342
-rw-r--r--	net/rfkill/rfkill-input.c	390
-rw-r--r--	net/rfkill/rfkill.c	855
-rw-r--r--	net/rfkill/rfkill.h (renamed from net/rfkill/rfkill-input.h)	10
-rw-r--r--	net/wimax/Kconfig	15
-rw-r--r--	net/wimax/op-rfkill.c	123
-rw-r--r--	net/wireless/Kconfig	3
-rw-r--r--	net/wireless/core.c	97
-rw-r--r--	net/wireless/core.h	7
-rw-r--r--	net/wireless/nl80211.c	57
-rw-r--r--	net/wireless/reg.c	8
-rw-r--r--	net/wireless/scan.c	3
-rw-r--r--	net/wireless/util.c	13
-rw-r--r--	net/wireless/wext-compat.c	83
27 files changed, 1961 insertions, 1500 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 34b49a6a22fd..1f38401fc028 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1048,7 +1048,7 @@ void dev_load(struct net *net, const char *name)
 int dev_open(struct net_device *dev)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
-	int ret = 0;
+	int ret;
 
 	ASSERT_RTNL();
 
@@ -1065,6 +1065,11 @@ int dev_open(struct net_device *dev)
 	if (!netif_device_present(dev))
 		return -ENODEV;
 
+	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
+	ret = notifier_to_errno(ret);
+	if (ret)
+		return ret;
+
 	/*
 	 * Call device private open method
 	 */
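
The NETDEV_PRE_UP notifier added above lets a subsystem veto dev_open() before the driver's open method runs; in this merge cfg80211 uses the hook so that rfkill-blocked wireless interfaces cannot be brought up. A minimal sketch of such a notifier follows; the example_* names and the -EPERM error code are illustrative assumptions, not code from this tree:

	/* Sketch only: veto interface bring-up from a netdevice notifier. */
	#include <linux/netdevice.h>
	#include <linux/notifier.h>

	static bool example_radio_blocked(struct net_device *dev)
	{
		return false;	/* hypothetical policy check */
	}

	static int example_pre_up_notify(struct notifier_block *nb,
					 unsigned long event, void *ptr)
	{
		struct net_device *dev = ptr;	/* 2.6.x convention: ptr is the netdev */

		if (event == NETDEV_PRE_UP && example_radio_blocked(dev))
			return notifier_from_errno(-EPERM);	/* dev_open() then fails */

		return NOTIFY_DONE;
	}

	static struct notifier_block example_pre_up_nb = {
		.notifier_call = example_pre_up_notify,
	};

	/* registered once, e.g. from subsystem init:
	 *	register_netdevice_notifier(&example_pre_up_nb);
	 */
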
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 9cbf545e95a2..ba2643a43c73 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -1,16 +1,19 @@
 config MAC80211
 	tristate "Generic IEEE 802.11 Networking Stack (mac80211)"
+	depends on CFG80211
 	select CRYPTO
 	select CRYPTO_ECB
 	select CRYPTO_ARC4
 	select CRYPTO_AES
 	select CRC32
 	select WIRELESS_EXT
-	select CFG80211
 	---help---
 	  This option enables the hardware independent IEEE 802.11
 	  networking stack.
 
+comment "CFG80211 needs to be enabled for MAC80211"
+	depends on CFG80211=n
+
 config MAC80211_DEFAULT_PS
 	bool "enable powersave by default"
 	depends on MAC80211
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 77e9ff5ec4f3..a9211cc183cb 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -664,18 +664,19 @@ static void sta_apply_parameters(struct ieee80211_local *local,
 	spin_unlock_bh(&sta->lock);
 
 	/*
+	 * cfg80211 validates this (1-2007) and allows setting the AID
+	 * only when creating a new station entry
+	 */
+	if (params->aid)
+		sta->sta.aid = params->aid;
+
+	/*
 	 * FIXME: updating the following information is racy when this
 	 * function is called from ieee80211_change_station().
 	 * However, all this information should be static so
 	 * maybe we should just reject attempts to change it.
 	 */
 
-	if (params->aid) {
-		sta->sta.aid = params->aid;
-		if (sta->sta.aid > IEEE80211_MAX_AID)
-			sta->sta.aid = 0; /* XXX: should this be an error? */
-	}
-
 	if (params->listen_interval >= 0)
 		sta->listen_interval = params->listen_interval;
 
@@ -1255,7 +1256,7 @@ static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev,
 	sdata->u.mgd.flags |= IEEE80211_STA_AUTO_SSID_SEL;
 
 	ret = ieee80211_sta_set_extra_ie(sdata, req->ie, req->ie_len);
-	if (ret)
+	if (ret && ret != -EALREADY)
 		return ret;
 
 	if (req->use_mfp) {
@@ -1333,6 +1334,53 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
 	return 0;
 }
 
+static int ieee80211_set_tx_power(struct wiphy *wiphy,
+				  enum tx_power_setting type, int dbm)
+{
+	struct ieee80211_local *local = wiphy_priv(wiphy);
+	struct ieee80211_channel *chan = local->hw.conf.channel;
+	u32 changes = 0;
+
+	switch (type) {
+	case TX_POWER_AUTOMATIC:
+		local->user_power_level = -1;
+		break;
+	case TX_POWER_LIMITED:
+		if (dbm < 0)
+			return -EINVAL;
+		local->user_power_level = dbm;
+		break;
+	case TX_POWER_FIXED:
+		if (dbm < 0)
+			return -EINVAL;
+		/* TODO: move to cfg80211 when it knows the channel */
+		if (dbm > chan->max_power)
+			return -EINVAL;
+		local->user_power_level = dbm;
+		break;
+	}
+
+	ieee80211_hw_config(local, changes);
+
+	return 0;
+}
+
+static int ieee80211_get_tx_power(struct wiphy *wiphy, int *dbm)
+{
+	struct ieee80211_local *local = wiphy_priv(wiphy);
+
+	*dbm = local->hw.conf.power_level;
+
+	return 0;
+}
+
+static void ieee80211_rfkill_poll(struct wiphy *wiphy)
+{
+	struct ieee80211_local *local = wiphy_priv(wiphy);
+
+	drv_rfkill_poll(local);
+}
+
 struct cfg80211_ops mac80211_config_ops = {
 	.add_virtual_intf = ieee80211_add_iface,
 	.del_virtual_intf = ieee80211_del_iface,
@@ -1372,4 +1420,7 @@ struct cfg80211_ops mac80211_config_ops = {
 	.join_ibss = ieee80211_join_ibss,
 	.leave_ibss = ieee80211_leave_ibss,
 	.set_wiphy_params = ieee80211_set_wiphy_params,
+	.set_tx_power = ieee80211_set_tx_power,
+	.get_tx_power = ieee80211_get_tx_power,
+	.rfkill_poll = ieee80211_rfkill_poll,
 };
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 3912b5334b9c..b13446afd48f 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -181,4 +181,11 @@ static inline int drv_ampdu_action(struct ieee80211_local *local,
 					sta, tid, ssn);
 	return -EOPNOTSUPP;
 }
+
+
+static inline void drv_rfkill_poll(struct ieee80211_local *local)
+{
+	if (local->ops->rfkill_poll)
+		local->ops->rfkill_poll(&local->hw);
+}
 #endif /* __MAC80211_DRIVER_OPS */
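
The new drv_rfkill_poll() wrapper just forwards to the low-level driver's ieee80211_ops::rfkill_poll hook, which is reached through the wiphy op added in cfg.c above. A hedged sketch of what a driver-side hook could look like; the mydrv_* names and the GPIO read are assumptions, and wiphy_rfkill_set_hw_state() is assumed to be the cfg80211 reporting helper introduced by this rfkill rework:

	/* Sketch of a driver-side rfkill_poll hook (names are illustrative). */
	static void mydrv_rfkill_poll(struct ieee80211_hw *hw)
	{
		struct mydrv_priv *priv = hw->priv;
		bool blocked = mydrv_read_rfkill_gpio(priv);	/* hypothetical helper */

		/* report the hardware switch state to cfg80211/rfkill */
		wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
	}

	static const struct ieee80211_ops mydrv_ops = {
		/* ... .tx, .start, .stop, .config, ... */
		.rfkill_poll = mydrv_rfkill_poll,
	};
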
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 8c9f1c722cdb..b7c8a4484298 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -170,7 +170,7 @@ static int ieee80211_open(struct net_device *dev)
 			goto err_del_bss;
 		/* we're brought up, everything changes */
 		hw_reconf_flags = ~0;
-		ieee80211_led_radio(local, local->hw.conf.radio_enabled);
+		ieee80211_led_radio(local, true);
 	}
 
 	/*
@@ -560,7 +560,7 @@ static int ieee80211_stop(struct net_device *dev)
 
 	drv_stop(local);
 
-	ieee80211_led_radio(local, 0);
+	ieee80211_led_radio(local, false);
 
 	flush_workqueue(local->hw.workqueue);
 
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index e37770ced53c..2683df918073 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -289,16 +289,8 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
 	drv_bss_info_changed(local, &sdata->vif,
 			     &sdata->vif.bss_conf, changed);
 
-	/*
-	 * DEPRECATED
-	 *
-	 * ~changed is just there to not do this at resume time
-	 */
-	if (changed & BSS_CHANGED_BEACON_INT && ~changed) {
-		local->hw.conf.beacon_int = sdata->vif.bss_conf.beacon_int;
-		ieee80211_hw_config(local,
-			_IEEE80211_CONF_CHANGE_BEACON_INTERVAL);
-	}
+	/* DEPRECATED */
+	local->hw.conf.beacon_int = sdata->vif.bss_conf.beacon_int;
 }
 
 u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index d5611d8fd0d6..a360bceeba59 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -44,6 +44,15 @@
  * When the insertion fails (sta_info_insert() returns non-zero), the
  * structure will have been freed by sta_info_insert()!
  *
+ * sta entries are added by mac80211 when you establish a link with a
+ * peer. This means different things for the different types of interfaces
+ * we support. For a regular station this means we add the AP sta when we
+ * receive an association response from the AP. For IBSS this occurs when
+ * we receive a probe response or a beacon from the target IBSS network. For
+ * WDS we add the sta for the peer immediately upon device open. When using
+ * AP mode we add stations for each respective station upon request from
+ * userspace through nl80211.
+ *
  * Because there are debugfs entries for each station, and adding those
  * must be able to sleep, it is also possible to "pin" a station entry,
  * that means it can be removed from the hash table but not be freed.
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index a910148b8228..1436f747531a 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1238,7 +1238,6 @@ static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
 			 bool txpending)
 {
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct sta_info *sta;
 	struct ieee80211_tx_data tx;
 	ieee80211_tx_result res_prepare;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1270,7 +1269,6 @@ static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
 		return;
 	}
 
-	sta = tx.sta;
 	tx.channel = local->hw.conf.channel;
 	info->band = tx.channel->band;
 
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 949d857debd8..22f63815fb36 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -657,15 +657,15 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
 
 	switch (queue) {
 	case 3: /* AC_BK */
-		qparam.cw_max = aCWmin;
-		qparam.cw_min = aCWmax;
+		qparam.cw_max = aCWmax;
+		qparam.cw_min = aCWmin;
 		qparam.txop = 0;
 		qparam.aifs = 7;
 		break;
 	default: /* never happens but let's not leave undefined */
 	case 2: /* AC_BE */
-		qparam.cw_max = aCWmin;
-		qparam.cw_min = aCWmax;
+		qparam.cw_max = aCWmax;
+		qparam.cw_min = aCWmin;
 		qparam.txop = 0;
 		qparam.aifs = 3;
 		break;
@@ -973,7 +973,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
 	if (local->open_count) {
 		res = drv_start(local);
 
-		ieee80211_led_radio(local, hw->conf.radio_enabled);
+		ieee80211_led_radio(local, true);
 	}
 
 	/* add interfaces */
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index a01154e127f0..d2d81b103341 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -306,82 +306,6 @@ static int ieee80211_ioctl_giwrate(struct net_device *dev,
 	return 0;
 }
 
-static int ieee80211_ioctl_siwtxpower(struct net_device *dev,
-				      struct iw_request_info *info,
-				      union iwreq_data *data, char *extra)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_channel* chan = local->hw.conf.channel;
-	bool reconf = false;
-	u32 reconf_flags = 0;
-	int new_power_level;
-
-	if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM)
-		return -EINVAL;
-	if (data->txpower.flags & IW_TXPOW_RANGE)
-		return -EINVAL;
-	if (!chan)
-		return -EINVAL;
-
-	/* only change when not disabling */
-	if (!data->txpower.disabled) {
-		if (data->txpower.fixed) {
-			if (data->txpower.value < 0)
-				return -EINVAL;
-			new_power_level = data->txpower.value;
-			/*
-			 * Debatable, but we cannot do a fixed power
-			 * level above the regulatory constraint.
-			 * Use "iwconfig wlan0 txpower 15dBm" instead.
-			 */
-			if (new_power_level > chan->max_power)
-				return -EINVAL;
-		} else {
-			/*
-			 * Automatic power level setting, max being the value
-			 * passed in from userland.
-			 */
-			if (data->txpower.value < 0)
-				new_power_level = -1;
-			else
-				new_power_level = data->txpower.value;
-		}
-
-		reconf = true;
-
-		/*
-		 * ieee80211_hw_config() will limit to the channel's
-		 * max power and possibly power constraint from AP.
-		 */
-		local->user_power_level = new_power_level;
-	}
-
-	if (local->hw.conf.radio_enabled != !(data->txpower.disabled)) {
-		local->hw.conf.radio_enabled = !(data->txpower.disabled);
-		reconf_flags |= IEEE80211_CONF_CHANGE_RADIO_ENABLED;
-		ieee80211_led_radio(local, local->hw.conf.radio_enabled);
-	}
-
-	if (reconf || reconf_flags)
-		ieee80211_hw_config(local, reconf_flags);
-
-	return 0;
-}
-
-static int ieee80211_ioctl_giwtxpower(struct net_device *dev,
-				      struct iw_request_info *info,
-				      union iwreq_data *data, char *extra)
-{
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-
-	data->txpower.fixed = 1;
-	data->txpower.disabled = !(local->hw.conf.radio_enabled);
-	data->txpower.value = local->hw.conf.power_level;
-	data->txpower.flags = IW_TXPOW_DBM;
-
-	return 0;
-}
-
 static int ieee80211_ioctl_siwpower(struct net_device *dev,
 				    struct iw_request_info *info,
 				    struct iw_param *wrq,
@@ -658,8 +582,8 @@ static const iw_handler ieee80211_handler[] =
 	(iw_handler) cfg80211_wext_giwrts,		/* SIOCGIWRTS */
 	(iw_handler) cfg80211_wext_siwfrag,		/* SIOCSIWFRAG */
 	(iw_handler) cfg80211_wext_giwfrag,		/* SIOCGIWFRAG */
-	(iw_handler) ieee80211_ioctl_siwtxpower,	/* SIOCSIWTXPOW */
-	(iw_handler) ieee80211_ioctl_giwtxpower,	/* SIOCGIWTXPOW */
+	(iw_handler) cfg80211_wext_siwtxpower,		/* SIOCSIWTXPOW */
+	(iw_handler) cfg80211_wext_giwtxpower,		/* SIOCGIWTXPOW */
 	(iw_handler) cfg80211_wext_siwretry,		/* SIOCSIWRETRY */
 	(iw_handler) cfg80211_wext_giwretry,		/* SIOCGIWRETRY */
 	(iw_handler) cfg80211_wext_siwencode,		/* SIOCSIWENCODE */
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 7f807b30cfbb..fd7600d8ab14 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -10,22 +10,15 @@ menuconfig RFKILL
 	  To compile this driver as a module, choose M here: the
 	  module will be called rfkill.
 
-config RFKILL_INPUT
-	tristate "Input layer to RF switch connector"
-	depends on RFKILL && INPUT
-	help
-	  Say Y here if you want kernel automatically toggle state
-	  of RF switches on and off when user presses appropriate
-	  button or a key on the keyboard. Without this module you
-	  need a some kind of userspace application to control
-	  state of the switches.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called rfkill-input.
-
 # LED trigger support
 config RFKILL_LEDS
 	bool
-	depends on RFKILL && LEDS_TRIGGERS
+	depends on RFKILL
+	depends on LEDS_TRIGGERS = y || RFKILL = LEDS_TRIGGERS
 	default y
 
+config RFKILL_INPUT
+	bool "RF switch input support"
+	depends on RFKILL
+	depends on INPUT = y || RFKILL = INPUT
+	default y if !EMBEDDED
diff --git a/net/rfkill/Makefile b/net/rfkill/Makefile
index b38c430be057..662105352691 100644
--- a/net/rfkill/Makefile
+++ b/net/rfkill/Makefile
@@ -2,5 +2,6 @@
 # Makefile for the RF switch subsystem.
 #
 
-obj-$(CONFIG_RFKILL)		+= rfkill.o
-obj-$(CONFIG_RFKILL_INPUT)	+= rfkill-input.o
+rfkill-y			+= core.o
+rfkill-$(CONFIG_RFKILL_INPUT)	+= input.o
+obj-$(CONFIG_RFKILL)		+= rfkill.o
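
With this Makefile the rfkill module is now built from core.c (plus input.c when RFKILL_INPUT is set). The rewritten core that follows exposes a small driver-facing API -- rfkill_alloc(), rfkill_register(), rfkill_set_hw_state()/rfkill_set_sw_state(), rfkill_unregister(), rfkill_destroy() -- driven by the rfkill_ops set_block/poll/query callbacks. A sketch of typical driver usage, with made-up wlan0_*/mydev names and a hypothetical wlan0_radio_write() helper:

	/* Sketch: registering a switch with the new rfkill core (illustrative names). */
	#include <linux/rfkill.h>

	struct mydev {
		struct device *dev;		/* parent device (hypothetical) */
		struct rfkill *rfkill;
	};

	static int wlan0_radio_write(struct mydev *dev, bool on)
	{
		return 0;			/* hypothetical hardware access */
	}

	static int wlan0_set_block(void *data, bool blocked)
	{
		struct mydev *dev = data;

		/* turn the radio off when blocked, on otherwise */
		return wlan0_radio_write(dev, !blocked);
	}

	static const struct rfkill_ops wlan0_rfkill_ops = {
		.set_block = wlan0_set_block,
	};

	static int wlan0_rfkill_setup(struct mydev *dev)
	{
		int err;

		dev->rfkill = rfkill_alloc("wlan0-radio", dev->dev, RFKILL_TYPE_WLAN,
					   &wlan0_rfkill_ops, dev);
		if (!dev->rfkill)
			return -ENOMEM;

		err = rfkill_register(dev->rfkill);
		if (err) {
			rfkill_destroy(dev->rfkill);
			return err;
		}

		/* later: rfkill_set_hw_state(dev->rfkill, hw_switch_blocked); */
		return 0;
	}
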
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
new file mode 100644
index 000000000000..11b7314723df
--- /dev/null
+++ b/net/rfkill/core.c
@@ -0,0 +1,1228 @@
1/*
2 * Copyright (C) 2006 - 2007 Ivo van Doorn
3 * Copyright (C) 2007 Dmitry Torokhov
4 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the
18 * Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/workqueue.h>
26#include <linux/capability.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/rfkill.h>
30#include <linux/spinlock.h>
31#include <linux/miscdevice.h>
32#include <linux/wait.h>
33#include <linux/poll.h>
34#include <linux/fs.h>
35
36#include "rfkill.h"
37
38#define POLL_INTERVAL (5 * HZ)
39
40#define RFKILL_BLOCK_HW BIT(0)
41#define RFKILL_BLOCK_SW BIT(1)
42#define RFKILL_BLOCK_SW_PREV BIT(2)
43#define RFKILL_BLOCK_ANY (RFKILL_BLOCK_HW |\
44 RFKILL_BLOCK_SW |\
45 RFKILL_BLOCK_SW_PREV)
46#define RFKILL_BLOCK_SW_SETCALL BIT(31)
47
48struct rfkill {
49 spinlock_t lock;
50
51 const char *name;
52 enum rfkill_type type;
53
54 unsigned long state;
55
56 u32 idx;
57
58 bool registered;
59 bool suspended;
60
61 const struct rfkill_ops *ops;
62 void *data;
63
64#ifdef CONFIG_RFKILL_LEDS
65 struct led_trigger led_trigger;
66 const char *ledtrigname;
67#endif
68
69 struct device dev;
70 struct list_head node;
71
72 struct delayed_work poll_work;
73 struct work_struct uevent_work;
74 struct work_struct sync_work;
75};
76#define to_rfkill(d) container_of(d, struct rfkill, dev)
77
78struct rfkill_int_event {
79 struct list_head list;
80 struct rfkill_event ev;
81};
82
83struct rfkill_data {
84 struct list_head list;
85 struct list_head events;
86 struct mutex mtx;
87 wait_queue_head_t read_wait;
88 bool input_handler;
89};
90
91
92MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
93MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
94MODULE_DESCRIPTION("RF switch support");
95MODULE_LICENSE("GPL");
96
97
98/*
99 * The locking here should be made much smarter, we currently have
100 * a bit of a stupid situation because drivers might want to register
101 * the rfkill struct under their own lock, and take this lock during
102 * rfkill method calls -- which will cause an AB-BA deadlock situation.
103 *
104 * To fix that, we need to rework this code here to be mostly lock-free
105 * and only use the mutex for list manipulations, not to protect the
106 * various other global variables. Then we can avoid holding the mutex
107 * around driver operations, and all is happy.
108 */
109static LIST_HEAD(rfkill_list); /* list of registered rf switches */
110static DEFINE_MUTEX(rfkill_global_mutex);
111static LIST_HEAD(rfkill_fds); /* list of open fds of /dev/rfkill */
112
113static unsigned int rfkill_default_state = 1;
114module_param_named(default_state, rfkill_default_state, uint, 0444);
115MODULE_PARM_DESC(default_state,
116 "Default initial state for all radio types, 0 = radio off");
117
118static struct {
119 bool cur, def;
120} rfkill_global_states[NUM_RFKILL_TYPES];
121
122static unsigned long rfkill_states_default_locked;
123
124static bool rfkill_epo_lock_active;
125
126
127#ifdef CONFIG_RFKILL_LEDS
128static void rfkill_led_trigger_event(struct rfkill *rfkill)
129{
130 struct led_trigger *trigger;
131
132 if (!rfkill->registered)
133 return;
134
135 trigger = &rfkill->led_trigger;
136
137 if (rfkill->state & RFKILL_BLOCK_ANY)
138 led_trigger_event(trigger, LED_OFF);
139 else
140 led_trigger_event(trigger, LED_FULL);
141}
142
143static void rfkill_led_trigger_activate(struct led_classdev *led)
144{
145 struct rfkill *rfkill;
146
147 rfkill = container_of(led->trigger, struct rfkill, led_trigger);
148
149 rfkill_led_trigger_event(rfkill);
150}
151
152const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
153{
154 return rfkill->led_trigger.name;
155}
156EXPORT_SYMBOL(rfkill_get_led_trigger_name);
157
158void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
159{
160 BUG_ON(!rfkill);
161
162 rfkill->ledtrigname = name;
163}
164EXPORT_SYMBOL(rfkill_set_led_trigger_name);
165
166static int rfkill_led_trigger_register(struct rfkill *rfkill)
167{
168 rfkill->led_trigger.name = rfkill->ledtrigname
169 ? : dev_name(&rfkill->dev);
170 rfkill->led_trigger.activate = rfkill_led_trigger_activate;
171 return led_trigger_register(&rfkill->led_trigger);
172}
173
174static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
175{
176 led_trigger_unregister(&rfkill->led_trigger);
177}
178#else
179static void rfkill_led_trigger_event(struct rfkill *rfkill)
180{
181}
182
183static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
184{
185 return 0;
186}
187
188static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
189{
190}
191#endif /* CONFIG_RFKILL_LEDS */
192
193static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
194 enum rfkill_operation op)
195{
196 unsigned long flags;
197
198 ev->idx = rfkill->idx;
199 ev->type = rfkill->type;
200 ev->op = op;
201
202 spin_lock_irqsave(&rfkill->lock, flags);
203 ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
204 ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
205 RFKILL_BLOCK_SW_PREV));
206 spin_unlock_irqrestore(&rfkill->lock, flags);
207}
208
209static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
210{
211 struct rfkill_data *data;
212 struct rfkill_int_event *ev;
213
214 list_for_each_entry(data, &rfkill_fds, list) {
215 ev = kzalloc(sizeof(*ev), GFP_KERNEL);
216 if (!ev)
217 continue;
218 rfkill_fill_event(&ev->ev, rfkill, op);
219 mutex_lock(&data->mtx);
220 list_add_tail(&ev->list, &data->events);
221 mutex_unlock(&data->mtx);
222 wake_up_interruptible(&data->read_wait);
223 }
224}
225
226static void rfkill_event(struct rfkill *rfkill)
227{
228 if (!rfkill->registered || rfkill->suspended)
229 return;
230
231 kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
232
233 /* also send event to /dev/rfkill */
234 rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
235}
236
237static bool __rfkill_set_hw_state(struct rfkill *rfkill,
238 bool blocked, bool *change)
239{
240 unsigned long flags;
241 bool prev, any;
242
243 BUG_ON(!rfkill);
244
245 spin_lock_irqsave(&rfkill->lock, flags);
246 prev = !!(rfkill->state & RFKILL_BLOCK_HW);
247 if (blocked)
248 rfkill->state |= RFKILL_BLOCK_HW;
249 else
250 rfkill->state &= ~RFKILL_BLOCK_HW;
251 *change = prev != blocked;
252 any = rfkill->state & RFKILL_BLOCK_ANY;
253 spin_unlock_irqrestore(&rfkill->lock, flags);
254
255 rfkill_led_trigger_event(rfkill);
256
257 return any;
258}
259
260/**
261 * rfkill_set_block - wrapper for set_block method
262 *
263 * @rfkill: the rfkill struct to use
264 * @blocked: the new software state
265 *
266 * Calls the set_block method (when applicable) and handles notifications
267 * etc. as well.
268 */
269static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
270{
271 unsigned long flags;
272 int err;
273
274 /*
275 * Some platforms (...!) generate input events which affect the
276 * _hard_ kill state -- whenever something tries to change the
277 * current software state query the hardware state too.
278 */
279 if (rfkill->ops->query)
280 rfkill->ops->query(rfkill, rfkill->data);
281
282 spin_lock_irqsave(&rfkill->lock, flags);
283 if (rfkill->state & RFKILL_BLOCK_SW)
284 rfkill->state |= RFKILL_BLOCK_SW_PREV;
285 else
286 rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
287
288 if (blocked)
289 rfkill->state |= RFKILL_BLOCK_SW;
290 else
291 rfkill->state &= ~RFKILL_BLOCK_SW;
292
293 rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
294 spin_unlock_irqrestore(&rfkill->lock, flags);
295
296 if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
297 return;
298
299 err = rfkill->ops->set_block(rfkill->data, blocked);
300
301 spin_lock_irqsave(&rfkill->lock, flags);
302 if (err) {
303 /*
304 * Failed -- reset status to _prev, this may be different
305 * from what set set _PREV to earlier in this function
306 * if rfkill_set_sw_state was invoked.
307 */
308 if (rfkill->state & RFKILL_BLOCK_SW_PREV)
309 rfkill->state |= RFKILL_BLOCK_SW;
310 else
311 rfkill->state &= ~RFKILL_BLOCK_SW;
312 }
313 rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
314 rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
315 spin_unlock_irqrestore(&rfkill->lock, flags);
316
317 rfkill_led_trigger_event(rfkill);
318 rfkill_event(rfkill);
319}
320
321#ifdef CONFIG_RFKILL_INPUT
322static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);
323
324/**
325 * __rfkill_switch_all - Toggle state of all switches of given type
326 * @type: type of interfaces to be affected
327 * @state: the new state
328 *
329 * This function sets the state of all switches of given type,
330 * unless a specific switch is claimed by userspace (in which case,
331 * that switch is left alone) or suspended.
332 *
333 * Caller must have acquired rfkill_global_mutex.
334 */
335static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
336{
337 struct rfkill *rfkill;
338
339 rfkill_global_states[type].cur = blocked;
340 list_for_each_entry(rfkill, &rfkill_list, node) {
341 if (rfkill->type != type)
342 continue;
343
344 rfkill_set_block(rfkill, blocked);
345 }
346}
347
348/**
349 * rfkill_switch_all - Toggle state of all switches of given type
350 * @type: type of interfaces to be affected
351 * @state: the new state
352 *
353 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state).
354 * Please refer to __rfkill_switch_all() for details.
355 *
356 * Does nothing if the EPO lock is active.
357 */
358void rfkill_switch_all(enum rfkill_type type, bool blocked)
359{
360 if (atomic_read(&rfkill_input_disabled))
361 return;
362
363 mutex_lock(&rfkill_global_mutex);
364
365 if (!rfkill_epo_lock_active)
366 __rfkill_switch_all(type, blocked);
367
368 mutex_unlock(&rfkill_global_mutex);
369}
370
371/**
372 * rfkill_epo - emergency power off all transmitters
373 *
374 * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
375 * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
376 *
377 * The global state before the EPO is saved and can be restored later
378 * using rfkill_restore_states().
379 */
380void rfkill_epo(void)
381{
382 struct rfkill *rfkill;
383 int i;
384
385 if (atomic_read(&rfkill_input_disabled))
386 return;
387
388 mutex_lock(&rfkill_global_mutex);
389
390 rfkill_epo_lock_active = true;
391 list_for_each_entry(rfkill, &rfkill_list, node)
392 rfkill_set_block(rfkill, true);
393
394 for (i = 0; i < NUM_RFKILL_TYPES; i++) {
395 rfkill_global_states[i].def = rfkill_global_states[i].cur;
396 rfkill_global_states[i].cur = true;
397 }
398
399 mutex_unlock(&rfkill_global_mutex);
400}
401
402/**
403 * rfkill_restore_states - restore global states
404 *
405 * Restore (and sync switches to) the global state from the
406 * states in rfkill_default_states. This can undo the effects of
407 * a call to rfkill_epo().
408 */
409void rfkill_restore_states(void)
410{
411 int i;
412
413 if (atomic_read(&rfkill_input_disabled))
414 return;
415
416 mutex_lock(&rfkill_global_mutex);
417
418 rfkill_epo_lock_active = false;
419 for (i = 0; i < NUM_RFKILL_TYPES; i++)
420 __rfkill_switch_all(i, rfkill_global_states[i].def);
421 mutex_unlock(&rfkill_global_mutex);
422}
423
424/**
425 * rfkill_remove_epo_lock - unlock state changes
426 *
427 * Used by rfkill-input to manually unlock state changes, when
428 * the EPO switch is deactivated.
429 */
430void rfkill_remove_epo_lock(void)
431{
432 if (atomic_read(&rfkill_input_disabled))
433 return;
434
435 mutex_lock(&rfkill_global_mutex);
436 rfkill_epo_lock_active = false;
437 mutex_unlock(&rfkill_global_mutex);
438}
439
440/**
441 * rfkill_is_epo_lock_active - returns true if EPO is active
442 *
443 * Returns 0 (false) if there is NOT an active EPO condition,
444 * and 1 (true) if there is an active EPO condition, which
445 * locks all radios in one of the BLOCKED states.
446 *
447 * Can be called in atomic context.
448 */
449bool rfkill_is_epo_lock_active(void)
450{
451 return rfkill_epo_lock_active;
452}
453
454/**
455 * rfkill_get_global_sw_state - returns global state for a type
456 * @type: the type to get the global state of
457 *
458 * Returns the current global state for a given wireless
459 * device type.
460 */
461bool rfkill_get_global_sw_state(const enum rfkill_type type)
462{
463 return rfkill_global_states[type].cur;
464}
465#endif
466
467void rfkill_set_global_sw_state(const enum rfkill_type type, bool blocked)
468{
469 BUG_ON(type == RFKILL_TYPE_ALL);
470
471 mutex_lock(&rfkill_global_mutex);
472
473 /* don't allow unblock when epo */
474 if (rfkill_epo_lock_active && !blocked)
475 goto out;
476
477 /* too late */
478 if (rfkill_states_default_locked & BIT(type))
479 goto out;
480
481 rfkill_states_default_locked |= BIT(type);
482
483 rfkill_global_states[type].cur = blocked;
484 rfkill_global_states[type].def = blocked;
485 out:
486 mutex_unlock(&rfkill_global_mutex);
487}
488EXPORT_SYMBOL(rfkill_set_global_sw_state);
489
490
491bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
492{
493 bool ret, change;
494
495 ret = __rfkill_set_hw_state(rfkill, blocked, &change);
496
497 if (!rfkill->registered)
498 return ret;
499
500 if (change)
501 schedule_work(&rfkill->uevent_work);
502
503 return ret;
504}
505EXPORT_SYMBOL(rfkill_set_hw_state);
506
507static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
508{
509 u32 bit = RFKILL_BLOCK_SW;
510
511 /* if in a ops->set_block right now, use other bit */
512 if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
513 bit = RFKILL_BLOCK_SW_PREV;
514
515 if (blocked)
516 rfkill->state |= bit;
517 else
518 rfkill->state &= ~bit;
519}
520
521bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
522{
523 unsigned long flags;
524 bool prev, hwblock;
525
526 BUG_ON(!rfkill);
527
528 spin_lock_irqsave(&rfkill->lock, flags);
529 prev = !!(rfkill->state & RFKILL_BLOCK_SW);
530 __rfkill_set_sw_state(rfkill, blocked);
531 hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
532 blocked = blocked || hwblock;
533 spin_unlock_irqrestore(&rfkill->lock, flags);
534
535 if (!rfkill->registered)
536 return blocked;
537
538 if (prev != blocked && !hwblock)
539 schedule_work(&rfkill->uevent_work);
540
541 rfkill_led_trigger_event(rfkill);
542
543 return blocked;
544}
545EXPORT_SYMBOL(rfkill_set_sw_state);
546
547void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
548{
549 unsigned long flags;
550 bool swprev, hwprev;
551
552 BUG_ON(!rfkill);
553
554 spin_lock_irqsave(&rfkill->lock, flags);
555
556 /*
557 * No need to care about prev/setblock ... this is for uevent only
558 * and that will get triggered by rfkill_set_block anyway.
559 */
560 swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
561 hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
562 __rfkill_set_sw_state(rfkill, sw);
563
564 spin_unlock_irqrestore(&rfkill->lock, flags);
565
566 if (!rfkill->registered)
567 return;
568
569 if (swprev != sw || hwprev != hw)
570 schedule_work(&rfkill->uevent_work);
571
572 rfkill_led_trigger_event(rfkill);
573}
574EXPORT_SYMBOL(rfkill_set_states);
575
576static ssize_t rfkill_name_show(struct device *dev,
577 struct device_attribute *attr,
578 char *buf)
579{
580 struct rfkill *rfkill = to_rfkill(dev);
581
582 return sprintf(buf, "%s\n", rfkill->name);
583}
584
585static const char *rfkill_get_type_str(enum rfkill_type type)
586{
587 switch (type) {
588 case RFKILL_TYPE_WLAN:
589 return "wlan";
590 case RFKILL_TYPE_BLUETOOTH:
591 return "bluetooth";
592 case RFKILL_TYPE_UWB:
593 return "ultrawideband";
594 case RFKILL_TYPE_WIMAX:
595 return "wimax";
596 case RFKILL_TYPE_WWAN:
597 return "wwan";
598 default:
599 BUG();
600 }
601
602 BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_WWAN + 1);
603}
604
605static ssize_t rfkill_type_show(struct device *dev,
606 struct device_attribute *attr,
607 char *buf)
608{
609 struct rfkill *rfkill = to_rfkill(dev);
610
611 return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
612}
613
614static ssize_t rfkill_idx_show(struct device *dev,
615 struct device_attribute *attr,
616 char *buf)
617{
618 struct rfkill *rfkill = to_rfkill(dev);
619
620 return sprintf(buf, "%d\n", rfkill->idx);
621}
622
623static u8 user_state_from_blocked(unsigned long state)
624{
625 if (state & RFKILL_BLOCK_HW)
626 return RFKILL_USER_STATE_HARD_BLOCKED;
627 if (state & RFKILL_BLOCK_SW)
628 return RFKILL_USER_STATE_SOFT_BLOCKED;
629
630 return RFKILL_USER_STATE_UNBLOCKED;
631}
632
633static ssize_t rfkill_state_show(struct device *dev,
634 struct device_attribute *attr,
635 char *buf)
636{
637 struct rfkill *rfkill = to_rfkill(dev);
638 unsigned long flags;
639 u32 state;
640
641 spin_lock_irqsave(&rfkill->lock, flags);
642 state = rfkill->state;
643 spin_unlock_irqrestore(&rfkill->lock, flags);
644
645 return sprintf(buf, "%d\n", user_state_from_blocked(state));
646}
647
648static ssize_t rfkill_state_store(struct device *dev,
649 struct device_attribute *attr,
650 const char *buf, size_t count)
651{
652 /*
653 * The intention was that userspace can only take control over
654 * a given device when/if rfkill-input doesn't control it due
655 * to user_claim. Since user_claim is currently unsupported,
656 * we never support changing the state from userspace -- this
657 * can be implemented again later.
658 */
659
660 return -EPERM;
661}
662
663static ssize_t rfkill_claim_show(struct device *dev,
664 struct device_attribute *attr,
665 char *buf)
666{
667 return sprintf(buf, "%d\n", 0);
668}
669
670static ssize_t rfkill_claim_store(struct device *dev,
671 struct device_attribute *attr,
672 const char *buf, size_t count)
673{
674 return -EOPNOTSUPP;
675}
676
677static struct device_attribute rfkill_dev_attrs[] = {
678 __ATTR(name, S_IRUGO, rfkill_name_show, NULL),
679 __ATTR(type, S_IRUGO, rfkill_type_show, NULL),
680 __ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
681 __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
682 __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
683 __ATTR_NULL
684};
685
686static void rfkill_release(struct device *dev)
687{
688 struct rfkill *rfkill = to_rfkill(dev);
689
690 kfree(rfkill);
691}
692
693static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
694{
695 struct rfkill *rfkill = to_rfkill(dev);
696 unsigned long flags;
697 u32 state;
698 int error;
699
700 error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
701 if (error)
702 return error;
703 error = add_uevent_var(env, "RFKILL_TYPE=%s",
704 rfkill_get_type_str(rfkill->type));
705 if (error)
706 return error;
707 spin_lock_irqsave(&rfkill->lock, flags);
708 state = rfkill->state;
709 spin_unlock_irqrestore(&rfkill->lock, flags);
710 error = add_uevent_var(env, "RFKILL_STATE=%d",
711 user_state_from_blocked(state));
712 return error;
713}
714
715void rfkill_pause_polling(struct rfkill *rfkill)
716{
717 BUG_ON(!rfkill);
718
719 if (!rfkill->ops->poll)
720 return;
721
722 cancel_delayed_work_sync(&rfkill->poll_work);
723}
724EXPORT_SYMBOL(rfkill_pause_polling);
725
726void rfkill_resume_polling(struct rfkill *rfkill)
727{
728 BUG_ON(!rfkill);
729
730 if (!rfkill->ops->poll)
731 return;
732
733 schedule_work(&rfkill->poll_work.work);
734}
735EXPORT_SYMBOL(rfkill_resume_polling);
736
737static int rfkill_suspend(struct device *dev, pm_message_t state)
738{
739 struct rfkill *rfkill = to_rfkill(dev);
740
741 rfkill_pause_polling(rfkill);
742
743 rfkill->suspended = true;
744
745 return 0;
746}
747
748static int rfkill_resume(struct device *dev)
749{
750 struct rfkill *rfkill = to_rfkill(dev);
751 bool cur;
752
753 mutex_lock(&rfkill_global_mutex);
754 cur = rfkill_global_states[rfkill->type].cur;
755 rfkill_set_block(rfkill, cur);
756 mutex_unlock(&rfkill_global_mutex);
757
758 rfkill->suspended = false;
759
760 schedule_work(&rfkill->uevent_work);
761
762 rfkill_resume_polling(rfkill);
763
764 return 0;
765}
766
767static struct class rfkill_class = {
768 .name = "rfkill",
769 .dev_release = rfkill_release,
770 .dev_attrs = rfkill_dev_attrs,
771 .dev_uevent = rfkill_dev_uevent,
772 .suspend = rfkill_suspend,
773 .resume = rfkill_resume,
774};
775
776bool rfkill_blocked(struct rfkill *rfkill)
777{
778 unsigned long flags;
779 u32 state;
780
781 spin_lock_irqsave(&rfkill->lock, flags);
782 state = rfkill->state;
783 spin_unlock_irqrestore(&rfkill->lock, flags);
784
785 return !!(state & RFKILL_BLOCK_ANY);
786}
787EXPORT_SYMBOL(rfkill_blocked);
788
789
790struct rfkill * __must_check rfkill_alloc(const char *name,
791 struct device *parent,
792 const enum rfkill_type type,
793 const struct rfkill_ops *ops,
794 void *ops_data)
795{
796 struct rfkill *rfkill;
797 struct device *dev;
798
799 if (WARN_ON(!ops))
800 return NULL;
801
802 if (WARN_ON(!ops->set_block))
803 return NULL;
804
805 if (WARN_ON(!name))
806 return NULL;
807
808 if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
809 return NULL;
810
811 rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
812 if (!rfkill)
813 return NULL;
814
815 spin_lock_init(&rfkill->lock);
816 INIT_LIST_HEAD(&rfkill->node);
817 rfkill->type = type;
818 rfkill->name = name;
819 rfkill->ops = ops;
820 rfkill->data = ops_data;
821
822 dev = &rfkill->dev;
823 dev->class = &rfkill_class;
824 dev->parent = parent;
825 device_initialize(dev);
826
827 return rfkill;
828}
829EXPORT_SYMBOL(rfkill_alloc);
830
831static void rfkill_poll(struct work_struct *work)
832{
833 struct rfkill *rfkill;
834
835 rfkill = container_of(work, struct rfkill, poll_work.work);
836
837 /*
838 * Poll hardware state -- driver will use one of the
839 * rfkill_set{,_hw,_sw}_state functions and use its
840 * return value to update the current status.
841 */
842 rfkill->ops->poll(rfkill, rfkill->data);
843
844 schedule_delayed_work(&rfkill->poll_work,
845 round_jiffies_relative(POLL_INTERVAL));
846}
847
848static void rfkill_uevent_work(struct work_struct *work)
849{
850 struct rfkill *rfkill;
851
852 rfkill = container_of(work, struct rfkill, uevent_work);
853
854 mutex_lock(&rfkill_global_mutex);
855 rfkill_event(rfkill);
856 mutex_unlock(&rfkill_global_mutex);
857}
858
859static void rfkill_sync_work(struct work_struct *work)
860{
861 struct rfkill *rfkill;
862 bool cur;
863
864 rfkill = container_of(work, struct rfkill, sync_work);
865
866 mutex_lock(&rfkill_global_mutex);
867 cur = rfkill_global_states[rfkill->type].cur;
868 rfkill_set_block(rfkill, cur);
869 mutex_unlock(&rfkill_global_mutex);
870}
871
872int __must_check rfkill_register(struct rfkill *rfkill)
873{
874 static unsigned long rfkill_no;
875 struct device *dev = &rfkill->dev;
876 int error;
877
878 BUG_ON(!rfkill);
879
880 mutex_lock(&rfkill_global_mutex);
881
882 if (rfkill->registered) {
883 error = -EALREADY;
884 goto unlock;
885 }
886
887 rfkill->idx = rfkill_no;
888 dev_set_name(dev, "rfkill%lu", rfkill_no);
889 rfkill_no++;
890
891 if (!(rfkill_states_default_locked & BIT(rfkill->type))) {
892 /* first of its kind */
893 BUILD_BUG_ON(NUM_RFKILL_TYPES >
894 sizeof(rfkill_states_default_locked) * 8);
895 rfkill_states_default_locked |= BIT(rfkill->type);
896 rfkill_global_states[rfkill->type].cur =
897 rfkill_global_states[rfkill->type].def;
898 }
899
900 list_add_tail(&rfkill->node, &rfkill_list);
901
902 error = device_add(dev);
903 if (error)
904 goto remove;
905
906 error = rfkill_led_trigger_register(rfkill);
907 if (error)
908 goto devdel;
909
910 rfkill->registered = true;
911
912 INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
913 INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
914 INIT_WORK(&rfkill->sync_work, rfkill_sync_work);
915
916 if (rfkill->ops->poll)
917 schedule_delayed_work(&rfkill->poll_work,
918 round_jiffies_relative(POLL_INTERVAL));
919 schedule_work(&rfkill->sync_work);
920
921 rfkill_send_events(rfkill, RFKILL_OP_ADD);
922
923 mutex_unlock(&rfkill_global_mutex);
924 return 0;
925
926 devdel:
927 device_del(&rfkill->dev);
928 remove:
929 list_del_init(&rfkill->node);
930 unlock:
931 mutex_unlock(&rfkill_global_mutex);
932 return error;
933}
934EXPORT_SYMBOL(rfkill_register);
935
936void rfkill_unregister(struct rfkill *rfkill)
937{
938 BUG_ON(!rfkill);
939
940 if (rfkill->ops->poll)
941 cancel_delayed_work_sync(&rfkill->poll_work);
942
943 cancel_work_sync(&rfkill->uevent_work);
944 cancel_work_sync(&rfkill->sync_work);
945
946 rfkill->registered = false;
947
948 device_del(&rfkill->dev);
949
950 mutex_lock(&rfkill_global_mutex);
951 rfkill_send_events(rfkill, RFKILL_OP_DEL);
952 list_del_init(&rfkill->node);
953 mutex_unlock(&rfkill_global_mutex);
954
955 rfkill_led_trigger_unregister(rfkill);
956}
957EXPORT_SYMBOL(rfkill_unregister);
958
959void rfkill_destroy(struct rfkill *rfkill)
960{
961 if (rfkill)
962 put_device(&rfkill->dev);
963}
964EXPORT_SYMBOL(rfkill_destroy);
965
966static int rfkill_fop_open(struct inode *inode, struct file *file)
967{
968 struct rfkill_data *data;
969 struct rfkill *rfkill;
970 struct rfkill_int_event *ev, *tmp;
971
972 data = kzalloc(sizeof(*data), GFP_KERNEL);
973 if (!data)
974 return -ENOMEM;
975
976 INIT_LIST_HEAD(&data->events);
977 mutex_init(&data->mtx);
978 init_waitqueue_head(&data->read_wait);
979
980 mutex_lock(&rfkill_global_mutex);
981 mutex_lock(&data->mtx);
982 /*
983 * start getting events from elsewhere but hold mtx to get
984 * startup events added first
985 */
986 list_add(&data->list, &rfkill_fds);
987
988 list_for_each_entry(rfkill, &rfkill_list, node) {
989 ev = kzalloc(sizeof(*ev), GFP_KERNEL);
990 if (!ev)
991 goto free;
992 rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
993 list_add_tail(&ev->list, &data->events);
994 }
995 mutex_unlock(&data->mtx);
996 mutex_unlock(&rfkill_global_mutex);
997
998 file->private_data = data;
999
1000 return nonseekable_open(inode, file);
1001
1002 free:
1003 mutex_unlock(&data->mtx);
1004 mutex_unlock(&rfkill_global_mutex);
1005 mutex_destroy(&data->mtx);
1006 list_for_each_entry_safe(ev, tmp, &data->events, list)
1007 kfree(ev);
1008 kfree(data);
1009 return -ENOMEM;
1010}
1011
1012static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
1013{
1014 struct rfkill_data *data = file->private_data;
1015 unsigned int res = POLLOUT | POLLWRNORM;
1016
1017 poll_wait(file, &data->read_wait, wait);
1018
1019 mutex_lock(&data->mtx);
1020 if (!list_empty(&data->events))
1021 res = POLLIN | POLLRDNORM;
1022 mutex_unlock(&data->mtx);
1023
1024 return res;
1025}
1026
1027static bool rfkill_readable(struct rfkill_data *data)
1028{
1029 bool r;
1030
1031 mutex_lock(&data->mtx);
1032 r = !list_empty(&data->events);
1033 mutex_unlock(&data->mtx);
1034
1035 return r;
1036}
1037
1038static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
1039 size_t count, loff_t *pos)
1040{
1041 struct rfkill_data *data = file->private_data;
1042 struct rfkill_int_event *ev;
1043 unsigned long sz;
1044 int ret;
1045
1046 mutex_lock(&data->mtx);
1047
1048 while (list_empty(&data->events)) {
1049 if (file->f_flags & O_NONBLOCK) {
1050 ret = -EAGAIN;
1051 goto out;
1052 }
1053 mutex_unlock(&data->mtx);
1054 ret = wait_event_interruptible(data->read_wait,
1055 rfkill_readable(data));
1056 mutex_lock(&data->mtx);
1057
1058 if (ret)
1059 goto out;
1060 }
1061
1062 ev = list_first_entry(&data->events, struct rfkill_int_event,
1063 list);
1064
1065 sz = min_t(unsigned long, sizeof(ev->ev), count);
1066 ret = sz;
1067 if (copy_to_user(buf, &ev->ev, sz))
1068 ret = -EFAULT;
1069
1070 list_del(&ev->list);
1071 kfree(ev);
1072 out:
1073 mutex_unlock(&data->mtx);
1074 return ret;
1075}
1076
1077static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
1078 size_t count, loff_t *pos)
1079{
1080 struct rfkill *rfkill;
1081 struct rfkill_event ev;
1082
1083 /* we don't need the 'hard' variable but accept it */
1084 if (count < sizeof(ev) - 1)
1085 return -EINVAL;
1086
1087 if (copy_from_user(&ev, buf, sizeof(ev) - 1))
1088 return -EFAULT;
1089
1090 if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL)
1091 return -EINVAL;
1092
1093 if (ev.type >= NUM_RFKILL_TYPES)
1094 return -EINVAL;
1095
1096 mutex_lock(&rfkill_global_mutex);
1097
1098 if (ev.op == RFKILL_OP_CHANGE_ALL) {
1099 if (ev.type == RFKILL_TYPE_ALL) {
1100 enum rfkill_type i;
1101 for (i = 0; i < NUM_RFKILL_TYPES; i++)
1102 rfkill_global_states[i].cur = ev.soft;
1103 } else {
1104 rfkill_global_states[ev.type].cur = ev.soft;
1105 }
1106 }
1107
1108 list_for_each_entry(rfkill, &rfkill_list, node) {
1109 if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL)
1110 continue;
1111
1112 if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL)
1113 continue;
1114
1115 rfkill_set_block(rfkill, ev.soft);
1116 }
1117 mutex_unlock(&rfkill_global_mutex);
1118
1119 return count;
1120}
1121
1122static int rfkill_fop_release(struct inode *inode, struct file *file)
1123{
1124 struct rfkill_data *data = file->private_data;
1125 struct rfkill_int_event *ev, *tmp;
1126
1127 mutex_lock(&rfkill_global_mutex);
1128 list_del(&data->list);
1129 mutex_unlock(&rfkill_global_mutex);
1130
1131 mutex_destroy(&data->mtx);
1132 list_for_each_entry_safe(ev, tmp, &data->events, list)
1133 kfree(ev);
1134
1135#ifdef CONFIG_RFKILL_INPUT
1136 if (data->input_handler)
1137 atomic_dec(&rfkill_input_disabled);
1138#endif
1139
1140 kfree(data);
1141
1142 return 0;
1143}
1144
1145#ifdef CONFIG_RFKILL_INPUT
1146static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
1147 unsigned long arg)
1148{
1149 struct rfkill_data *data = file->private_data;
1150
1151 if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
1152 return -ENOSYS;
1153
1154 if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
1155 return -ENOSYS;
1156
1157 mutex_lock(&data->mtx);
1158
1159 if (!data->input_handler) {
1160 atomic_inc(&rfkill_input_disabled);
1161 data->input_handler = true;
1162 }
1163
1164 mutex_unlock(&data->mtx);
1165
1166 return 0;
1167}
1168#endif
1169
1170static const struct file_operations rfkill_fops = {
1171 .open = rfkill_fop_open,
1172 .read = rfkill_fop_read,
1173 .write = rfkill_fop_write,
1174 .poll = rfkill_fop_poll,
1175 .release = rfkill_fop_release,
1176#ifdef CONFIG_RFKILL_INPUT
1177 .unlocked_ioctl = rfkill_fop_ioctl,
1178 .compat_ioctl = rfkill_fop_ioctl,
1179#endif
1180};
1181
1182static struct miscdevice rfkill_miscdev = {
1183 .name = "rfkill",
1184 .fops = &rfkill_fops,
1185 .minor = MISC_DYNAMIC_MINOR,
1186};
1187
1188static int __init rfkill_init(void)
1189{
1190 int error;
1191 int i;
1192
1193 for (i = 0; i < NUM_RFKILL_TYPES; i++)
1194 rfkill_global_states[i].def = !rfkill_default_state;
1195
1196 error = class_register(&rfkill_class);
1197 if (error)
1198 goto out;
1199
1200 error = misc_register(&rfkill_miscdev);
1201 if (error) {
1202 class_unregister(&rfkill_class);
1203 goto out;
1204 }
1205
1206#ifdef CONFIG_RFKILL_INPUT
1207 error = rfkill_handler_init();
1208 if (error) {
1209 misc_deregister(&rfkill_miscdev);
1210 class_unregister(&rfkill_class);
1211 goto out;
1212 }
1213#endif
1214
1215 out:
1216 return error;
1217}
1218subsys_initcall(rfkill_init);
1219
1220static void __exit rfkill_exit(void)
1221{
1222#ifdef CONFIG_RFKILL_INPUT
1223 rfkill_handler_exit();
1224#endif
1225 misc_deregister(&rfkill_miscdev);
1226 class_unregister(&rfkill_class);
1227}
1228module_exit(rfkill_exit);
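
Besides the sysfs class, core.c registers the /dev/rfkill misc device: each newly opened fd is seeded with one RFKILL_OP_ADD event per already-registered switch and then receives ADD/DEL/CHANGE events, while writes of RFKILL_OP_CHANGE or RFKILL_OP_CHANGE_ALL soft-block or unblock switches. A minimal userspace reader, assuming the struct rfkill_event layout (idx, type, op, soft, hard) from the new <linux/rfkill.h>:

	/* Sketch: dump events from /dev/rfkill (userspace). */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <linux/rfkill.h>

	int main(void)
	{
		struct rfkill_event ev;
		int fd = open("/dev/rfkill", O_RDONLY);

		if (fd < 0) {
			perror("open /dev/rfkill");
			return 1;
		}

		/* blocks until the core queues an event for this fd */
		while (read(fd, &ev, sizeof(ev)) > 0)
			printf("idx %u type %u op %u soft %u hard %u\n",
			       ev.idx, ev.type, ev.op, ev.soft, ev.hard);

		close(fd);
		return 0;
	}
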
diff --git a/net/rfkill/input.c b/net/rfkill/input.c
new file mode 100644
index 000000000000..a7295ad5f9cb
--- /dev/null
+++ b/net/rfkill/input.c
@@ -0,0 +1,342 @@
1/*
2 * Input layer to RF Kill interface connector
3 *
4 * Copyright (c) 2007 Dmitry Torokhov
5 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published
9 * by the Free Software Foundation.
10 *
11 * If you ever run into a situation in which you have a SW_ type rfkill
12 * input device, then you can revive code that was removed in the patch
13 * "rfkill-input: remove unused code".
14 */
15
16#include <linux/input.h>
17#include <linux/slab.h>
18#include <linux/workqueue.h>
19#include <linux/init.h>
20#include <linux/rfkill.h>
21#include <linux/sched.h>
22
23#include "rfkill.h"
24
25enum rfkill_input_master_mode {
26 RFKILL_INPUT_MASTER_UNLOCK = 0,
27 RFKILL_INPUT_MASTER_RESTORE = 1,
28 RFKILL_INPUT_MASTER_UNBLOCKALL = 2,
29 NUM_RFKILL_INPUT_MASTER_MODES
30};
31
32/* Delay (in ms) between consecutive switch ops */
33#define RFKILL_OPS_DELAY 200
34
35static enum rfkill_input_master_mode rfkill_master_switch_mode =
36 RFKILL_INPUT_MASTER_UNBLOCKALL;
37module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0);
38MODULE_PARM_DESC(master_switch_mode,
39 "SW_RFKILL_ALL ON should: 0=do nothing (only unlock); 1=restore; 2=unblock all");
40
41static spinlock_t rfkill_op_lock;
42static bool rfkill_op_pending;
43static unsigned long rfkill_sw_pending[BITS_TO_LONGS(NUM_RFKILL_TYPES)];
44static unsigned long rfkill_sw_state[BITS_TO_LONGS(NUM_RFKILL_TYPES)];
45
46enum rfkill_sched_op {
47 RFKILL_GLOBAL_OP_EPO = 0,
48 RFKILL_GLOBAL_OP_RESTORE,
49 RFKILL_GLOBAL_OP_UNLOCK,
50 RFKILL_GLOBAL_OP_UNBLOCK,
51};
52
53static enum rfkill_sched_op rfkill_master_switch_op;
54static enum rfkill_sched_op rfkill_op;
55
56static void __rfkill_handle_global_op(enum rfkill_sched_op op)
57{
58 unsigned int i;
59
60 switch (op) {
61 case RFKILL_GLOBAL_OP_EPO:
62 rfkill_epo();
63 break;
64 case RFKILL_GLOBAL_OP_RESTORE:
65 rfkill_restore_states();
66 break;
67 case RFKILL_GLOBAL_OP_UNLOCK:
68 rfkill_remove_epo_lock();
69 break;
70 case RFKILL_GLOBAL_OP_UNBLOCK:
71 rfkill_remove_epo_lock();
72 for (i = 0; i < NUM_RFKILL_TYPES; i++)
73 rfkill_switch_all(i, false);
74 break;
75 default:
76 /* memory corruption or bug, fail safely */
77 rfkill_epo();
78 WARN(1, "Unknown requested operation %d! "
79 "rfkill Emergency Power Off activated\n",
80 op);
81 }
82}
83
84static void __rfkill_handle_normal_op(const enum rfkill_type type,
85 const bool complement)
86{
87 bool blocked;
88
89 blocked = rfkill_get_global_sw_state(type);
90 if (complement)
91 blocked = !blocked;
92
93 rfkill_switch_all(type, blocked);
94}
95
96static void rfkill_op_handler(struct work_struct *work)
97{
98 unsigned int i;
99 bool c;
100
101 spin_lock_irq(&rfkill_op_lock);
102 do {
103 if (rfkill_op_pending) {
104 enum rfkill_sched_op op = rfkill_op;
105 rfkill_op_pending = false;
106 memset(rfkill_sw_pending, 0,
107 sizeof(rfkill_sw_pending));
108 spin_unlock_irq(&rfkill_op_lock);
109
110 __rfkill_handle_global_op(op);
111
112 spin_lock_irq(&rfkill_op_lock);
113
114 /*
115 * handle global ops first -- during unlocked period
116 * we might have gotten a new global op.
117 */
118 if (rfkill_op_pending)
119 continue;
120 }
121
122 if (rfkill_is_epo_lock_active())
123 continue;
124
125 for (i = 0; i < NUM_RFKILL_TYPES; i++) {
126 if (__test_and_clear_bit(i, rfkill_sw_pending)) {
127 c = __test_and_clear_bit(i, rfkill_sw_state);
128 spin_unlock_irq(&rfkill_op_lock);
129
130 __rfkill_handle_normal_op(i, c);
131
132 spin_lock_irq(&rfkill_op_lock);
133 }
134 }
135 } while (rfkill_op_pending);
136 spin_unlock_irq(&rfkill_op_lock);
137}
138
139static DECLARE_DELAYED_WORK(rfkill_op_work, rfkill_op_handler);
140static unsigned long rfkill_last_scheduled;
141
142static unsigned long rfkill_ratelimit(const unsigned long last)
143{
144 const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);
145 return (time_after(jiffies, last + delay)) ? 0 : delay;
146}
147
148static void rfkill_schedule_ratelimited(void)
149{
150 if (delayed_work_pending(&rfkill_op_work))
151 return;
152 schedule_delayed_work(&rfkill_op_work,
153 rfkill_ratelimit(rfkill_last_scheduled));
154 rfkill_last_scheduled = jiffies;
155}
156
157static void rfkill_schedule_global_op(enum rfkill_sched_op op)
158{
159 unsigned long flags;
160
161 spin_lock_irqsave(&rfkill_op_lock, flags);
162 rfkill_op = op;
163 rfkill_op_pending = true;
164 if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
165 /* bypass the limiter for EPO */
166 cancel_delayed_work(&rfkill_op_work);
167 schedule_delayed_work(&rfkill_op_work, 0);
168 rfkill_last_scheduled = jiffies;
169 } else
170 rfkill_schedule_ratelimited();
171 spin_unlock_irqrestore(&rfkill_op_lock, flags);
172}
173
174static void rfkill_schedule_toggle(enum rfkill_type type)
175{
176 unsigned long flags;
177
178 if (rfkill_is_epo_lock_active())
179 return;
180
181 spin_lock_irqsave(&rfkill_op_lock, flags);
182 if (!rfkill_op_pending) {
183 __set_bit(type, rfkill_sw_pending);
184 __change_bit(type, rfkill_sw_state);
185 rfkill_schedule_ratelimited();
186 }
187 spin_unlock_irqrestore(&rfkill_op_lock, flags);
188}
189
190static void rfkill_schedule_evsw_rfkillall(int state)
191{
192 if (state)
193 rfkill_schedule_global_op(rfkill_master_switch_op);
194 else
195 rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
196}
197
198static void rfkill_event(struct input_handle *handle, unsigned int type,
199 unsigned int code, int data)
200{
201 if (type == EV_KEY && data == 1) {
202 switch (code) {
203 case KEY_WLAN:
204 rfkill_schedule_toggle(RFKILL_TYPE_WLAN);
205 break;
206 case KEY_BLUETOOTH:
207 rfkill_schedule_toggle(RFKILL_TYPE_BLUETOOTH);
208 break;
209 case KEY_UWB:
210 rfkill_schedule_toggle(RFKILL_TYPE_UWB);
211 break;
212 case KEY_WIMAX:
213 rfkill_schedule_toggle(RFKILL_TYPE_WIMAX);
214 break;
215 }
216 } else if (type == EV_SW && code == SW_RFKILL_ALL)
217 rfkill_schedule_evsw_rfkillall(data);
218}
219
220static int rfkill_connect(struct input_handler *handler, struct input_dev *dev,
221 const struct input_device_id *id)
222{
223 struct input_handle *handle;
224 int error;
225
226 handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
227 if (!handle)
228 return -ENOMEM;
229
230 handle->dev = dev;
231 handle->handler = handler;
232 handle->name = "rfkill";
233
234 /* causes rfkill_start() to be called */
235 error = input_register_handle(handle);
236 if (error)
237 goto err_free_handle;
238
239 error = input_open_device(handle);
240 if (error)
241 goto err_unregister_handle;
242
243 return 0;
244
245 err_unregister_handle:
246 input_unregister_handle(handle);
247 err_free_handle:
248 kfree(handle);
249 return error;
250}
251
252static void rfkill_start(struct input_handle *handle)
253{
254 /*
255	 * Take event_lock to guard against configuration changes; we
256 * should be able to deal with concurrency with rfkill_event()
257 * just fine (which event_lock will also avoid).
258 */
259 spin_lock_irq(&handle->dev->event_lock);
260
261 if (test_bit(EV_SW, handle->dev->evbit) &&
262 test_bit(SW_RFKILL_ALL, handle->dev->swbit))
263 rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL,
264 handle->dev->sw));
265
266 spin_unlock_irq(&handle->dev->event_lock);
267}
268
269static void rfkill_disconnect(struct input_handle *handle)
270{
271 input_close_device(handle);
272 input_unregister_handle(handle);
273 kfree(handle);
274}
275
276static const struct input_device_id rfkill_ids[] = {
277 {
278 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
279 .evbit = { BIT_MASK(EV_KEY) },
280 .keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) },
281 },
282 {
283 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
284 .evbit = { BIT_MASK(EV_KEY) },
285 .keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) },
286 },
287 {
288 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
289 .evbit = { BIT_MASK(EV_KEY) },
290 .keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) },
291 },
292 {
293 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
294 .evbit = { BIT_MASK(EV_KEY) },
295 .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
296 },
297 {
298 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
299 .evbit = { BIT(EV_SW) },
300 .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
301 },
302 { }
303};
304
305static struct input_handler rfkill_handler = {
306 .name = "rfkill",
307 .event = rfkill_event,
308 .connect = rfkill_connect,
309 .start = rfkill_start,
310 .disconnect = rfkill_disconnect,
311 .id_table = rfkill_ids,
312};
313
314int __init rfkill_handler_init(void)
315{
316 switch (rfkill_master_switch_mode) {
317 case RFKILL_INPUT_MASTER_UNBLOCKALL:
318 rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNBLOCK;
319 break;
320 case RFKILL_INPUT_MASTER_RESTORE:
321 rfkill_master_switch_op = RFKILL_GLOBAL_OP_RESTORE;
322 break;
323 case RFKILL_INPUT_MASTER_UNLOCK:
324 rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNLOCK;
325 break;
326 default:
327 return -EINVAL;
328 }
329
330 spin_lock_init(&rfkill_op_lock);
331
332 /* Avoid delay at first schedule */
333 rfkill_last_scheduled =
334 jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1;
335 return input_register_handler(&rfkill_handler);
336}
337
338void __exit rfkill_handler_exit(void)
339{
340 input_unregister_handler(&rfkill_handler);
341 cancel_delayed_work_sync(&rfkill_op_work);
342}
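
For reference, rfkill_ratelimit() above never returns the time remaining: it yields either zero (the last run was more than RFKILL_OPS_DELAY ago) or the full 200 ms delay again. A minimal userspace model of that decision, assuming plain C with millisecond timestamps standing in for jiffies (not part of this patch):

#include <stdio.h>

#define RFKILL_OPS_DELAY 200	/* ms, as in the handler above */

/* mirrors rfkill_ratelimit(): zero once the delay has passed,
 * otherwise the full delay again */
static unsigned long ratelimit(unsigned long now_ms, unsigned long last_ms)
{
	return (now_ms > last_ms + RFKILL_OPS_DELAY) ? 0 : RFKILL_OPS_DELAY;
}

int main(void)
{
	unsigned long last = 0;
	unsigned long now[] = { 50, 150, 201, 1000 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("now=%4lu ms -> schedule in %lu ms\n",
		       now[i], ratelimit(now[i], last));
	return 0;
}

With last = 0 this prints a 200 ms delay for the first two samples and zero afterwards, which is the behaviour rfkill_schedule_ratelimited() relies on.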
diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c
deleted file mode 100644
index 60a34f3b5f65..000000000000
--- a/net/rfkill/rfkill-input.c
+++ /dev/null
@@ -1,390 +0,0 @@
1/*
2 * Input layer to RF Kill interface connector
3 *
4 * Copyright (c) 2007 Dmitry Torokhov
5 */
6
7/*
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published
10 * by the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14#include <linux/input.h>
15#include <linux/slab.h>
16#include <linux/workqueue.h>
17#include <linux/init.h>
18#include <linux/rfkill.h>
19#include <linux/sched.h>
20
21#include "rfkill-input.h"
22
23MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>");
24MODULE_DESCRIPTION("Input layer to RF switch connector");
25MODULE_LICENSE("GPL");
26
27enum rfkill_input_master_mode {
28 RFKILL_INPUT_MASTER_DONOTHING = 0,
29 RFKILL_INPUT_MASTER_RESTORE = 1,
30 RFKILL_INPUT_MASTER_UNBLOCKALL = 2,
31 RFKILL_INPUT_MASTER_MAX, /* marker */
32};
33
34/* Delay (in ms) between consecutive switch ops */
35#define RFKILL_OPS_DELAY 200
36
37static enum rfkill_input_master_mode rfkill_master_switch_mode =
38 RFKILL_INPUT_MASTER_UNBLOCKALL;
39module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0);
40MODULE_PARM_DESC(master_switch_mode,
41 "SW_RFKILL_ALL ON should: 0=do nothing; 1=restore; 2=unblock all");
42
43enum rfkill_global_sched_op {
44 RFKILL_GLOBAL_OP_EPO = 0,
45 RFKILL_GLOBAL_OP_RESTORE,
46 RFKILL_GLOBAL_OP_UNLOCK,
47 RFKILL_GLOBAL_OP_UNBLOCK,
48};
49
50struct rfkill_task {
51 struct delayed_work dwork;
52
53 /* ensures that task is serialized */
54 struct mutex mutex;
55
56 /* protects everything below */
57 spinlock_t lock;
58
59 /* pending regular switch operations (1=pending) */
60 unsigned long sw_pending[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
61
62 /* should the state be complemented (1=yes) */
63 unsigned long sw_togglestate[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
64
65 bool global_op_pending;
66 enum rfkill_global_sched_op op;
67
68 /* last time it was scheduled */
69 unsigned long last_scheduled;
70};
71
72static void __rfkill_handle_global_op(enum rfkill_global_sched_op op)
73{
74 unsigned int i;
75
76 switch (op) {
77 case RFKILL_GLOBAL_OP_EPO:
78 rfkill_epo();
79 break;
80 case RFKILL_GLOBAL_OP_RESTORE:
81 rfkill_restore_states();
82 break;
83 case RFKILL_GLOBAL_OP_UNLOCK:
84 rfkill_remove_epo_lock();
85 break;
86 case RFKILL_GLOBAL_OP_UNBLOCK:
87 rfkill_remove_epo_lock();
88 for (i = 0; i < RFKILL_TYPE_MAX; i++)
89 rfkill_switch_all(i, RFKILL_STATE_UNBLOCKED);
90 break;
91 default:
92 /* memory corruption or bug, fail safely */
93 rfkill_epo();
94 WARN(1, "Unknown requested operation %d! "
95 "rfkill Emergency Power Off activated\n",
96 op);
97 }
98}
99
100static void __rfkill_handle_normal_op(const enum rfkill_type type,
101 const bool c)
102{
103 enum rfkill_state state;
104
105 state = rfkill_get_global_state(type);
106 if (c)
107 state = rfkill_state_complement(state);
108
109 rfkill_switch_all(type, state);
110}
111
112static void rfkill_task_handler(struct work_struct *work)
113{
114 struct rfkill_task *task = container_of(work,
115 struct rfkill_task, dwork.work);
116 bool doit = true;
117
118 mutex_lock(&task->mutex);
119
120 spin_lock_irq(&task->lock);
121 while (doit) {
122 if (task->global_op_pending) {
123 enum rfkill_global_sched_op op = task->op;
124 task->global_op_pending = false;
125 memset(task->sw_pending, 0, sizeof(task->sw_pending));
126 spin_unlock_irq(&task->lock);
127
128 __rfkill_handle_global_op(op);
129
130 /* make sure we do at least one pass with
131 * !task->global_op_pending */
132 spin_lock_irq(&task->lock);
133 continue;
134 } else if (!rfkill_is_epo_lock_active()) {
135 unsigned int i = 0;
136
137 while (!task->global_op_pending &&
138 i < RFKILL_TYPE_MAX) {
139 if (test_and_clear_bit(i, task->sw_pending)) {
140 bool c;
141 c = test_and_clear_bit(i,
142 task->sw_togglestate);
143 spin_unlock_irq(&task->lock);
144
145 __rfkill_handle_normal_op(i, c);
146
147 spin_lock_irq(&task->lock);
148 }
149 i++;
150 }
151 }
152 doit = task->global_op_pending;
153 }
154 spin_unlock_irq(&task->lock);
155
156 mutex_unlock(&task->mutex);
157}
158
159static struct rfkill_task rfkill_task = {
160 .dwork = __DELAYED_WORK_INITIALIZER(rfkill_task.dwork,
161 rfkill_task_handler),
162 .mutex = __MUTEX_INITIALIZER(rfkill_task.mutex),
163 .lock = __SPIN_LOCK_UNLOCKED(rfkill_task.lock),
164};
165
166static unsigned long rfkill_ratelimit(const unsigned long last)
167{
168 const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);
169 return (time_after(jiffies, last + delay)) ? 0 : delay;
170}
171
172static void rfkill_schedule_ratelimited(void)
173{
174 if (!delayed_work_pending(&rfkill_task.dwork)) {
175 schedule_delayed_work(&rfkill_task.dwork,
176 rfkill_ratelimit(rfkill_task.last_scheduled));
177 rfkill_task.last_scheduled = jiffies;
178 }
179}
180
181static void rfkill_schedule_global_op(enum rfkill_global_sched_op op)
182{
183 unsigned long flags;
184
185 spin_lock_irqsave(&rfkill_task.lock, flags);
186 rfkill_task.op = op;
187 rfkill_task.global_op_pending = true;
188 if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
189 /* bypass the limiter for EPO */
190 cancel_delayed_work(&rfkill_task.dwork);
191 schedule_delayed_work(&rfkill_task.dwork, 0);
192 rfkill_task.last_scheduled = jiffies;
193 } else
194 rfkill_schedule_ratelimited();
195 spin_unlock_irqrestore(&rfkill_task.lock, flags);
196}
197
198static void rfkill_schedule_toggle(enum rfkill_type type)
199{
200 unsigned long flags;
201
202 if (rfkill_is_epo_lock_active())
203 return;
204
205 spin_lock_irqsave(&rfkill_task.lock, flags);
206 if (!rfkill_task.global_op_pending) {
207 set_bit(type, rfkill_task.sw_pending);
208 change_bit(type, rfkill_task.sw_togglestate);
209 rfkill_schedule_ratelimited();
210 }
211 spin_unlock_irqrestore(&rfkill_task.lock, flags);
212}
213
214static void rfkill_schedule_evsw_rfkillall(int state)
215{
216 if (state) {
217 switch (rfkill_master_switch_mode) {
218 case RFKILL_INPUT_MASTER_UNBLOCKALL:
219 rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNBLOCK);
220 break;
221 case RFKILL_INPUT_MASTER_RESTORE:
222 rfkill_schedule_global_op(RFKILL_GLOBAL_OP_RESTORE);
223 break;
224 case RFKILL_INPUT_MASTER_DONOTHING:
225 rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNLOCK);
226 break;
227 default:
228 /* memory corruption or driver bug! fail safely */
229 rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
230 WARN(1, "Unknown rfkill_master_switch_mode (%d), "
231 "driver bug or memory corruption detected!\n",
232 rfkill_master_switch_mode);
233 break;
234 }
235 } else
236 rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
237}
238
239static void rfkill_event(struct input_handle *handle, unsigned int type,
240 unsigned int code, int data)
241{
242 if (type == EV_KEY && data == 1) {
243 enum rfkill_type t;
244
245 switch (code) {
246 case KEY_WLAN:
247 t = RFKILL_TYPE_WLAN;
248 break;
249 case KEY_BLUETOOTH:
250 t = RFKILL_TYPE_BLUETOOTH;
251 break;
252 case KEY_UWB:
253 t = RFKILL_TYPE_UWB;
254 break;
255 case KEY_WIMAX:
256 t = RFKILL_TYPE_WIMAX;
257 break;
258 default:
259 return;
260 }
261 rfkill_schedule_toggle(t);
262 return;
263 } else if (type == EV_SW) {
264 switch (code) {
265 case SW_RFKILL_ALL:
266 rfkill_schedule_evsw_rfkillall(data);
267 return;
268 default:
269 return;
270 }
271 }
272}
273
274static int rfkill_connect(struct input_handler *handler, struct input_dev *dev,
275 const struct input_device_id *id)
276{
277 struct input_handle *handle;
278 int error;
279
280 handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
281 if (!handle)
282 return -ENOMEM;
283
284 handle->dev = dev;
285 handle->handler = handler;
286 handle->name = "rfkill";
287
288 /* causes rfkill_start() to be called */
289 error = input_register_handle(handle);
290 if (error)
291 goto err_free_handle;
292
293 error = input_open_device(handle);
294 if (error)
295 goto err_unregister_handle;
296
297 return 0;
298
299 err_unregister_handle:
300 input_unregister_handle(handle);
301 err_free_handle:
302 kfree(handle);
303 return error;
304}
305
306static void rfkill_start(struct input_handle *handle)
307{
308 /* Take event_lock to guard against configuration changes, we
309 * should be able to deal with concurrency with rfkill_event()
310 * just fine (which event_lock will also avoid). */
311 spin_lock_irq(&handle->dev->event_lock);
312
313 if (test_bit(EV_SW, handle->dev->evbit)) {
314 if (test_bit(SW_RFKILL_ALL, handle->dev->swbit))
315 rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL,
316 handle->dev->sw));
317 /* add resync for further EV_SW events here */
318 }
319
320 spin_unlock_irq(&handle->dev->event_lock);
321}
322
323static void rfkill_disconnect(struct input_handle *handle)
324{
325 input_close_device(handle);
326 input_unregister_handle(handle);
327 kfree(handle);
328}
329
330static const struct input_device_id rfkill_ids[] = {
331 {
332 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
333 .evbit = { BIT_MASK(EV_KEY) },
334 .keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) },
335 },
336 {
337 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
338 .evbit = { BIT_MASK(EV_KEY) },
339 .keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) },
340 },
341 {
342 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
343 .evbit = { BIT_MASK(EV_KEY) },
344 .keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) },
345 },
346 {
347 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
348 .evbit = { BIT_MASK(EV_KEY) },
349 .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
350 },
351 {
352 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
353 .evbit = { BIT(EV_SW) },
354 .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
355 },
356 { }
357};
358
359static struct input_handler rfkill_handler = {
360 .event = rfkill_event,
361 .connect = rfkill_connect,
362 .disconnect = rfkill_disconnect,
363 .start = rfkill_start,
364 .name = "rfkill",
365 .id_table = rfkill_ids,
366};
367
368static int __init rfkill_handler_init(void)
369{
370 if (rfkill_master_switch_mode >= RFKILL_INPUT_MASTER_MAX)
371 return -EINVAL;
372
373 /*
374 * The penalty to not doing this is a possible RFKILL_OPS_DELAY delay
375 * at the first use. Acceptable, but if we can avoid it, why not?
376 */
377 rfkill_task.last_scheduled =
378 jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1;
379 return input_register_handler(&rfkill_handler);
380}
381
382static void __exit rfkill_handler_exit(void)
383{
384 input_unregister_handler(&rfkill_handler);
385 cancel_delayed_work_sync(&rfkill_task.dwork);
386 rfkill_remove_epo_lock();
387}
388
389module_init(rfkill_handler_init);
390module_exit(rfkill_handler_exit);
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
deleted file mode 100644
index 4f5a83183c95..000000000000
--- a/net/rfkill/rfkill.c
+++ /dev/null
@@ -1,855 +0,0 @@
1/*
2 * Copyright (C) 2006 - 2007 Ivo van Doorn
3 * Copyright (C) 2007 Dmitry Torokhov
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the
17 * Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/workqueue.h>
25#include <linux/capability.h>
26#include <linux/list.h>
27#include <linux/mutex.h>
28#include <linux/rfkill.h>
29
30/* Get declaration of rfkill_switch_all() to shut up sparse. */
31#include "rfkill-input.h"
32
33
34MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
35MODULE_VERSION("1.0");
36MODULE_DESCRIPTION("RF switch support");
37MODULE_LICENSE("GPL");
38
39static LIST_HEAD(rfkill_list); /* list of registered rf switches */
40static DEFINE_MUTEX(rfkill_global_mutex);
41
42static unsigned int rfkill_default_state = RFKILL_STATE_UNBLOCKED;
43module_param_named(default_state, rfkill_default_state, uint, 0444);
44MODULE_PARM_DESC(default_state,
45 "Default initial state for all radio types, 0 = radio off");
46
47struct rfkill_gsw_state {
48 enum rfkill_state current_state;
49 enum rfkill_state default_state;
50};
51
52static struct rfkill_gsw_state rfkill_global_states[RFKILL_TYPE_MAX];
53static unsigned long rfkill_states_lockdflt[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
54static bool rfkill_epo_lock_active;
55
56
57#ifdef CONFIG_RFKILL_LEDS
58static void rfkill_led_trigger(struct rfkill *rfkill,
59 enum rfkill_state state)
60{
61 struct led_trigger *led = &rfkill->led_trigger;
62
63 if (!led->name)
64 return;
65 if (state != RFKILL_STATE_UNBLOCKED)
66 led_trigger_event(led, LED_OFF);
67 else
68 led_trigger_event(led, LED_FULL);
69}
70
71static void rfkill_led_trigger_activate(struct led_classdev *led)
72{
73 struct rfkill *rfkill = container_of(led->trigger,
74 struct rfkill, led_trigger);
75
76 rfkill_led_trigger(rfkill, rfkill->state);
77}
78#else
79static inline void rfkill_led_trigger(struct rfkill *rfkill,
80 enum rfkill_state state)
81{
82}
83#endif /* CONFIG_RFKILL_LEDS */
84
85static void rfkill_uevent(struct rfkill *rfkill)
86{
87 kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
88}
89
90static void update_rfkill_state(struct rfkill *rfkill)
91{
92 enum rfkill_state newstate, oldstate;
93
94 if (rfkill->get_state) {
95 mutex_lock(&rfkill->mutex);
96 if (!rfkill->get_state(rfkill->data, &newstate)) {
97 oldstate = rfkill->state;
98 rfkill->state = newstate;
99 if (oldstate != newstate)
100 rfkill_uevent(rfkill);
101 }
102 mutex_unlock(&rfkill->mutex);
103 }
104 rfkill_led_trigger(rfkill, rfkill->state);
105}
106
107/**
108 * rfkill_toggle_radio - wrapper for toggle_radio hook
109 * @rfkill: the rfkill struct to use
110 * @force: calls toggle_radio even if cache says it is not needed,
111 * and also makes sure notifications of the state will be
112 * sent even if it didn't change
113 * @state: the new state to call toggle_radio() with
114 *
115 * Calls rfkill->toggle_radio, enforcing the API for toggle_radio
116 * calls and handling all the red tape such as issuing notifications
117 * if the call is successful.
118 *
119 * Suspended devices are not touched at all, and -EAGAIN is returned.
120 *
121 * Note that the @force parameter cannot override a (possibly cached)
122 * state of RFKILL_STATE_HARD_BLOCKED. Any device making use of
123 * RFKILL_STATE_HARD_BLOCKED implements either get_state() or
124 * rfkill_force_state(), so the cache either is bypassed or valid.
125 *
126 * Note that we do call toggle_radio for RFKILL_STATE_SOFT_BLOCKED
127 * even if the radio is in RFKILL_STATE_HARD_BLOCKED state, so as to
128 * give the driver a hint that it should double-BLOCK the transmitter.
129 *
130 * Caller must have acquired rfkill->mutex.
131 */
132static int rfkill_toggle_radio(struct rfkill *rfkill,
133 enum rfkill_state state,
134 int force)
135{
136 int retval = 0;
137 enum rfkill_state oldstate, newstate;
138
139 if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
140 return -EBUSY;
141
142 oldstate = rfkill->state;
143
144 if (rfkill->get_state && !force &&
145 !rfkill->get_state(rfkill->data, &newstate)) {
146 rfkill->state = newstate;
147 }
148
149 switch (state) {
150 case RFKILL_STATE_HARD_BLOCKED:
151 /* typically happens when refreshing hardware state,
152 * such as on resume */
153 state = RFKILL_STATE_SOFT_BLOCKED;
154 break;
155 case RFKILL_STATE_UNBLOCKED:
156 /* force can't override this, only rfkill_force_state() can */
157 if (rfkill->state == RFKILL_STATE_HARD_BLOCKED)
158 return -EPERM;
159 break;
160 case RFKILL_STATE_SOFT_BLOCKED:
161 /* nothing to do, we want to give drivers the hint to double
162 * BLOCK even a transmitter that is already in state
163 * RFKILL_STATE_HARD_BLOCKED */
164 break;
165 default:
166 WARN(1, KERN_WARNING
167 "rfkill: illegal state %d passed as parameter "
168 "to rfkill_toggle_radio\n", state);
169 return -EINVAL;
170 }
171
172 if (force || state != rfkill->state) {
173 retval = rfkill->toggle_radio(rfkill->data, state);
174 /* never allow a HARD->SOFT downgrade! */
175 if (!retval && rfkill->state != RFKILL_STATE_HARD_BLOCKED)
176 rfkill->state = state;
177 }
178
179 if (force || rfkill->state != oldstate)
180 rfkill_uevent(rfkill);
181
182 rfkill_led_trigger(rfkill, rfkill->state);
183 return retval;
184}
185
186/**
187 * __rfkill_switch_all - Toggle state of all switches of given type
188 * @type: type of interfaces to be affected
189 * @state: the new state
190 *
191 * This function toggles the state of all switches of given type,
192 * unless a specific switch is claimed by userspace (in which case,
193 * that switch is left alone) or suspended.
194 *
195 * Caller must have acquired rfkill_global_mutex.
196 */
197static void __rfkill_switch_all(const enum rfkill_type type,
198 const enum rfkill_state state)
199{
200 struct rfkill *rfkill;
201
202 if (WARN((state >= RFKILL_STATE_MAX || type >= RFKILL_TYPE_MAX),
203 KERN_WARNING
204 "rfkill: illegal state %d or type %d "
205 "passed as parameter to __rfkill_switch_all\n",
206 state, type))
207 return;
208
209 rfkill_global_states[type].current_state = state;
210 list_for_each_entry(rfkill, &rfkill_list, node) {
211 if (rfkill->type == type) {
212 mutex_lock(&rfkill->mutex);
213 rfkill_toggle_radio(rfkill, state, 0);
214 mutex_unlock(&rfkill->mutex);
215 rfkill_led_trigger(rfkill, rfkill->state);
216 }
217 }
218}
219
220/**
221 * rfkill_switch_all - Toggle state of all switches of given type
222 * @type: type of interfaces to be affected
223 * @state: the new state
224 *
225 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state).
226 * Please refer to __rfkill_switch_all() for details.
227 *
228 * Does nothing if the EPO lock is active.
229 */
230void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state)
231{
232 mutex_lock(&rfkill_global_mutex);
233 if (!rfkill_epo_lock_active)
234 __rfkill_switch_all(type, state);
235 mutex_unlock(&rfkill_global_mutex);
236}
237EXPORT_SYMBOL(rfkill_switch_all);
238
239/**
240 * rfkill_epo - emergency power off all transmitters
241 *
242 * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
243 * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
244 *
245 * The global state before the EPO is saved and can be restored later
246 * using rfkill_restore_states().
247 */
248void rfkill_epo(void)
249{
250 struct rfkill *rfkill;
251 int i;
252
253 mutex_lock(&rfkill_global_mutex);
254
255 rfkill_epo_lock_active = true;
256 list_for_each_entry(rfkill, &rfkill_list, node) {
257 mutex_lock(&rfkill->mutex);
258 rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1);
259 mutex_unlock(&rfkill->mutex);
260 }
261 for (i = 0; i < RFKILL_TYPE_MAX; i++) {
262 rfkill_global_states[i].default_state =
263 rfkill_global_states[i].current_state;
264 rfkill_global_states[i].current_state =
265 RFKILL_STATE_SOFT_BLOCKED;
266 }
267 mutex_unlock(&rfkill_global_mutex);
268 rfkill_led_trigger(rfkill, rfkill->state);
269}
270EXPORT_SYMBOL_GPL(rfkill_epo);
271
272/**
273 * rfkill_restore_states - restore global states
274 *
275 * Restore (and sync switches to) the global state from the
276 * states in rfkill_default_states. This can undo the effects of
277 * a call to rfkill_epo().
278 */
279void rfkill_restore_states(void)
280{
281 int i;
282
283 mutex_lock(&rfkill_global_mutex);
284
285 rfkill_epo_lock_active = false;
286 for (i = 0; i < RFKILL_TYPE_MAX; i++)
287 __rfkill_switch_all(i, rfkill_global_states[i].default_state);
288 mutex_unlock(&rfkill_global_mutex);
289}
290EXPORT_SYMBOL_GPL(rfkill_restore_states);
291
292/**
293 * rfkill_remove_epo_lock - unlock state changes
294 *
295 * Used by rfkill-input to manually unlock state changes, when
296 * the EPO switch is deactivated.
297 */
298void rfkill_remove_epo_lock(void)
299{
300 mutex_lock(&rfkill_global_mutex);
301 rfkill_epo_lock_active = false;
302 mutex_unlock(&rfkill_global_mutex);
303}
304EXPORT_SYMBOL_GPL(rfkill_remove_epo_lock);
305
306/**
307 * rfkill_is_epo_lock_active - returns true if the EPO lock is active
308 *
309 * Returns 0 (false) if there is NOT an active EPO condition,
310 * and 1 (true) if there is an active EPO condition, which
311 * locks all radios in one of the BLOCKED states.
312 *
313 * Can be called in atomic context.
314 */
315bool rfkill_is_epo_lock_active(void)
316{
317 return rfkill_epo_lock_active;
318}
319EXPORT_SYMBOL_GPL(rfkill_is_epo_lock_active);
320
321/**
322 * rfkill_get_global_state - returns global state for a type
323 * @type: the type to get the global state of
324 *
325 * Returns the current global state for a given wireless
326 * device type.
327 */
328enum rfkill_state rfkill_get_global_state(const enum rfkill_type type)
329{
330 return rfkill_global_states[type].current_state;
331}
332EXPORT_SYMBOL_GPL(rfkill_get_global_state);
333
334/**
335 * rfkill_force_state - Force the internal rfkill radio state
336 * @rfkill: pointer to the rfkill class to modify.
337 * @state: the current radio state the class should be forced to.
338 *
339 * This function updates the internal state of the radio cached
340 * by the rfkill class. It should be used when the driver gets
341 * a notification by the firmware/hardware of the current *real*
342 * state of the radio rfkill switch.
343 *
344 * Devices which are subject to external changes on their rfkill
345 * state (such as those caused by a hardware rfkill line) MUST
346 * have their driver arrange to call rfkill_force_state() as soon
347 * as possible after such a change.
348 *
349 * This function may not be called from an atomic context.
350 */
351int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state)
352{
353 enum rfkill_state oldstate;
354
355 BUG_ON(!rfkill);
356 if (WARN((state >= RFKILL_STATE_MAX),
357 KERN_WARNING
358 "rfkill: illegal state %d passed as parameter "
359 "to rfkill_force_state\n", state))
360 return -EINVAL;
361
362 mutex_lock(&rfkill->mutex);
363
364 oldstate = rfkill->state;
365 rfkill->state = state;
366
367 if (state != oldstate)
368 rfkill_uevent(rfkill);
369
370 mutex_unlock(&rfkill->mutex);
371 rfkill_led_trigger(rfkill, rfkill->state);
372
373 return 0;
374}
375EXPORT_SYMBOL(rfkill_force_state);
376
377static ssize_t rfkill_name_show(struct device *dev,
378 struct device_attribute *attr,
379 char *buf)
380{
381 struct rfkill *rfkill = to_rfkill(dev);
382
383 return sprintf(buf, "%s\n", rfkill->name);
384}
385
386static const char *rfkill_get_type_str(enum rfkill_type type)
387{
388 switch (type) {
389 case RFKILL_TYPE_WLAN:
390 return "wlan";
391 case RFKILL_TYPE_BLUETOOTH:
392 return "bluetooth";
393 case RFKILL_TYPE_UWB:
394 return "ultrawideband";
395 case RFKILL_TYPE_WIMAX:
396 return "wimax";
397 case RFKILL_TYPE_WWAN:
398 return "wwan";
399 default:
400 BUG();
401 }
402}
403
404static ssize_t rfkill_type_show(struct device *dev,
405 struct device_attribute *attr,
406 char *buf)
407{
408 struct rfkill *rfkill = to_rfkill(dev);
409
410 return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
411}
412
413static ssize_t rfkill_state_show(struct device *dev,
414 struct device_attribute *attr,
415 char *buf)
416{
417 struct rfkill *rfkill = to_rfkill(dev);
418
419 update_rfkill_state(rfkill);
420 return sprintf(buf, "%d\n", rfkill->state);
421}
422
423static ssize_t rfkill_state_store(struct device *dev,
424 struct device_attribute *attr,
425 const char *buf, size_t count)
426{
427 struct rfkill *rfkill = to_rfkill(dev);
428 unsigned long state;
429 int error;
430
431 if (!capable(CAP_NET_ADMIN))
432 return -EPERM;
433
434 error = strict_strtoul(buf, 0, &state);
435 if (error)
436 return error;
437
438 /* RFKILL_STATE_HARD_BLOCKED is illegal here... */
439 if (state != RFKILL_STATE_UNBLOCKED &&
440 state != RFKILL_STATE_SOFT_BLOCKED)
441 return -EINVAL;
442
443 error = mutex_lock_killable(&rfkill->mutex);
444 if (error)
445 return error;
446
447 if (!rfkill_epo_lock_active)
448 error = rfkill_toggle_radio(rfkill, state, 0);
449 else
450 error = -EPERM;
451
452 mutex_unlock(&rfkill->mutex);
453
454 return error ? error : count;
455}
456
457static ssize_t rfkill_claim_show(struct device *dev,
458 struct device_attribute *attr,
459 char *buf)
460{
461 return sprintf(buf, "%d\n", 0);
462}
463
464static ssize_t rfkill_claim_store(struct device *dev,
465 struct device_attribute *attr,
466 const char *buf, size_t count)
467{
468 return -EOPNOTSUPP;
469}
470
471static struct device_attribute rfkill_dev_attrs[] = {
472 __ATTR(name, S_IRUGO, rfkill_name_show, NULL),
473 __ATTR(type, S_IRUGO, rfkill_type_show, NULL),
474 __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
475 __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
476 __ATTR_NULL
477};
478
479static void rfkill_release(struct device *dev)
480{
481 struct rfkill *rfkill = to_rfkill(dev);
482
483 kfree(rfkill);
484 module_put(THIS_MODULE);
485}
486
487#ifdef CONFIG_PM
488static int rfkill_suspend(struct device *dev, pm_message_t state)
489{
490 struct rfkill *rfkill = to_rfkill(dev);
491
492 /* mark class device as suspended */
493 if (dev->power.power_state.event != state.event)
494 dev->power.power_state = state;
495
496 /* store state for the resume handler */
497 rfkill->state_for_resume = rfkill->state;
498
499 return 0;
500}
501
502static int rfkill_resume(struct device *dev)
503{
504 struct rfkill *rfkill = to_rfkill(dev);
505 enum rfkill_state newstate;
506
507 if (dev->power.power_state.event != PM_EVENT_ON) {
508 mutex_lock(&rfkill->mutex);
509
510 dev->power.power_state.event = PM_EVENT_ON;
511
512 /*
513 * rfkill->state could have been modified before we got
514 * called, and won't be updated by rfkill_toggle_radio()
515 * in force mode. Sync it FIRST.
516 */
517 if (rfkill->get_state &&
518 !rfkill->get_state(rfkill->data, &newstate))
519 rfkill->state = newstate;
520
521 /*
522 * If we are under EPO, kick transmitter offline,
523 * otherwise restore to pre-suspend state.
524 *
525 * Issue a notification in any case
526 */
527 rfkill_toggle_radio(rfkill,
528 rfkill_epo_lock_active ?
529 RFKILL_STATE_SOFT_BLOCKED :
530 rfkill->state_for_resume,
531 1);
532
533 mutex_unlock(&rfkill->mutex);
534 rfkill_led_trigger(rfkill, rfkill->state);
535 }
536
537 return 0;
538}
539#else
540#define rfkill_suspend NULL
541#define rfkill_resume NULL
542#endif
543
544static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
545{
546 struct rfkill *rfkill = to_rfkill(dev);
547 int error;
548
549 error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
550 if (error)
551 return error;
552 error = add_uevent_var(env, "RFKILL_TYPE=%s",
553 rfkill_get_type_str(rfkill->type));
554 if (error)
555 return error;
556 error = add_uevent_var(env, "RFKILL_STATE=%d", rfkill->state);
557 return error;
558}
559
560static struct class rfkill_class = {
561 .name = "rfkill",
562 .dev_release = rfkill_release,
563 .dev_attrs = rfkill_dev_attrs,
564 .suspend = rfkill_suspend,
565 .resume = rfkill_resume,
566 .dev_uevent = rfkill_dev_uevent,
567};
568
569static int rfkill_check_duplicity(const struct rfkill *rfkill)
570{
571 struct rfkill *p;
572 unsigned long seen[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
573
574 memset(seen, 0, sizeof(seen));
575
576 list_for_each_entry(p, &rfkill_list, node) {
577 if (WARN((p == rfkill), KERN_WARNING
578 "rfkill: illegal attempt to register "
579 "an already registered rfkill struct\n"))
580 return -EEXIST;
581 set_bit(p->type, seen);
582 }
583
584 /* 0: first switch of its kind */
585 return (test_bit(rfkill->type, seen)) ? 1 : 0;
586}
587
588static int rfkill_add_switch(struct rfkill *rfkill)
589{
590 int error;
591
592 mutex_lock(&rfkill_global_mutex);
593
594 error = rfkill_check_duplicity(rfkill);
595 if (error < 0)
596 goto unlock_out;
597
598 if (!error) {
599 /* lock default after first use */
600 set_bit(rfkill->type, rfkill_states_lockdflt);
601 rfkill_global_states[rfkill->type].current_state =
602 rfkill_global_states[rfkill->type].default_state;
603 }
604
605 rfkill_toggle_radio(rfkill,
606 rfkill_global_states[rfkill->type].current_state,
607 0);
608
609 list_add_tail(&rfkill->node, &rfkill_list);
610
611 error = 0;
612unlock_out:
613 mutex_unlock(&rfkill_global_mutex);
614
615 return error;
616}
617
618static void rfkill_remove_switch(struct rfkill *rfkill)
619{
620 mutex_lock(&rfkill_global_mutex);
621 list_del_init(&rfkill->node);
622 mutex_unlock(&rfkill_global_mutex);
623
624 mutex_lock(&rfkill->mutex);
625 rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1);
626 mutex_unlock(&rfkill->mutex);
627}
628
629/**
630 * rfkill_allocate - allocate memory for rfkill structure.
631 * @parent: device that has rf switch on it
632 * @type: type of the switch (RFKILL_TYPE_*)
633 *
634 * This function should be called by the network driver when it needs an
635 * rfkill structure. Once the structure is allocated the driver should
636 * finish its initialization by setting the name, private data, enable_radio
637 * and disable_radio methods and then register it with rfkill_register().
638 *
639 * NOTE: If registration fails the structure should be freed by calling
640 * rfkill_free() otherwise rfkill_unregister() should be used.
641 */
642struct rfkill * __must_check rfkill_allocate(struct device *parent,
643 enum rfkill_type type)
644{
645 struct rfkill *rfkill;
646 struct device *dev;
647
648 if (WARN((type >= RFKILL_TYPE_MAX),
649 KERN_WARNING
650 "rfkill: illegal type %d passed as parameter "
651 "to rfkill_allocate\n", type))
652 return NULL;
653
654 rfkill = kzalloc(sizeof(struct rfkill), GFP_KERNEL);
655 if (!rfkill)
656 return NULL;
657
658 mutex_init(&rfkill->mutex);
659 INIT_LIST_HEAD(&rfkill->node);
660 rfkill->type = type;
661
662 dev = &rfkill->dev;
663 dev->class = &rfkill_class;
664 dev->parent = parent;
665 device_initialize(dev);
666
667 __module_get(THIS_MODULE);
668
669 return rfkill;
670}
671EXPORT_SYMBOL(rfkill_allocate);
672
673/**
674 * rfkill_free - Mark rfkill structure for deletion
675 * @rfkill: rfkill structure to be destroyed
676 *
677 * Decrements reference count of the rfkill structure so it is destroyed.
678 * Note that rfkill_free() should _not_ be called after rfkill_unregister().
679 */
680void rfkill_free(struct rfkill *rfkill)
681{
682 if (rfkill)
683 put_device(&rfkill->dev);
684}
685EXPORT_SYMBOL(rfkill_free);
686
687static void rfkill_led_trigger_register(struct rfkill *rfkill)
688{
689#ifdef CONFIG_RFKILL_LEDS
690 int error;
691
692 if (!rfkill->led_trigger.name)
693 rfkill->led_trigger.name = dev_name(&rfkill->dev);
694 if (!rfkill->led_trigger.activate)
695 rfkill->led_trigger.activate = rfkill_led_trigger_activate;
696 error = led_trigger_register(&rfkill->led_trigger);
697 if (error)
698 rfkill->led_trigger.name = NULL;
699#endif /* CONFIG_RFKILL_LEDS */
700}
701
702static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
703{
704#ifdef CONFIG_RFKILL_LEDS
705 if (rfkill->led_trigger.name) {
706 led_trigger_unregister(&rfkill->led_trigger);
707 rfkill->led_trigger.name = NULL;
708 }
709#endif
710}
711
712/**
713 * rfkill_register - Register a rfkill structure.
714 * @rfkill: rfkill structure to be registered
715 *
716 * This function should be called by the network driver when the rfkill
717 * structure needs to be registered. Immediately from registration the
718 * switch driver should be able to service calls to toggle_radio.
719 */
720int __must_check rfkill_register(struct rfkill *rfkill)
721{
722 static atomic_t rfkill_no = ATOMIC_INIT(0);
723 struct device *dev = &rfkill->dev;
724 int error;
725
726 if (WARN((!rfkill || !rfkill->toggle_radio ||
727 rfkill->type >= RFKILL_TYPE_MAX ||
728 rfkill->state >= RFKILL_STATE_MAX),
729 KERN_WARNING
730 "rfkill: attempt to register a "
731 "badly initialized rfkill struct\n"))
732 return -EINVAL;
733
734 dev_set_name(dev, "rfkill%ld", (long)atomic_inc_return(&rfkill_no) - 1);
735
736 rfkill_led_trigger_register(rfkill);
737
738 error = rfkill_add_switch(rfkill);
739 if (error) {
740 rfkill_led_trigger_unregister(rfkill);
741 return error;
742 }
743
744 error = device_add(dev);
745 if (error) {
746 rfkill_remove_switch(rfkill);
747 rfkill_led_trigger_unregister(rfkill);
748 return error;
749 }
750
751 return 0;
752}
753EXPORT_SYMBOL(rfkill_register);
754
755/**
756 * rfkill_unregister - Unregister a rfkill structure.
757 * @rfkill: rfkill structure to be unregistered
758 *
759 * This function should be called by the network driver during device
760 * teardown to destroy rfkill structure. Note that rfkill_free() should
761 * _not_ be called after rfkill_unregister().
762 */
763void rfkill_unregister(struct rfkill *rfkill)
764{
765 BUG_ON(!rfkill);
766 device_del(&rfkill->dev);
767 rfkill_remove_switch(rfkill);
768 rfkill_led_trigger_unregister(rfkill);
769 put_device(&rfkill->dev);
770}
771EXPORT_SYMBOL(rfkill_unregister);
772
773/**
774 * rfkill_set_default - set initial value for a switch type
775 * @type - the type of switch to set the default state of
776 * @state - the new default state for that group of switches
777 *
778 * Sets the initial state rfkill should use for a given type.
779 * The following initial states are allowed: RFKILL_STATE_SOFT_BLOCKED
780 * and RFKILL_STATE_UNBLOCKED.
781 *
782 * This function is meant to be used by platform drivers for platforms
783 * that can save switch state across power down/reboot.
784 *
785 * The default state for each switch type can be changed exactly once.
786 * After a switch of that type is registered, the default state cannot
787 * be changed anymore. This guards against multiple drivers on the
788 * same platform trying to set the initial switch default state, which
789 * is not allowed.
790 *
791 * Returns -EPERM if the state has already been set once or is in use,
792 * so drivers likely want to either ignore or at most printk(KERN_NOTICE)
793 * if this function returns -EPERM.
794 *
795 * Returns 0 if the new default state was set, or an error if it
796 * could not be set.
797 */
798int rfkill_set_default(enum rfkill_type type, enum rfkill_state state)
799{
800 int error;
801
802 if (WARN((type >= RFKILL_TYPE_MAX ||
803 (state != RFKILL_STATE_SOFT_BLOCKED &&
804 state != RFKILL_STATE_UNBLOCKED)),
805 KERN_WARNING
806 "rfkill: illegal state %d or type %d passed as "
807 "parameter to rfkill_set_default\n", state, type))
808 return -EINVAL;
809
810 mutex_lock(&rfkill_global_mutex);
811
812 if (!test_and_set_bit(type, rfkill_states_lockdflt)) {
813 rfkill_global_states[type].default_state = state;
814 rfkill_global_states[type].current_state = state;
815 error = 0;
816 } else
817 error = -EPERM;
818
819 mutex_unlock(&rfkill_global_mutex);
820 return error;
821}
822EXPORT_SYMBOL_GPL(rfkill_set_default);
823
824/*
825 * Rfkill module initialization/deinitialization.
826 */
827static int __init rfkill_init(void)
828{
829 int error;
830 int i;
831
832 /* RFKILL_STATE_HARD_BLOCKED is illegal here... */
833 if (rfkill_default_state != RFKILL_STATE_SOFT_BLOCKED &&
834 rfkill_default_state != RFKILL_STATE_UNBLOCKED)
835 return -EINVAL;
836
837 for (i = 0; i < RFKILL_TYPE_MAX; i++)
838 rfkill_global_states[i].default_state = rfkill_default_state;
839
840 error = class_register(&rfkill_class);
841 if (error) {
842 printk(KERN_ERR "rfkill: unable to register rfkill class\n");
843 return error;
844 }
845
846 return 0;
847}
848
849static void __exit rfkill_exit(void)
850{
851 class_unregister(&rfkill_class);
852}
853
854subsys_initcall(rfkill_init);
855module_exit(rfkill_exit);
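
The deletion above retires the old driver-facing interface: rfkill_allocate(), a toggle_radio() callback, and rfkill_register()/rfkill_free(). As a hedged sketch of what existing users such as the WiMAX code later in this diff must migrate away from, a typical old-style registration looked roughly like this; the mydrv_* names and the WLAN type are illustrative only:

#include <linux/device.h>
#include <linux/rfkill.h>

/* old-style callback: the core passes the rfkill_state to enter */
static int mydrv_toggle_radio(void *data, enum rfkill_state state)
{
	/* program the hardware for SOFT_BLOCKED/UNBLOCKED here */
	return 0;
}

static int mydrv_rfkill_setup(struct device *parent, void *priv)
{
	struct rfkill *rfkill;
	int err;

	rfkill = rfkill_allocate(parent, RFKILL_TYPE_WLAN);
	if (!rfkill)
		return -ENOMEM;

	rfkill->name = "mydrv-wlan";
	rfkill->data = priv;
	rfkill->state = RFKILL_STATE_UNBLOCKED;
	rfkill->toggle_radio = mydrv_toggle_radio;

	err = rfkill_register(rfkill);
	if (err)
		rfkill_free(rfkill);	/* only valid before registration */
	return err;
}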
diff --git a/net/rfkill/rfkill-input.h b/net/rfkill/rfkill.h
index fe8df6b5b935..d1117cb6e4de 100644
--- a/net/rfkill/rfkill-input.h
+++ b/net/rfkill/rfkill.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (C) 2007 Ivo van Doorn 2 * Copyright (C) 2007 Ivo van Doorn
3 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
3 */ 4 */
4 5
5/* 6/*
@@ -11,11 +12,16 @@
11#ifndef __RFKILL_INPUT_H 12#ifndef __RFKILL_INPUT_H
12#define __RFKILL_INPUT_H 13#define __RFKILL_INPUT_H
13 14
14void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state); 15/* core code */
16void rfkill_switch_all(const enum rfkill_type type, bool blocked);
15void rfkill_epo(void); 17void rfkill_epo(void);
16void rfkill_restore_states(void); 18void rfkill_restore_states(void);
17void rfkill_remove_epo_lock(void); 19void rfkill_remove_epo_lock(void);
18bool rfkill_is_epo_lock_active(void); 20bool rfkill_is_epo_lock_active(void);
19enum rfkill_state rfkill_get_global_state(const enum rfkill_type type); 21bool rfkill_get_global_sw_state(const enum rfkill_type type);
22
23/* input handler */
24int rfkill_handler_init(void);
25void rfkill_handler_exit(void);
20 26
21#endif /* __RFKILL_INPUT_H */ 27#endif /* __RFKILL_INPUT_H */
diff --git a/net/wimax/Kconfig b/net/wimax/Kconfig
index 1b46747a5f5a..e4d97ab476d5 100644
--- a/net/wimax/Kconfig
+++ b/net/wimax/Kconfig
@@ -1,23 +1,10 @@
1# 1#
2# WiMAX LAN device configuration 2# WiMAX LAN device configuration
3# 3#
4# Note the ugly 'depends on' on WIMAX: that disallows RFKILL to be a
5# module if WIMAX is to be linked in. The WiMAX code is done in such a
6# way that it doesn't require an explicit dependency on RFKILL in
7# case an embedded system wants to rip it out.
8#
9# As well, enablement of the RFKILL code means we need the INPUT layer
10# support to inject events coming from hw rfkill switches. That
11# dependency could be killed if input.h provided appropriate means to
12# work when input is disabled.
13
14comment "WiMAX Wireless Broadband support requires CONFIG_INPUT enabled"
15 depends on INPUT = n && RFKILL != n
16 4
17menuconfig WIMAX 5menuconfig WIMAX
18 tristate "WiMAX Wireless Broadband support" 6 tristate "WiMAX Wireless Broadband support"
19 depends on (y && RFKILL != m) || m 7 depends on RFKILL || !RFKILL
20 depends on (INPUT && RFKILL != n) || RFKILL = n
21 help 8 help
22 9
23 Select to configure support for devices that provide 10 Select to configure support for devices that provide
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index a3616e2ccb8a..bb102e4aa3e9 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -29,8 +29,8 @@
29 * A non-polled generic rfkill device is embedded into the WiMAX 29 * A non-polled generic rfkill device is embedded into the WiMAX
30 * subsystem's representation of a device. 30 * subsystem's representation of a device.
31 * 31 *
32 * FIXME: Need polled support? use a timer or add the implementation 32 * FIXME: Need polled support? Let drivers provide a poll routine
33 * to the stack. 33 * and hand it to rfkill ops then?
34 * 34 *
35 * All device drivers have to do is after wimax_dev_init(), call 35 * All device drivers have to do is after wimax_dev_init(), call
36 * wimax_report_rfkill_hw() and wimax_report_rfkill_sw() to update 36 * wimax_report_rfkill_hw() and wimax_report_rfkill_sw() to update
@@ -43,7 +43,7 @@
43 * wimax_rfkill() Kernel calling wimax_rfkill() 43 * wimax_rfkill() Kernel calling wimax_rfkill()
44 * __wimax_rf_toggle_radio() 44 * __wimax_rf_toggle_radio()
45 * 45 *
46 * wimax_rfkill_toggle_radio() RF-Kill subsystem calling 46 * wimax_rfkill_set_radio_block() RF-Kill subsystem calling
47 * __wimax_rf_toggle_radio() 47 * __wimax_rf_toggle_radio()
48 * 48 *
49 * __wimax_rf_toggle_radio() 49 * __wimax_rf_toggle_radio()
@@ -65,15 +65,11 @@
65#include <linux/wimax.h> 65#include <linux/wimax.h>
66#include <linux/security.h> 66#include <linux/security.h>
67#include <linux/rfkill.h> 67#include <linux/rfkill.h>
68#include <linux/input.h>
69#include "wimax-internal.h" 68#include "wimax-internal.h"
70 69
71#define D_SUBMODULE op_rfkill 70#define D_SUBMODULE op_rfkill
72#include "debug-levels.h" 71#include "debug-levels.h"
73 72
74#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
75
76
77/** 73/**
78 * wimax_report_rfkill_hw - Reports changes in the hardware RF switch 74 * wimax_report_rfkill_hw - Reports changes in the hardware RF switch
79 * 75 *
@@ -99,7 +95,6 @@ void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev,
99 int result; 95 int result;
100 struct device *dev = wimax_dev_to_dev(wimax_dev); 96 struct device *dev = wimax_dev_to_dev(wimax_dev);
101 enum wimax_st wimax_state; 97 enum wimax_st wimax_state;
102 enum rfkill_state rfkill_state;
103 98
104 d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state); 99 d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
105 BUG_ON(state == WIMAX_RF_QUERY); 100 BUG_ON(state == WIMAX_RF_QUERY);
@@ -112,16 +107,15 @@ void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev,
112 107
113 if (state != wimax_dev->rf_hw) { 108 if (state != wimax_dev->rf_hw) {
114 wimax_dev->rf_hw = state; 109 wimax_dev->rf_hw = state;
115 rfkill_state = state == WIMAX_RF_ON ?
116 RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
117 if (wimax_dev->rf_hw == WIMAX_RF_ON 110 if (wimax_dev->rf_hw == WIMAX_RF_ON
118 && wimax_dev->rf_sw == WIMAX_RF_ON) 111 && wimax_dev->rf_sw == WIMAX_RF_ON)
119 wimax_state = WIMAX_ST_READY; 112 wimax_state = WIMAX_ST_READY;
120 else 113 else
121 wimax_state = WIMAX_ST_RADIO_OFF; 114 wimax_state = WIMAX_ST_RADIO_OFF;
115
116 rfkill_set_hw_state(wimax_dev->rfkill, state == WIMAX_RF_OFF);
117
122 __wimax_state_change(wimax_dev, wimax_state); 118 __wimax_state_change(wimax_dev, wimax_state);
123 input_report_key(wimax_dev->rfkill_input, KEY_WIMAX,
124 rfkill_state);
125 } 119 }
126error_not_ready: 120error_not_ready:
127 mutex_unlock(&wimax_dev->mutex); 121 mutex_unlock(&wimax_dev->mutex);
@@ -174,6 +168,7 @@ void wimax_report_rfkill_sw(struct wimax_dev *wimax_dev,
174 else 168 else
175 wimax_state = WIMAX_ST_RADIO_OFF; 169 wimax_state = WIMAX_ST_RADIO_OFF;
176 __wimax_state_change(wimax_dev, wimax_state); 170 __wimax_state_change(wimax_dev, wimax_state);
171 rfkill_set_sw_state(wimax_dev->rfkill, state == WIMAX_RF_OFF);
177 } 172 }
178error_not_ready: 173error_not_ready:
179 mutex_unlock(&wimax_dev->mutex); 174 mutex_unlock(&wimax_dev->mutex);
@@ -249,36 +244,31 @@ out_no_change:
249 * 244 *
250 * NOTE: This call will block until the operation is completed. 245 * NOTE: This call will block until the operation is completed.
251 */ 246 */
252static 247static int wimax_rfkill_set_radio_block(void *data, bool blocked)
253int wimax_rfkill_toggle_radio(void *data, enum rfkill_state state)
254{ 248{
255 int result; 249 int result;
256 struct wimax_dev *wimax_dev = data; 250 struct wimax_dev *wimax_dev = data;
257 struct device *dev = wimax_dev_to_dev(wimax_dev); 251 struct device *dev = wimax_dev_to_dev(wimax_dev);
258 enum wimax_rf_state rf_state; 252 enum wimax_rf_state rf_state;
259 253
260 d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state); 254 d_fnstart(3, dev, "(wimax_dev %p blocked %u)\n", wimax_dev, blocked);
261 switch (state) { 255 rf_state = WIMAX_RF_ON;
262 case RFKILL_STATE_SOFT_BLOCKED: 256 if (blocked)
263 rf_state = WIMAX_RF_OFF; 257 rf_state = WIMAX_RF_OFF;
264 break;
265 case RFKILL_STATE_UNBLOCKED:
266 rf_state = WIMAX_RF_ON;
267 break;
268 default:
269 BUG();
270 }
271 mutex_lock(&wimax_dev->mutex); 258 mutex_lock(&wimax_dev->mutex);
272 if (wimax_dev->state <= __WIMAX_ST_QUIESCING) 259 if (wimax_dev->state <= __WIMAX_ST_QUIESCING)
273 result = 0; /* just pretend it didn't happen */ 260 result = 0;
274 else 261 else
275 result = __wimax_rf_toggle_radio(wimax_dev, rf_state); 262 result = __wimax_rf_toggle_radio(wimax_dev, rf_state);
276 mutex_unlock(&wimax_dev->mutex); 263 mutex_unlock(&wimax_dev->mutex);
277 d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n", 264 d_fnend(3, dev, "(wimax_dev %p blocked %u) = %d\n",
278 wimax_dev, state, result); 265 wimax_dev, blocked, result);
279 return result; 266 return result;
280} 267}
281 268
269static const struct rfkill_ops wimax_rfkill_ops = {
270 .set_block = wimax_rfkill_set_radio_block,
271};
282 272
283/** 273/**
284 * wimax_rfkill - Set the software RF switch state for a WiMAX device 274 * wimax_rfkill - Set the software RF switch state for a WiMAX device
@@ -322,6 +312,7 @@ int wimax_rfkill(struct wimax_dev *wimax_dev, enum wimax_rf_state state)
322 result = __wimax_rf_toggle_radio(wimax_dev, state); 312 result = __wimax_rf_toggle_radio(wimax_dev, state);
323 if (result < 0) 313 if (result < 0)
324 goto error; 314 goto error;
315 rfkill_set_sw_state(wimax_dev->rfkill, state == WIMAX_RF_OFF);
325 break; 316 break;
326 case WIMAX_RF_QUERY: 317 case WIMAX_RF_QUERY:
327 break; 318 break;
@@ -349,40 +340,20 @@ int wimax_rfkill_add(struct wimax_dev *wimax_dev)
349{ 340{
350 int result; 341 int result;
351 struct rfkill *rfkill; 342 struct rfkill *rfkill;
352 struct input_dev *input_dev;
353 struct device *dev = wimax_dev_to_dev(wimax_dev); 343 struct device *dev = wimax_dev_to_dev(wimax_dev);
354 344
355 d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev); 345 d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev);
356 /* Initialize RF Kill */ 346 /* Initialize RF Kill */
357 result = -ENOMEM; 347 result = -ENOMEM;
358 rfkill = rfkill_allocate(dev, RFKILL_TYPE_WIMAX); 348 rfkill = rfkill_alloc(wimax_dev->name, dev, RFKILL_TYPE_WIMAX,
349 &wimax_rfkill_ops, wimax_dev);
359 if (rfkill == NULL) 350 if (rfkill == NULL)
360 goto error_rfkill_allocate; 351 goto error_rfkill_allocate;
352
353 d_printf(1, dev, "rfkill %p\n", rfkill);
354
361 wimax_dev->rfkill = rfkill; 355 wimax_dev->rfkill = rfkill;
362 356
363 rfkill->name = wimax_dev->name;
364 rfkill->state = RFKILL_STATE_UNBLOCKED;
365 rfkill->data = wimax_dev;
366 rfkill->toggle_radio = wimax_rfkill_toggle_radio;
367
368 /* Initialize the input device for the hw key */
369 input_dev = input_allocate_device();
370 if (input_dev == NULL)
371 goto error_input_allocate;
372 wimax_dev->rfkill_input = input_dev;
373 d_printf(1, dev, "rfkill %p input %p\n", rfkill, input_dev);
374
375 input_dev->name = wimax_dev->name;
376 /* FIXME: get a real device bus ID and stuff? do we care? */
377 input_dev->id.bustype = BUS_HOST;
378 input_dev->id.vendor = 0xffff;
379 input_dev->evbit[0] = BIT(EV_KEY);
380 set_bit(KEY_WIMAX, input_dev->keybit);
381
382 /* Register both */
383 result = input_register_device(wimax_dev->rfkill_input);
384 if (result < 0)
385 goto error_input_register;
386 result = rfkill_register(wimax_dev->rfkill); 357 result = rfkill_register(wimax_dev->rfkill);
387 if (result < 0) 358 if (result < 0)
388 goto error_rfkill_register; 359 goto error_rfkill_register;
@@ -394,17 +365,8 @@ int wimax_rfkill_add(struct wimax_dev *wimax_dev)
394 d_fnend(3, dev, "(wimax_dev %p) = 0\n", wimax_dev); 365 d_fnend(3, dev, "(wimax_dev %p) = 0\n", wimax_dev);
395 return 0; 366 return 0;
396 367
397 /* if rfkill_register() succeeds, can't use rfkill_free() any
398 * more, only rfkill_unregister() [it owns the refcount]; with
399 * the input device we have the same issue--hence the if. */
400error_rfkill_register: 368error_rfkill_register:
401 input_unregister_device(wimax_dev->rfkill_input); 369 rfkill_destroy(wimax_dev->rfkill);
402 wimax_dev->rfkill_input = NULL;
403error_input_register:
404 if (wimax_dev->rfkill_input)
405 input_free_device(wimax_dev->rfkill_input);
406error_input_allocate:
407 rfkill_free(wimax_dev->rfkill);
408error_rfkill_allocate: 370error_rfkill_allocate:
409 d_fnend(3, dev, "(wimax_dev %p) = %d\n", wimax_dev, result); 371 d_fnend(3, dev, "(wimax_dev %p) = %d\n", wimax_dev, result);
410 return result; 372 return result;
@@ -423,45 +385,12 @@ void wimax_rfkill_rm(struct wimax_dev *wimax_dev)
423{ 385{
424 struct device *dev = wimax_dev_to_dev(wimax_dev); 386 struct device *dev = wimax_dev_to_dev(wimax_dev);
425 d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev); 387 d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev);
426 rfkill_unregister(wimax_dev->rfkill); /* frees */ 388 rfkill_unregister(wimax_dev->rfkill);
427 input_unregister_device(wimax_dev->rfkill_input); 389 rfkill_destroy(wimax_dev->rfkill);
428 d_fnend(3, dev, "(wimax_dev %p)\n", wimax_dev); 390 d_fnend(3, dev, "(wimax_dev %p)\n", wimax_dev);
429} 391}
430 392
431 393
432#else /* #ifdef CONFIG_RFKILL */
433
434void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev,
435 enum wimax_rf_state state)
436{
437}
438EXPORT_SYMBOL_GPL(wimax_report_rfkill_hw);
439
440void wimax_report_rfkill_sw(struct wimax_dev *wimax_dev,
441 enum wimax_rf_state state)
442{
443}
444EXPORT_SYMBOL_GPL(wimax_report_rfkill_sw);
445
446int wimax_rfkill(struct wimax_dev *wimax_dev,
447 enum wimax_rf_state state)
448{
449 return WIMAX_RF_ON << 1 | WIMAX_RF_ON;
450}
451EXPORT_SYMBOL_GPL(wimax_rfkill);
452
453int wimax_rfkill_add(struct wimax_dev *wimax_dev)
454{
455 return 0;
456}
457
458void wimax_rfkill_rm(struct wimax_dev *wimax_dev)
459{
460}
461
462#endif /* #ifdef CONFIG_RFKILL */
463
464
465/* 394/*
466 * Exporting to user space over generic netlink 395 * Exporting to user space over generic netlink
467 * 396 *
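
The WiMAX conversion above shows the pattern the new rfkill core expects from every user: a const struct rfkill_ops with a set_block() hook, rfkill_alloc()/rfkill_register() to create the switch, rfkill_set_hw_state()/rfkill_set_sw_state() to report switch changes, and rfkill_unregister()/rfkill_destroy() on teardown. A condensed, hedged sketch with invented mydrv_* names:

#include <linux/device.h>
#include <linux/rfkill.h>

static int mydrv_set_block(void *data, bool blocked)
{
	/* switch the transmitter off when blocked, back on otherwise */
	return 0;
}

static const struct rfkill_ops mydrv_rfkill_ops = {
	.set_block = mydrv_set_block,
};

static int mydrv_rfkill_setup(struct device *parent, void *priv)
{
	struct rfkill *rfkill;
	int err;

	rfkill = rfkill_alloc("mydrv-wlan", parent, RFKILL_TYPE_WLAN,
			      &mydrv_rfkill_ops, priv);
	if (!rfkill)
		return -ENOMEM;

	err = rfkill_register(rfkill);
	if (err) {
		rfkill_destroy(rfkill);
		return err;
	}

	/* later, e.g. when an interrupt reports the hardware switch: */
	rfkill_set_hw_state(rfkill, true /* blocked */);
	return 0;
}

Teardown mirrors the WiMAX hunk: rfkill_unregister() followed by rfkill_destroy().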
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 45005497c634..4428dd5e911d 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -1,5 +1,6 @@
1config CFG80211 1config CFG80211
2 tristate "Improved wireless configuration API" 2 tristate "Improved wireless configuration API"
3 depends on RFKILL || !RFKILL
3 4
4config CFG80211_REG_DEBUG 5config CFG80211_REG_DEBUG
5 bool "cfg80211 regulatory debugging" 6 bool "cfg80211 regulatory debugging"
diff --git a/net/wireless/core.c b/net/wireless/core.c
index a5dbea1da476..3b74b88e10a3 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -12,6 +12,7 @@
12#include <linux/debugfs.h> 12#include <linux/debugfs.h>
13#include <linux/notifier.h> 13#include <linux/notifier.h>
14#include <linux/device.h> 14#include <linux/device.h>
15#include <linux/rtnetlink.h>
15#include <net/genetlink.h> 16#include <net/genetlink.h>
16#include <net/cfg80211.h> 17#include <net/cfg80211.h>
17#include "nl80211.h" 18#include "nl80211.h"
@@ -227,6 +228,41 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
227 return 0; 228 return 0;
228} 229}
229 230
231static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data)
232{
233 struct cfg80211_registered_device *drv = data;
234
235 drv->ops->rfkill_poll(&drv->wiphy);
236}
237
238static int cfg80211_rfkill_set_block(void *data, bool blocked)
239{
240 struct cfg80211_registered_device *drv = data;
241 struct wireless_dev *wdev;
242
243 if (!blocked)
244 return 0;
245
246 rtnl_lock();
247 mutex_lock(&drv->devlist_mtx);
248
249 list_for_each_entry(wdev, &drv->netdev_list, list)
250 dev_close(wdev->netdev);
251
252 mutex_unlock(&drv->devlist_mtx);
253 rtnl_unlock();
254
255 return 0;
256}
257
258static void cfg80211_rfkill_sync_work(struct work_struct *work)
259{
260 struct cfg80211_registered_device *drv;
261
262 drv = container_of(work, struct cfg80211_registered_device, rfkill_sync);
263 cfg80211_rfkill_set_block(drv, rfkill_blocked(drv->rfkill));
264}
265
230/* exported functions */ 266/* exported functions */
231 267
232struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) 268struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
@@ -274,6 +310,18 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
274 drv->wiphy.dev.class = &ieee80211_class; 310 drv->wiphy.dev.class = &ieee80211_class;
275 drv->wiphy.dev.platform_data = drv; 311 drv->wiphy.dev.platform_data = drv;
276 312
313 drv->rfkill_ops.set_block = cfg80211_rfkill_set_block;
314 drv->rfkill = rfkill_alloc(dev_name(&drv->wiphy.dev),
315 &drv->wiphy.dev, RFKILL_TYPE_WLAN,
316 &drv->rfkill_ops, drv);
317
318 if (!drv->rfkill) {
319 kfree(drv);
320 return NULL;
321 }
322
323 INIT_WORK(&drv->rfkill_sync, cfg80211_rfkill_sync_work);
324
277 /* 325 /*
278 * Initialize wiphy parameters to IEEE 802.11 MIB default values. 326 * Initialize wiphy parameters to IEEE 802.11 MIB default values.
279 * Fragmentation and RTS threshold are disabled by default with the 327 * Fragmentation and RTS threshold are disabled by default with the
@@ -356,6 +404,10 @@ int wiphy_register(struct wiphy *wiphy)
356 if (res) 404 if (res)
357 goto out_unlock; 405 goto out_unlock;
358 406
407 res = rfkill_register(drv->rfkill);
408 if (res)
409 goto out_rm_dev;
410
359 list_add(&drv->list, &cfg80211_drv_list); 411 list_add(&drv->list, &cfg80211_drv_list);
360 412
361 /* add to debugfs */ 413 /* add to debugfs */
@@ -379,16 +431,41 @@ int wiphy_register(struct wiphy *wiphy)
379 cfg80211_debugfs_drv_add(drv); 431 cfg80211_debugfs_drv_add(drv);
380 432
381 res = 0; 433 res = 0;
382out_unlock: 434 goto out_unlock;
435
436 out_rm_dev:
437 device_del(&drv->wiphy.dev);
438 out_unlock:
383 mutex_unlock(&cfg80211_mutex); 439 mutex_unlock(&cfg80211_mutex);
384 return res; 440 return res;
385} 441}
386EXPORT_SYMBOL(wiphy_register); 442EXPORT_SYMBOL(wiphy_register);
387 443
444void wiphy_rfkill_start_polling(struct wiphy *wiphy)
445{
446 struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy);
447
448 if (!drv->ops->rfkill_poll)
449 return;
450 drv->rfkill_ops.poll = cfg80211_rfkill_poll;
451 rfkill_resume_polling(drv->rfkill);
452}
453EXPORT_SYMBOL(wiphy_rfkill_start_polling);
454
455void wiphy_rfkill_stop_polling(struct wiphy *wiphy)
456{
457 struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy);
458
459 rfkill_pause_polling(drv->rfkill);
460}
461EXPORT_SYMBOL(wiphy_rfkill_stop_polling);
462
388void wiphy_unregister(struct wiphy *wiphy) 463void wiphy_unregister(struct wiphy *wiphy)
389{ 464{
390 struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy); 465 struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy);
391 466
467 rfkill_unregister(drv->rfkill);
468
392 /* protect the device list */ 469 /* protect the device list */
393 mutex_lock(&cfg80211_mutex); 470 mutex_lock(&cfg80211_mutex);
394 471
@@ -425,6 +502,7 @@ EXPORT_SYMBOL(wiphy_unregister);
425void cfg80211_dev_free(struct cfg80211_registered_device *drv) 502void cfg80211_dev_free(struct cfg80211_registered_device *drv)
426{ 503{
427 struct cfg80211_internal_bss *scan, *tmp; 504 struct cfg80211_internal_bss *scan, *tmp;
505 rfkill_destroy(drv->rfkill);
428 mutex_destroy(&drv->mtx); 506 mutex_destroy(&drv->mtx);
429 mutex_destroy(&drv->devlist_mtx); 507 mutex_destroy(&drv->devlist_mtx);
430 list_for_each_entry_safe(scan, tmp, &drv->bss_list, list) 508 list_for_each_entry_safe(scan, tmp, &drv->bss_list, list)
@@ -438,6 +516,15 @@ void wiphy_free(struct wiphy *wiphy)
438} 516}
439EXPORT_SYMBOL(wiphy_free); 517EXPORT_SYMBOL(wiphy_free);
440 518
519void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked)
520{
521 struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy);
522
523 if (rfkill_set_hw_state(drv->rfkill, blocked))
524 schedule_work(&drv->rfkill_sync);
525}
526EXPORT_SYMBOL(wiphy_rfkill_set_hw_state);
527
441static int cfg80211_netdev_notifier_call(struct notifier_block * nb, 528static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
442 unsigned long state, 529 unsigned long state,
443 void *ndev) 530 void *ndev)
@@ -446,7 +533,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
446 struct cfg80211_registered_device *rdev; 533 struct cfg80211_registered_device *rdev;
447 534
448 if (!dev->ieee80211_ptr) 535 if (!dev->ieee80211_ptr)
449 return 0; 536 return NOTIFY_DONE;
450 537
451 rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy); 538 rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy);
452 539
@@ -492,9 +579,13 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
492 } 579 }
493 mutex_unlock(&rdev->devlist_mtx); 580 mutex_unlock(&rdev->devlist_mtx);
494 break; 581 break;
582 case NETDEV_PRE_UP:
583 if (rfkill_blocked(rdev->rfkill))
584 return notifier_from_errno(-ERFKILL);
585 break;
495 } 586 }
496 587
497 return 0; 588 return NOTIFY_DONE;
498} 589}
499 590
500static struct notifier_block cfg80211_netdev_notifier = { 591static struct notifier_block cfg80211_netdev_notifier = {
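For illustration only: a minimal sketch of how a wireless driver might consume the hooks added above. The mydrv_* names and struct are hypothetical; only the wiphy_rfkill_*() calls and the rfkill_poll operation come from this patch.

#include <net/cfg80211.h>

struct mydrv_priv {
	struct wiphy *wiphy;
	bool killswitch;	/* hypothetical hardware switch state */
};

/* called by rfkill via cfg80211_rfkill_poll() while polling is active */
static void mydrv_rfkill_poll(struct wiphy *wiphy)
{
	struct mydrv_priv *priv = wiphy_priv(wiphy);

	/* report the current hardware kill-switch state to cfg80211 */
	wiphy_rfkill_set_hw_state(wiphy, priv->killswitch);
}

static const struct cfg80211_ops mydrv_cfg_ops = {
	/* ... other operations ... */
	.rfkill_poll	= mydrv_rfkill_poll,
};

static int mydrv_start(struct mydrv_priv *priv)
{
	int err = wiphy_register(priv->wiphy);

	if (err)
		return err;
	/* cfg80211 installs rfkill_ops.poll and resumes rfkill polling */
	wiphy_rfkill_start_polling(priv->wiphy);
	return 0;
}

static void mydrv_stop(struct mydrv_priv *priv)
{
	wiphy_rfkill_stop_polling(priv->wiphy);
	wiphy_unregister(priv->wiphy);
}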
diff --git a/net/wireless/core.h b/net/wireless/core.h
index ab512bcd8153..bfa340c7abb5 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -11,6 +11,8 @@
11#include <linux/kref.h> 11#include <linux/kref.h>
12#include <linux/rbtree.h> 12#include <linux/rbtree.h>
13#include <linux/debugfs.h> 13#include <linux/debugfs.h>
14#include <linux/rfkill.h>
15#include <linux/workqueue.h>
14#include <net/genetlink.h> 16#include <net/genetlink.h>
15#include <net/cfg80211.h> 17#include <net/cfg80211.h>
16#include "reg.h" 18#include "reg.h"
@@ -24,6 +26,11 @@ struct cfg80211_registered_device {
24 * any call is in progress */ 26 * any call is in progress */
25 struct mutex mtx; 27 struct mutex mtx;
26 28
29 /* rfkill support */
30 struct rfkill_ops rfkill_ops;
31 struct rfkill *rfkill;
32 struct work_struct rfkill_sync;
33
27 /* ISO / IEC 3166 alpha2 for which this device is receiving 34 /* ISO / IEC 3166 alpha2 for which this device is receiving
28 * country IEs on, this can help disregard country IEs from APs 35 * country IEs on, this can help disregard country IEs from APs
29 * on the same alpha2 quickly. The alpha2 may differ from 36 * on the same alpha2 quickly. The alpha2 may differ from
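To summarize the lifecycle of the rfkill object these new fields carry (all of it visible in the core.c hunks above):

/*
 * wiphy_new()          -> rfkill_alloc() + INIT_WORK(&drv->rfkill_sync, ...)
 * wiphy_register()     -> rfkill_register()
 * wiphy_unregister()   -> rfkill_unregister()
 * cfg80211_dev_free()  -> rfkill_destroy()
 *
 * A hard block reported through wiphy_rfkill_set_hw_state() schedules
 * rfkill_sync, whose worker closes every netdev registered on the wiphy.
 */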
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 4b4d3c8a1aed..24168560ebae 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1687,6 +1687,12 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
1687 if (err) 1687 if (err)
1688 goto out_rtnl; 1688 goto out_rtnl;
1689 1689
1690 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
1691 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
1692 err = -EINVAL;
1693 goto out;
1694 }
1695
1690 err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan); 1696 err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
1691 if (err) 1697 if (err)
1692 goto out; 1698 goto out;
@@ -1738,7 +1744,11 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
1738 nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); 1744 nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]);
1739 params.listen_interval = 1745 params.listen_interval =
1740 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); 1746 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
1747
1741 params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); 1748 params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
1749 if (!params.aid || params.aid > IEEE80211_MAX_AID)
1750 return -EINVAL;
1751
1742 if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) 1752 if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
1743 params.ht_capa = 1753 params.ht_capa =
1744 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); 1754 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
@@ -3559,11 +3569,43 @@ void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev)
3559 genlmsg_multicast(msg, 0, nl80211_config_mcgrp.id, GFP_KERNEL); 3569 genlmsg_multicast(msg, 0, nl80211_config_mcgrp.id, GFP_KERNEL);
3560} 3570}
3561 3571
3572static int nl80211_add_scan_req(struct sk_buff *msg,
3573 struct cfg80211_registered_device *rdev)
3574{
3575 struct cfg80211_scan_request *req = rdev->scan_req;
3576 struct nlattr *nest;
3577 int i;
3578
3579 if (WARN_ON(!req))
3580 return 0;
3581
3582 nest = nla_nest_start(msg, NL80211_ATTR_SCAN_SSIDS);
3583 if (!nest)
3584 goto nla_put_failure;
3585 for (i = 0; i < req->n_ssids; i++)
3586 NLA_PUT(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid);
3587 nla_nest_end(msg, nest);
3588
3589 nest = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES);
3590 if (!nest)
3591 goto nla_put_failure;
3592 for (i = 0; i < req->n_channels; i++)
3593 NLA_PUT_U32(msg, i, req->channels[i]->center_freq);
3594 nla_nest_end(msg, nest);
3595
3596 if (req->ie)
3597 NLA_PUT(msg, NL80211_ATTR_IE, req->ie_len, req->ie);
3598
3599 return 0;
3600 nla_put_failure:
3601 return -ENOBUFS;
3602}
3603
3562static int nl80211_send_scan_donemsg(struct sk_buff *msg, 3604static int nl80211_send_scan_donemsg(struct sk_buff *msg,
3563 struct cfg80211_registered_device *rdev, 3605 struct cfg80211_registered_device *rdev,
3564 struct net_device *netdev, 3606 struct net_device *netdev,
3565 u32 pid, u32 seq, int flags, 3607 u32 pid, u32 seq, int flags,
3566 u32 cmd) 3608 u32 cmd)
3567{ 3609{
3568 void *hdr; 3610 void *hdr;
3569 3611
@@ -3574,7 +3616,8 @@ static int nl80211_send_scan_donemsg(struct sk_buff *msg,
3574 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 3616 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
3575 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 3617 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
3576 3618
3577 /* XXX: we should probably bounce back the request? */ 3619 /* ignore errors and send incomplete event anyway */
3620 nl80211_add_scan_req(msg, rdev);
3578 3621
3579 return genlmsg_end(msg, hdr); 3622 return genlmsg_end(msg, hdr);
3580 3623
@@ -3828,7 +3871,7 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
3828 struct sk_buff *msg; 3871 struct sk_buff *msg;
3829 void *hdr; 3872 void *hdr;
3830 3873
3831 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 3874 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
3832 if (!msg) 3875 if (!msg)
3833 return; 3876 return;
3834 3877
@@ -3852,7 +3895,7 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
3852 return; 3895 return;
3853 } 3896 }
3854 3897
3855 genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_KERNEL); 3898 genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_ATOMIC);
3856 return; 3899 return;
3857 3900
3858 nla_put_failure: 3901 nla_put_failure:
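The scan-done event now carries the original request as nested attributes. A hypothetical receiver-side walk of those attributes, using the standard kernel netlink helpers (error handling omitted, attribute names as in nl80211.h):

#include <net/netlink.h>
#include <linux/nl80211.h>

/* illustrative only: dump the nested scan attributes built above */
static void dump_scan_attrs(struct nlattr *tb[])
{
	struct nlattr *attr;
	int rem;

	if (tb[NL80211_ATTR_SCAN_FREQUENCIES])
		nla_for_each_nested(attr, tb[NL80211_ATTR_SCAN_FREQUENCIES], rem)
			pr_debug("scan freq: %u MHz\n", nla_get_u32(attr));

	if (tb[NL80211_ATTR_SCAN_SSIDS])
		nla_for_each_nested(attr, tb[NL80211_ATTR_SCAN_SSIDS], rem)
			pr_debug("scan ssid: %.*s\n",
				 nla_len(attr), (char *)nla_data(attr));
}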
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index f87ac1df2df5..ea4c299fbe3b 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2171,7 +2171,13 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2171 * the country IE rd with what CRDA believes that country should have 2171 * the country IE rd with what CRDA believes that country should have
2172 */ 2172 */
2173 2173
2174 BUG_ON(!country_ie_regdomain); 2174 /*
2175 * Userspace could have sent two replies with only
2176 * one kernel request. By the second reply we would have
2177 * already processed and consumed the country_ie_regdomain.
2178 */
2179 if (!country_ie_regdomain)
2180 return -EALREADY;
2175 BUG_ON(rd == country_ie_regdomain); 2181 BUG_ON(rd == country_ie_regdomain);
2176 2182
2177 /* 2183 /*
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index df59440290e5..e95b638b919f 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -29,13 +29,14 @@ void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
29 goto out; 29 goto out;
30 30
31 WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req); 31 WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req);
32 wiphy_to_dev(request->wiphy)->scan_req = NULL;
33 32
34 if (aborted) 33 if (aborted)
35 nl80211_send_scan_aborted(wiphy_to_dev(request->wiphy), dev); 34 nl80211_send_scan_aborted(wiphy_to_dev(request->wiphy), dev);
36 else 35 else
37 nl80211_send_scan_done(wiphy_to_dev(request->wiphy), dev); 36 nl80211_send_scan_done(wiphy_to_dev(request->wiphy), dev);
38 37
38 wiphy_to_dev(request->wiphy)->scan_req = NULL;
39
39#ifdef CONFIG_WIRELESS_EXT 40#ifdef CONFIG_WIRELESS_EXT
40 if (!aborted) { 41 if (!aborted) {
41 memset(&wrqu, 0, sizeof(wrqu)); 42 memset(&wrqu, 0, sizeof(wrqu));
diff --git a/net/wireless/util.c b/net/wireless/util.c
index d072bff463aa..25550692dda6 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -157,26 +157,25 @@ int cfg80211_validate_key_settings(struct key_params *params, int key_idx,
157 params->cipher != WLAN_CIPHER_SUITE_WEP104) 157 params->cipher != WLAN_CIPHER_SUITE_WEP104)
158 return -EINVAL; 158 return -EINVAL;
159 159
160 /* TODO: add definitions for the lengths to linux/ieee80211.h */
161 switch (params->cipher) { 160 switch (params->cipher) {
162 case WLAN_CIPHER_SUITE_WEP40: 161 case WLAN_CIPHER_SUITE_WEP40:
163 if (params->key_len != 5) 162 if (params->key_len != WLAN_KEY_LEN_WEP40)
164 return -EINVAL; 163 return -EINVAL;
165 break; 164 break;
166 case WLAN_CIPHER_SUITE_TKIP: 165 case WLAN_CIPHER_SUITE_TKIP:
167 if (params->key_len != 32) 166 if (params->key_len != WLAN_KEY_LEN_TKIP)
168 return -EINVAL; 167 return -EINVAL;
169 break; 168 break;
170 case WLAN_CIPHER_SUITE_CCMP: 169 case WLAN_CIPHER_SUITE_CCMP:
171 if (params->key_len != 16) 170 if (params->key_len != WLAN_KEY_LEN_CCMP)
172 return -EINVAL; 171 return -EINVAL;
173 break; 172 break;
174 case WLAN_CIPHER_SUITE_WEP104: 173 case WLAN_CIPHER_SUITE_WEP104:
175 if (params->key_len != 13) 174 if (params->key_len != WLAN_KEY_LEN_WEP104)
176 return -EINVAL; 175 return -EINVAL;
177 break; 176 break;
178 case WLAN_CIPHER_SUITE_AES_CMAC: 177 case WLAN_CIPHER_SUITE_AES_CMAC:
179 if (params->key_len != 16) 178 if (params->key_len != WLAN_KEY_LEN_AES_CMAC)
180 return -EINVAL; 179 return -EINVAL;
181 break; 180 break;
182 default: 181 default:
@@ -259,7 +258,7 @@ unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb)
259} 258}
260EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb); 259EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb);
261 260
262int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr) 261static int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
263{ 262{
264 int ae = meshhdr->flags & MESH_FLAGS_AE; 263 int ae = meshhdr->flags & MESH_FLAGS_AE;
265 /* 7.1.3.5a.2 */ 264 /* 7.1.3.5a.2 */
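The WLAN_KEY_LEN_* symbols simply name the literal key lengths removed above, i.e. (in bytes, replaced one-for-one in this hunk):

/*
 * WLAN_KEY_LEN_WEP40    = 5
 * WLAN_KEY_LEN_WEP104   = 13
 * WLAN_KEY_LEN_TKIP     = 32
 * WLAN_KEY_LEN_CCMP     = 16
 * WLAN_KEY_LEN_AES_CMAC = 16
 */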
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 711e00a0c9b5..d030c5315672 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -744,3 +744,86 @@ int cfg80211_wext_giwencode(struct net_device *dev,
744 return err; 744 return err;
745} 745}
746EXPORT_SYMBOL_GPL(cfg80211_wext_giwencode); 746EXPORT_SYMBOL_GPL(cfg80211_wext_giwencode);
747
748int cfg80211_wext_siwtxpower(struct net_device *dev,
749 struct iw_request_info *info,
750 union iwreq_data *data, char *extra)
751{
752 struct wireless_dev *wdev = dev->ieee80211_ptr;
753 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
754 enum tx_power_setting type;
755 int dbm = 0;
756
757 if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM)
758 return -EINVAL;
759 if (data->txpower.flags & IW_TXPOW_RANGE)
760 return -EINVAL;
761
762 if (!rdev->ops->set_tx_power)
763 return -EOPNOTSUPP;
764
765 /* only change when not disabling */
766 if (!data->txpower.disabled) {
767 rfkill_set_sw_state(rdev->rfkill, false);
768
769 if (data->txpower.fixed) {
770 /*
771 * wext doesn't support negative values, see
772 * below where it's for automatic
773 */
774 if (data->txpower.value < 0)
775 return -EINVAL;
776 dbm = data->txpower.value;
777 type = TX_POWER_FIXED;
778 /* TODO: do regulatory check! */
779 } else {
780 /*
781 * Automatic power level setting, max being the value
782 * passed in from userland.
783 */
784 if (data->txpower.value < 0) {
785 type = TX_POWER_AUTOMATIC;
786 } else {
787 dbm = data->txpower.value;
788 type = TX_POWER_LIMITED;
789 }
790 }
791 } else {
792 rfkill_set_sw_state(rdev->rfkill, true);
793 schedule_work(&rdev->rfkill_sync);
794 return 0;
795 }
796
797	return rdev->ops->set_tx_power(wdev->wiphy, type, dbm);
798}
799EXPORT_SYMBOL_GPL(cfg80211_wext_siwtxpower);
800
801int cfg80211_wext_giwtxpower(struct net_device *dev,
802 struct iw_request_info *info,
803 union iwreq_data *data, char *extra)
804{
805 struct wireless_dev *wdev = dev->ieee80211_ptr;
806 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
807 int err, val;
808
809 if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM)
810 return -EINVAL;
811 if (data->txpower.flags & IW_TXPOW_RANGE)
812 return -EINVAL;
813
814 if (!rdev->ops->get_tx_power)
815 return -EOPNOTSUPP;
816
817 err = rdev->ops->get_tx_power(wdev->wiphy, &val);
818 if (err)
819 return err;
820
821 /* well... oh well */
822 data->txpower.fixed = 1;
823 data->txpower.disabled = rfkill_blocked(rdev->rfkill);
824 data->txpower.value = val;
825 data->txpower.flags = IW_TXPOW_DBM;
826
827 return 0;
828}
829EXPORT_SYMBOL_GPL(cfg80211_wext_giwtxpower);