Diffstat (limited to 'net')
-rw-r--r--  net/ieee802154/af_ieee802154.c |  12
-rw-r--r--  net/ipv4/route.c               |  14
-rw-r--r--  net/iucv/af_iucv.c             | 297
-rw-r--r--  net/rfkill/core.c              |  56
-rw-r--r--  net/wireless/nl80211.c         |  95
 5 files changed, 312 insertions(+), 162 deletions(-)
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
index 882a927cefae..3bb6bdb1dac1 100644
--- a/net/ieee802154/af_ieee802154.c
+++ b/net/ieee802154/af_ieee802154.c
@@ -39,14 +39,6 @@
 
 #include "af802154.h"
 
-#define DBG_DUMP(data, len) { \
-        int i; \
-        pr_debug("function: %s: data: len %d:\n", __func__, len); \
-        for (i = 0; i < len; i++) {\
-                pr_debug("%02x: %02x\n", i, (data)[i]); \
-        } \
-}
-
 /*
  * Utility function for families
  */
@@ -302,10 +294,12 @@ static struct net_proto_family ieee802154_family_ops = {
 static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev,
         struct packet_type *pt, struct net_device *orig_dev)
 {
-        DBG_DUMP(skb->data, skb->len);
         if (!netif_running(dev))
                 return -ENODEV;
         pr_debug("got frame, type %d, dev %p\n", dev->type, dev);
+#ifdef DEBUG
+        print_hex_dump_bytes("ieee802154_rcv ", DUMP_PREFIX_NONE, skb->data, skb->len);
+#endif
 
         if (!net_eq(dev_net(dev), &init_net))
                 goto drop;
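
The hunk above drops the hand-rolled DBG_DUMP() macro in favour of the kernel's print_hex_dump_bytes() helper, compiled in only when DEBUG is defined. A minimal sketch of the same pattern for other receive paths; the function name and prefix string below are made up for illustration and are not part of this patch:

#include <linux/kernel.h>
#include <linux/types.h>

static void example_dump_frame(const u8 *data, size_t len)
{
#ifdef DEBUG
        /* hex-dumps len bytes, 16 per line, each line tagged with the prefix */
        print_hex_dump_bytes("example_rcv ", DUMP_PREFIX_OFFSET, data, len);
#endif
}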
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cd76b3cb7092..65b3a8b11a6c 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1085,8 +1085,16 @@ restart:
         now = jiffies;
 
         if (!rt_caching(dev_net(rt->u.dst.dev))) {
-                rt_drop(rt);
-                return 0;
+                /*
+                 * If we're not caching, just tell the caller we
+                 * were successful and don't touch the route. The
+                 * caller hold the sole reference to the cache entry, and
+                 * it will be released when the caller is done with it.
+                 * If we drop it here, the callers have no way to resolve routes
+                 * when we're not caching. Instead, just point *rp at rt, so
+                 * the caller gets a single use out of the route
+                 */
+                goto report_and_exit;
         }
 
         rthp = &rt_hash_table[hash].chain;
@@ -1217,6 +1225,8 @@ restart:
         rcu_assign_pointer(rt_hash_table[hash].chain, rt);
 
         spin_unlock_bh(rt_hash_lock_addr(hash));
+
+report_and_exit:
         if (rp)
                 *rp = rt;
         else
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 656cbd195825..6be5f92d1094 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -54,6 +54,38 @@ static const u8 iprm_shutdown[8] =
 #define CB_TRGCLS(skb)        ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
 #define CB_TRGCLS_LEN        (TRGCLS_SIZE)
 
+#define __iucv_sock_wait(sk, condition, timeo, ret)                        \
+do {                                                                        \
+        DEFINE_WAIT(__wait);                                                \
+        long __timeo = timeo;                                                \
+        ret = 0;                                                        \
+        while (!(condition)) {                                                \
+                prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE); \
+                if (!__timeo) {                                                \
+                        ret = -EAGAIN;                                        \
+                        break;                                                \
+                }                                                        \
+                if (signal_pending(current)) {                                \
+                        ret = sock_intr_errno(__timeo);                        \
+                        break;                                                \
+                }                                                        \
+                release_sock(sk);                                        \
+                __timeo = schedule_timeout(__timeo);                        \
+                lock_sock(sk);                                                \
+                ret = sock_error(sk);                                        \
+                if (ret)                                                \
+                        break;                                                \
+        }                                                                \
+        finish_wait(sk->sk_sleep, &__wait);                                \
+} while (0)
+
+#define iucv_sock_wait(sk, condition, timeo)                                \
+({                                                                        \
+        int __ret = 0;                                                        \
+        if (!(condition))                                                \
+                __iucv_sock_wait(sk, condition, timeo, __ret);                \
+        __ret;                                                                \
+})
 
 static void iucv_sock_kill(struct sock *sk);
 static void iucv_sock_close(struct sock *sk);
@@ -238,6 +270,48 @@ static inline size_t iucv_msg_length(struct iucv_message *msg)
         return msg->length;
 }
 
+/**
+ * iucv_sock_in_state() - check for specific states
+ * @sk:                sock structure
+ * @state:        first iucv sk state
+ * @state:        second iucv sk state
+ *
+ * Returns true if the socket in either in the first or second state.
+ */
+static int iucv_sock_in_state(struct sock *sk, int state, int state2)
+{
+        return (sk->sk_state == state || sk->sk_state == state2);
+}
+
+/**
+ * iucv_below_msglim() - function to check if messages can be sent
+ * @sk:                sock structure
+ *
+ * Returns true if the send queue length is lower than the message limit.
+ * Always returns true if the socket is not connected (no iucv path for
+ * checking the message limit).
+ */
+static inline int iucv_below_msglim(struct sock *sk)
+{
+        struct iucv_sock *iucv = iucv_sk(sk);
+
+        if (sk->sk_state != IUCV_CONNECTED)
+                return 1;
+        return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+}
+
+/**
+ * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
+ */
+static void iucv_sock_wake_msglim(struct sock *sk)
+{
+        read_lock(&sk->sk_callback_lock);
+        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+                wake_up_interruptible_all(sk->sk_sleep);
+        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+        read_unlock(&sk->sk_callback_lock);
+}
+
 /* Timers */
 static void iucv_sock_timeout(unsigned long arg)
 {
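
Taken together, the helpers above implement send-side flow control against the IUCV message limit: the send path sleeps in iucv_sock_wait() until iucv_below_msglim() holds, and the transmit-completion callback wakes the sleepers through iucv_sock_wake_msglim() (both call sites appear in later hunks of this file). A condensed, hypothetical sketch of the same wait/wake pattern with a plain wait queue, for comparison only; the IUCV code open-codes the loop in __iucv_sock_wait() because it must drop the socket lock around schedule_timeout(), which wait_event_interruptible_timeout() cannot do:

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/wait.h>

#define EXAMPLE_MSGLIM 8        /* stand-in for iucv->path->msglim */

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static atomic_t example_in_flight = ATOMIC_INIT(0);

/* sender side: sleep until the in-flight count drops below the limit,
 * a signal arrives, or the timeout expires */
static long example_wait_below_limit(long timeout)
{
        return wait_event_interruptible_timeout(example_wq,
                        atomic_read(&example_in_flight) < EXAMPLE_MSGLIM,
                        timeout);
}

/* completion side: account for the finished message and wake senders */
static void example_tx_done(void)
{
        atomic_dec(&example_in_flight);
        wake_up_interruptible_all(&example_wq);
}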
@@ -329,7 +403,9 @@ static void iucv_sock_close(struct sock *sk)
                                 timeo = sk->sk_lingertime;
                         else
                                 timeo = IUCV_DISCONN_TIMEOUT;
-                        err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
+                        err = iucv_sock_wait(sk,
+                                        iucv_sock_in_state(sk, IUCV_CLOSED, 0),
+                                        timeo);
                 }
 
         case IUCV_CLOSING:   /* fall through */
@@ -510,39 +586,6 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
         return NULL;
 }
 
-int iucv_sock_wait_state(struct sock *sk, int state, int state2,
-                         unsigned long timeo)
-{
-        DECLARE_WAITQUEUE(wait, current);
-        int err = 0;
-
-        add_wait_queue(sk->sk_sleep, &wait);
-        while (sk->sk_state != state && sk->sk_state != state2) {
-                set_current_state(TASK_INTERRUPTIBLE);
-
-                if (!timeo) {
-                        err = -EAGAIN;
-                        break;
-                }
-
-                if (signal_pending(current)) {
-                        err = sock_intr_errno(timeo);
-                        break;
-                }
-
-                release_sock(sk);
-                timeo = schedule_timeout(timeo);
-                lock_sock(sk);
-
-                err = sock_error(sk);
-                if (err)
-                        break;
-        }
-        set_current_state(TASK_RUNNING);
-        remove_wait_queue(sk->sk_sleep, &wait);
-        return err;
-}
-
 /* Bind an unbound socket */
 static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
                           int addr_len)
@@ -687,8 +730,9 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
         }
 
         if (sk->sk_state != IUCV_CONNECTED) {
-                err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
-                                sock_sndtimeo(sk, flags & O_NONBLOCK));
+                err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
+                                                            IUCV_DISCONN),
+                                     sock_sndtimeo(sk, flags & O_NONBLOCK));
         }
 
         if (sk->sk_state == IUCV_DISCONN) {
@@ -842,9 +886,11 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
         struct iucv_message txmsg;
         struct cmsghdr *cmsg;
         int cmsg_done;
+        long timeo;
         char user_id[9];
         char appl_id[9];
         int err;
+        int noblock = msg->msg_flags & MSG_DONTWAIT;
 
         err = sock_error(sk);
         if (err)
@@ -864,108 +910,119 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                 goto out;
         }
 
-        if (sk->sk_state == IUCV_CONNECTED) {
-                /* initialize defaults */
-                cmsg_done = 0;        /* check for duplicate headers */
-                txmsg.class = 0;
-
-                /* iterate over control messages */
-                for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
-                        cmsg = CMSG_NXTHDR(msg, cmsg)) {
-
-                        if (!CMSG_OK(msg, cmsg)) {
-                                err = -EINVAL;
-                                goto out;
-                        }
-
-                        if (cmsg->cmsg_level != SOL_IUCV)
-                                continue;
-
-                        if (cmsg->cmsg_type & cmsg_done) {
-                                err = -EINVAL;
-                                goto out;
-                        }
-                        cmsg_done |= cmsg->cmsg_type;
-
-                        switch (cmsg->cmsg_type) {
-                        case SCM_IUCV_TRGCLS:
-                                if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
-                                        err = -EINVAL;
-                                        goto out;
-                                }
-
-                                /* set iucv message target class */
-                                memcpy(&txmsg.class,
-                                        (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
-
-                                break;
-
-                        default:
-                                err = -EINVAL;
-                                goto out;
-                                break;
-                        }
-                }
-
-                /* allocate one skb for each iucv message:
-                 * this is fine for SOCK_SEQPACKET (unless we want to support
-                 * segmented records using the MSG_EOR flag), but
-                 * for SOCK_STREAM we might want to improve it in future */
-                if (!(skb = sock_alloc_send_skb(sk, len,
-                                msg->msg_flags & MSG_DONTWAIT,
-                                &err)))
-                        goto out;
-
-                if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
-                        err = -EFAULT;
-                        goto fail;
-                }
-
-                /* increment and save iucv message tag for msg_completion cbk */
-                txmsg.tag = iucv->send_tag++;
-                memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
-                skb_queue_tail(&iucv->send_skb_q, skb);
-
-                if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
-                      && skb->len <= 7) {
-                        err = iucv_send_iprm(iucv->path, &txmsg, skb);
-
-                        /* on success: there is no message_complete callback
-                         * for an IPRMDATA msg; remove skb from send queue */
-                        if (err == 0) {
-                                skb_unlink(skb, &iucv->send_skb_q);
-                                kfree_skb(skb);
-                        }
-
-                        /* this error should never happen since the
-                         * IUCV_IPRMDATA path flag is set... sever path */
-                        if (err == 0x15) {
-                                iucv_path_sever(iucv->path, NULL);
-                                skb_unlink(skb, &iucv->send_skb_q);
-                                err = -EPIPE;
-                                goto fail;
-                        }
-                } else
-                        err = iucv_message_send(iucv->path, &txmsg, 0, 0,
-                                (void *) skb->data, skb->len);
-                if (err) {
-                        if (err == 3) {
-                                user_id[8] = 0;
-                                memcpy(user_id, iucv->dst_user_id, 8);
-                                appl_id[8] = 0;
-                                memcpy(appl_id, iucv->dst_name, 8);
-                                pr_err("Application %s on z/VM guest %s"
-                                       " exceeds message limit\n",
-                                       user_id, appl_id);
-                        }
-                        skb_unlink(skb, &iucv->send_skb_q);
-                        err = -EPIPE;
-                        goto fail;
-                }
-
-        } else {
-                err = -ENOTCONN;
-                goto out;
-        }
+        /* Return if the socket is not in connected state */
+        if (sk->sk_state != IUCV_CONNECTED) {
+                err = -ENOTCONN;
+                goto out;
+        }
+
+        /* initialize defaults */
+        cmsg_done = 0;        /* check for duplicate headers */
+        txmsg.class = 0;
+
+        /* iterate over control messages */
+        for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
+                cmsg = CMSG_NXTHDR(msg, cmsg)) {
+
+                if (!CMSG_OK(msg, cmsg)) {
+                        err = -EINVAL;
+                        goto out;
+                }
+
+                if (cmsg->cmsg_level != SOL_IUCV)
+                        continue;
+
+                if (cmsg->cmsg_type & cmsg_done) {
+                        err = -EINVAL;
+                        goto out;
+                }
+                cmsg_done |= cmsg->cmsg_type;
+
+                switch (cmsg->cmsg_type) {
+                case SCM_IUCV_TRGCLS:
+                        if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
+                                err = -EINVAL;
+                                goto out;
+                        }
+
+                        /* set iucv message target class */
+                        memcpy(&txmsg.class,
+                                (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
+
+                        break;
+
+                default:
+                        err = -EINVAL;
+                        goto out;
+                        break;
+                }
+        }
+
+        /* allocate one skb for each iucv message:
+         * this is fine for SOCK_SEQPACKET (unless we want to support
+         * segmented records using the MSG_EOR flag), but
+         * for SOCK_STREAM we might want to improve it in future */
+        skb = sock_alloc_send_skb(sk, len, noblock, &err);
+        if (!skb)
+                goto out;
+        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+                err = -EFAULT;
+                goto fail;
+        }
+
+        /* wait if outstanding messages for iucv path has reached */
+        timeo = sock_sndtimeo(sk, noblock);
+        err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
+        if (err)
+                goto fail;
+
+        /* return -ECONNRESET if the socket is no longer connected */
+        if (sk->sk_state != IUCV_CONNECTED) {
+                err = -ECONNRESET;
+                goto fail;
+        }
+
+        /* increment and save iucv message tag for msg_completion cbk */
+        txmsg.tag = iucv->send_tag++;
+        memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+        skb_queue_tail(&iucv->send_skb_q, skb);
+
+        if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
+              && skb->len <= 7) {
+                err = iucv_send_iprm(iucv->path, &txmsg, skb);
+
+                /* on success: there is no message_complete callback
+                 * for an IPRMDATA msg; remove skb from send queue */
+                if (err == 0) {
+                        skb_unlink(skb, &iucv->send_skb_q);
+                        kfree_skb(skb);
+                }
+
+                /* this error should never happen since the
+                 * IUCV_IPRMDATA path flag is set... sever path */
+                if (err == 0x15) {
+                        iucv_path_sever(iucv->path, NULL);
+                        skb_unlink(skb, &iucv->send_skb_q);
+                        err = -EPIPE;
+                        goto fail;
+                }
+        } else
+                err = iucv_message_send(iucv->path, &txmsg, 0, 0,
+                        (void *) skb->data, skb->len);
+        if (err) {
+                if (err == 3) {
+                        user_id[8] = 0;
+                        memcpy(user_id, iucv->dst_user_id, 8);
+                        appl_id[8] = 0;
+                        memcpy(appl_id, iucv->dst_name, 8);
+                        pr_err("Application %s on z/VM guest %s"
+                               " exceeds message limit\n",
+                               appl_id, user_id);
+                        err = -EAGAIN;
+                } else
+                        err = -EPIPE;
+                skb_unlink(skb, &iucv->send_skb_q);
+                goto fail;
+        }
 
         release_sock(sk);
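
With the rewritten send path above, a sender that hits the IUCV message limit now blocks in iucv_sock_wait() (or, with MSG_DONTWAIT, fails with -EAGAIN) instead of severing with -EPIPE, and the err == 3 case from iucv_message_send() is likewise mapped to -EAGAIN. A hedged userspace sketch of what this looks like to an AF_IUCV application; the socket setup and retry policy are illustrative only, not part of this patch:

#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>
#include <sys/types.h>

/* fd is assumed to be a connected AF_IUCV socket set up elsewhere */
static ssize_t example_send(int fd, const void *buf, size_t len)
{
        ssize_t n = send(fd, buf, len, MSG_DONTWAIT);

        if (n < 0 && errno == EAGAIN) {
                /* message limit (or socket buffer) currently exhausted:
                 * nothing was queued, try again later; a blocking send()
                 * would instead sleep until the peer confirms delivery */
                fprintf(stderr, "send deferred, message limit reached\n");
        }
        return n;
}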
@@ -1581,7 +1638,11 @@ static void iucv_callback_txdone(struct iucv_path *path,
 
         spin_unlock_irqrestore(&list->lock, flags);
 
-        kfree_skb(this);
+        if (this) {
+                kfree_skb(this);
+                /* wake up any process waiting for sending */
+                iucv_sock_wake_msglim(sk);
+        }
         }
         BUG_ON(!this);
 
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 4e68ab439d5d..79693fe2001e 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -56,7 +56,6 @@ struct rfkill {
         u32                        idx;
 
         bool                        registered;
-        bool                        suspended;
         bool                        persistent;
 
         const struct rfkill_ops        *ops;
@@ -224,7 +223,7 @@ static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
 
 static void rfkill_event(struct rfkill *rfkill)
 {
-        if (!rfkill->registered || rfkill->suspended)
+        if (!rfkill->registered)
                 return;
 
         kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
@@ -270,6 +269,9 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
         unsigned long flags;
         int err;
 
+        if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
+                return;
+
         /*
          * Some platforms (...!) generate input events which affect the
          * _hard_ kill state -- whenever something tries to change the
@@ -292,9 +294,6 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
         rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
         spin_unlock_irqrestore(&rfkill->lock, flags);
 
-        if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
-                return;
-
         err = rfkill->ops->set_block(rfkill->data, blocked);
 
         spin_lock_irqsave(&rfkill->lock, flags);
@@ -508,19 +507,32 @@ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
         blocked = blocked || hwblock;
         spin_unlock_irqrestore(&rfkill->lock, flags);
 
-        if (!rfkill->registered) {
-                rfkill->persistent = true;
-        } else {
-                if (prev != blocked && !hwblock)
-                        schedule_work(&rfkill->uevent_work);
+        if (!rfkill->registered)
+                return blocked;
 
-                rfkill_led_trigger_event(rfkill);
-        }
+        if (prev != blocked && !hwblock)
+                schedule_work(&rfkill->uevent_work);
+
+        rfkill_led_trigger_event(rfkill);
 
         return blocked;
 }
 EXPORT_SYMBOL(rfkill_set_sw_state);
 
+void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
+{
+        unsigned long flags;
+
+        BUG_ON(!rfkill);
+        BUG_ON(rfkill->registered);
+
+        spin_lock_irqsave(&rfkill->lock, flags);
+        __rfkill_set_sw_state(rfkill, blocked);
+        rfkill->persistent = true;
+        spin_unlock_irqrestore(&rfkill->lock, flags);
+}
+EXPORT_SYMBOL(rfkill_init_sw_state);
+
 void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
 {
         unsigned long flags;
@@ -598,6 +610,15 @@ static ssize_t rfkill_idx_show(struct device *dev,
         return sprintf(buf, "%d\n", rfkill->idx);
 }
 
+static ssize_t rfkill_persistent_show(struct device *dev,
+                                      struct device_attribute *attr,
+                                      char *buf)
+{
+        struct rfkill *rfkill = to_rfkill(dev);
+
+        return sprintf(buf, "%d\n", rfkill->persistent);
+}
+
 static u8 user_state_from_blocked(unsigned long state)
 {
         if (state & RFKILL_BLOCK_HW)
@@ -656,6 +677,7 @@ static struct device_attribute rfkill_dev_attrs[] = {
         __ATTR(name, S_IRUGO, rfkill_name_show, NULL),
         __ATTR(type, S_IRUGO, rfkill_type_show, NULL),
         __ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
+        __ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL),
         __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
         __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
         __ATTR_NULL
@@ -718,8 +740,6 @@ static int rfkill_suspend(struct device *dev, pm_message_t state)
 
         rfkill_pause_polling(rfkill);
 
-        rfkill->suspended = true;
-
         return 0;
 }
 
@@ -728,10 +748,10 @@ static int rfkill_resume(struct device *dev)
         struct rfkill *rfkill = to_rfkill(dev);
         bool cur;
 
-        cur = !!(rfkill->state & RFKILL_BLOCK_SW);
-        rfkill_set_block(rfkill, cur);
-
-        rfkill->suspended = false;
+        if (!rfkill->persistent) {
+                cur = !!(rfkill->state & RFKILL_BLOCK_SW);
+                rfkill_set_block(rfkill, cur);
+        }
 
         rfkill_resume_polling(rfkill);
 
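
The new rfkill_init_sw_state() exported above lets a driver seed the software block state before registration and marks it persistent, and the reworked rfkill_resume() then leaves that state alone instead of re-applying a default. A hedged sketch of the driver side; everything named example_* is hypothetical, while rfkill_alloc(), rfkill_init_sw_state(), rfkill_register() and rfkill_destroy() are the real API being used:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/rfkill.h>

static bool example_radio_is_soft_blocked(void *priv);        /* hypothetical firmware query */

static int example_register_rfkill(struct device *parent,
                                   const struct rfkill_ops *ops, void *priv)
{
        struct rfkill *rfk;
        int err;

        rfk = rfkill_alloc("example-wlan", parent, RFKILL_TYPE_WLAN, ops, priv);
        if (!rfk)
                return -ENOMEM;

        /* record the state the hardware booted with; this also sets the
         * persistent flag, so rfkill_resume() will not override it */
        rfkill_init_sw_state(rfk, example_radio_is_soft_blocked(priv));

        err = rfkill_register(rfk);
        if (err)
                rfkill_destroy(rfk);
        return err;
}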
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 24168560ebae..241bddd0b4f1 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1687,13 +1687,52 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
         if (err)
                 goto out_rtnl;
 
-        if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
-            dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
-                err = -EINVAL;
+        err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
+        if (err)
                 goto out;
+
+        /* validate settings */
+        err = 0;
+
+        switch (dev->ieee80211_ptr->iftype) {
+        case NL80211_IFTYPE_AP:
+        case NL80211_IFTYPE_AP_VLAN:
+                /* disallow mesh-specific things */
+                if (params.plink_action)
+                        err = -EINVAL;
+                break;
+        case NL80211_IFTYPE_STATION:
+                /* disallow everything but AUTHORIZED flag */
+                if (params.plink_action)
+                        err = -EINVAL;
+                if (params.vlan)
+                        err = -EINVAL;
+                if (params.supported_rates)
+                        err = -EINVAL;
+                if (params.ht_capa)
+                        err = -EINVAL;
+                if (params.listen_interval >= 0)
+                        err = -EINVAL;
+                if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
+                        err = -EINVAL;
+                break;
+        case NL80211_IFTYPE_MESH_POINT:
+                /* disallow things mesh doesn't support */
+                if (params.vlan)
+                        err = -EINVAL;
+                if (params.ht_capa)
+                        err = -EINVAL;
+                if (params.listen_interval >= 0)
+                        err = -EINVAL;
+                if (params.supported_rates)
+                        err = -EINVAL;
+                if (params.sta_flags_mask)
+                        err = -EINVAL;
+                break;
+        default:
+                err = -EINVAL;
         }
 
-        err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
         if (err)
                 goto out;
 
@@ -1728,9 +1767,6 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
         if (!info->attrs[NL80211_ATTR_MAC])
                 return -EINVAL;
 
-        if (!info->attrs[NL80211_ATTR_STA_AID])
-                return -EINVAL;
-
         if (!info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL])
                 return -EINVAL;
 
@@ -1745,9 +1781,11 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
         params.listen_interval =
                 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
 
-        params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
-        if (!params.aid || params.aid > IEEE80211_MAX_AID)
-                return -EINVAL;
+        if (info->attrs[NL80211_ATTR_STA_AID]) {
+                params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
+                if (!params.aid || params.aid > IEEE80211_MAX_AID)
+                        return -EINVAL;
+        }
 
         if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
                 params.ht_capa =
@@ -1762,13 +1800,39 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
         if (err)
                 goto out_rtnl;
 
-        if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
-            dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
-                err = -EINVAL;
+        err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
+        if (err)
                 goto out;
+
+        /* validate settings */
+        err = 0;
+
+        switch (dev->ieee80211_ptr->iftype) {
+        case NL80211_IFTYPE_AP:
+        case NL80211_IFTYPE_AP_VLAN:
+                /* all ok but must have AID */
+                if (!params.aid)
+                        err = -EINVAL;
+                break;
+        case NL80211_IFTYPE_MESH_POINT:
+                /* disallow things mesh doesn't support */
+                if (params.vlan)
+                        err = -EINVAL;
+                if (params.aid)
+                        err = -EINVAL;
+                if (params.ht_capa)
+                        err = -EINVAL;
+                if (params.listen_interval >= 0)
+                        err = -EINVAL;
+                if (params.supported_rates)
+                        err = -EINVAL;
+                if (params.sta_flags_mask)
+                        err = -EINVAL;
+                break;
+        default:
+                err = -EINVAL;
         }
 
-        err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
         if (err)
                 goto out;
 
@@ -1812,7 +1876,8 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
                 goto out_rtnl;
 
         if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
-            dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
+            dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
+            dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
                 err = -EINVAL;
                 goto out;
         }