Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_dev.c        | 11
-rw-r--r--  net/bluetooth/hci_conn.c    |  3
-rw-r--r--  net/bluetooth/hidp/core.c   | 18
-rw-r--r--  net/bluetooth/hidp/hidp.h   |  1
-rw-r--r--  net/bluetooth/l2cap_core.c  | 10
-rw-r--r--  net/bridge/br_device.c      |  4
-rw-r--r--  net/bridge/br_input.c       |  6
-rw-r--r--  net/ceph/ceph_fs.c          | 17
-rw-r--r--  net/ceph/osd_client.c       | 10
-rw-r--r--  net/core/dst.c              |  6
-rw-r--r--  net/ipv4/af_inet.c          |  4
-rw-r--r--  net/ipv4/ip_output.c        |  2
-rw-r--r--  net/ipv4/tcp.c              | 10
-rw-r--r--  net/ipv4/udp.c              | 10
-rw-r--r--  net/ipv4/xfrm4_output.c     |  7
-rw-r--r--  net/ipv6/af_inet6.c         |  2
-rw-r--r--  net/ipv6/route.c            | 25
-rw-r--r--  net/mac80211/scan.c         |  3
-rw-r--r--  net/mac80211/wpa.c          | 24
-rw-r--r--  net/sctp/output.c           | 19
-rw-r--r--  net/sctp/outqueue.c         | 20
-rw-r--r--  net/sctp/protocol.c         | 11
-rw-r--r--  net/sctp/sm_sideeffect.c    | 20
-rw-r--r--  net/sctp/sm_statefuns.c     | 32
-rw-r--r--  net/sctp/sm_statetable.c    |  2
-rw-r--r--  net/sctp/socket.c           | 36
-rw-r--r--  net/sctp/ulpevent.c         | 16
-rw-r--r--  net/sunrpc/rpcb_clnt.c      |  2
-rw-r--r--  net/sunrpc/sched.c          | 27
-rw-r--r--  net/wireless/core.c         | 12
-rw-r--r--  net/wireless/core.h         |  2
-rw-r--r--  net/wireless/nl80211.c      | 27
-rw-r--r--  net/wireless/scan.c         | 10
-rw-r--r--  net/xfrm/xfrm_policy.c      |  6
-rw-r--r--  net/xfrm/xfrm_state.c       |  2
35 files changed, 271 insertions(+), 146 deletions(-)
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 7ea5cf9ea08a..6e82148edfc8 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -528,7 +528,11 @@ static int vlan_dev_init(struct net_device *dev)
 			  (1<<__LINK_STATE_DORMANT))) |
 		      (1<<__LINK_STATE_PRESENT);
 
-	dev->hw_features = NETIF_F_ALL_TX_OFFLOADS;
+	dev->hw_features = NETIF_F_ALL_CSUM | NETIF_F_SG |
+			   NETIF_F_FRAGLIST | NETIF_F_ALL_TSO |
+			   NETIF_F_HIGHDMA | NETIF_F_SCTP_CSUM |
+			   NETIF_F_ALL_FCOE;
+
 	dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
 	dev->gso_max_size = real_dev->gso_max_size;
 
@@ -586,9 +590,14 @@ static void vlan_dev_uninit(struct net_device *dev)
 static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
 {
 	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	u32 old_features = features;
 
 	features &= real_dev->features;
 	features &= real_dev->vlan_features;
+
+	if (old_features & NETIF_F_SOFT_FEATURES)
+		features |= old_features & NETIF_F_SOFT_FEATURES;
+
 	if (dev_ethtool_get_rx_csum(real_dev))
 		features |= NETIF_F_RXCSUM;
 	features |= NETIF_F_LLTX;
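Note on the fix_features hunk above: the requested feature set is first masked by what the real device (and its vlan_features) can do, then any purely software features the caller asked for are added back, since those do not depend on lower-device hardware. A small stand-alone sketch of that masking logic; the feature bits below are invented stand-ins, not the kernel's NETIF_F_* definitions:

#include <stdio.h>

#define F_HW_CSUM  0x01
#define F_SG       0x02
#define F_TSO      0x04
#define F_SOFT     0x08   /* stands in for NETIF_F_SOFT_FEATURES */
#define F_LLTX     0x10

/* Mirror of the vlan_dev_fix_features() flow in the hunk above. */
static unsigned fix_features(unsigned requested,
			     unsigned real_dev_features,
			     unsigned real_dev_vlan_features)
{
	unsigned old_features = requested;
	unsigned features = requested;

	features &= real_dev_features;
	features &= real_dev_vlan_features;
	features |= old_features & F_SOFT;   /* keep software-only bits */
	features |= F_LLTX;
	return features;
}

int main(void)
{
	unsigned f = fix_features(F_HW_CSUM | F_TSO | F_SOFT,
				  F_HW_CSUM | F_SG, F_HW_CSUM);

	printf("resulting features: 0x%x\n", f);  /* 0x19: HW_CSUM | SOFT | LLTX */
	return 0;
}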
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index d3a05b9ade7a..bcd158f40bb9 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -393,6 +393,9 @@ int hci_conn_del(struct hci_conn *conn)
 
 	hci_dev_put(hdev);
 
+	if (conn->handle == 0)
+		kfree(conn);
+
 	return 0;
 }
 
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index c405a954a603..43b4c2deb7cc 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -464,7 +464,8 @@ static void hidp_idle_timeout(unsigned long arg)
 {
 	struct hidp_session *session = (struct hidp_session *) arg;
 
-	kthread_stop(session->task);
+	atomic_inc(&session->terminate);
+	wake_up_process(session->task);
 }
 
 static void hidp_set_timer(struct hidp_session *session)
@@ -535,7 +536,8 @@ static void hidp_process_hid_control(struct hidp_session *session,
 		skb_queue_purge(&session->ctrl_transmit);
 		skb_queue_purge(&session->intr_transmit);
 
-		kthread_stop(session->task);
+		atomic_inc(&session->terminate);
+		wake_up_process(current);
 	}
 }
 
@@ -706,9 +708,8 @@ static int hidp_session(void *arg)
 	add_wait_queue(sk_sleep(intr_sk), &intr_wait);
 	session->waiting_for_startup = 0;
 	wake_up_interruptible(&session->startup_queue);
-	while (!kthread_should_stop()) {
-		set_current_state(TASK_INTERRUPTIBLE);
-
+	set_current_state(TASK_INTERRUPTIBLE);
+	while (!atomic_read(&session->terminate)) {
 		if (ctrl_sk->sk_state != BT_CONNECTED ||
 		    intr_sk->sk_state != BT_CONNECTED)
 			break;
@@ -726,6 +727,7 @@ static int hidp_session(void *arg)
 		hidp_process_transmit(session);
 
 		schedule();
+		set_current_state(TASK_INTERRUPTIBLE);
 	}
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
@@ -1060,7 +1062,8 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
 err_add_device:
 	hid_destroy_device(session->hid);
 	session->hid = NULL;
-	kthread_stop(session->task);
+	atomic_inc(&session->terminate);
+	wake_up_process(session->task);
 
 unlink:
 	hidp_del_timer(session);
@@ -1111,7 +1114,8 @@ int hidp_del_connection(struct hidp_conndel_req *req)
 			skb_queue_purge(&session->ctrl_transmit);
 			skb_queue_purge(&session->intr_transmit);
 
-			kthread_stop(session->task);
+			atomic_inc(&session->terminate);
+			wake_up_process(session->task);
 		}
 	} else
 		err = -ENOENT;
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index 19e95004b286..af1bcc823f26 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -142,6 +142,7 @@ struct hidp_session {
 	uint ctrl_mtu;
 	uint intr_mtu;
 
+	atomic_t terminate;
 	struct task_struct *task;
 
 	unsigned char keys[8];
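The hidp hunks above replace kthread_stop() with an atomic terminate counter plus an explicit wake-up, so the session thread can be asked to exit from timer and callback context, and the thread itself re-checks the flag around schedule(). A minimal userspace analogue of that shutdown handshake, using C11 atomics and pthreads in place of the kthread API (all names here are illustrative, not the patch's):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct session {
	atomic_int terminate;          /* plays the role of session->terminate */
	pthread_mutex_t lock;
	pthread_cond_t wake;           /* wake-up channel, like wake_up_process() */
};

static void *session_thread(void *arg)
{
	struct session *s = arg;

	pthread_mutex_lock(&s->lock);
	while (!atomic_load(&s->terminate)) {
		/* ... process queued work here ... */
		pthread_cond_wait(&s->wake, &s->lock);   /* stands in for schedule() */
	}
	pthread_mutex_unlock(&s->lock);
	return NULL;
}

static void request_terminate(struct session *s)
{
	atomic_store(&s->terminate, 1);   /* like atomic_inc(&session->terminate) */
	pthread_mutex_lock(&s->lock);
	pthread_cond_signal(&s->wake);    /* like wake_up_process(session->task)  */
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct session s = { .lock = PTHREAD_MUTEX_INITIALIZER,
			     .wake = PTHREAD_COND_INITIALIZER };
	pthread_t t;

	pthread_create(&t, NULL, session_thread, &s);
	usleep(10000);
	request_terminate(&s);
	pthread_join(&t, NULL);
	puts("session thread exited cleanly");
	return 0;
}

The key property, in both the patch and the sketch, is that the requester never blocks waiting for the thread inside a context that must not sleep; it only flags the thread and wakes it.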
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 56fdd9162da9..7705e26e699f 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -620,7 +620,8 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
 			struct sock *parent = bt_sk(sk)->parent;
 			rsp.result = cpu_to_le16(L2CAP_CR_PEND);
 			rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
-			parent->sk_data_ready(parent, 0);
+			if (parent)
+				parent->sk_data_ready(parent, 0);
 
 		} else {
 			sk->sk_state = BT_CONFIG;
@@ -2323,7 +2324,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 
 	sk = chan->sk;
 
-	if (sk->sk_state != BT_CONFIG) {
+	if (sk->sk_state != BT_CONFIG && sk->sk_state != BT_CONNECT2) {
 		struct l2cap_cmd_rej rej;
 
 		rej.reason = cpu_to_le16(0x0002);
@@ -2334,7 +2335,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 
 	/* Reject if config buffer is too small. */
 	len = cmd_len - sizeof(*req);
-	if (chan->conf_len + len > sizeof(chan->conf_req)) {
+	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
 				l2cap_build_conf_rsp(chan, rsp,
 					L2CAP_CONF_REJECT, flags), rsp);
@@ -4009,7 +4010,8 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
 			struct sock *parent = bt_sk(sk)->parent;
 			res = L2CAP_CR_PEND;
 			stat = L2CAP_CS_AUTHOR_PEND;
-			parent->sk_data_ready(parent, 0);
+			if (parent)
+				parent->sk_data_ready(parent, 0);
 		} else {
 			sk->sk_state = BT_CONFIG;
 			res = L2CAP_CR_SUCCESS;
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index c188c803c09c..32b8f9f7f79e 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -49,7 +49,9 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 	skb_pull(skb, ETH_HLEN);
 
 	rcu_read_lock();
-	if (is_multicast_ether_addr(dest)) {
+	if (is_broadcast_ether_addr(dest))
+		br_flood_deliver(br, skb);
+	else if (is_multicast_ether_addr(dest)) {
 		if (unlikely(netpoll_tx_running(dev))) {
 			br_flood_deliver(br, skb);
 			goto out;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index f3ac1e858ee1..f06ee39c73fd 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -60,7 +60,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
 	br = p->br;
 	br_fdb_update(br, p, eth_hdr(skb)->h_source);
 
-	if (is_multicast_ether_addr(dest) &&
+	if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
 	    br_multicast_rcv(br, p, skb))
 		goto drop;
 
@@ -77,7 +77,9 @@ int br_handle_frame_finish(struct sk_buff *skb)
 
 	dst = NULL;
 
-	if (is_multicast_ether_addr(dest)) {
+	if (is_broadcast_ether_addr(dest))
+		skb2 = skb;
+	else if (is_multicast_ether_addr(dest)) {
 		mdst = br_mdb_get(br, skb);
 		if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
 			if ((mdst && mdst->mglist) ||
diff --git a/net/ceph/ceph_fs.c b/net/ceph/ceph_fs.c
index a3a3a31d3c37..41466ccb972a 100644
--- a/net/ceph/ceph_fs.c
+++ b/net/ceph/ceph_fs.c
@@ -36,16 +36,19 @@ int ceph_flags_to_mode(int flags)
 	if ((flags & O_DIRECTORY) == O_DIRECTORY)
 		return CEPH_FILE_MODE_PIN;
 #endif
-	if ((flags & O_APPEND) == O_APPEND)
-		flags |= O_WRONLY;
 
-	if ((flags & O_ACCMODE) == O_RDWR)
-		mode = CEPH_FILE_MODE_RDWR;
-	else if ((flags & O_ACCMODE) == O_WRONLY)
+	switch (flags & O_ACCMODE) {
+	case O_WRONLY:
 		mode = CEPH_FILE_MODE_WR;
-	else
+		break;
+	case O_RDONLY:
 		mode = CEPH_FILE_MODE_RD;
-
+		break;
+	case O_RDWR:
+	case O_ACCMODE: /* this is what the VFS does */
+		mode = CEPH_FILE_MODE_RDWR;
+		break;
+	}
 #ifdef O_LAZY
 	if (flags & O_LAZY)
 		mode |= CEPH_FILE_MODE_LAZY;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 9cb627a4073a..7330c2757c0c 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -477,8 +477,9 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 	calc_layout(osdc, vino, layout, off, plen, req, ops);
 	req->r_file_layout = *layout;  /* keep a copy */
 
-	/* in case it differs from natural alignment that calc_layout
-	   filled in for us */
+	/* in case it differs from natural (file) alignment that
+	   calc_layout filled in for us */
+	req->r_num_pages = calc_pages_for(page_align, *plen);
 	req->r_page_alignment = page_align;
 
 	ceph_osdc_build_request(req, off, plen, ops,
@@ -2027,8 +2028,9 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
 		int want = calc_pages_for(req->r_page_alignment, data_len);
 
 		if (unlikely(req->r_num_pages < want)) {
-			pr_warning("tid %lld reply %d > expected %d pages\n",
-				   tid, want, m->nr_pages);
+			pr_warning("tid %lld reply has %d bytes %d pages, we"
+				   " had only %d pages ready\n", tid, data_len,
+				   want, req->r_num_pages);
 			*skip = 1;
 			ceph_msg_put(m);
 			m = NULL;
diff --git a/net/core/dst.c b/net/core/dst.c
index 9ccca038444f..6135f3671692 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -190,7 +190,8 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
 	dst->lastuse = jiffies;
 	dst->flags = flags;
 	dst->next = NULL;
-	dst_entries_add(ops, 1);
+	if (!(flags & DST_NOCOUNT))
+		dst_entries_add(ops, 1);
 	return dst;
 }
 EXPORT_SYMBOL(dst_alloc);
@@ -243,7 +244,8 @@ again:
 		neigh_release(neigh);
 	}
 
-	dst_entries_add(dst->ops, -1);
+	if (!(dst->flags & DST_NOCOUNT))
+		dst_entries_add(dst->ops, -1);
 
 	if (dst->ops->destroy)
 		dst->ops->destroy(dst);
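The two dst.c hunks make the global dst accounting conditional on a new DST_NOCOUNT flag (used by the ipv6/route.c hunks below for FIB-owned routes); the same flag has to be tested on both the allocation and the destruction path, or the counter drifts. A tiny self-contained sketch of that balanced, flag-gated accounting; the names are hypothetical, not the kernel API:

#include <assert.h>
#include <stdlib.h>

#define ENTRY_NOCOUNT 0x1

static int entries;               /* stands in for the dst_entries_add() counter */

struct entry { unsigned flags; };

static struct entry *entry_alloc(unsigned flags)
{
	struct entry *e = calloc(1, sizeof(*e));

	if (!e)
		return NULL;
	e->flags = flags;
	if (!(flags & ENTRY_NOCOUNT))  /* count only "accounted" entries */
		entries++;
	return e;
}

static void entry_free(struct entry *e)
{
	if (!(e->flags & ENTRY_NOCOUNT))  /* must mirror the alloc-side test */
		entries--;
	free(e);
}

int main(void)
{
	struct entry *a = entry_alloc(0);
	struct entry *b = entry_alloc(ENTRY_NOCOUNT);

	entry_free(b);
	entry_free(a);
	assert(entries == 0);   /* counted and uncounted entries stay balanced */
	return 0;
}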
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index eae1f676f870..ef1528af7abf 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -465,8 +465,10 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	if (addr_len < sizeof(struct sockaddr_in))
 		goto out;
 
-	if (addr->sin_family != AF_INET)
+	if (addr->sin_family != AF_INET) {
+		err = -EAFNOSUPPORT;
 		goto out;
+	}
 
 	chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
 
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 4a7e16b5d3f3..84f26e8e6c60 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -828,7 +828,7 @@ static int __ip_append_data(struct sock *sk,
 	cork->length += length;
 	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
-	    (rt->dst.dev->features & NETIF_F_UFO)) {
+	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
 		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
 					 hh_len, fragheaderlen, transhdrlen,
 					 mtu, flags);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 054a59d21eb0..46febcacb729 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3220,7 +3220,7 @@ __setup("thash_entries=", set_thash_entries);
 void __init tcp_init(void)
 {
 	struct sk_buff *skb = NULL;
-	unsigned long nr_pages, limit;
+	unsigned long limit;
 	int i, max_share, cnt;
 	unsigned long jiffy = jiffies;
 
@@ -3277,13 +3277,7 @@ void __init tcp_init(void)
 	sysctl_tcp_max_orphans = cnt / 2;
 	sysctl_max_syn_backlog = max(128, cnt / 256);
 
-	/* Set the pressure threshold to be a fraction of global memory that
-	 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
-	 * memory, with a floor of 128 pages.
-	 */
-	nr_pages = totalram_pages - totalhigh_pages;
-	limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
-	limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+	limit = nr_free_buffer_pages() / 8;
 	limit = max(limit, 128UL);
 	sysctl_tcp_mem[0] = limit / 4 * 3;
 	sysctl_tcp_mem[1] = limit;
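The tcp_init() hunk above replaces the old totalram-based pressure formula with a flat nr_free_buffer_pages() / 8, floored at 128 pages; the same simplification is repeated for UDP and SCTP further down. A tiny arithmetic sketch of how the two sysctl thresholds visible in this hunk fall out of that limit; the page count is invented for the example:

#include <stdio.h>

int main(void)
{
	/* hypothetical box: 1 GiB of free lowmem in 4 KiB pages */
	unsigned long nr_free_buffer_pages = 1UL << 18;
	unsigned long limit = nr_free_buffer_pages / 8;

	if (limit < 128UL)              /* limit = max(limit, 128UL) */
		limit = 128UL;

	printf("mem[0] (low)      = %lu pages\n", limit / 4 * 3);
	printf("mem[1] (pressure) = %lu pages\n", limit);
	return 0;
}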
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 48cd88e62553..198f75b7bdd3 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2209,16 +2209,10 @@ void __init udp_table_init(struct udp_table *table, const char *name)
 
 void __init udp_init(void)
 {
-	unsigned long nr_pages, limit;
+	unsigned long limit;
 
 	udp_table_init(&udp_table, "UDP");
-	/* Set the pressure threshold up by the same strategy of TCP. It is a
-	 * fraction of global memory that is up to 1/2 at 256 MB, decreasing
-	 * toward zero with the amount of memory, with a floor of 128 pages.
-	 */
-	nr_pages = totalram_pages - totalhigh_pages;
-	limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
-	limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+	limit = nr_free_buffer_pages() / 8;
 	limit = max(limit, 128UL);
 	sysctl_udp_mem[0] = limit / 4 * 3;
 	sysctl_udp_mem[1] = limit;
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 2d51840e53a1..327a617d594c 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -32,7 +32,12 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
 	dst = skb_dst(skb);
 	mtu = dst_mtu(dst);
 	if (skb->len > mtu) {
-		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+		if (skb->sk)
+			ip_local_error(skb->sk, EMSGSIZE, ip_hdr(skb)->daddr,
+				       inet_sk(skb->sk)->inet_dport, mtu);
+		else
+			icmp_send(skb, ICMP_DEST_UNREACH,
+				  ICMP_FRAG_NEEDED, htonl(mtu));
 		ret = -EMSGSIZE;
 	}
 out:
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index d450a2f9fc06..3b5669a2582d 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -274,7 +274,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 		return -EINVAL;
 
 	if (addr->sin6_family != AF_INET6)
-		return -EINVAL;
+		return -EAFNOSUPPORT;
 
 	addr_type = ipv6_addr_type(&addr->sin6_addr);
 	if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index de2b1decd786..0ef1f086feb8 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -228,9 +228,10 @@ static struct rt6_info ip6_blk_hole_entry_template = {
 
 /* allocate dst with ip6_dst_ops */
 static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
-					     struct net_device *dev)
+					     struct net_device *dev,
+					     int flags)
 {
-	struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, 0);
+	struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
 
 	memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
 
@@ -1042,7 +1043,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 	if (unlikely(idev == NULL))
 		return NULL;
 
-	rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev);
+	rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0);
 	if (unlikely(rt == NULL)) {
 		in6_dev_put(idev);
 		goto out;
@@ -1062,14 +1063,6 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
 	rt->dst.output = ip6_output;
 
-#if 0	/* there's no chance to use these for ndisc */
-	rt->dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
-				? DST_HOST
-				: 0;
-	ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
-	rt->rt6i_dst.plen = 128;
-#endif
-
 	spin_lock_bh(&icmp6_dst_lock);
 	rt->dst.next = icmp6_dst_gc_list;
 	icmp6_dst_gc_list = &rt->dst;
@@ -1214,7 +1207,7 @@ int ip6_route_add(struct fib6_config *cfg)
 		goto out;
 	}
 
-	rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL);
+	rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT);
 
 	if (rt == NULL) {
 		err = -ENOMEM;
@@ -1244,7 +1237,7 @@ int ip6_route_add(struct fib6_config *cfg)
 	ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
 	rt->rt6i_dst.plen = cfg->fc_dst_len;
 	if (rt->rt6i_dst.plen == 128)
-		rt->dst.flags = DST_HOST;
+		rt->dst.flags |= DST_HOST;
 
 #ifdef CONFIG_IPV6_SUBTREES
 	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
@@ -1734,7 +1727,7 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
 {
 	struct net *net = dev_net(ort->rt6i_dev);
 	struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
-					    ort->dst.dev);
+					    ort->dst.dev, 0);
 
 	if (rt) {
 		rt->dst.input = ort->dst.input;
@@ -2013,7 +2006,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 {
 	struct net *net = dev_net(idev->dev);
 	struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
-					    net->loopback_dev);
+					    net->loopback_dev, 0);
 	struct neighbour *neigh;
 
 	if (rt == NULL) {
@@ -2025,7 +2018,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 
 	in6_dev_hold(idev);
 
-	rt->dst.flags = DST_HOST;
+	rt->dst.flags |= DST_HOST;
 	rt->dst.input = ip6_input;
 	rt->dst.output = ip6_output;
 	rt->rt6i_idev = idev;
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 58ffa7d069c7..669d2e32efb6 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -877,7 +877,8 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
 	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
 		local->sched_scan_ies.ie[i] = kzalloc(2 +
 						      IEEE80211_MAX_SSID_LEN +
-						      local->scan_ies_len,
+						      local->scan_ies_len +
+						      req->ie_len,
 						      GFP_KERNEL);
 		if (!local->sched_scan_ies.ie[i]) {
 			ret = -ENOMEM;
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 9dc3b5f26e80..8f6a302d2ac3 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -86,6 +86,11 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
 	struct sk_buff *skb = rx->skb;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	int queue = rx->queue;
+
+	/* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
+	if (rx->queue == NUM_RX_DATA_QUEUES - 1)
+		queue = 0;
 
 	/*
 	 * it makes no sense to check for MIC errors on anything other
@@ -148,13 +153,19 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
 
 update_iv:
 	/* update IV in key information to be able to detect replays */
-	rx->key->u.tkip.rx[rx->queue].iv32 = rx->tkip_iv32;
-	rx->key->u.tkip.rx[rx->queue].iv16 = rx->tkip_iv16;
+	rx->key->u.tkip.rx[queue].iv32 = rx->tkip_iv32;
+	rx->key->u.tkip.rx[queue].iv16 = rx->tkip_iv16;
 
 	return RX_CONTINUE;
 
 mic_fail:
-	mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx,
+	/*
+	 * In some cases the key can be unset - e.g. a multicast packet, in
+	 * a driver that supports HW encryption. Send up the key idx only if
+	 * the key is set.
+	 */
+	mac80211_ev_michael_mic_failure(rx->sdata,
+					rx->key ? rx->key->conf.keyidx : -1,
 					(void *) skb->data, NULL, GFP_ATOMIC);
 	return RX_DROP_UNUSABLE;
 }
@@ -235,6 +246,11 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
 	struct ieee80211_key *key = rx->key;
 	struct sk_buff *skb = rx->skb;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+	int queue = rx->queue;
+
+	/* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
+	if (rx->queue == NUM_RX_DATA_QUEUES - 1)
+		queue = 0;
 
 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
@@ -255,7 +271,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
 	res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm,
 					  key, skb->data + hdrlen,
 					  skb->len - hdrlen, rx->sta->sta.addr,
-					  hdr->addr1, hwaccel, rx->queue,
+					  hdr->addr1, hwaccel, queue,
 					  &rx->tkip_iv32,
 					  &rx->tkip_iv16);
 	if (res != TKIP_DECRYPT_OK)
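Both wpa.c decrypt paths above fold the dedicated non-QoS RX queue index back to 0 before touching the per-queue TKIP IV/replay state, so non-QoS data frames share replay counters with TID 0 instead of having their own, otherwise-unchecked slot. A compact sketch of that index remapping; the queue count below is illustrative, not necessarily the mac80211 value of NUM_RX_DATA_QUEUES:

#include <stdio.h>

#define NUM_RX_DATA_QUEUES 17   /* assumed: 16 TIDs + 1 slot for non-QoS data */

static int tkip_rx_queue(int rx_queue)
{
	/* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
	if (rx_queue == NUM_RX_DATA_QUEUES - 1)
		return 0;
	return rx_queue;
}

int main(void)
{
	printf("queue 3  -> %d\n", tkip_rx_queue(3));    /* QoS TID, unchanged  */
	printf("queue 16 -> %d\n", tkip_rx_queue(16));   /* non-QoS folded to 0 */
	return 0;
}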
diff --git a/net/sctp/output.c b/net/sctp/output.c
index b4f3cf06d8da..08b3cead6503 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -500,23 +500,20 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 	 * Note: Adler-32 is no longer applicable, as has been replaced
 	 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
 	 */
-	if (!sctp_checksum_disable &&
-	    !(dst->dev->features & (NETIF_F_NO_CSUM | NETIF_F_SCTP_CSUM))) {
+	if (!sctp_checksum_disable) {
+		if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) {
 			__u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
 
 			/* 3) Put the resultant value into the checksum field in the
 			 *    common header, and leave the rest of the bits unchanged.
 			 */
 			sh->checksum = sctp_end_cksum(crc32);
 		} else {
-			if (dst->dev->features & NETIF_F_SCTP_CSUM) {
 			/* no need to seed pseudo checksum for SCTP */
 			nskb->ip_summed = CHECKSUM_PARTIAL;
 			nskb->csum_start = (skb_transport_header(nskb) -
 					    nskb->head);
 			nskb->csum_offset = offsetof(struct sctphdr, checksum);
-			} else {
-				nskb->ip_summed = CHECKSUM_UNNECESSARY;
 		}
 	}
 
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 1c88c8911dc5..d03682109b7a 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1582,6 +1582,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 #endif /* SCTP_DEBUG */
 	if (transport) {
 		if (bytes_acked) {
+			struct sctp_association *asoc = transport->asoc;
+
 			/* We may have counted DATA that was migrated
 			 * to this transport due to DEL-IP operation.
 			 * Subtract those bytes, since the were never
@@ -1600,6 +1602,17 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 			transport->error_count = 0;
 			transport->asoc->overall_error_count = 0;
 
+			/*
+			 * While in SHUTDOWN PENDING, we may have started
+			 * the T5 shutdown guard timer after reaching the
+			 * retransmission limit. Stop that timer as soon
+			 * as the receiver acknowledged any data.
+			 */
+			if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
+			    del_timer(&asoc->timers
+				[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
+				sctp_association_put(asoc);
+
 			/* Mark the destination transport address as
 			 * active if it is not so marked.
 			 */
@@ -1629,10 +1642,15 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 		 * A sender is doing zero window probing when the
 		 * receiver's advertised window is zero, and there is
 		 * only one data chunk in flight to the receiver.
+		 *
+		 * Allow the association to timeout while in SHUTDOWN
+		 * PENDING or SHUTDOWN RECEIVED in case the receiver
+		 * stays in zero window mode forever.
 		 */
 		if (!q->asoc->peer.rwnd &&
 		    !list_empty(&tlist) &&
-		    (sack_ctsn+2 == q->asoc->next_tsn)) {
+		    (sack_ctsn+2 == q->asoc->next_tsn) &&
+		    q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
 			SCTP_DEBUG_PRINTK("%s: SACK received for zero "
 					  "window probe: %u\n",
 					  __func__, sack_ctsn);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 67380a29e2e9..207175b2f40a 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1058,7 +1058,6 @@ SCTP_STATIC __init int sctp_init(void)
 	int status = -EINVAL;
 	unsigned long goal;
 	unsigned long limit;
-	unsigned long nr_pages;
 	int max_share;
 	int order;
 
@@ -1148,15 +1147,7 @@ SCTP_STATIC __init int sctp_init(void)
 	/* Initialize handle used for association ids. */
 	idr_init(&sctp_assocs_id);
 
-	/* Set the pressure threshold to be a fraction of global memory that
-	 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
-	 * memory, with a floor of 128 pages.
-	 * Note this initializes the data in sctpv6_prot too
-	 * Unabashedly stolen from tcp_init
-	 */
-	nr_pages = totalram_pages - totalhigh_pages;
-	limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
-	limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+	limit = nr_free_buffer_pages() / 8;
 	limit = max(limit, 128UL);
 	sysctl_sctp_mem[0] = limit / 4 * 3;
 	sysctl_sctp_mem[1] = limit;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 534c2e5feb05..6e0f88295aaf 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -670,10 +670,19 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
 	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
 	 * HEARTBEAT should clear the error counter of the destination
 	 * transport address to which the HEARTBEAT was sent.
-	 * The association's overall error count is also cleared.
 	 */
 	t->error_count = 0;
-	t->asoc->overall_error_count = 0;
+
+	/*
+	 * Although RFC4960 specifies that the overall error count must
+	 * be cleared when a HEARTBEAT ACK is received, we make an
+	 * exception while in SHUTDOWN PENDING. If the peer keeps its
+	 * window shut forever, we may never be able to transmit our
+	 * outstanding data and rely on the retransmission limit be reached
+	 * to shutdown the association.
+	 */
+	if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
+		t->asoc->overall_error_count = 0;
 
 	/* Clear the hb_sent flag to signal that we had a good
 	 * acknowledgement.
@@ -1437,6 +1446,13 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 			sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr);
 			break;
 
+		case SCTP_CMD_TIMER_START_ONCE:
+			timer = &asoc->timers[cmd->obj.to];
+
+			if (timer_pending(timer))
+				break;
+			/* fall through */
+
 		case SCTP_CMD_TIMER_START:
 			timer = &asoc->timers[cmd->obj.to];
 			timeout = asoc->timeouts[cmd->obj.to];
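The new SCTP_CMD_TIMER_START_ONCE command above behaves like the normal start command except that an already-pending timer is left alone (so an earlier T5 shutdown-guard deadline is not pushed out), then falls through to the regular start path. A stand-alone sketch of that start-once behaviour; the timer structure below is a stand-in, not the kernel timer API:

#include <stdbool.h>
#include <stdio.h>

struct timer { bool pending; unsigned long expires; };

static bool timer_pending(const struct timer *t) { return t->pending; }

static void timer_start(struct timer *t, unsigned long timeout)
{
	t->pending = true;
	t->expires = timeout;          /* (re)arm unconditionally */
}

static void timer_start_once(struct timer *t, unsigned long timeout)
{
	if (timer_pending(t))
		return;                /* keep the earlier deadline */
	timer_start(t, timeout);
}

int main(void)
{
	struct timer t5 = { 0 };

	timer_start_once(&t5, 100);
	timer_start_once(&t5, 500);    /* ignored: timer already pending */
	printf("t5 expires at %lu\n", t5.expires);   /* 100 */
	return 0;
}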
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index a297283154d5..246117142b5c 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -5154,7 +5154,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
 	 * The sender of the SHUTDOWN MAY also start an overall guard timer
 	 * 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
 	 */
-	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
+	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
 			SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
 
 	if (asoc->autoclose)
@@ -5299,14 +5299,28 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
 	SCTP_INC_STATS(SCTP_MIB_T3_RTX_EXPIREDS);
 
 	if (asoc->overall_error_count >= asoc->max_retrans) {
-		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-				SCTP_ERROR(ETIMEDOUT));
-		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
-		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
-				SCTP_PERR(SCTP_ERROR_NO_ERROR));
-		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-		SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
-		return SCTP_DISPOSITION_DELETE_TCB;
+		if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
+			/*
+			 * We are here likely because the receiver had its rwnd
+			 * closed for a while and we have not been able to
+			 * transmit the locally queued data within the maximum
+			 * retransmission attempts limit. Start the T5
+			 * shutdown guard timer to give the receiver one last
+			 * chance and some additional time to recover before
+			 * aborting.
+			 */
+			sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START_ONCE,
+				SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
+		} else {
+			sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+					SCTP_ERROR(ETIMEDOUT));
+			/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
+			sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+					SCTP_PERR(SCTP_ERROR_NO_ERROR));
+			SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+			SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+			return SCTP_DISPOSITION_DELETE_TCB;
+		}
 	}
 
 	/* E1) For the destination address for which the timer
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 0338dc6fdc9d..7c211a7f90f4 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -827,7 +827,7 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 	/* SCTP_STATE_ESTABLISHED */ \
 	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+	TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
 	TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6766913a53e6..d3ccf7973c59 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1384,6 +1384,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 	struct sctp_endpoint *ep;
 	struct sctp_association *asoc;
 	struct list_head *pos, *temp;
+	unsigned int data_was_unread;
 
 	SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout);
 
@@ -1393,6 +1394,10 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 
 	ep = sctp_sk(sk)->ep;
 
+	/* Clean up any skbs sitting on the receive queue. */
+	data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
+	data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
+
 	/* Walk all associations on an endpoint. */
 	list_for_each_safe(pos, temp, &ep->asocs) {
 		asoc = list_entry(pos, struct sctp_association, asocs);
@@ -1410,7 +1415,9 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 		}
 	}
 
-	if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
+	if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
+	    !skb_queue_empty(&asoc->ulpq.reasm) ||
+	    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
 		struct sctp_chunk *chunk;
 
 		chunk = sctp_make_abort_user(asoc, NULL, 0);
@@ -1420,10 +1427,6 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 			sctp_primitive_SHUTDOWN(asoc, NULL);
 	}
 
-	/* Clean up any skbs sitting on the receive queue. */
-	sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
-	sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
-
 	/* On a TCP-style socket, block for at most linger_time if set. */
 	if (sctp_style(sk, TCP) && timeout)
 		sctp_wait_for_close(sk, timeout);
@@ -2073,10 +2076,33 @@ static int sctp_setsockopt_disable_fragments(struct sock *sk,
 static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
 				  unsigned int optlen)
 {
+	struct sctp_association *asoc;
+	struct sctp_ulpevent *event;
+
 	if (optlen > sizeof(struct sctp_event_subscribe))
 		return -EINVAL;
 	if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
 		return -EFAULT;
+
+	/*
+	 * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
+	 * if there is no data to be sent or retransmit, the stack will
+	 * immediately send up this notification.
+	 */
+	if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
+				       &sctp_sk(sk)->subscribe)) {
+		asoc = sctp_id2assoc(sk, 0);
+
+		if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
+			event = sctp_ulpevent_make_sender_dry_event(asoc,
+					GFP_ATOMIC);
+			if (!event)
+				return -ENOMEM;
+
+			sctp_ulpq_tail_event(&asoc->ulpq, event);
+		}
+	}
+
 	return 0;
 }
 
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index e70e5fc87890..8a84017834c2 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -1081,9 +1081,19 @@ void sctp_ulpevent_free(struct sctp_ulpevent *event)
 }
 
 /* Purge the skb lists holding ulpevents. */
-void sctp_queue_purge_ulpevents(struct sk_buff_head *list)
+unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list)
 {
 	struct sk_buff *skb;
-	while ((skb = skb_dequeue(list)) != NULL)
-		sctp_ulpevent_free(sctp_skb2event(skb));
+	unsigned int data_unread = 0;
+
+	while ((skb = skb_dequeue(list)) != NULL) {
+		struct sctp_ulpevent *event = sctp_skb2event(skb);
+
+		if (!sctp_ulpevent_is_notification(event))
+			data_unread += skb->len;
+
+		sctp_ulpevent_free(event);
+	}
+
+	return data_unread;
 }
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 9a80a922c527..e45d2fbbe5a8 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -597,7 +597,7 @@ void rpcb_getport_async(struct rpc_task *task)
 	u32 bind_version;
 	struct rpc_xprt *xprt;
 	struct rpc_clnt	*rpcb_clnt;
-	static struct rpcbind_args *map;
+	struct rpcbind_args *map;
 	struct rpc_task	*child;
 	struct sockaddr_storage addr;
 	struct sockaddr *sap = (struct sockaddr *)&addr;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index a27406b1654f..4814e246a874 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -616,30 +616,25 @@ static void __rpc_execute(struct rpc_task *task)
 	BUG_ON(RPC_IS_QUEUED(task));
 
 	for (;;) {
+		void (*do_action)(struct rpc_task *);
 
 		/*
-		 * Execute any pending callback.
+		 * Execute any pending callback first.
 		 */
-		if (task->tk_callback) {
-			void (*save_callback)(struct rpc_task *);
-
-			/*
-			 * We set tk_callback to NULL before calling it,
-			 * in case it sets the tk_callback field itself:
-			 */
-			save_callback = task->tk_callback;
-			task->tk_callback = NULL;
-			save_callback(task);
-		} else {
+		do_action = task->tk_callback;
+		task->tk_callback = NULL;
+		if (do_action == NULL) {
 			/*
 			 * Perform the next FSM step.
-			 * tk_action may be NULL when the task has been killed
-			 * by someone else.
+			 * tk_action may be NULL if the task has been killed.
+			 * In particular, note that rpc_killall_tasks may
+			 * do this at any time, so beware when dereferencing.
 			 */
-			if (task->tk_action == NULL)
+			do_action = task->tk_action;
+			if (do_action == NULL)
 				break;
-			task->tk_action(task);
 		}
+		do_action(task);
 
 		/*
 		 * Lockless check for whether task is sleeping or not.
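The __rpc_execute() hunk above collapses two indirect-call sites into one: the pending callback is swapped out first (clearing tk_callback so the callback may re-arm itself), and only when there is none does the loop fall back to the next tk_action, with a single do_action(task) call at the bottom. A userspace sketch of that loop shape; the task structure and the two step functions are hypothetical:

#include <stdio.h>
#include <stddef.h>

struct task;
typedef void (*action_fn)(struct task *);

struct task {
	action_fn callback;   /* one-shot callback, may be re-armed by itself */
	action_fn action;     /* next FSM step; NULL means the task is done   */
};

static void step_two(struct task *t) { puts("step two"); t->action = NULL; }
static void step_one(struct task *t) { puts("step one"); t->action = step_two; }

static void execute(struct task *t)
{
	for (;;) {
		action_fn do_action;

		/* Execute any pending callback first. */
		do_action = t->callback;
		t->callback = NULL;
		if (do_action == NULL) {
			/* Otherwise take the next FSM step; NULL ends the loop. */
			do_action = t->action;
			if (do_action == NULL)
				break;
		}
		do_action(t);
	}
}

int main(void)
{
	struct task t = { .callback = NULL, .action = step_one };

	execute(&t);
	return 0;
}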
diff --git a/net/wireless/core.c b/net/wireless/core.c
index c22ef3492ee6..880dbe2e6f94 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -366,6 +366,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
 
 	mutex_init(&rdev->mtx);
 	mutex_init(&rdev->devlist_mtx);
+	mutex_init(&rdev->sched_scan_mtx);
 	INIT_LIST_HEAD(&rdev->netdev_list);
 	spin_lock_init(&rdev->bss_lock);
 	INIT_LIST_HEAD(&rdev->bss_list);
@@ -701,6 +702,7 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
 	rfkill_destroy(rdev->rfkill);
 	mutex_destroy(&rdev->mtx);
 	mutex_destroy(&rdev->devlist_mtx);
+	mutex_destroy(&rdev->sched_scan_mtx);
 	list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
 		cfg80211_put_bss(&scan->pub);
 	cfg80211_rdev_free_wowlan(rdev);
@@ -737,12 +739,16 @@ static void wdev_cleanup_work(struct work_struct *work)
 		___cfg80211_scan_done(rdev, true);
 	}
 
+	cfg80211_unlock_rdev(rdev);
+
+	mutex_lock(&rdev->sched_scan_mtx);
+
 	if (WARN_ON(rdev->sched_scan_req &&
 		    rdev->sched_scan_req->dev == wdev->netdev)) {
 		__cfg80211_stop_sched_scan(rdev, false);
 	}
 
-	cfg80211_unlock_rdev(rdev);
+	mutex_unlock(&rdev->sched_scan_mtx);
 
 	mutex_lock(&rdev->devlist_mtx);
 	rdev->opencount--;
@@ -830,9 +836,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
 		break;
 	case NL80211_IFTYPE_P2P_CLIENT:
 	case NL80211_IFTYPE_STATION:
-		cfg80211_lock_rdev(rdev);
+		mutex_lock(&rdev->sched_scan_mtx);
 		__cfg80211_stop_sched_scan(rdev, false);
-		cfg80211_unlock_rdev(rdev);
+		mutex_unlock(&rdev->sched_scan_mtx);
 
 		wdev_lock(wdev);
 #ifdef CONFIG_CFG80211_WEXT
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 3dce1f167eba..a570ff9214ec 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -65,6 +65,8 @@ struct cfg80211_registered_device {
 	struct work_struct scan_done_wk;
 	struct work_struct sched_scan_results_wk;
 
+	struct mutex sched_scan_mtx;
+
 #ifdef CONFIG_NL80211_TESTMODE
 	struct genl_info *testmode_info;
 #endif
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 98fa8eb6cc4b..cea338150d05 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3461,9 +3461,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
 		return -EINVAL;
 
-	if (rdev->sched_scan_req)
-		return -EINPROGRESS;
-
 	if (!info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
 		return -EINVAL;
 
@@ -3502,12 +3499,21 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 	if (ie_len > wiphy->max_scan_ie_len)
 		return -EINVAL;
 
+	mutex_lock(&rdev->sched_scan_mtx);
+
+	if (rdev->sched_scan_req) {
+		err = -EINPROGRESS;
+		goto out;
+	}
+
 	request = kzalloc(sizeof(*request)
 			+ sizeof(*request->ssids) * n_ssids
 			+ sizeof(*request->channels) * n_channels
 			+ ie_len, GFP_KERNEL);
-	if (!request)
-		return -ENOMEM;
+	if (!request) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	if (n_ssids)
 		request->ssids = (void *)&request->channels[n_channels];
@@ -3605,6 +3611,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 out_free:
 	kfree(request);
 out:
+	mutex_unlock(&rdev->sched_scan_mtx);
 	return err;
 }
 
@@ -3612,12 +3619,17 @@ static int nl80211_stop_sched_scan(struct sk_buff *skb,
 				   struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	int err;
 
 	if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
 	    !rdev->ops->sched_scan_stop)
 		return -EOPNOTSUPP;
 
-	return __cfg80211_stop_sched_scan(rdev, false);
+	mutex_lock(&rdev->sched_scan_mtx);
+	err = __cfg80211_stop_sched_scan(rdev, false);
+	mutex_unlock(&rdev->sched_scan_mtx);
+
+	return err;
 }
 
 static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
@@ -6463,7 +6475,8 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
 	if (addr)
 		NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
 	NLA_PUT_U32(msg, NL80211_ATTR_KEY_TYPE, key_type);
-	NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id);
+	if (key_id != -1)
+		NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id);
 	if (tsc)
 		NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc);
 
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 7a6c67667d70..ae0c2256ba3b 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -100,14 +100,14 @@ void __cfg80211_sched_scan_results(struct work_struct *wk)
 	rdev = container_of(wk, struct cfg80211_registered_device,
 			    sched_scan_results_wk);
 
-	cfg80211_lock_rdev(rdev);
+	mutex_lock(&rdev->sched_scan_mtx);
 
 	/* we don't have sched_scan_req anymore if the scan is stopping */
 	if (rdev->sched_scan_req)
 		nl80211_send_sched_scan_results(rdev,
 						rdev->sched_scan_req->dev);
 
-	cfg80211_unlock_rdev(rdev);
+	mutex_unlock(&rdev->sched_scan_mtx);
 }
 
 void cfg80211_sched_scan_results(struct wiphy *wiphy)
@@ -123,9 +123,9 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
 {
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
-	cfg80211_lock_rdev(rdev);
+	mutex_lock(&rdev->sched_scan_mtx);
 	__cfg80211_stop_sched_scan(rdev, true);
-	cfg80211_unlock_rdev(rdev);
+	mutex_unlock(&rdev->sched_scan_mtx);
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
 
@@ -135,7 +135,7 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
 	int err;
 	struct net_device *dev;
 
-	ASSERT_RDEV_LOCK(rdev);
+	lockdep_assert_held(&rdev->sched_scan_mtx);
 
 	if (!rdev->sched_scan_req)
 		return 0;
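The cfg80211 changes above move every reader and writer of rdev->sched_scan_req under a dedicated sched_scan_mtx instead of the global rdev lock, so the start path's -EINPROGRESS check, the stop path, and the results worker cannot race. A minimal userspace sketch of that pattern with a pthread mutex guarding the request pointer; the -EINPROGRESS check mirrors the hunks, everything else (types, allocation) is illustrative:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct rdev {
	pthread_mutex_t sched_scan_mtx;
	void *sched_scan_req;          /* NULL when no scheduled scan is active */
};

static int start_sched_scan(struct rdev *r)
{
	int err = 0;

	pthread_mutex_lock(&r->sched_scan_mtx);
	if (r->sched_scan_req) {
		err = -EINPROGRESS;    /* same check as nl80211_start_sched_scan */
		goto out;
	}
	r->sched_scan_req = malloc(1); /* stands in for the real request object */
out:
	pthread_mutex_unlock(&r->sched_scan_mtx);
	return err;
}

static int stop_sched_scan(struct rdev *r)
{
	pthread_mutex_lock(&r->sched_scan_mtx);
	free(r->sched_scan_req);
	r->sched_scan_req = NULL;
	pthread_mutex_unlock(&r->sched_scan_mtx);
	return 0;
}

int main(void)
{
	struct rdev r = { .sched_scan_mtx = PTHREAD_MUTEX_INITIALIZER };

	printf("start: %d\n", start_sched_scan(&r));   /* 0 */
	printf("start: %d\n", start_sched_scan(&r));   /* -EINPROGRESS */
	printf("stop:  %d\n", stop_sched_scan(&r));    /* 0 */
	return 0;
}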
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 9bec2e8a838c..5ce74a385525 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -50,7 +50,7 @@ static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
 static void xfrm_init_pmtu(struct dst_entry *dst);
 static int stale_bundle(struct dst_entry *dst);
-static int xfrm_bundle_ok(struct xfrm_dst *xdst, int family);
+static int xfrm_bundle_ok(struct xfrm_dst *xdst);
 
 
 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
@@ -2241,7 +2241,7 @@ static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
 
 static int stale_bundle(struct dst_entry *dst)
 {
-	return !xfrm_bundle_ok((struct xfrm_dst *)dst, AF_UNSPEC);
+	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
 }
 
 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
@@ -2313,7 +2313,7 @@ static void xfrm_init_pmtu(struct dst_entry *dst)
  * still valid.
  */
 
-static int xfrm_bundle_ok(struct xfrm_dst *first, int family)
+static int xfrm_bundle_ok(struct xfrm_dst *first)
 {
 	struct dst_entry *dst = &first->u.dst;
 	struct xfrm_dst *last;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index d70f85eb7864..9414b9c5b1e4 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1345,6 +1345,8 @@ out:
 		xfrm_state_check_expire(x1);
 
 		err = 0;
+		x->km.state = XFRM_STATE_DEAD;
+		__xfrm_state_put(x);
 	}
 	spin_unlock_bh(&x1->lock);
 