author     David S. Miller <davem@davemloft.net>   2011-07-14 10:56:40 -0400
committer  David S. Miller <davem@davemloft.net>   2011-07-14 10:56:40 -0400
commit     6a7ebdf2fd15417e87b4fd02ff411aeaca34da5f (patch)
tree       86b15d8cd3e25c97b348b5a61bdb16c02726a480 /net
parent     f6b72b6217f8c24f2a54988e58af858b4e66024d (diff)
parent     51414d41084496aaefd06d7f19eb8206e8bfac2d (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
net/bluetooth/l2cap_core.c
Diffstat (limited to 'net')
-rw-r--r--   net/bluetooth/hci_conn.c    |  3
-rw-r--r--   net/bluetooth/hidp/core.c   | 18
-rw-r--r--   net/bluetooth/hidp/hidp.h   |  1
-rw-r--r--   net/bluetooth/l2cap_core.c  |  5
-rw-r--r--   net/ceph/osd_client.c       | 10
-rw-r--r--   net/ipv4/tcp.c              | 10
-rw-r--r--   net/ipv4/udp.c              | 10
-rw-r--r--   net/mac80211/scan.c         |  3
-rw-r--r--   net/mac80211/wpa.c          | 16
-rw-r--r--   net/sctp/outqueue.c         | 20
-rw-r--r--   net/sctp/protocol.c         | 11
-rw-r--r--   net/sctp/sm_sideeffect.c    | 20
-rw-r--r--   net/sctp/sm_statefuns.c     | 32
-rw-r--r--   net/sctp/sm_statetable.c    |  2
-rw-r--r--   net/sctp/socket.c           | 36
-rw-r--r--   net/sctp/ulpevent.c         | 16
-rw-r--r--   net/sunrpc/rpcb_clnt.c      |  2
-rw-r--r--   net/sunrpc/sched.c          | 27
-rw-r--r--   net/wireless/core.c         | 12
-rw-r--r--   net/wireless/core.h         |  2
-rw-r--r--   net/wireless/nl80211.c      | 24
-rw-r--r--   net/wireless/scan.c         | 10
-rw-r--r--   net/xfrm/xfrm_state.c       |  2
23 files changed, 197 insertions, 95 deletions
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index fa48c0b3d93c..ea7f031f3b04 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -444,6 +444,9 @@ int hci_conn_del(struct hci_conn *conn)
 
 	hci_dev_put(hdev);
 
+	if (conn->handle == 0)
+		kfree(conn);
+
 	return 0;
 }
 
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index c405a954a603..43b4c2deb7cc 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -464,7 +464,8 @@ static void hidp_idle_timeout(unsigned long arg)
 {
 	struct hidp_session *session = (struct hidp_session *) arg;
 
-	kthread_stop(session->task);
+	atomic_inc(&session->terminate);
+	wake_up_process(session->task);
 }
 
 static void hidp_set_timer(struct hidp_session *session)
@@ -535,7 +536,8 @@ static void hidp_process_hid_control(struct hidp_session *session,
 		skb_queue_purge(&session->ctrl_transmit);
 		skb_queue_purge(&session->intr_transmit);
 
-		kthread_stop(session->task);
+		atomic_inc(&session->terminate);
+		wake_up_process(current);
 	}
 }
 
@@ -706,9 +708,8 @@ static int hidp_session(void *arg)
 	add_wait_queue(sk_sleep(intr_sk), &intr_wait);
 	session->waiting_for_startup = 0;
 	wake_up_interruptible(&session->startup_queue);
-	while (!kthread_should_stop()) {
-		set_current_state(TASK_INTERRUPTIBLE);
-
+	set_current_state(TASK_INTERRUPTIBLE);
+	while (!atomic_read(&session->terminate)) {
 		if (ctrl_sk->sk_state != BT_CONNECTED ||
 		    intr_sk->sk_state != BT_CONNECTED)
 			break;
@@ -726,6 +727,7 @@ static int hidp_session(void *arg)
 		hidp_process_transmit(session);
 
 		schedule();
+		set_current_state(TASK_INTERRUPTIBLE);
 	}
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
@@ -1060,7 +1062,8 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
 err_add_device:
 	hid_destroy_device(session->hid);
 	session->hid = NULL;
-	kthread_stop(session->task);
+	atomic_inc(&session->terminate);
+	wake_up_process(session->task);
 
 unlink:
 	hidp_del_timer(session);
@@ -1111,7 +1114,8 @@ int hidp_del_connection(struct hidp_conndel_req *req)
 			skb_queue_purge(&session->ctrl_transmit);
 			skb_queue_purge(&session->intr_transmit);
 
-			kthread_stop(session->task);
+			atomic_inc(&session->terminate);
+			wake_up_process(session->task);
 		}
 	} else
 		err = -ENOENT;
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index 19e95004b286..af1bcc823f26 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -142,6 +142,7 @@ struct hidp_session {
 	uint ctrl_mtu;
 	uint intr_mtu;
 
+	atomic_t terminate;
 	struct task_struct *task;
 
 	unsigned char keys[8];
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 9ec9c8c5eb5e..fc219ec28711 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -2530,7 +2530,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 
 	sk = chan->sk;
 
-	if (chan->state != BT_CONFIG) {
+	if ((bt_sk(sk)->defer_setup && chan->state != BT_CONNECT2) ||
+	    (!bt_sk(sk)->defer_setup && chan->state != BT_CONFIG)) {
 		struct l2cap_cmd_rej rej;
 
 		rej.reason = cpu_to_le16(0x0002);
@@ -2541,7 +2542,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 
 	/* Reject if config buffer is too small. */
 	len = cmd_len - sizeof(*req);
-	if (chan->conf_len + len > sizeof(chan->conf_req)) {
+	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
 				l2cap_build_conf_rsp(chan, rsp,
 				L2CAP_CONF_REJECT, flags), rsp);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 9cb627a4073a..7330c2757c0c 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -477,8 +477,9 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 	calc_layout(osdc, vino, layout, off, plen, req, ops);
 	req->r_file_layout = *layout;  /* keep a copy */
 
-	/* in case it differs from natural alignment that calc_layout
-	   filled in for us */
+	/* in case it differs from natural (file) alignment that
+	   calc_layout filled in for us */
+	req->r_num_pages = calc_pages_for(page_align, *plen);
 	req->r_page_alignment = page_align;
 
 	ceph_osdc_build_request(req, off, plen, ops,
@@ -2027,8 +2028,9 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
 		int want = calc_pages_for(req->r_page_alignment, data_len);
 
 		if (unlikely(req->r_num_pages < want)) {
-			pr_warning("tid %lld reply %d > expected %d pages\n",
-				   tid, want, m->nr_pages);
+			pr_warning("tid %lld reply has %d bytes %d pages, we"
+				   " had only %d pages ready\n", tid, data_len,
+				   want, req->r_num_pages);
 			*skip = 1;
 			ceph_msg_put(m);
 			m = NULL;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 054a59d21eb0..46febcacb729 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3220,7 +3220,7 @@ __setup("thash_entries=", set_thash_entries);
 void __init tcp_init(void)
 {
 	struct sk_buff *skb = NULL;
-	unsigned long nr_pages, limit;
+	unsigned long limit;
 	int i, max_share, cnt;
 	unsigned long jiffy = jiffies;
 
@@ -3277,13 +3277,7 @@ void __init tcp_init(void)
 	sysctl_tcp_max_orphans = cnt / 2;
 	sysctl_max_syn_backlog = max(128, cnt / 256);
 
-	/* Set the pressure threshold to be a fraction of global memory that
-	 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
-	 * memory, with a floor of 128 pages.
-	 */
-	nr_pages = totalram_pages - totalhigh_pages;
-	limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
-	limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+	limit = nr_free_buffer_pages() / 8;
 	limit = max(limit, 128UL);
 	sysctl_tcp_mem[0] = limit / 4 * 3;
 	sysctl_tcp_mem[1] = limit;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 95e0d3b8977f..1b5a19340a95 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2211,16 +2211,10 @@ void __init udp_table_init(struct udp_table *table, const char *name)
 
 void __init udp_init(void)
 {
-	unsigned long nr_pages, limit;
+	unsigned long limit;
 
 	udp_table_init(&udp_table, "UDP");
-	/* Set the pressure threshold up by the same strategy of TCP. It is a
-	 * fraction of global memory that is up to 1/2 at 256 MB, decreasing
-	 * toward zero with the amount of memory, with a floor of 128 pages.
-	 */
-	nr_pages = totalram_pages - totalhigh_pages;
-	limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
-	limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+	limit = nr_free_buffer_pages() / 8;
 	limit = max(limit, 128UL);
 	sysctl_udp_mem[0] = limit / 4 * 3;
 	sysctl_udp_mem[1] = limit;
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index e5a6ea4a94ea..08a45ac3d6f8 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -884,7 +884,8 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
 	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
 		local->sched_scan_ies.ie[i] = kzalloc(2 +
 						      IEEE80211_MAX_SSID_LEN +
-						      local->scan_ies_len,
+						      local->scan_ies_len +
+						      req->ie_len,
 						      GFP_KERNEL);
 		if (!local->sched_scan_ies.ie[i]) {
 			ret = -ENOMEM;
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index d91c1a26630d..8f6a302d2ac3 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -86,6 +86,11 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
 	struct sk_buff *skb = rx->skb;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	int queue = rx->queue;
+
+	/* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
+	if (rx->queue == NUM_RX_DATA_QUEUES - 1)
+		queue = 0;
 
 	/*
 	 * it makes no sense to check for MIC errors on anything other
@@ -148,8 +153,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
 
 update_iv:
 	/* update IV in key information to be able to detect replays */
-	rx->key->u.tkip.rx[rx->queue].iv32 = rx->tkip_iv32;
-	rx->key->u.tkip.rx[rx->queue].iv16 = rx->tkip_iv16;
+	rx->key->u.tkip.rx[queue].iv32 = rx->tkip_iv32;
+	rx->key->u.tkip.rx[queue].iv16 = rx->tkip_iv16;
 
 	return RX_CONTINUE;
 
@@ -241,6 +246,11 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
 	struct ieee80211_key *key = rx->key;
 	struct sk_buff *skb = rx->skb;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+	int queue = rx->queue;
+
+	/* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
+	if (rx->queue == NUM_RX_DATA_QUEUES - 1)
+		queue = 0;
 
 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
@@ -261,7 +271,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
 	res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm,
 					  key, skb->data + hdrlen,
 					  skb->len - hdrlen, rx->sta->sta.addr,
-					  hdr->addr1, hwaccel, rx->queue,
+					  hdr->addr1, hwaccel, queue,
 					  &rx->tkip_iv32,
 					  &rx->tkip_iv16);
 	if (res != TKIP_DECRYPT_OK)
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index edc753297a49..a6d27bf563a5 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1595,6 +1595,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 #endif /* SCTP_DEBUG */
 	if (transport) {
 		if (bytes_acked) {
+			struct sctp_association *asoc = transport->asoc;
+
 			/* We may have counted DATA that was migrated
 			 * to this transport due to DEL-IP operation.
 			 * Subtract those bytes, since the were never
@@ -1613,6 +1615,17 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 			transport->error_count = 0;
 			transport->asoc->overall_error_count = 0;
 
+			/*
+			 * While in SHUTDOWN PENDING, we may have started
+			 * the T5 shutdown guard timer after reaching the
+			 * retransmission limit. Stop that timer as soon
+			 * as the receiver acknowledged any data.
+			 */
+			if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
+			    del_timer(&asoc->timers
+				[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
+					sctp_association_put(asoc);
+
 			/* Mark the destination transport address as
 			 * active if it is not so marked.
 			 */
@@ -1642,10 +1655,15 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 		 * A sender is doing zero window probing when the
 		 * receiver's advertised window is zero, and there is
 		 * only one data chunk in flight to the receiver.
+		 *
+		 * Allow the association to timeout while in SHUTDOWN
+		 * PENDING or SHUTDOWN RECEIVED in case the receiver
+		 * stays in zero window mode forever.
 		 */
 		if (!q->asoc->peer.rwnd &&
 		    !list_empty(&tlist) &&
-		    (sack_ctsn+2 == q->asoc->next_tsn)) {
+		    (sack_ctsn+2 == q->asoc->next_tsn) &&
+		    q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
 			SCTP_DEBUG_PRINTK("%s: SACK received for zero "
 					  "window probe: %u\n",
 					  __func__, sack_ctsn);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index ab5ded2c58de..91784f44a2e2 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1199,7 +1199,6 @@ SCTP_STATIC __init int sctp_init(void)
 	int status = -EINVAL;
 	unsigned long goal;
 	unsigned long limit;
-	unsigned long nr_pages;
 	int max_share;
 	int order;
 
@@ -1289,15 +1288,7 @@ SCTP_STATIC __init int sctp_init(void)
 	/* Initialize handle used for association ids. */
 	idr_init(&sctp_assocs_id);
 
-	/* Set the pressure threshold to be a fraction of global memory that
-	 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
-	 * memory, with a floor of 128 pages.
-	 * Note this initializes the data in sctpv6_prot too
-	 * Unabashedly stolen from tcp_init
-	 */
-	nr_pages = totalram_pages - totalhigh_pages;
-	limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
-	limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+	limit = nr_free_buffer_pages() / 8;
 	limit = max(limit, 128UL);
 	sysctl_sctp_mem[0] = limit / 4 * 3;
 	sysctl_sctp_mem[1] = limit;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 1b2bb6487342..167c880cf8da 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -670,10 +670,19 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
 	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
 	 * HEARTBEAT should clear the error counter of the destination
 	 * transport address to which the HEARTBEAT was sent.
-	 * The association's overall error count is also cleared.
 	 */
 	t->error_count = 0;
-	t->asoc->overall_error_count = 0;
+
+	/*
+	 * Although RFC4960 specifies that the overall error count must
+	 * be cleared when a HEARTBEAT ACK is received, we make an
+	 * exception while in SHUTDOWN PENDING. If the peer keeps its
+	 * window shut forever, we may never be able to transmit our
+	 * outstanding data and rely on the retransmission limit be reached
+	 * to shutdown the association.
+	 */
+	if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
+		t->asoc->overall_error_count = 0;
 
 	/* Clear the hb_sent flag to signal that we had a good
 	 * acknowledgement.
@@ -1437,6 +1446,13 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 			sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr);
 			break;
 
+		case SCTP_CMD_TIMER_START_ONCE:
+			timer = &asoc->timers[cmd->obj.to];
+
+			if (timer_pending(timer))
+				break;
+			/* fall through */
+
 		case SCTP_CMD_TIMER_START:
 			timer = &asoc->timers[cmd->obj.to];
 			timeout = asoc->timeouts[cmd->obj.to];
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 7d00b1777c63..49b847b00f99 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -5155,7 +5155,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
 	 * The sender of the SHUTDOWN MAY also start an overall guard timer
 	 * 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
 	 */
-	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
+	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
 			SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
 
 	if (asoc->autoclose)
@@ -5300,14 +5300,28 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
 	SCTP_INC_STATS(SCTP_MIB_T3_RTX_EXPIREDS);
 
 	if (asoc->overall_error_count >= asoc->max_retrans) {
-		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-				SCTP_ERROR(ETIMEDOUT));
-		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
-		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
-				SCTP_PERR(SCTP_ERROR_NO_ERROR));
-		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-		SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
-		return SCTP_DISPOSITION_DELETE_TCB;
+		if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
+			/*
+			 * We are here likely because the receiver had its rwnd
+			 * closed for a while and we have not been able to
+			 * transmit the locally queued data within the maximum
+			 * retransmission attempts limit. Start the T5
+			 * shutdown guard timer to give the receiver one last
+			 * chance and some additional time to recover before
+			 * aborting.
+			 */
+			sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START_ONCE,
+				SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
+		} else {
+			sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+					SCTP_ERROR(ETIMEDOUT));
+			/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
+			sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+					SCTP_PERR(SCTP_ERROR_NO_ERROR));
+			SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+			SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+			return SCTP_DISPOSITION_DELETE_TCB;
+		}
 	}
 
 	/* E1) For the destination address for which the timer
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 0338dc6fdc9d..7c211a7f90f4 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -827,7 +827,7 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 	/* SCTP_STATE_ESTABLISHED */ \
 	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+	TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
 	TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 1c6aec1f9ec4..836aa63ee121 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1454,6 +1454,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 	struct sctp_endpoint *ep;
 	struct sctp_association *asoc;
 	struct list_head *pos, *temp;
+	unsigned int data_was_unread;
 
 	SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout);
 
@@ -1463,6 +1464,10 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 
 	ep = sctp_sk(sk)->ep;
 
+	/* Clean up any skbs sitting on the receive queue. */
+	data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
+	data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
+
 	/* Walk all associations on an endpoint. */
 	list_for_each_safe(pos, temp, &ep->asocs) {
 		asoc = list_entry(pos, struct sctp_association, asocs);
@@ -1480,7 +1485,9 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 			}
 		}
 
-		if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
+		if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
+		    !skb_queue_empty(&asoc->ulpq.reasm) ||
+		    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
 			struct sctp_chunk *chunk;
 
 			chunk = sctp_make_abort_user(asoc, NULL, 0);
@@ -1490,10 +1497,6 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 			sctp_primitive_SHUTDOWN(asoc, NULL);
 	}
 
-	/* Clean up any skbs sitting on the receive queue. */
-	sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
-	sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
-
 	/* On a TCP-style socket, block for at most linger_time if set. */
 	if (sctp_style(sk, TCP) && timeout)
 		sctp_wait_for_close(sk, timeout);
@@ -2143,10 +2146,33 @@ static int sctp_setsockopt_disable_fragments(struct sock *sk,
 static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
 				  unsigned int optlen)
 {
+	struct sctp_association *asoc;
+	struct sctp_ulpevent *event;
+
 	if (optlen > sizeof(struct sctp_event_subscribe))
 		return -EINVAL;
 	if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
 		return -EFAULT;
+
+	/*
+	 * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
+	 * if there is no data to be sent or retransmit, the stack will
+	 * immediately send up this notification.
+	 */
+	if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
+				       &sctp_sk(sk)->subscribe)) {
+		asoc = sctp_id2assoc(sk, 0);
+
+		if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
+			event = sctp_ulpevent_make_sender_dry_event(asoc,
+					GFP_ATOMIC);
+			if (!event)
+				return -ENOMEM;
+
+			sctp_ulpq_tail_event(&asoc->ulpq, event);
+		}
+	}
+
 	return 0;
 }
 
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index e70e5fc87890..8a84017834c2 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -1081,9 +1081,19 @@ void sctp_ulpevent_free(struct sctp_ulpevent *event)
 }
 
 /* Purge the skb lists holding ulpevents. */
-void sctp_queue_purge_ulpevents(struct sk_buff_head *list)
+unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list)
 {
 	struct sk_buff *skb;
-	while ((skb = skb_dequeue(list)) != NULL)
-		sctp_ulpevent_free(sctp_skb2event(skb));
+	unsigned int data_unread = 0;
+
+	while ((skb = skb_dequeue(list)) != NULL) {
+		struct sctp_ulpevent *event = sctp_skb2event(skb);
+
+		if (!sctp_ulpevent_is_notification(event))
+			data_unread += skb->len;
+
+		sctp_ulpevent_free(event);
+	}
+
+	return data_unread;
 }
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 9a80a922c527..e45d2fbbe5a8 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -597,7 +597,7 @@ void rpcb_getport_async(struct rpc_task *task)
 	u32 bind_version;
 	struct rpc_xprt *xprt;
 	struct rpc_clnt	*rpcb_clnt;
-	static struct rpcbind_args *map;
+	struct rpcbind_args *map;
 	struct rpc_task	*child;
 	struct sockaddr_storage addr;
 	struct sockaddr *sap = (struct sockaddr *)&addr;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index a27406b1654f..4814e246a874 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -616,30 +616,25 @@ static void __rpc_execute(struct rpc_task *task)
 	BUG_ON(RPC_IS_QUEUED(task));
 
 	for (;;) {
+		void (*do_action)(struct rpc_task *);
 
 		/*
-		 * Execute any pending callback.
+		 * Execute any pending callback first.
 		 */
-		if (task->tk_callback) {
-			void (*save_callback)(struct rpc_task *);
-
-			/*
-			 * We set tk_callback to NULL before calling it,
-			 * in case it sets the tk_callback field itself:
-			 */
-			save_callback = task->tk_callback;
-			task->tk_callback = NULL;
-			save_callback(task);
-		} else {
+		do_action = task->tk_callback;
+		task->tk_callback = NULL;
+		if (do_action == NULL) {
 			/*
 			 * Perform the next FSM step.
-			 * tk_action may be NULL when the task has been killed
-			 * by someone else.
+			 * tk_action may be NULL if the task has been killed.
+			 * In particular, note that rpc_killall_tasks may
+			 * do this at any time, so beware when dereferencing.
 			 */
-			if (task->tk_action == NULL)
+			do_action = task->tk_action;
+			if (do_action == NULL)
 				break;
-			task->tk_action(task);
 		}
+		do_action(task);
 
 		/*
 		 * Lockless check for whether task is sleeping or not.
diff --git a/net/wireless/core.c b/net/wireless/core.c
index c22ef3492ee6..880dbe2e6f94 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -366,6 +366,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
 
 	mutex_init(&rdev->mtx);
 	mutex_init(&rdev->devlist_mtx);
+	mutex_init(&rdev->sched_scan_mtx);
 	INIT_LIST_HEAD(&rdev->netdev_list);
 	spin_lock_init(&rdev->bss_lock);
 	INIT_LIST_HEAD(&rdev->bss_list);
@@ -701,6 +702,7 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
 	rfkill_destroy(rdev->rfkill);
 	mutex_destroy(&rdev->mtx);
 	mutex_destroy(&rdev->devlist_mtx);
+	mutex_destroy(&rdev->sched_scan_mtx);
 	list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
 		cfg80211_put_bss(&scan->pub);
 	cfg80211_rdev_free_wowlan(rdev);
@@ -737,12 +739,16 @@ static void wdev_cleanup_work(struct work_struct *work)
 		___cfg80211_scan_done(rdev, true);
 	}
 
+	cfg80211_unlock_rdev(rdev);
+
+	mutex_lock(&rdev->sched_scan_mtx);
+
 	if (WARN_ON(rdev->sched_scan_req &&
 		    rdev->sched_scan_req->dev == wdev->netdev)) {
 		__cfg80211_stop_sched_scan(rdev, false);
 	}
 
-	cfg80211_unlock_rdev(rdev);
+	mutex_unlock(&rdev->sched_scan_mtx);
 
 	mutex_lock(&rdev->devlist_mtx);
 	rdev->opencount--;
@@ -830,9 +836,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
 		break;
 	case NL80211_IFTYPE_P2P_CLIENT:
 	case NL80211_IFTYPE_STATION:
-		cfg80211_lock_rdev(rdev);
+		mutex_lock(&rdev->sched_scan_mtx);
 		__cfg80211_stop_sched_scan(rdev, false);
-		cfg80211_unlock_rdev(rdev);
+		mutex_unlock(&rdev->sched_scan_mtx);
 
 		wdev_lock(wdev);
 #ifdef CONFIG_CFG80211_WEXT
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 3dce1f167eba..a570ff9214ec 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -65,6 +65,8 @@ struct cfg80211_registered_device {
 	struct work_struct scan_done_wk;
 	struct work_struct sched_scan_results_wk;
 
+	struct mutex sched_scan_mtx;
+
 #ifdef CONFIG_NL80211_TESTMODE
 	struct genl_info *testmode_info;
 #endif
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 491b0ba40c43..6a82c898f831 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3470,9 +3470,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
 		return -EINVAL;
 
-	if (rdev->sched_scan_req)
-		return -EINPROGRESS;
-
 	if (!info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
 		return -EINVAL;
 
@@ -3511,12 +3508,21 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 	if (ie_len > wiphy->max_scan_ie_len)
 		return -EINVAL;
 
+	mutex_lock(&rdev->sched_scan_mtx);
+
+	if (rdev->sched_scan_req) {
+		err = -EINPROGRESS;
+		goto out;
+	}
+
 	request = kzalloc(sizeof(*request)
 			+ sizeof(*request->ssids) * n_ssids
 			+ sizeof(*request->channels) * n_channels
 			+ ie_len, GFP_KERNEL);
-	if (!request)
-		return -ENOMEM;
+	if (!request) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	if (n_ssids)
 		request->ssids = (void *)&request->channels[n_channels];
@@ -3614,6 +3620,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 out_free:
 	kfree(request);
 out:
+	mutex_unlock(&rdev->sched_scan_mtx);
 	return err;
 }
 
@@ -3621,12 +3628,17 @@ static int nl80211_stop_sched_scan(struct sk_buff *skb,
 					struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	int err;
 
 	if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
 	    !rdev->ops->sched_scan_stop)
 		return -EOPNOTSUPP;
 
-	return __cfg80211_stop_sched_scan(rdev, false);
+	mutex_lock(&rdev->sched_scan_mtx);
+	err = __cfg80211_stop_sched_scan(rdev, false);
+	mutex_unlock(&rdev->sched_scan_mtx);
+
+	return err;
 }
 
 static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 5d23503dd5e0..2cc9d4ab5578 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -100,14 +100,14 @@ void __cfg80211_sched_scan_results(struct work_struct *wk)
 	rdev = container_of(wk, struct cfg80211_registered_device,
 			    sched_scan_results_wk);
 
-	cfg80211_lock_rdev(rdev);
+	mutex_lock(&rdev->sched_scan_mtx);
 
 	/* we don't have sched_scan_req anymore if the scan is stopping */
 	if (rdev->sched_scan_req)
 		nl80211_send_sched_scan_results(rdev,
 						rdev->sched_scan_req->dev);
 
-	cfg80211_unlock_rdev(rdev);
+	mutex_unlock(&rdev->sched_scan_mtx);
 }
 
 void cfg80211_sched_scan_results(struct wiphy *wiphy)
@@ -123,9 +123,9 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
 {
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
-	cfg80211_lock_rdev(rdev);
+	mutex_lock(&rdev->sched_scan_mtx);
 	__cfg80211_stop_sched_scan(rdev, true);
-	cfg80211_unlock_rdev(rdev);
+	mutex_unlock(&rdev->sched_scan_mtx);
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
 
@@ -134,7 +134,7 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
 {
 	struct net_device *dev;
 
-	ASSERT_RDEV_LOCK(rdev);
+	lockdep_assert_held(&rdev->sched_scan_mtx);
 
 	if (!rdev->sched_scan_req)
 		return 0;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index d70f85eb7864..9414b9c5b1e4 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1345,6 +1345,8 @@ out:
 		xfrm_state_check_expire(x1);
 
 		err = 0;
+		x->km.state = XFRM_STATE_DEAD;
+		__xfrm_state_put(x);
 	}
 	spin_unlock_bh(&x1->lock);
 