Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c                            4
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c     27
-rw-r--r--  net/batman-adv/routing.c                    8
-rw-r--r--  net/batman-adv/types.h                      2
-rw-r--r--  net/bluetooth/smp.c                         6
-rw-r--r--  net/core/skbuff.c                           6
-rw-r--r--  net/ipv4/route.c                            9
-rw-r--r--  net/ipv4/tcp.c                             10
-rw-r--r--  net/ipv4/tcp_input.c                       13
-rw-r--r--  net/ipv4/tcp_ipv4.c                         1
-rw-r--r--  net/ipv4/tcp_minisocks.c                    1
-rw-r--r--  net/ipv4/tcp_timer.c                        4
-rw-r--r--  net/ipv6/route.c                            4
-rw-r--r--  net/mac80211/iface.c                        2
-rw-r--r--  net/mac80211/mlme.c                        35
-rw-r--r--  net/mac80211/sta_info.c                     4
-rw-r--r--  net/mac80211/util.c                         4
-rw-r--r--  net/mac80211/wpa.c                         14
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c              3
-rw-r--r--  net/netfilter/xt_CT.c                      10
-rw-r--r--  net/netfilter/xt_TEE.c                      1
-rw-r--r--  net/netfilter/xt_nat.c                      8
-rw-r--r--  net/netlink/af_netlink.c                   19
-rw-r--r--  net/sunrpc/xprtsock.c                      41
-rw-r--r--  net/wireless/mlme.c                        12
25 files changed, 145 insertions(+), 103 deletions(-)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 9096bcb08132..ee070722a3a3 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -463,7 +463,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 
 	case NETDEV_PRE_TYPE_CHANGE:
 		/* Forbid underlaying device to change its type. */
-		return NOTIFY_BAD;
+		if (vlan_uses_dev(dev))
+			return NOTIFY_BAD;
+		break;
 
 	case NETDEV_NOTIFY_PEERS:
 	case NETDEV_BONDING_FAILOVER:
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 0a9084ad19a6..fd8d5afec0dd 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1167,6 +1167,8 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
 	uint16_t crc;
 	unsigned long entrytime;
 
+	spin_lock_init(&bat_priv->bla.bcast_duplist_lock);
+
 	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
 
 	/* setting claim destination address */
@@ -1210,8 +1212,8 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
 /**
  * batadv_bla_check_bcast_duplist
  * @bat_priv: the bat priv with all the soft interface information
- * @bcast_packet: originator mac address
- * @hdr_size: maximum length of the frame
+ * @bcast_packet: encapsulated broadcast frame plus batman header
+ * @bcast_packet_len: length of encapsulated broadcast frame plus batman header
  *
  * check if it is on our broadcast list. Another gateway might
  * have sent the same packet because it is connected to the same backbone,
@@ -1224,20 +1226,22 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
  */
 int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
 				   struct batadv_bcast_packet *bcast_packet,
-				   int hdr_size)
+				   int bcast_packet_len)
 {
-	int i, length, curr;
+	int i, length, curr, ret = 0;
 	uint8_t *content;
 	uint16_t crc;
 	struct batadv_bcast_duplist_entry *entry;
 
-	length = hdr_size - sizeof(*bcast_packet);
+	length = bcast_packet_len - sizeof(*bcast_packet);
 	content = (uint8_t *)bcast_packet;
 	content += sizeof(*bcast_packet);
 
 	/* calculate the crc ... */
 	crc = crc16(0, content, length);
 
+	spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
+
 	for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
 		curr = (bat_priv->bla.bcast_duplist_curr + i);
 		curr %= BATADV_DUPLIST_SIZE;
@@ -1259,9 +1263,12 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
 		/* this entry seems to match: same crc, not too old,
 		 * and from another gw. therefore return 1 to forbid it.
 		 */
-		return 1;
+		ret = 1;
+		goto out;
 	}
-	/* not found, add a new entry (overwrite the oldest entry) */
+	/* not found, add a new entry (overwrite the oldest entry)
+	 * and allow it, its the first occurence.
+	 */
 	curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
 	curr %= BATADV_DUPLIST_SIZE;
 	entry = &bat_priv->bla.bcast_duplist[curr];
@@ -1270,8 +1277,10 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
 	memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
 	bat_priv->bla.bcast_duplist_curr = curr;
 
-	/* allow it, its the first occurence. */
-	return 0;
+out:
+	spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);
+
+	return ret;
 }
 
 
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 939fc01371df..376b4cc6ca82 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1124,8 +1124,14 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
 
 	spin_unlock_bh(&orig_node->bcast_seqno_lock);
 
+	/* keep skb linear for crc calculation */
+	if (skb_linearize(skb) < 0)
+		goto out;
+
+	bcast_packet = (struct batadv_bcast_packet *)skb->data;
+
 	/* check whether this has been sent by another originator before */
-	if (batadv_bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size))
+	if (batadv_bla_check_bcast_duplist(bat_priv, bcast_packet, skb->len))
 		goto out;
 
 	/* rebroadcast packet */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 2ed82caacdca..ac1e07a80454 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -205,6 +205,8 @@ struct batadv_priv_bla {
 	struct batadv_hashtable *backbone_hash;
 	struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
 	int bcast_duplist_curr;
+	/* protects bcast_duplist and bcast_duplist_curr */
+	spinlock_t bcast_duplist_lock;
 	struct batadv_bla_claim_dst claim_dest;
 	struct delayed_work work;
 };
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 8c225ef349cd..2ac8d50861e0 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -32,6 +32,8 @@
 
 #define SMP_TIMEOUT	msecs_to_jiffies(30000)
 
+#define AUTH_REQ_MASK	0x07
+
 static inline void swap128(u8 src[16], u8 dst[16])
 {
 	int i;
@@ -230,7 +232,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
 		req->max_key_size = SMP_MAX_ENC_KEY_SIZE;
 		req->init_key_dist = 0;
 		req->resp_key_dist = dist_keys;
-		req->auth_req = authreq;
+		req->auth_req = (authreq & AUTH_REQ_MASK);
 		return;
 	}
 
@@ -239,7 +241,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
 	rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
 	rsp->init_key_dist = 0;
 	rsp->resp_key_dist = req->resp_key_dist & dist_keys;
-	rsp->auth_req = authreq;
+	rsp->auth_req = (authreq & AUTH_REQ_MASK);
 }
 
 static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 6e04b1fa11f2..4007c1437fda 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3379,10 +3379,12 @@ EXPORT_SYMBOL(__skb_warn_lro_forwarding);
 
 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
 {
-	if (head_stolen)
+	if (head_stolen) {
+		skb_release_head_state(skb);
 		kmem_cache_free(skbuff_head_cache, skb);
-	else
+	} else {
 		__kfree_skb(skb);
+	}
 }
 EXPORT_SYMBOL(kfree_skb_partial);
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 432f4bb77238..a8c651216fa6 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1163,8 +1163,12 @@ static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
 	spin_lock_bh(&fnhe_lock);
 
 	if (daddr == fnhe->fnhe_daddr) {
-		struct rtable *orig;
-
+		struct rtable *orig = rcu_dereference(fnhe->fnhe_rth);
+		if (orig && rt_is_expired(orig)) {
+			fnhe->fnhe_gw = 0;
+			fnhe->fnhe_pmtu = 0;
+			fnhe->fnhe_expires = 0;
+		}
 		if (fnhe->fnhe_pmtu) {
 			unsigned long expires = fnhe->fnhe_expires;
 			unsigned long diff = expires - jiffies;
@@ -1181,7 +1185,6 @@ static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
 		} else if (!rt->rt_gateway)
 			rt->rt_gateway = daddr;
 
-		orig = rcu_dereference(fnhe->fnhe_rth);
 		rcu_assign_pointer(fnhe->fnhe_rth, rt);
 		if (orig)
 			rt_free(orig);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f32c02e2a543..197c0008503c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -549,14 +549,12 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 		    !tp->urg_data ||
 		    before(tp->urg_seq, tp->copied_seq) ||
 		    !before(tp->urg_seq, tp->rcv_nxt)) {
-			struct sk_buff *skb;
 
 			answ = tp->rcv_nxt - tp->copied_seq;
 
-			/* Subtract 1, if FIN is in queue. */
-			skb = skb_peek_tail(&sk->sk_receive_queue);
-			if (answ && skb)
-				answ -= tcp_hdr(skb)->fin;
+			/* Subtract 1, if FIN was received */
+			if (answ && sock_flag(sk, SOCK_DONE))
+				answ--;
 		} else
 			answ = tp->urg_seq - tp->copied_seq;
 		release_sock(sk);
@@ -2766,6 +2764,8 @@ void tcp_get_info(const struct sock *sk, struct tcp_info *info)
 		info->tcpi_options |= TCPI_OPT_ECN;
 	if (tp->ecn_flags & TCP_ECN_SEEN)
 		info->tcpi_options |= TCPI_OPT_ECN_SEEN;
+	if (tp->syn_data_acked)
+		info->tcpi_options |= TCPI_OPT_SYN_DATA;
 
 	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
 	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 432c36649db3..1db663983587 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5646,6 +5646,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
 		tcp_rearm_rto(sk);
 		return true;
 	}
+	tp->syn_data_acked = tp->syn_data;
 	return false;
 }
 
@@ -5963,7 +5964,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
 	req = tp->fastopen_rsk;
 	if (req != NULL) {
-		BUG_ON(sk->sk_state != TCP_SYN_RECV &&
+		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
 		    sk->sk_state != TCP_FIN_WAIT1);
 
 		if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
@@ -6052,7 +6053,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		 * ACK we have received, this would have acknowledged
 		 * our SYNACK so stop the SYNACK timer.
 		 */
-		if (acceptable && req != NULL) {
+		if (req != NULL) {
+			/* Return RST if ack_seq is invalid.
+			 * Note that RFC793 only says to generate a
+			 * DUPACK for it but for TCP Fast Open it seems
+			 * better to treat this case like TCP_SYN_RECV
+			 * above.
+			 */
+			if (!acceptable)
+				return 1;
 			/* We no longer need the request sock. */
 			reqsk_fastopen_remove(sk, req, false);
 			tcp_rearm_rto(sk);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ef998b008a57..0c4a64355603 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1461,6 +1461,7 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
 		skb_set_owner_r(skb, child);
 		__skb_queue_tail(&child->sk_receive_queue, skb);
 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+		tp->syn_data_acked = 1;
 	}
 	sk->sk_data_ready(sk, 0);
 	bh_unlock_sock(child);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 27536ba16c9d..a7302d974f32 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -510,6 +510,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 		newtp->rx_opt.mss_clamp = req->mss;
 		TCP_ECN_openreq_child(newtp, req);
 		newtp->fastopen_rsk = NULL;
+		newtp->syn_data_acked = 0;
 
 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
 	}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index fc04711e80c8..d47c1b4421a3 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -347,8 +347,8 @@ void tcp_retransmit_timer(struct sock *sk)
 		return;
 	}
 	if (tp->fastopen_rsk) {
-		BUG_ON(sk->sk_state != TCP_SYN_RECV &&
+		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
 		    sk->sk_state != TCP_FIN_WAIT1);
 		tcp_fastopen_synack_timer(sk);
 		/* Before we receive ACK to our SYN-ACK don't retransmit
 		 * anything else (e.g., data or FIN segments).
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7c7e963260e1..b1e6cf0b95fd 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -219,7 +219,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
 };
 
 static const u32 ip6_template_metrics[RTAX_MAX] = {
-	[RTAX_HOPLIMIT - 1] = 255,
+	[RTAX_HOPLIMIT - 1] = 0,
 };
 
 static const struct rt6_info ip6_null_entry_template = {
@@ -1232,7 +1232,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 	rt->rt6i_dst.addr = fl6->daddr;
 	rt->rt6i_dst.plen = 128;
 	rt->rt6i_idev = idev;
-	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
+	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
 
 	spin_lock_bh(&icmp6_dst_lock);
 	rt->dst.next = icmp6_dst_gc_list;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 6f8a73c64fb3..7de7717ad67d 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -853,7 +853,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 			if (info->control.vif == &sdata->vif) {
 				__skb_unlink(skb, &local->pending[i]);
-				dev_kfree_skb_irq(skb);
+				ieee80211_free_txskb(&local->hw, skb);
 			}
 		}
 	}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index e714ed8bb198..1b7eed252fe9 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3099,22 +3099,32 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
 				   ht_cfreq, ht_oper->primary_chan,
 				   cbss->channel->band);
 			ht_oper = NULL;
+		} else {
+			channel_type = NL80211_CHAN_HT20;
 		}
 	}
 
-	if (ht_oper) {
-		channel_type = NL80211_CHAN_HT20;
+	if (ht_oper && sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
+		/*
+		 * cfg80211 already verified that the channel itself can
+		 * be used, but it didn't check that we can do the right
+		 * HT type, so do that here as well. If HT40 isn't allowed
+		 * on this channel, disable 40 MHz operation.
+		 */
 
-		if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
-			switch (ht_oper->ht_param &
-				IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
-			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+		switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+		case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+			if (cbss->channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
+				ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
+			else
 				channel_type = NL80211_CHAN_HT40PLUS;
 			break;
 		case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+			if (cbss->channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
+				ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
+			else
 				channel_type = NL80211_CHAN_HT40MINUS;
 			break;
-			}
 		}
 	}
 
@@ -3549,6 +3559,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
 {
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+	bool tx = !req->local_state_change;
 
 	mutex_lock(&ifmgd->mtx);
 
@@ -3565,12 +3576,12 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
 	if (ifmgd->associated &&
 	    ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
 		ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
-				       req->reason_code, true, frame_buf);
+				       req->reason_code, tx, frame_buf);
 	} else {
 		drv_mgd_prepare_tx(sdata->local, sdata);
 		ieee80211_send_deauth_disassoc(sdata, req->bssid,
 					       IEEE80211_STYPE_DEAUTH,
-					       req->reason_code, true,
+					       req->reason_code, tx,
 					       frame_buf);
 	}
 
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 797dd36a220d..0a4e4c04db89 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -650,7 +650,7 @@ static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local,
 		 */
 		if (!skb)
 			break;
-		dev_kfree_skb(skb);
+		ieee80211_free_txskb(&local->hw, skb);
 	}
 
 	/*
@@ -679,7 +679,7 @@ static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local,
 		local->total_ps_buffered--;
 		ps_dbg(sta->sdata, "Buffered frame expired (STA %pM)\n",
 		       sta->sta.addr);
-		dev_kfree_skb(skb);
+		ieee80211_free_txskb(&local->hw, skb);
 	}
 
 	/*
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 22ca35054dd0..94e586873979 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -406,7 +406,7 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
 	int queue = info->hw_queue;
 
 	if (WARN_ON(!info->control.vif)) {
-		kfree_skb(skb);
+		ieee80211_free_txskb(&local->hw, skb);
 		return;
 	}
 
@@ -431,7 +431,7 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
 		if (WARN_ON(!info->control.vif)) {
-			kfree_skb(skb);
+			ieee80211_free_txskb(&local->hw, skb);
 			continue;
 		}
 
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index bdb53aba888e..8bd2f5c6a56e 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -106,7 +106,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
 	if (status->flag & RX_FLAG_MMIC_ERROR)
 		goto mic_fail;
 
-	if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key)
+	if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key &&
+	    rx->key->conf.cipher == WLAN_CIPHER_SUITE_TKIP)
 		goto update_iv;
 
 	return RX_CONTINUE;
@@ -545,14 +546,19 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
 
 static void bip_aad(struct sk_buff *skb, u8 *aad)
 {
+	__le16 mask_fc;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+
 	/* BIP AAD: FC(masked) || A1 || A2 || A3 */
 
 	/* FC type/subtype */
-	aad[0] = skb->data[0];
 	/* Mask FC Retry, PwrMgt, MoreData flags to zero */
-	aad[1] = skb->data[1] & ~(BIT(4) | BIT(5) | BIT(6));
+	mask_fc = hdr->frame_control;
+	mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY | IEEE80211_FCTL_PM |
+				IEEE80211_FCTL_MOREDATA);
+	put_unaligned(mask_fc, (__le16 *) &aad[0]);
 	/* A1 || A2 || A3 */
-	memcpy(aad + 2, skb->data + 4, 3 * ETH_ALEN);
+	memcpy(aad + 2, &hdr->addr1, 3 * ETH_ALEN);
 }
 
 
557 563
558 564
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 7e7198b51c06..c4ee43710aab 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2589,6 +2589,8 @@ __ip_vs_get_timeouts(struct net *net, struct ip_vs_timeout_user *u)
 	struct ip_vs_proto_data *pd;
 #endif
 
+	memset(u, 0, sizeof (*u));
+
 #ifdef CONFIG_IP_VS_PROTO_TCP
 	pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
 	u->tcp_timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;
@@ -2766,7 +2768,6 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
 	{
 		struct ip_vs_timeout_user t;
 
-		memset(&t, 0, sizeof(t));
 		__ip_vs_get_timeouts(net, &t);
 		if (copy_to_user(user, &t, sizeof(t)) != 0)
 			ret = -EFAULT;
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 16c712563860..ae7f5daeee43 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -180,9 +180,9 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
 	typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
 	struct ctnl_timeout *timeout;
 	struct nf_conn_timeout *timeout_ext;
-	const struct ipt_entry *e = par->entryinfo;
 	struct nf_conntrack_l4proto *l4proto;
 	int ret = 0;
+	u8 proto;
 
 	rcu_read_lock();
 	timeout_find_get = rcu_dereference(nf_ct_timeout_find_get_hook);
@@ -192,9 +192,11 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
 		goto out;
 	}
 
-	if (e->ip.invflags & IPT_INV_PROTO) {
+	proto = xt_ct_find_proto(par);
+	if (!proto) {
 		ret = -EINVAL;
-		pr_info("You cannot use inversion on L4 protocol\n");
+		pr_info("You must specify a L4 protocol, and not use "
+			"inversions on it.\n");
 		goto out;
 	}
 
@@ -214,7 +216,7 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
 	/* Make sure the timeout policy matches any existing protocol tracker,
 	 * otherwise default to generic.
 	 */
-	l4proto = __nf_ct_l4proto_find(par->family, e->ip.proto);
+	l4proto = __nf_ct_l4proto_find(par->family, proto);
 	if (timeout->l4proto->l4proto != l4proto->l4proto) {
 		ret = -EINVAL;
 		pr_info("Timeout policy `%s' can only be used by L4 protocol "
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index ee2e5bc5a8c7..bd93e51d30ac 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -70,6 +70,7 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
 	fl4.daddr = info->gw.ip;
 	fl4.flowi4_tos = RT_TOS(iph->tos);
 	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+	fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
 	rt = ip_route_output_key(net, &fl4);
 	if (IS_ERR(rt))
 		return false;
diff --git a/net/netfilter/xt_nat.c b/net/netfilter/xt_nat.c
index 81aafa8e4fef..bea7464cc43f 100644
--- a/net/netfilter/xt_nat.c
+++ b/net/netfilter/xt_nat.c
@@ -111,7 +111,7 @@ static struct xt_target xt_nat_target_reg[] __read_mostly = {
 		.family = NFPROTO_IPV4,
 		.table = "nat",
 		.hooks = (1 << NF_INET_POST_ROUTING) |
-			 (1 << NF_INET_LOCAL_OUT),
+			 (1 << NF_INET_LOCAL_IN),
 		.me = THIS_MODULE,
 	},
 	{
@@ -123,7 +123,7 @@ static struct xt_target xt_nat_target_reg[] __read_mostly = {
 		.family = NFPROTO_IPV4,
 		.table = "nat",
 		.hooks = (1 << NF_INET_PRE_ROUTING) |
-			 (1 << NF_INET_LOCAL_IN),
+			 (1 << NF_INET_LOCAL_OUT),
 		.me = THIS_MODULE,
 	},
 	{
@@ -133,7 +133,7 @@ static struct xt_target xt_nat_target_reg[] __read_mostly = {
 		.targetsize = sizeof(struct nf_nat_range),
 		.table = "nat",
 		.hooks = (1 << NF_INET_POST_ROUTING) |
-			 (1 << NF_INET_LOCAL_OUT),
+			 (1 << NF_INET_LOCAL_IN),
 		.me = THIS_MODULE,
 	},
 	{
@@ -143,7 +143,7 @@ static struct xt_target xt_nat_target_reg[] __read_mostly = {
 		.targetsize = sizeof(struct nf_nat_range),
 		.table = "nat",
 		.hooks = (1 << NF_INET_PRE_ROUTING) |
-			 (1 << NF_INET_LOCAL_IN),
+			 (1 << NF_INET_LOCAL_OUT),
 		.me = THIS_MODULE,
 	},
 };
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 01e944a017a4..4da797fa5ec5 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -138,6 +138,8 @@ static int netlink_dump(struct sock *sk);
 static DEFINE_RWLOCK(nl_table_lock);
 static atomic_t nl_table_users = ATOMIC_INIT(0);
 
+#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));
+
 static ATOMIC_NOTIFIER_HEAD(netlink_chain);
 
 static inline u32 netlink_group_mask(u32 group)
@@ -345,6 +347,11 @@ netlink_update_listeners(struct sock *sk)
 	struct hlist_node *node;
 	unsigned long mask;
 	unsigned int i;
+	struct listeners *listeners;
+
+	listeners = nl_deref_protected(tbl->listeners);
+	if (!listeners)
+		return;
 
 	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
 		mask = 0;
@@ -352,7 +359,7 @@ netlink_update_listeners(struct sock *sk)
 			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
 				mask |= nlk_sk(sk)->groups[i];
 		}
-		tbl->listeners->masks[i] = mask;
+		listeners->masks[i] = mask;
 	}
 	/* this function is only called with the netlink table "grabbed", which
 	 * makes sure updates are visible before bind or setsockopt return. */
@@ -536,7 +543,11 @@ static int netlink_release(struct socket *sock)
 	if (netlink_is_kernel(sk)) {
 		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
 		if (--nl_table[sk->sk_protocol].registered == 0) {
-			kfree(nl_table[sk->sk_protocol].listeners);
+			struct listeners *old;
+
+			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
+			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
+			kfree_rcu(old, rcu);
 			nl_table[sk->sk_protocol].module = NULL;
 			nl_table[sk->sk_protocol].bind = NULL;
 			nl_table[sk->sk_protocol].flags = 0;
@@ -982,7 +993,7 @@ int netlink_has_listeners(struct sock *sk, unsigned int group)
 	rcu_read_lock();
 	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
 
-	if (group - 1 < nl_table[sk->sk_protocol].groups)
+	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
 		res = test_bit(group - 1, listeners->masks);
 
 	rcu_read_unlock();
@@ -1625,7 +1636,7 @@ int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
 	new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
 	if (!new)
 		return -ENOMEM;
-	old = rcu_dereference_protected(tbl->listeners, 1);
+	old = nl_deref_protected(tbl->listeners);
 	memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
 	rcu_assign_pointer(tbl->listeners, new);
 
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index aaaadfbe36e9..75853cabf4c9 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -254,7 +254,6 @@ struct sock_xprt {
 	void (*old_data_ready)(struct sock *, int);
 	void (*old_state_change)(struct sock *);
 	void (*old_write_space)(struct sock *);
-	void (*old_error_report)(struct sock *);
 };
 
 /*
@@ -737,10 +736,10 @@ static int xs_tcp_send_request(struct rpc_task *task)
 		dprintk("RPC: sendmsg returned unrecognized error %d\n",
 			-status);
 	case -ECONNRESET:
-	case -EPIPE:
 		xs_tcp_shutdown(xprt);
 	case -ECONNREFUSED:
 	case -ENOTCONN:
+	case -EPIPE:
 		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
 	}
 
@@ -781,7 +780,6 @@ static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
 	transport->old_data_ready = sk->sk_data_ready;
 	transport->old_state_change = sk->sk_state_change;
 	transport->old_write_space = sk->sk_write_space;
-	transport->old_error_report = sk->sk_error_report;
 }
 
 static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
@@ -789,7 +787,6 @@ static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *s
 	sk->sk_data_ready = transport->old_data_ready;
 	sk->sk_state_change = transport->old_state_change;
 	sk->sk_write_space = transport->old_write_space;
-	sk->sk_error_report = transport->old_error_report;
 }
 
 static void xs_reset_transport(struct sock_xprt *transport)
@@ -1453,7 +1450,7 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
 	xprt_clear_connecting(xprt);
 }
 
-static void xs_sock_mark_closed(struct rpc_xprt *xprt)
+static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
 {
 	smp_mb__before_clear_bit();
 	clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
@@ -1461,6 +1458,11 @@ static void xs_sock_mark_closed(struct rpc_xprt *xprt)
 	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
 	clear_bit(XPRT_CLOSING, &xprt->state);
 	smp_mb__after_clear_bit();
+}
+
+static void xs_sock_mark_closed(struct rpc_xprt *xprt)
+{
+	xs_sock_reset_connection_flags(xprt);
 	/* Mark transport as closed and wake up all pending tasks */
 	xprt_disconnect_done(xprt);
 }
@@ -1516,6 +1518,7 @@ static void xs_tcp_state_change(struct sock *sk)
 	case TCP_CLOSE_WAIT:
 		/* The server initiated a shutdown of the socket */
 		xprt->connect_cookie++;
+		clear_bit(XPRT_CONNECTED, &xprt->state);
 		xs_tcp_force_close(xprt);
 	case TCP_CLOSING:
 		/*
@@ -1540,25 +1543,6 @@ static void xs_tcp_state_change(struct sock *sk)
 	read_unlock_bh(&sk->sk_callback_lock);
 }
 
-/**
- * xs_error_report - callback mainly for catching socket errors
- * @sk: socket
- */
-static void xs_error_report(struct sock *sk)
-{
-	struct rpc_xprt *xprt;
-
-	read_lock_bh(&sk->sk_callback_lock);
-	if (!(xprt = xprt_from_sock(sk)))
-		goto out;
-	dprintk("RPC: %s client %p...\n"
-		"RPC: error %d\n",
-		__func__, xprt, sk->sk_err);
-	xprt_wake_pending_tasks(xprt, -EAGAIN);
-out:
-	read_unlock_bh(&sk->sk_callback_lock);
-}
-
 static void xs_write_space(struct sock *sk)
 {
 	struct socket *sock;
@@ -1858,7 +1842,6 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
 	sk->sk_user_data = xprt;
 	sk->sk_data_ready = xs_local_data_ready;
 	sk->sk_write_space = xs_udp_write_space;
-	sk->sk_error_report = xs_error_report;
 	sk->sk_allocation = GFP_ATOMIC;
 
 	xprt_clear_connected(xprt);
@@ -1983,7 +1966,6 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
 	sk->sk_user_data = xprt;
 	sk->sk_data_ready = xs_udp_data_ready;
 	sk->sk_write_space = xs_udp_write_space;
-	sk->sk_error_report = xs_error_report;
 	sk->sk_no_check = UDP_CSUM_NORCV;
 	sk->sk_allocation = GFP_ATOMIC;
 
@@ -2050,10 +2032,8 @@ static void xs_abort_connection(struct sock_xprt *transport)
 	any.sa_family = AF_UNSPEC;
 	result = kernel_connect(transport->sock, &any, sizeof(any), 0);
 	if (!result)
-		xs_sock_mark_closed(&transport->xprt);
-	else
-		dprintk("RPC: AF_UNSPEC connect return code %d\n",
-				result);
+		xs_sock_reset_connection_flags(&transport->xprt);
+	dprintk("RPC: AF_UNSPEC connect return code %d\n", result);
 }
 
 static void xs_tcp_reuse_connection(struct sock_xprt *transport)
@@ -2098,7 +2078,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
 	sk->sk_data_ready = xs_tcp_data_ready;
 	sk->sk_state_change = xs_tcp_state_change;
 	sk->sk_write_space = xs_tcp_write_space;
-	sk->sk_error_report = xs_error_report;
 	sk->sk_allocation = GFP_ATOMIC;
 
 	/* socket options */
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 8016fee0752b..904a7f368325 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -457,20 +457,14 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
 		.reason_code = reason,
 		.ie = ie,
 		.ie_len = ie_len,
+		.local_state_change = local_state_change,
 	};
 
 	ASSERT_WDEV_LOCK(wdev);
 
-	if (local_state_change) {
-		if (wdev->current_bss &&
-		    ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
-			cfg80211_unhold_bss(wdev->current_bss);
-			cfg80211_put_bss(&wdev->current_bss->pub);
-			wdev->current_bss = NULL;
-		}
-
+	if (local_state_change && (!wdev->current_bss ||
+	    !ether_addr_equal(wdev->current_bss->pub.bssid, bssid)))
 		return 0;
-	}
 
 	return rdev->ops->deauth(&rdev->wiphy, dev, &req);
 }