author     Linus Torvalds <torvalds@linux-foundation.org>   2018-12-09 18:12:33 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-12-09 18:12:33 -0500
commit     d48f782e4fb20dc7ec935ca0ca41ae31e4a69362 (patch)
tree       482270b85d4ab9b1284e07e4cb439b4dc7af919f /net
parent     8586ca8a214471e4573d76356aabe890bfecdc8a (diff)
parent     35cc3cefc4de90001c9137e2d01dd9d06b11acfb (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "A decent batch of fixes here. I'd say about half are for problems that
  have existed for a while, and half are for new regressions added in
  the 4.20 merge window.

   1) Fix 10G SFP phy module detection in mvpp2, from Baruch Siach.

   2) Revert bogus emac driver change, from Benjamin Herrenschmidt.

   3) Handle BPF exported data structure with pointers when building
      32-bit userland, from Daniel Borkmann.

   4) Memory leak fix in act_police, from Davide Caratti.

   5) Check RX checksum offload in RX descriptors properly in aquantia
      driver, from Dmitry Bogdanov.

   6) SKB unlink fix in various spots, from Edward Cree.

   7) ndo_dflt_fdb_dump() only works with ethernet, enforce this, from
      Eric Dumazet.

   8) Fix FID leak in mlxsw driver, from Ido Schimmel.

   9) IOTLB locking fix in vhost, from Jean-Philippe Brucker.

  10) Fix SKB truesize accounting in ipv4/ipv6/netfilter frag memory
      limits otherwise namespace exit can hang. From Jiri Wiesner.

  11) Address block parsing length fixes in x25 from Martin Schiller.

  12) IRQ and ring accounting fixes in bnxt_en, from Michael Chan.

  13) For tun interfaces, only iface delete works with rtnl ops, enforce
      this by disallowing add. From Nicolas Dichtel.

  14) Use after free in liquidio, from Pan Bian.

  15) Fix SKB use after passing to netif_receive_skb(), from Prashant
      Bhole.

  16) Static key accounting and other fixes in XPS from Sabrina Dubroca.

  17) Partially initialized flow key passed to ip6_route_output(), from
      Shmulik Ladkani.

  18) Fix RTNL deadlock during reset in ibmvnic driver, from Thomas
      Falcon.

  19) Several small TCP fixes (off-by-one on window probe abort, NULL
      deref in tail loss probe, SNMP mis-estimations) from Yuchung Cheng"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (93 commits)
  net/sched: cls_flower: Reject duplicated rules also under skip_sw
  bnxt_en: Fix _bnxt_get_max_rings() for 57500 chips.
  bnxt_en: Fix NQ/CP rings accounting on the new 57500 chips.
  bnxt_en: Keep track of reserved IRQs.
  bnxt_en: Fix CNP CoS queue regression.
  net/mlx4_core: Correctly set PFC param if global pause is turned off.
  Revert "net/ibm/emac: wrong bit is used for STA control"
  neighbour: Avoid writing before skb->head in neigh_hh_output()
  ipv6: Check available headroom in ip6_xmit() even without options
  tcp: lack of available data can also cause TSO defer
  ipv6: sr: properly initialize flowi6 prior passing to ip6_route_output
  mlxsw: spectrum_switchdev: Fix VLAN device deletion via ioctl
  mlxsw: spectrum_router: Relax GRE decap matching check
  mlxsw: spectrum_switchdev: Avoid leaking FID's reference count
  mlxsw: spectrum_nve: Remove easily triggerable warnings
  ipv4: ipv6: netfilter: Adjust the frag mem limit when truesize changes
  sctp: frag_point sanity check
  tcp: fix NULL ref in tail loss probe
  tcp: Do not underestimate rwnd_limited
  net: use skb_list_del_init() to remove from RX sublists
  ...
Diffstat (limited to 'net')
-rw-r--r--  net/bpf/test_run.c                       | 21
-rw-r--r--  net/core/dev.c                           | 65
-rw-r--r--  net/core/filter.c                        | 27
-rw-r--r--  net/core/rtnetlink.c                     |  3
-rw-r--r--  net/dsa/master.c                         | 34
-rw-r--r--  net/dsa/slave.c                          | 28
-rw-r--r--  net/ipv4/ip_fragment.c                   |  7
-rw-r--r--  net/ipv4/ip_input.c                      |  4
-rw-r--r--  net/ipv4/tcp_output.c                    | 45
-rw-r--r--  net/ipv4/tcp_timer.c                     | 10
-rw-r--r--  net/ipv6/ip6_input.c                     |  4
-rw-r--r--  net/ipv6/ip6_output.c                    | 42
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c  |  8
-rw-r--r--  net/ipv6/reassembly.c                    |  8
-rw-r--r--  net/ipv6/seg6_iptunnel.c                 |  1
-rw-r--r--  net/mac80211/cfg.c                       |  7
-rw-r--r--  net/mac80211/iface.c                     |  2
-rw-r--r--  net/mac80211/mlme.c                      | 12
-rw-r--r--  net/mac80211/rx.c                        |  5
-rw-r--r--  net/mac80211/status.c                    |  2
-rw-r--r--  net/mac80211/tx.c                        |  4
-rw-r--r--  net/openvswitch/conntrack.c              |  2
-rw-r--r--  net/sched/act_police.c                   | 24
-rw-r--r--  net/sched/cls_flower.c                   | 23
-rw-r--r--  net/sched/sch_netem.c                    |  3
-rw-r--r--  net/sctp/associola.c                     |  9
-rw-r--r--  net/sctp/chunk.c                         |  6
-rw-r--r--  net/sctp/sm_make_chunk.c                 |  3
-rw-r--r--  net/sctp/socket.c                        |  3
-rw-r--r--  net/wireless/mlme.c                      |  4
-rw-r--r--  net/wireless/nl80211.c                   |  1
-rw-r--r--  net/wireless/sme.c                       |  8
-rw-r--r--  net/wireless/util.c                      |  2
-rw-r--r--  net/x25/af_x25.c                         | 18
-rw-r--r--  net/x25/x25_in.c                         |  9
35 files changed, 278 insertions(+), 176 deletions(-)
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index c89c22c49015..25001913d03b 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -28,12 +28,13 @@ static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
 	return ret;
 }
 
-static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
+static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
+			u32 *time)
 {
 	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
 	enum bpf_cgroup_storage_type stype;
 	u64 time_start, time_spent = 0;
-	u32 ret = 0, i;
+	u32 i;
 
 	for_each_cgroup_storage_type(stype) {
 		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
@@ -49,7 +50,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
 		repeat = 1;
 	time_start = ktime_get_ns();
 	for (i = 0; i < repeat; i++) {
-		ret = bpf_test_run_one(prog, ctx, storage);
+		*ret = bpf_test_run_one(prog, ctx, storage);
 		if (need_resched()) {
 			if (signal_pending(current))
 				break;
@@ -65,7 +66,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
 	for_each_cgroup_storage_type(stype)
 		bpf_cgroup_storage_free(storage[stype]);
 
-	return ret;
+	return 0;
 }
 
 static int bpf_test_finish(const union bpf_attr *kattr,
@@ -165,7 +166,12 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
 	__skb_push(skb, hh_len);
 	if (is_direct_pkt_access)
 		bpf_compute_data_pointers(skb);
-	retval = bpf_test_run(prog, skb, repeat, &duration);
+	ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
+	if (ret) {
+		kfree_skb(skb);
+		kfree(sk);
+		return ret;
+	}
 	if (!is_l2) {
 		if (skb_headroom(skb) < hh_len) {
 			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
@@ -212,11 +218,14 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
 	xdp.rxq = &rxqueue->xdp_rxq;
 
-	retval = bpf_test_run(prog, &xdp, repeat, &duration);
+	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
+	if (ret)
+		goto out;
 	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
 	    xdp.data_end != xdp.data + size)
 		size = xdp.data_end - xdp.data;
 	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
+out:
 	kfree(data);
 	return ret;
 }
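
The change above turns bpf_test_run()'s u32 program result into an out parameter so that the int return value can carry allocation failures from the run loop, which the old signature had no way to report. A minimal userspace sketch of the same calling convention (illustrative only, not the kernel API):

    #include <stdio.h>

    /* run_prog() stands in for bpf_test_run(): the int return carries
     * setup/allocation errors, *retval carries the program's own result. */
    static int run_prog(int repeat, unsigned int *retval)
    {
            if (repeat < 0)
                    return -22;     /* e.g. -EINVAL: *retval is not valid */
            *retval = 42;           /* the program's return code */
            return 0;
    }

    int main(void)
    {
            unsigned int retval;
            int err = run_prog(1, &retval);

            if (err)
                    fprintf(stderr, "run failed: %d\n", err);
            else
                    printf("prog returned %u\n", retval);
            return 0;
    }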
diff --git a/net/core/dev.c b/net/core/dev.c
index ddc551f24ba2..722d50dbf8a4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2175,6 +2175,20 @@ static bool remove_xps_queue_cpu(struct net_device *dev,
 	return active;
 }
 
+static void reset_xps_maps(struct net_device *dev,
+			   struct xps_dev_maps *dev_maps,
+			   bool is_rxqs_map)
+{
+	if (is_rxqs_map) {
+		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
+		RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
+	} else {
+		RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
+	}
+	static_key_slow_dec_cpuslocked(&xps_needed);
+	kfree_rcu(dev_maps, rcu);
+}
+
 static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
 			   struct xps_dev_maps *dev_maps, unsigned int nr_ids,
 			   u16 offset, u16 count, bool is_rxqs_map)
@@ -2186,18 +2200,15 @@ static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
 	     j < nr_ids;)
 		active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
 					       count);
-	if (!active) {
-		if (is_rxqs_map) {
-			RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
-		} else {
-			RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
+	if (!active)
+		reset_xps_maps(dev, dev_maps, is_rxqs_map);
 
-			for (i = offset + (count - 1); count--; i--)
-				netdev_queue_numa_node_write(
-					netdev_get_tx_queue(dev, i),
-					NUMA_NO_NODE);
+	if (!is_rxqs_map) {
+		for (i = offset + (count - 1); count--; i--) {
+			netdev_queue_numa_node_write(
+				netdev_get_tx_queue(dev, i),
+				NUMA_NO_NODE);
 		}
-		kfree_rcu(dev_maps, rcu);
 	}
 }
 
@@ -2234,10 +2245,6 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
 			   false);
 
 out_no_maps:
-	if (static_key_enabled(&xps_rxqs_needed))
-		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
-
-	static_key_slow_dec_cpuslocked(&xps_needed);
 	mutex_unlock(&xps_map_mutex);
 	cpus_read_unlock();
 }
@@ -2355,9 +2362,12 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
 	if (!new_dev_maps)
 		goto out_no_new_maps;
 
-	static_key_slow_inc_cpuslocked(&xps_needed);
-	if (is_rxqs_map)
-		static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
+	if (!dev_maps) {
+		/* Increment static keys at most once per type */
+		static_key_slow_inc_cpuslocked(&xps_needed);
+		if (is_rxqs_map)
+			static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
+	}
 
 	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
 	     j < nr_ids;) {
@@ -2455,13 +2465,8 @@ out_no_new_maps:
 	}
 
 	/* free map if not active */
-	if (!active) {
-		if (is_rxqs_map)
-			RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
-		else
-			RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
-		kfree_rcu(dev_maps, rcu);
-	}
+	if (!active)
+		reset_xps_maps(dev, dev_maps, is_rxqs_map);
 
 out_no_maps:
 	mutex_unlock(&xps_map_mutex);
@@ -5009,7 +5014,7 @@ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
 		struct net_device *orig_dev = skb->dev;
 		struct packet_type *pt_prev = NULL;
 
-		list_del(&skb->list);
+		skb_list_del_init(skb);
 		__netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
 		if (!pt_prev)
 			continue;
@@ -5165,7 +5170,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
 	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
 		net_timestamp_check(netdev_tstamp_prequeue, skb);
-		list_del(&skb->list);
+		skb_list_del_init(skb);
 		if (!skb_defer_rx_timestamp(skb))
 			list_add_tail(&skb->list, &sublist);
 	}
@@ -5176,7 +5181,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
 		rcu_read_lock();
 		list_for_each_entry_safe(skb, next, head, list) {
 			xdp_prog = rcu_dereference(skb->dev->xdp_prog);
-			list_del(&skb->list);
+			skb_list_del_init(skb);
 			if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
 				list_add_tail(&skb->list, &sublist);
 		}
@@ -5195,7 +5200,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
 
 			if (cpu >= 0) {
 				/* Will be handled, remove from list */
-				list_del(&skb->list);
+				skb_list_del_init(skb);
 				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 			}
 		}
@@ -6204,8 +6209,8 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
 	napi->skb = NULL;
 	napi->poll = poll;
 	if (weight > NAPI_POLL_WEIGHT)
-		pr_err_once("netif_napi_add() called with weight %d on device %s\n",
-			    weight, dev->name);
+		netdev_err_once(dev, "%s() called with weight %d\n", __func__,
+				weight);
 	napi->weight = weight;
 	list_add(&napi->dev_list, &dev->napi_list);
 	napi->dev = dev;
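
Several hunks above (and the matching ones in ip_input.c and ip6_input.c below) replace list_del(&skb->list) with skb_list_del_init(skb) when unlinking an skb from an RX sublist. list_del() leaves the entry's next/prev set to the list poison values, and skb->next/skb->prev alias those fields, so a consumer that later inspects skb->next would dereference poison. The helper, roughly as it looked at the time of this merge (copied here for orientation; see include/linux/skbuff.h for the authoritative definition):

    static inline void skb_list_del_init(struct sk_buff *skb)
    {
            __list_del_entry(&skb->list);   /* unlink without poisoning */
            skb_mark_not_on_list(skb);      /* leaves skb->next == NULL */
    }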
diff --git a/net/core/filter.c b/net/core/filter.c
index 9a1327eb25fa..8d2c629501e2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4890,22 +4890,23 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
 	struct net *net;
 
 	family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6;
-	if (unlikely(family == AF_UNSPEC || netns_id > U32_MAX || flags))
+	if (unlikely(family == AF_UNSPEC || flags ||
+		     !((s32)netns_id < 0 || netns_id <= S32_MAX)))
 		goto out;
 
 	if (skb->dev)
 		caller_net = dev_net(skb->dev);
 	else
 		caller_net = sock_net(skb->sk);
-	if (netns_id) {
+	if ((s32)netns_id < 0) {
+		net = caller_net;
+		sk = sk_lookup(net, tuple, skb, family, proto);
+	} else {
 		net = get_net_ns_by_id(caller_net, netns_id);
 		if (unlikely(!net))
 			goto out;
 		sk = sk_lookup(net, tuple, skb, family, proto);
 		put_net(net);
-	} else {
-		net = caller_net;
-		sk = sk_lookup(net, tuple, skb, family, proto);
 	}
 
 	if (sk)
@@ -5435,8 +5436,8 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
 		if (size != size_default)
 			return false;
 		break;
-	case bpf_ctx_range(struct __sk_buff, flow_keys):
-		if (size != sizeof(struct bpf_flow_keys *))
+	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
+		if (size != sizeof(__u64))
 			return false;
 		break;
 	default:
@@ -5464,7 +5465,7 @@ static bool sk_filter_is_valid_access(int off, int size,
 	case bpf_ctx_range(struct __sk_buff, data):
 	case bpf_ctx_range(struct __sk_buff, data_meta):
 	case bpf_ctx_range(struct __sk_buff, data_end):
-	case bpf_ctx_range(struct __sk_buff, flow_keys):
+	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
 	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
 		return false;
 	}
@@ -5489,7 +5490,7 @@ static bool cg_skb_is_valid_access(int off, int size,
 	switch (off) {
 	case bpf_ctx_range(struct __sk_buff, tc_classid):
 	case bpf_ctx_range(struct __sk_buff, data_meta):
-	case bpf_ctx_range(struct __sk_buff, flow_keys):
+	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
 		return false;
 	case bpf_ctx_range(struct __sk_buff, data):
 	case bpf_ctx_range(struct __sk_buff, data_end):
@@ -5530,7 +5531,7 @@ static bool lwt_is_valid_access(int off, int size,
 	case bpf_ctx_range(struct __sk_buff, tc_classid):
 	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
 	case bpf_ctx_range(struct __sk_buff, data_meta):
-	case bpf_ctx_range(struct __sk_buff, flow_keys):
+	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
 		return false;
 	}
 
@@ -5756,7 +5757,7 @@ static bool tc_cls_act_is_valid_access(int off, int size,
 	case bpf_ctx_range(struct __sk_buff, data_end):
 		info->reg_type = PTR_TO_PACKET_END;
 		break;
-	case bpf_ctx_range(struct __sk_buff, flow_keys):
+	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
 	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
 		return false;
 	}
@@ -5958,7 +5959,7 @@ static bool sk_skb_is_valid_access(int off, int size,
 	switch (off) {
 	case bpf_ctx_range(struct __sk_buff, tc_classid):
 	case bpf_ctx_range(struct __sk_buff, data_meta):
-	case bpf_ctx_range(struct __sk_buff, flow_keys):
+	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
 		return false;
 	}
 
@@ -6039,7 +6040,7 @@ static bool flow_dissector_is_valid_access(int off, int size,
 	case bpf_ctx_range(struct __sk_buff, data_end):
 		info->reg_type = PTR_TO_PACKET_END;
 		break;
-	case bpf_ctx_range(struct __sk_buff, flow_keys):
+	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
 		info->reg_type = PTR_TO_FLOW_KEYS;
 		break;
 	case bpf_ctx_range(struct __sk_buff, tc_classid):
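
The bpf_ctx_range_ptr() conversions above are the "BPF exported data structure with pointers on 32-bit userland" item from the merge message: __sk_buff is UAPI, and its pointer-shaped fields occupy 64-bit slots regardless of the userland ABI, so validating an access width against sizeof(struct bpf_flow_keys *) breaks on 32-bit builds where pointers are 4 bytes. A quick standalone illustration of the mismatch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* on a 32-bit build this prints 4 vs 8: checking a context access
             * against the host pointer size would reject a correct 8-byte
             * access to the 64-bit slot */
            printf("host pointer: %zu bytes, ctx slot: %zu bytes\n",
                   sizeof(void *), sizeof(uint64_t));
            return 0;
    }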
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 33d9227a8b80..7819f7804eeb 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3800,6 +3800,9 @@ int ndo_dflt_fdb_dump(struct sk_buff *skb,
 {
 	int err;
 
+	if (dev->type != ARPHRD_ETHER)
+		return -EINVAL;
+
 	netif_addr_lock_bh(dev);
 	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
 	if (err)
diff --git a/net/dsa/master.c b/net/dsa/master.c
index c90ee3227dea..5e8c9bef78bd 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -158,8 +158,31 @@ static void dsa_master_ethtool_teardown(struct net_device *dev)
 	cpu_dp->orig_ethtool_ops = NULL;
 }
 
+static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
+			    char *buf)
+{
+	struct net_device *dev = to_net_dev(d);
+	struct dsa_port *cpu_dp = dev->dsa_ptr;
+
+	return sprintf(buf, "%s\n",
+		       dsa_tag_protocol_to_str(cpu_dp->tag_ops));
+}
+static DEVICE_ATTR_RO(tagging);
+
+static struct attribute *dsa_slave_attrs[] = {
+	&dev_attr_tagging.attr,
+	NULL
+};
+
+static const struct attribute_group dsa_group = {
+	.name = "dsa",
+	.attrs = dsa_slave_attrs,
+};
+
 int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
 {
+	int ret;
+
 	/* If we use a tagging format that doesn't have an ethertype
 	 * field, make sure that all packets from this point on get
 	 * sent to the tag format's receive function.
@@ -168,11 +191,20 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
 
 	dev->dsa_ptr = cpu_dp;
 
-	return dsa_master_ethtool_setup(dev);
+	ret = dsa_master_ethtool_setup(dev);
+	if (ret)
+		return ret;
+
+	ret = sysfs_create_group(&dev->dev.kobj, &dsa_group);
+	if (ret)
+		dsa_master_ethtool_teardown(dev);
+
+	return ret;
 }
 
 void dsa_master_teardown(struct net_device *dev)
 {
+	sysfs_remove_group(&dev->dev.kobj, &dsa_group);
 	dsa_master_ethtool_teardown(dev);
 
 	dev->dsa_ptr = NULL;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 7d0c19e7edcf..aec78f5aca72 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1058,27 +1058,6 @@ static struct device_type dsa_type = {
 	.name = "dsa",
 };
 
-static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
-			    char *buf)
-{
-	struct net_device *dev = to_net_dev(d);
-	struct dsa_port *dp = dsa_slave_to_port(dev);
-
-	return sprintf(buf, "%s\n",
-		       dsa_tag_protocol_to_str(dp->cpu_dp->tag_ops));
-}
-static DEVICE_ATTR_RO(tagging);
-
-static struct attribute *dsa_slave_attrs[] = {
-	&dev_attr_tagging.attr,
-	NULL
-};
-
-static const struct attribute_group dsa_group = {
-	.name = "dsa",
-	.attrs = dsa_slave_attrs,
-};
-
 static void dsa_slave_phylink_validate(struct net_device *dev,
 				       unsigned long *supported,
 				       struct phylink_link_state *state)
@@ -1374,14 +1353,8 @@ int dsa_slave_create(struct dsa_port *port)
 		goto out_phy;
 	}
 
-	ret = sysfs_create_group(&slave_dev->dev.kobj, &dsa_group);
-	if (ret)
-		goto out_unreg;
-
 	return 0;
 
-out_unreg:
-	unregister_netdev(slave_dev);
 out_phy:
 	rtnl_lock();
 	phylink_disconnect_phy(p->dp->pl);
@@ -1405,7 +1378,6 @@ void dsa_slave_destroy(struct net_device *slave_dev)
 	rtnl_unlock();
 
 	dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
-	sysfs_remove_group(&slave_dev->dev.kobj, &dsa_group);
 	unregister_netdev(slave_dev);
 	phylink_destroy(dp->pl);
 	free_percpu(p->stats64);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index d6ee343fdb86..aa0b22697998 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -515,6 +515,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
 	struct rb_node *rbn;
 	int len;
 	int ihlen;
+	int delta;
 	int err;
 	u8 ecn;
 
@@ -556,10 +557,16 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
 	if (len > 65535)
 		goto out_oversize;
 
+	delta = - head->truesize;
+
 	/* Head of list must not be cloned. */
 	if (skb_unclone(head, GFP_ATOMIC))
 		goto out_nomem;
 
+	delta += head->truesize;
+	if (delta)
+		add_frag_mem_limit(qp->q.net, delta);
+
 	/* If the first fragment is fragmented itself, we split
 	 * it to two chunks: the first with data and paged part
 	 * and the second, holding only fragments. */
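
The delta bookkeeping above (repeated in the two IPv6 reassembly files below) exists because skb_unclone() may reallocate the head skb and change head->truesize after the fragment has already been charged to the per-netns memory limit; without re-charging the difference, the counter never returns to zero and namespace teardown can hang on it. A standalone sketch of the idiom, with stand-in names for the kernel helpers:

    #include <stdio.h>

    static long frag_mem;                    /* stands in for the netns counter */
    static void add_frag_mem_limit(long d) { frag_mem += d; }

    static long unclone(long truesize)
    {
            return truesize + 256;           /* pretend the copy grew, as skb_unclone() may */
    }

    int main(void)
    {
            long truesize = 2048;

            add_frag_mem_limit(truesize);    /* charged when the fragment was queued */

            long delta = -truesize;          /* snapshot before the size may change */
            truesize = unclone(truesize);
            delta += truesize;               /* only the difference is re-charged */
            if (delta)
                    add_frag_mem_limit(delta);

            printf("charged %ld for an actual %ld\n", frag_mem, truesize);
            return 0;
    }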
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 35a786c0aaa0..e609b08c9df4 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -547,7 +547,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
 	list_for_each_entry_safe(skb, next, head, list) {
 		struct dst_entry *dst;
 
-		list_del(&skb->list);
+		skb_list_del_init(skb);
 		/* if ingress device is enslaved to an L3 master device pass the
 		 * skb to its handler for processing
 		 */
@@ -594,7 +594,7 @@ void ip_list_rcv(struct list_head *head, struct packet_type *pt,
 		struct net_device *dev = skb->dev;
 		struct net *net = dev_net(dev);
 
-		list_del(&skb->list);
+		skb_list_del_init(skb);
 		skb = ip_rcv_core(skb, net);
 		if (skb == NULL)
 			continue;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3f510cad0b3e..d1676d8a6ed7 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1904,7 +1904,9 @@ static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
  * This algorithm is from John Heffner.
  */
 static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
-				 bool *is_cwnd_limited, u32 max_segs)
+				 bool *is_cwnd_limited,
+				 bool *is_rwnd_limited,
+				 u32 max_segs)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 age, send_win, cong_win, limit, in_flight;
@@ -1912,9 +1914,6 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	struct sk_buff *head;
 	int win_divisor;
 
-	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
-		goto send_now;
-
 	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
 		goto send_now;
 
@@ -1973,10 +1972,27 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	if (age < (tp->srtt_us >> 4))
 		goto send_now;
 
-	/* Ok, it looks like it is advisable to defer. */
+	/* Ok, it looks like it is advisable to defer.
+	 * Three cases are tracked :
+	 * 1) We are cwnd-limited
+	 * 2) We are rwnd-limited
+	 * 3) We are application limited.
+	 */
+	if (cong_win < send_win) {
+		if (cong_win <= skb->len) {
+			*is_cwnd_limited = true;
+			return true;
+		}
+	} else {
+		if (send_win <= skb->len) {
+			*is_rwnd_limited = true;
+			return true;
+		}
+	}
 
-	if (cong_win < send_win && cong_win <= skb->len)
-		*is_cwnd_limited = true;
+	/* If this packet won't get more data, do not wait. */
+	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+		goto send_now;
 
 	return true;
 
@@ -2356,7 +2372,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		} else {
 			if (!push_one &&
 			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
-						 max_segs))
+						 &is_rwnd_limited, max_segs))
 				break;
 		}
 
@@ -2494,15 +2510,18 @@ void tcp_send_loss_probe(struct sock *sk)
 		goto rearm_timer;
 	}
 	skb = skb_rb_last(&sk->tcp_rtx_queue);
+	if (unlikely(!skb)) {
+		WARN_ONCE(tp->packets_out,
+			  "invalid inflight: %u state %u cwnd %u mss %d\n",
+			  tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
+		inet_csk(sk)->icsk_pending = 0;
+		return;
+	}
 
 	/* At most one outstanding TLP retransmission. */
 	if (tp->tlp_high_seq)
 		goto rearm_timer;
 
-	/* Retransmit last segment. */
-	if (WARN_ON(!skb))
-		goto rearm_timer;
-
 	if (skb_still_in_host_queue(sk, skb))
 		goto rearm_timer;
 
@@ -2920,7 +2939,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 		TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
 		trace_tcp_retransmit_skb(sk, skb);
 	} else if (err != -EBUSY) {
-		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
+		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
 	}
 	return err;
 }
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 091c53925e4d..f87dbc78b6bc 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -378,7 +378,7 @@ static void tcp_probe_timer(struct sock *sk)
 		return;
 	}
 
-	if (icsk->icsk_probes_out > max_probes) {
+	if (icsk->icsk_probes_out >= max_probes) {
 abort:		tcp_write_err(sk);
 	} else {
 		/* Only send another probe if we didn't close things up. */
@@ -484,11 +484,12 @@ void tcp_retransmit_timer(struct sock *sk)
 		goto out_reset_timer;
 	}
 
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
 	if (tcp_write_timeout(sk))
 		goto out;
 
 	if (icsk->icsk_retransmits == 0) {
-		int mib_idx;
+		int mib_idx = 0;
 
 		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
 			if (tcp_is_sack(tp))
@@ -503,10 +504,9 @@ void tcp_retransmit_timer(struct sock *sk)
 				mib_idx = LINUX_MIB_TCPSACKFAILURES;
 			else
 				mib_idx = LINUX_MIB_TCPRENOFAILURES;
-		} else {
-			mib_idx = LINUX_MIB_TCPTIMEOUTS;
 		}
-		__NET_INC_STATS(sock_net(sk), mib_idx);
+		if (mib_idx)
+			__NET_INC_STATS(sock_net(sk), mib_idx);
 	}
 
 	tcp_enter_loss(sk);
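
The first hunk is the off-by-one called out in the merge message: icsk_probes_out already counts probes sent, so aborting only when it exceeds max_probes lets one extra zero-window probe out before the connection is torn down. A toy loop showing the boundary (illustrative only, not the kernel logic verbatim):

    #include <stdio.h>

    int main(void)
    {
            int max_probes = 3, sent;

            for (sent = 0; !(sent > max_probes); sent++)    /* old test: '>' */
                    ;
            printf("'>'  allows %d probes\n", sent);        /* prints 4 */

            for (sent = 0; !(sent >= max_probes); sent++)   /* fixed test: '>=' */
                    ;
            printf("'>=' allows %d probes\n", sent);        /* prints 3 */
            return 0;
    }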
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 96577e742afd..c1d85830c906 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -95,7 +95,7 @@ static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
 	list_for_each_entry_safe(skb, next, head, list) {
 		struct dst_entry *dst;
 
-		list_del(&skb->list);
+		skb_list_del_init(skb);
 		/* if ingress device is enslaved to an L3 master device pass the
 		 * skb to its handler for processing
 		 */
@@ -296,7 +296,7 @@ void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
 		struct net_device *dev = skb->dev;
 		struct net *net = dev_net(dev);
 
-		list_del(&skb->list);
+		skb_list_del_init(skb);
 		skb = ip6_rcv_core(skb, dev, net);
 		if (skb == NULL)
 			continue;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 827a3f5ff3bb..fcd3c66ded16 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -195,37 +195,37 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 	const struct ipv6_pinfo *np = inet6_sk(sk);
 	struct in6_addr *first_hop = &fl6->daddr;
 	struct dst_entry *dst = skb_dst(skb);
+	unsigned int head_room;
 	struct ipv6hdr *hdr;
 	u8 proto = fl6->flowi6_proto;
 	int seg_len = skb->len;
 	int hlimit = -1;
 	u32 mtu;
 
-	if (opt) {
-		unsigned int head_room;
+	head_room = sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
+	if (opt)
+		head_room += opt->opt_nflen + opt->opt_flen;
 
-		/* First: exthdrs may take lots of space (~8K for now)
-		   MAX_HEADER is not enough.
-		 */
-		head_room = opt->opt_nflen + opt->opt_flen;
-		seg_len += head_room;
-		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
-
-		if (skb_headroom(skb) < head_room) {
-			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
-			if (!skb2) {
-				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
-					      IPSTATS_MIB_OUTDISCARDS);
-				kfree_skb(skb);
-				return -ENOBUFS;
-			}
-			if (skb->sk)
-				skb_set_owner_w(skb2, skb->sk);
-			consume_skb(skb);
-			skb = skb2;
+	if (unlikely(skb_headroom(skb) < head_room)) {
+		struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
+		if (!skb2) {
+			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+				      IPSTATS_MIB_OUTDISCARDS);
+			kfree_skb(skb);
+			return -ENOBUFS;
 		}
+		if (skb->sk)
+			skb_set_owner_w(skb2, skb->sk);
+		consume_skb(skb);
+		skb = skb2;
+	}
+
+	if (opt) {
+		seg_len += opt->opt_nflen + opt->opt_flen;
+
 		if (opt->opt_flen)
 			ipv6_push_frag_opts(skb, opt, &proto);
+
 		if (opt->opt_nflen)
 			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop,
 					     &fl6->saddr);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index d219979c3e52..181da2c40f9a 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -341,7 +341,7 @@ static bool
 nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
 {
 	struct sk_buff *fp, *head = fq->q.fragments;
-	int payload_len;
+	int payload_len, delta;
 	u8 ecn;
 
 	inet_frag_kill(&fq->q);
@@ -363,10 +363,16 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
 		return false;
 	}
 
+	delta = - head->truesize;
+
 	/* Head of list must not be cloned. */
 	if (skb_unclone(head, GFP_ATOMIC))
 		return false;
 
+	delta += head->truesize;
+	if (delta)
+		add_frag_mem_limit(fq->q.net, delta);
+
 	/* If the first fragment is fragmented itself, we split
 	 * it to two chunks: the first with data and paged part
 	 * and the second, holding only fragments. */
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 5c3c92713096..aa26c45486d9 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -281,7 +281,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 {
 	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
 	struct sk_buff *fp, *head = fq->q.fragments;
-	int payload_len;
+	int payload_len, delta;
 	unsigned int nhoff;
 	int sum_truesize;
 	u8 ecn;
@@ -322,10 +322,16 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 	if (payload_len > IPV6_MAXPLEN)
 		goto out_oversize;
 
+	delta = - head->truesize;
+
 	/* Head of list must not be cloned. */
 	if (skb_unclone(head, GFP_ATOMIC))
 		goto out_oom;
 
+	delta += head->truesize;
+	if (delta)
+		add_frag_mem_limit(fq->q.net, delta);
+
 	/* If the first fragment is fragmented itself, we split
 	 * it to two chunks: the first with data and paged part
 	 * and the second, holding only fragments. */
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index a8854dd3e9c5..8181ee7e1e27 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -347,6 +347,7 @@ static int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 		struct ipv6hdr *hdr = ipv6_hdr(skb);
 		struct flowi6 fl6;
 
+		memset(&fl6, 0, sizeof(fl6));
 		fl6.daddr = hdr->daddr;
 		fl6.saddr = hdr->saddr;
 		fl6.flowlabel = ip6_flowinfo(hdr);
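
This one-liner is the "partially initialized flow key" item from the merge message: fl6 lives on the stack and only some fields were set before it was handed to ip6_route_output(), so the lookup consumed stack garbage in the rest. The general shape of the bug class and its fix, as a standalone sketch with a hypothetical key struct standing in for struct flowi6:

    #include <string.h>

    struct flow_key {                 /* hypothetical stand-in for struct flowi6 */
            unsigned char daddr[16];
            unsigned char saddr[16];
            unsigned int  flowlabel;
            int           oif;        /* fields like this were left indeterminate */
    };

    static void build_key(struct flow_key *fl, const unsigned char *dst,
                          const unsigned char *src, unsigned int flowlabel)
    {
            memset(fl, 0, sizeof(*fl));   /* zero everything first */
            memcpy(fl->daddr, dst, sizeof(fl->daddr));
            memcpy(fl->saddr, src, sizeof(fl->saddr));
            fl->flowlabel = flowlabel;
    }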
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 51622333d460..818aa0060349 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2891,7 +2891,7 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
 
 	len = beacon->head_len + beacon->tail_len + beacon->beacon_ies_len +
 	      beacon->proberesp_ies_len + beacon->assocresp_ies_len +
-	      beacon->probe_resp_len;
+	      beacon->probe_resp_len + beacon->lci_len + beacon->civicloc_len;
 
 	new_beacon = kzalloc(sizeof(*new_beacon) + len, GFP_KERNEL);
 	if (!new_beacon)
@@ -2934,8 +2934,9 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
 		memcpy(pos, beacon->probe_resp, beacon->probe_resp_len);
 		pos += beacon->probe_resp_len;
 	}
-	if (beacon->ftm_responder)
-		new_beacon->ftm_responder = beacon->ftm_responder;
+
+	/* might copy -1, meaning no changes requested */
+	new_beacon->ftm_responder = beacon->ftm_responder;
 	if (beacon->lci) {
 		new_beacon->lci_len = beacon->lci_len;
 		new_beacon->lci = pos;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 5836ddeac9e3..5f3c81e705c7 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1015,6 +1015,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 	if (local->open_count == 0)
 		ieee80211_clear_tx_pending(local);
 
+	sdata->vif.bss_conf.beacon_int = 0;
+
 	/*
 	 * If the interface goes down while suspended, presumably because
 	 * the device was unplugged and that happens before our resume,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index d2bc8d57c87e..bcf5ffc1567a 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2766,6 +2766,7 @@ static bool ieee80211_mark_sta_auth(struct ieee80211_sub_if_data *sdata,
 {
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	struct sta_info *sta;
+	bool result = true;
 
 	sdata_info(sdata, "authenticated\n");
 	ifmgd->auth_data->done = true;
@@ -2778,15 +2779,18 @@ static bool ieee80211_mark_sta_auth(struct ieee80211_sub_if_data *sdata,
 	sta = sta_info_get(sdata, bssid);
 	if (!sta) {
 		WARN_ONCE(1, "%s: STA %pM not found", sdata->name, bssid);
-		return false;
+		result = false;
+		goto out;
 	}
 	if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) {
 		sdata_info(sdata, "failed moving %pM to auth\n", bssid);
-		return false;
+		result = false;
+		goto out;
 	}
-	mutex_unlock(&sdata->local->sta_mtx);
 
-	return true;
+out:
+	mutex_unlock(&sdata->local->sta_mtx);
+	return result;
 }
 
 static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 3bd3b5769797..428f7ad5f9b5 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1403,6 +1403,7 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
 		return RX_CONTINUE;
 
 	if (ieee80211_is_ctl(hdr->frame_control) ||
+	    ieee80211_is_nullfunc(hdr->frame_control) ||
 	    ieee80211_is_qos_nullfunc(hdr->frame_control) ||
 	    is_multicast_ether_addr(hdr->addr1))
 		return RX_CONTINUE;
@@ -3063,7 +3064,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
 			cfg80211_sta_opmode_change_notify(sdata->dev,
 							  rx->sta->addr,
 							  &sta_opmode,
-							  GFP_KERNEL);
+							  GFP_ATOMIC);
 			goto handled;
 		}
 	case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
@@ -3100,7 +3101,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
 			cfg80211_sta_opmode_change_notify(sdata->dev,
 							  rx->sta->addr,
 							  &sta_opmode,
-							  GFP_KERNEL);
+							  GFP_ATOMIC);
 			goto handled;
 		}
 	default:
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index aa4afbf0abaf..a794ca729000 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -964,6 +964,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
 			/* Track when last TDLS packet was ACKed */
 			if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
 				sta->status_stats.last_tdls_pkt_time = jiffies;
+		} else if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
+			return;
 		} else {
 			ieee80211_lost_packet(sta, info);
 		}
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index e0ccee23fbcd..1f536ba573b4 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -439,8 +439,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
 	if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL))
 		info->hw_queue = tx->sdata->vif.cab_queue;
 
-	/* no stations in PS mode */
-	if (!atomic_read(&ps->num_sta_ps))
+	/* no stations in PS mode and no buffered packets */
+	if (!atomic_read(&ps->num_sta_ps) && skb_queue_empty(&ps->bc_buf))
 		return TX_CONTINUE;
 
 	info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index a4660c48ff01..cd94f925495a 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -1166,7 +1166,7 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
 					 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
 		if (err) {
 			net_warn_ratelimited("openvswitch: zone: %u "
-					     "execeeds conntrack limit\n",
+					     "exceeds conntrack limit\n",
 					     info->zone.id);
 			return err;
 		}
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 37c9b8f0e10f..ec8ec55e0fe8 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -85,7 +85,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
 			   int ovr, int bind, bool rtnl_held,
 			   struct netlink_ext_ack *extack)
 {
-	int ret = 0, err;
+	int ret = 0, tcfp_result = TC_ACT_OK, err, size;
 	struct nlattr *tb[TCA_POLICE_MAX + 1];
 	struct tc_police *parm;
 	struct tcf_police *police;
@@ -93,7 +93,6 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
 	struct tc_action_net *tn = net_generic(net, police_net_id);
 	struct tcf_police_params *new;
 	bool exists = false;
-	int size;
 
 	if (nla == NULL)
 		return -EINVAL;
@@ -160,6 +159,16 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
 		goto failure;
 	}
 
+	if (tb[TCA_POLICE_RESULT]) {
+		tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
+		if (TC_ACT_EXT_CMP(tcfp_result, TC_ACT_GOTO_CHAIN)) {
+			NL_SET_ERR_MSG(extack,
+				       "goto chain not allowed on fallback");
+			err = -EINVAL;
+			goto failure;
+		}
+	}
+
 	new = kzalloc(sizeof(*new), GFP_KERNEL);
 	if (unlikely(!new)) {
 		err = -ENOMEM;
@@ -167,6 +176,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
 	}
 
 	/* No failure allowed after this point */
+	new->tcfp_result = tcfp_result;
 	new->tcfp_mtu = parm->mtu;
 	if (!new->tcfp_mtu) {
 		new->tcfp_mtu = ~0;
@@ -196,16 +206,6 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
 	if (tb[TCA_POLICE_AVRATE])
 		new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);
 
-	if (tb[TCA_POLICE_RESULT]) {
-		new->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
-		if (TC_ACT_EXT_CMP(new->tcfp_result, TC_ACT_GOTO_CHAIN)) {
-			NL_SET_ERR_MSG(extack,
-				       "goto chain not allowed on fallback");
-			err = -EINVAL;
-			goto failure;
-		}
-	}
-
 	spin_lock_bh(&police->tcf_lock);
 	spin_lock_bh(&police->tcfp_lock);
 	police->tcfp_t_c = ktime_get_ns();
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index c6c327874abc..71312d7bd8f4 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1238,18 +1238,16 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 	if (err)
 		goto errout_idr;
 
-	if (!tc_skip_sw(fnew->flags)) {
-		if (!fold && fl_lookup(fnew->mask, &fnew->mkey)) {
-			err = -EEXIST;
-			goto errout_mask;
-		}
-
-		err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
-					     fnew->mask->filter_ht_params);
-		if (err)
-			goto errout_mask;
+	if (!fold && fl_lookup(fnew->mask, &fnew->mkey)) {
+		err = -EEXIST;
+		goto errout_mask;
 	}
 
+	err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
+				     fnew->mask->filter_ht_params);
+	if (err)
+		goto errout_mask;
+
 	if (!tc_skip_hw(fnew->flags)) {
 		err = fl_hw_replace_filter(tp, fnew, extack);
 		if (err)
@@ -1303,9 +1301,8 @@ static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
 	struct cls_fl_head *head = rtnl_dereference(tp->root);
 	struct cls_fl_filter *f = arg;
 
-	if (!tc_skip_sw(f->flags))
-		rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
-				       f->mask->filter_ht_params);
+	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
+			       f->mask->filter_ht_params);
 	__fl_delete(tp, f, extack);
 	*last = list_empty(&head->masks);
 	return 0;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 2c38e3d07924..22cd46a60057 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -431,6 +431,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	int count = 1;
 	int rc = NET_XMIT_SUCCESS;
 
+	/* Do not fool qdisc_drop_all() */
+	skb->prev = NULL;
+
 	/* Random duplication */
 	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
 		++count;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 6a28b96e779e..914750b819b2 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -118,9 +118,6 @@ static struct sctp_association *sctp_association_init(
 	asoc->flowlabel = sp->flowlabel;
 	asoc->dscp = sp->dscp;
 
-	/* Initialize default path MTU. */
-	asoc->pathmtu = sp->pathmtu;
-
 	/* Set association default SACK delay */
 	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
 	asoc->sackfreq = sp->sackfreq;
@@ -252,6 +249,10 @@ static struct sctp_association *sctp_association_init(
 			     0, gfp))
 		goto fail_init;
 
+	/* Initialize default path MTU. */
+	asoc->pathmtu = sp->pathmtu;
+	sctp_assoc_update_frag_point(asoc);
+
 	/* Assume that peer would support both address types unless we are
 	 * told otherwise.
 	 */
@@ -434,7 +435,7 @@ static void sctp_association_destroy(struct sctp_association *asoc)
 
 	WARN_ON(atomic_read(&asoc->rmem_alloc));
 
-	kfree(asoc);
+	kfree_rcu(asoc, rcu);
 	SCTP_DBG_OBJCNT_DEC(assoc);
 }
 
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index ce8087846f05..d2048de86e7c 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -191,6 +191,12 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 	 * the packet
 	 */
 	max_data = asoc->frag_point;
+	if (unlikely(!max_data)) {
+		max_data = sctp_min_frag_point(sctp_sk(asoc->base.sk),
+					       sctp_datachk_len(&asoc->stream));
+		pr_warn_ratelimited("%s: asoc:%p frag_point is zero, forcing max_data to default minimum (%Zu)",
+				    __func__, asoc, max_data);
+	}
 
 	/* If the the peer requested that we authenticate DATA chunks
 	 * we need to account for bundling of the AUTH chunks along with
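
The guard above is the "frag_point sanity check" commit: a zero frag_point would reach the chunking loop as max_data == 0 and prevent any forward progress, so it is clamped to a protocol minimum before use. Sketch of the clamp, with a stand-in for the real sctp_min_frag_point() helper:

    #include <stdio.h>

    static unsigned int min_frag_point(void) { return 512; }   /* stand-in value, not the real helper */

    int main(void)
    {
            unsigned int frag_point = 0;       /* mis-set association */
            unsigned int max_data = frag_point;

            if (max_data == 0) {               /* force a usable minimum */
                    max_data = min_frag_point();
                    fprintf(stderr, "frag_point is zero, forcing %u\n", max_data);
            }
            printf("chunking at %u bytes\n", max_data);
            return 0;
    }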
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 4a4fd1971255..f4ac6c592e13 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2462,6 +2462,9 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
 			     asoc->c.sinit_max_instreams, gfp))
 		goto clean_up;
 
+	/* Update frag_point when stream_interleave may get changed. */
+	sctp_assoc_update_frag_point(asoc);
+
 	if (!asoc->temp && sctp_assoc_set_id(asoc, gfp))
 		goto clean_up;
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index bf618d1b41fd..b8cebd5a87e5 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3324,8 +3324,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
 		__u16 datasize = asoc ? sctp_datachk_len(&asoc->stream) :
 				 sizeof(struct sctp_data_chunk);
 
-		min_len = sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT,
-					   datasize);
+		min_len = sctp_min_frag_point(sp, datasize);
 		max_len = SCTP_MAX_CHUNK_LEN - datasize;
 
 		if (val < min_len || val > max_len)
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 12b3edf70a7b..1615e503f8e3 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -272,11 +272,11 @@ void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa,
 
 	p1 = (u8*)(ht_capa);
 	p2 = (u8*)(ht_capa_mask);
-	for (i = 0; i<sizeof(*ht_capa); i++)
+	for (i = 0; i < sizeof(*ht_capa); i++)
 		p1[i] &= p2[i];
 }
 
-/* Do a logical ht_capa &= ht_capa_mask. */
+/* Do a logical vht_capa &= vht_capa_mask. */
 void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa,
 				const struct ieee80211_vht_cap *vht_capa_mask)
 {
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 744b5851bbf9..8d763725498c 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -7870,6 +7870,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	memset(&params, 0, sizeof(params));
+	params.beacon_csa.ftm_responder = -1;
 
 	if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] ||
 	    !info->attrs[NL80211_ATTR_CH_SWITCH_COUNT])
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index d536b07582f8..f741d8376a46 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -642,11 +642,15 @@ static bool cfg80211_is_all_idle(void)
 	 * All devices must be idle as otherwise if you are actively
 	 * scanning some new beacon hints could be learned and would
 	 * count as new regulatory hints.
+	 * Also if there is any other active beaconing interface we
+	 * need not issue a disconnect hint and reset any info such
+	 * as chan dfs state, etc.
 	 */
 	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
 		list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 			wdev_lock(wdev);
-			if (wdev->conn || wdev->current_bss)
+			if (wdev->conn || wdev->current_bss ||
+			    cfg80211_beaconing_iface_active(wdev))
 				is_all_idle = false;
 			wdev_unlock(wdev);
 		}
@@ -1171,6 +1175,8 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
 
 	cfg80211_oper_and_ht_capa(&connect->ht_capa_mask,
 				  rdev->wiphy.ht_capa_mod_mask);
+	cfg80211_oper_and_vht_capa(&connect->vht_capa_mask,
+				   rdev->wiphy.vht_capa_mod_mask);
 
 	if (connkeys && connkeys->def >= 0) {
 		int idx;
diff --git a/net/wireless/util.c b/net/wireless/util.c
index ef14d80ca03e..d473bd135da8 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1421,6 +1421,8 @@ size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen,
 					      ies[pos + ext],
 					      ext == 2))
 				pos = skip_ie(ies, ielen, pos);
+			else
+				break;
 		}
 	} else {
 		pos = skip_ie(ies, ielen, pos);
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index d49aa79b7997..5121729b8b63 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -100,7 +100,7 @@ int x25_parse_address_block(struct sk_buff *skb,
 	}
 
 	len = *skb->data;
-	needed = 1 + (len >> 4) + (len & 0x0f);
+	needed = 1 + ((len >> 4) + (len & 0x0f) + 1) / 2;
 
 	if (!pskb_may_pull(skb, needed)) {
 		/* packet is too short to hold the addresses it claims
@@ -288,7 +288,7 @@ static struct sock *x25_find_listener(struct x25_address *addr,
 	sk_for_each(s, &x25_list)
 		if ((!strcmp(addr->x25_addr,
 			x25_sk(s)->source_addr.x25_addr) ||
-		     !strcmp(addr->x25_addr,
+		     !strcmp(x25_sk(s)->source_addr.x25_addr,
 			null_x25_address.x25_addr)) &&
 		     s->sk_state == TCP_LISTEN) {
 			/*
@@ -688,11 +688,15 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 		goto out;
 	}
 
-	len = strlen(addr->sx25_addr.x25_addr);
-	for (i = 0; i < len; i++) {
-		if (!isdigit(addr->sx25_addr.x25_addr[i])) {
-			rc = -EINVAL;
-			goto out;
+	/* check for the null_x25_address */
+	if (strcmp(addr->sx25_addr.x25_addr, null_x25_address.x25_addr)) {
+
+		len = strlen(addr->sx25_addr.x25_addr);
+		for (i = 0; i < len; i++) {
+			if (!isdigit(addr->sx25_addr.x25_addr[i])) {
+				rc = -EINVAL;
+				goto out;
+			}
 		}
 	}
 
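
The first hunk is the "address block parsing length" fix: X.25 addresses in a call packet are BCD-coded, two digits per octet, so a block carrying called_len + calling_len digits occupies one length octet plus ceil(digits / 2) address octets, not one octet per digit as the old formula assumed. A worked check of the arithmetic:

    #include <assert.h>
    #include <stdio.h>

    static unsigned int addr_block_len(unsigned char len_octet)
    {
            unsigned int called  = len_octet >> 4;    /* high nibble: called-address digits */
            unsigned int calling = len_octet & 0x0f;  /* low nibble: calling-address digits */

            return 1 + (called + calling + 1) / 2;    /* +1 before /2 rounds up */
    }

    int main(void)
    {
            /* 10 + 10 digits pack into 10 octets, plus the length octet = 11;
             * the old "1 + called + calling" formula demanded 21 octets and
             * rejected valid packets as truncated. */
            assert(addr_block_len(0xaa) == 11);
            printf("ok\n");
            return 0;
    }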
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index 3c12cae32001..afb26221d8a8 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -142,6 +142,15 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
 		sk->sk_state_change(sk);
 		break;
 	}
+	case X25_CALL_REQUEST:
+		/* call collision */
+		x25->causediag.cause = 0x01;
+		x25->causediag.diagnostic = 0x48;
+
+		x25_write_internal(sk, X25_CLEAR_REQUEST);
+		x25_disconnect(sk, EISCONN, 0x01, 0x48);
+		break;
+
 	case X25_CLEAR_REQUEST:
 		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
 			goto out_clear;