author    Linus Torvalds <torvalds@linux-foundation.org>    2017-09-09 14:05:20 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2017-09-09 14:05:20 -0400
commit    fbd01410e89a66f346ba1b3c0161e1198449b746 (patch)
tree      1a3cfbb076c8b995e5f6e752c30163532dcbd781 /net
parent    fbf4432ff71b7a25bef993a5312906946d27f446 (diff)
parent    9beb8bedb05c5f3a353dba62b8fa7cbbb9ec685e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "The iwlwifi firmware compat fix is in here as well as some other
  stuff:

  1) Fix request socket leak introduced by BPF deadlock fix, from Eric
     Dumazet.

  2) Fix VLAN handling with TXQs in mac80211, from Johannes Berg.

  3) Missing __qdisc_drop conversions in prio and qfq schedulers, from
     Gao Feng.

  4) Use after free in netlink nlk groups handling, from Xin Long.

  5) Handle MTU update properly in ipv6 gre tunnels, from Xin Long.

  6) Fix leak of ipv6 fib tables on netns teardown, from Sabrina
     Dubroca with follow-on fix from Eric Dumazet.

  7) Need RCU and preemption disabled during generic XDP data path,
     from John Fastabend"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (54 commits)
  bpf: make error reporting in bpf_warn_invalid_xdp_action more clear
  Revert "mdio_bus: Remove unneeded gpiod NULL check"
  bpf: devmap, use cond_resched instead of cpu_relax
  bpf: add support for sockmap detach programs
  net: rcu lock and preempt disable missing around generic xdp
  bpf: don't select potentially stale ri->map from buggy xdp progs
  net: tulip: Constify tulip_tbl
  net: ethernet: ti: netcp_core: no need in netif_napi_del
  davicom: Display proper debug level up to 6
  net: phy: sfp: rename dt properties to match the binding
  dt-binding: net: sfp binding documentation
  dt-bindings: add SFF vendor prefix
  dt-bindings: net: don't confuse with generic PHY property
  ip6_tunnel: fix setting hop_limit value for ipv6 tunnel
  ip_tunnel: fix setting ttl and tos value in collect_md mode
  ipv6: fix typo in fib6_net_exit()
  tcp: fix a request socket leak
  sctp: fix missing wake ups in some situations
  netfilter: xt_hashlimit: fix build error caused by 64bit division
  netfilter: xt_hashlimit: alloc hashtable with right size
  ...
Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c                          25
-rw-r--r--  net/core/filter.c                       27
-rw-r--r--  net/core/skbuff.c                        9
-rw-r--r--  net/ipv4/ip_tunnel.c                     4
-rw-r--r--  net/ipv4/netfilter/arp_tables.c          1
-rw-r--r--  net/ipv4/netfilter/ip_tables.c           1
-rw-r--r--  net/ipv4/tcp_ipv4.c                      6
-rw-r--r--  net/ipv4/udp.c                           5
-rw-r--r--  net/ipv6/ip6_fib.c                      25
-rw-r--r--  net/ipv6/ip6_gre.c                       4
-rw-r--r--  net/ipv6/ip6_tunnel.c                    1
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c          1
-rw-r--r--  net/ipv6/tcp_ipv6.c                      6
-rw-r--r--  net/mac80211/agg-rx.c                   32
-rw-r--r--  net/mac80211/agg-tx.c                    8
-rw-r--r--  net/mac80211/ht.c                       24
-rw-r--r--  net/mac80211/ieee80211_i.h               4
-rw-r--r--  net/mac80211/iface.c                    20
-rw-r--r--  net/mac80211/mlme.c                      2
-rw-r--r--  net/mac80211/offchannel.c                2
-rw-r--r--  net/mac80211/tx.c                       36
-rw-r--r--  net/mac80211/util.c                      2
-rw-r--r--  net/netfilter/core.c                     2
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_sctp.c    8
-rw-r--r--  net/netfilter/nf_nat_core.c            146
-rw-r--r--  net/netfilter/xt_hashlimit.c            16
-rw-r--r--  net/netlink/af_netlink.c                22
-rw-r--r--  net/rds/send.c                          10
-rw-r--r--  net/sched/cls_api.c                     18
-rw-r--r--  net/sched/sch_prio.c                     2
-rw-r--r--  net/sched/sch_qfq.c                      2
-rw-r--r--  net/sctp/ulpqueue.c                      3
-rw-r--r--  net/tipc/bearer.c                        2
-rw-r--r--  net/wireless/nl80211.c                   4
-rw-r--r--  net/wireless/reg.c                      20
35 files changed, 323 insertions(+), 177 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 6f845e4fec17..fb766d906148 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3981,8 +3981,13 @@ static int netif_rx_internal(struct sk_buff *skb)
         trace_netif_rx(skb);
 
         if (static_key_false(&generic_xdp_needed)) {
-                int ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog),
-                                         skb);
+                int ret;
+
+                preempt_disable();
+                rcu_read_lock();
+                ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
+                rcu_read_unlock();
+                preempt_enable();
 
                 /* Consider XDP consuming the packet a success from
                  * the netdev point of view we do not want to count
@@ -4500,18 +4505,20 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
         if (skb_defer_rx_timestamp(skb))
                 return NET_RX_SUCCESS;
 
-        rcu_read_lock();
-
         if (static_key_false(&generic_xdp_needed)) {
-                int ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog),
-                                         skb);
+                int ret;
 
-                if (ret != XDP_PASS) {
-                        rcu_read_unlock();
+                preempt_disable();
+                rcu_read_lock();
+                ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
+                rcu_read_unlock();
+                preempt_enable();
+
+                if (ret != XDP_PASS)
                         return NET_RX_DROP;
-                }
         }
 
+        rcu_read_lock();
 #ifdef CONFIG_RPS
         if (static_key_false(&rps_needed)) {
                 struct rps_dev_flow voidflow, *rflow = &voidflow;
diff --git a/net/core/filter.c b/net/core/filter.c
index 5912c738a7b2..3a50a9b021e2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1794,6 +1794,7 @@ struct redirect_info {
         u32 flags;
         struct bpf_map *map;
         struct bpf_map *map_to_flush;
+        const struct bpf_prog *map_owner;
 };
 
 static DEFINE_PER_CPU(struct redirect_info, redirect_info);
@@ -1807,7 +1808,6 @@ BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
 
         ri->ifindex = ifindex;
         ri->flags = flags;
-        ri->map = NULL;
 
         return TC_ACT_REDIRECT;
 }
@@ -2504,6 +2504,7 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
                                struct bpf_prog *xdp_prog)
 {
         struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+        const struct bpf_prog *map_owner = ri->map_owner;
         struct bpf_map *map = ri->map;
         u32 index = ri->ifindex;
         struct net_device *fwd;
@@ -2511,6 +2512,15 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
 
         ri->ifindex = 0;
         ri->map = NULL;
+        ri->map_owner = NULL;
+
+        /* This is really only caused by a deliberately crappy
+         * BPF program, normally we would never hit that case,
+         * so no need to inform someone via tracepoints either,
+         * just bail out.
+         */
+        if (unlikely(map_owner != xdp_prog))
+                return -EINVAL;
 
         fwd = __dev_map_lookup_elem(map, index);
         if (!fwd) {
@@ -2607,6 +2617,8 @@ BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
 
         ri->ifindex = ifindex;
         ri->flags = flags;
+        ri->map = NULL;
+        ri->map_owner = NULL;
 
         return XDP_REDIRECT;
 }
@@ -2619,7 +2631,8 @@ static const struct bpf_func_proto bpf_xdp_redirect_proto = {
         .arg2_type      = ARG_ANYTHING,
 };
 
-BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags)
+BPF_CALL_4(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags,
+           const struct bpf_prog *, map_owner)
 {
         struct redirect_info *ri = this_cpu_ptr(&redirect_info);
 
@@ -2629,10 +2642,14 @@ BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags
         ri->ifindex = ifindex;
         ri->flags = flags;
         ri->map = map;
+        ri->map_owner = map_owner;
 
         return XDP_REDIRECT;
 }
 
+/* Note, arg4 is hidden from users and populated by the verifier
+ * with the right pointer.
+ */
 static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
         .func           = bpf_xdp_redirect_map,
         .gpl_only       = false,
@@ -3592,7 +3609,11 @@ static bool xdp_is_valid_access(int off, int size,
 
 void bpf_warn_invalid_xdp_action(u32 act)
 {
-        WARN_ONCE(1, "Illegal XDP return value %u, expect packet loss\n", act);
+        const u32 act_max = XDP_REDIRECT;
+
+        WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n",
+                  act > act_max ? "Illegal" : "Driver unsupported",
+                  act);
 }
 EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
 
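The reworked bpf_warn_invalid_xdp_action() distinguishes a return code the
driver simply does not handle from one that is out of range entirely. Below
is a minimal userspace sketch of that classification, not part of the patch;
the enum mirrors the XDP uapi, where XDP_REDIRECT is the highest action
defined at this point.

#include <stdio.h>

enum xdp_action { XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX, XDP_REDIRECT };

static void warn_invalid_xdp_action(unsigned int act)
{
        const unsigned int act_max = XDP_REDIRECT;

        fprintf(stderr, "%s XDP return value %u, expect packet loss!\n",
                act > act_max ? "Illegal" : "Driver unsupported", act);
}

int main(void)
{
        warn_invalid_xdp_action(XDP_REDIRECT); /* known code, unhandled by driver */
        warn_invalid_xdp_action(42);           /* out of range: illegal */
        return 0;
}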
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 68065d7d383f..16982de649b9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -710,14 +710,11 @@ EXPORT_SYMBOL(consume_skb);
  * consume_stateless_skb - free an skbuff, assuming it is stateless
  * @skb: buffer to free
  *
- * Works like consume_skb(), but this variant assumes that all the head
- * states have been already dropped.
+ * Alike consume_skb(), but this variant assumes that this is the last
+ * skb reference and all the head states have been already dropped
  */
-void consume_stateless_skb(struct sk_buff *skb)
+void __consume_stateless_skb(struct sk_buff *skb)
 {
-        if (!skb_unref(skb))
-                return;
-
         trace_consume_skb(skb);
         skb_release_data(skb);
         kfree_skbmem(skb);
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 129d1a3616f8..e1856bfa753d 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -618,8 +618,8 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, u8 proto)
                 ip_rt_put(rt);
                 goto tx_dropped;
         }
-        iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, key->tos,
-                      key->ttl, df, !net_eq(tunnel->net, dev_net(dev)));
+        iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
+                      df, !net_eq(tunnel->net, dev_net(dev)));
         return;
 tx_error:
         dev->stats.tx_errors++;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index e04457198f93..9e2770fd00be 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -629,6 +629,7 @@ static void get_counters(const struct xt_table_info *t,
 
                         ADD_COUNTER(counters[i], bcnt, pcnt);
                         ++i;
+                        cond_resched();
                 }
         }
 }
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 576cba2b57e9..39286e543ee6 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -776,6 +776,7 @@ get_counters(const struct xt_table_info *t,
 
                         ADD_COUNTER(counters[i], bcnt, pcnt);
                         ++i; /* macro does multi eval of i */
+                        cond_resched();
                 }
         }
 }
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a63486afa7a7..d9416b5162bc 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1669,9 +1669,9 @@ process:
                  */
                 sock_hold(sk);
                 refcounted = true;
-                if (tcp_filter(sk, skb))
-                        goto discard_and_relse;
-                nsk = tcp_check_req(sk, skb, req, false);
+                nsk = NULL;
+                if (!tcp_filter(sk, skb))
+                        nsk = tcp_check_req(sk, skb, req, false);
                 if (!nsk) {
                         reqsk_put(req);
                         goto discard_and_relse;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index db1c9e78c83c..ef29df8648e4 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1397,12 +1397,15 @@ void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
                 unlock_sock_fast(sk, slow);
         }
 
+        if (!skb_unref(skb))
+                return;
+
         /* In the more common cases we cleared the head states previously,
          * see __udp_queue_rcv_skb().
          */
         if (unlikely(udp_skb_has_head_state(skb)))
                 skb_release_head_state(skb);
-        consume_stateless_skb(skb);
+        __consume_stateless_skb(skb);
 }
 EXPORT_SYMBOL_GPL(skb_consume_udp);
 
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index a3b5c163325f..e5308d7cbd75 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -191,6 +191,12 @@ void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
 }
 EXPORT_SYMBOL_GPL(rt6_free_pcpu);
 
+static void fib6_free_table(struct fib6_table *table)
+{
+        inetpeer_invalidate_tree(&table->tb6_peers);
+        kfree(table);
+}
+
 static void fib6_link_table(struct net *net, struct fib6_table *tb)
 {
         unsigned int h;
@@ -2022,15 +2028,22 @@ out_timer:
 
 static void fib6_net_exit(struct net *net)
 {
+        unsigned int i;
+
         rt6_ifdown(net, NULL);
         del_timer_sync(&net->ipv6.ip6_fib_timer);
 
-#ifdef CONFIG_IPV6_MULTIPLE_TABLES
-        inetpeer_invalidate_tree(&net->ipv6.fib6_local_tbl->tb6_peers);
-        kfree(net->ipv6.fib6_local_tbl);
-#endif
-        inetpeer_invalidate_tree(&net->ipv6.fib6_main_tbl->tb6_peers);
-        kfree(net->ipv6.fib6_main_tbl);
+        for (i = 0; i < FIB6_TABLE_HASHSZ; i++) {
+                struct hlist_head *head = &net->ipv6.fib_table_hash[i];
+                struct hlist_node *tmp;
+                struct fib6_table *tb;
+
+                hlist_for_each_entry_safe(tb, tmp, head, tb6_hlist) {
+                        hlist_del(&tb->tb6_hlist);
+                        fib6_free_table(tb);
+                }
+        }
+
         kfree(net->ipv6.fib_table_hash);
         kfree(net->ipv6.rt6_stats);
         fib6_notifier_exit(net);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 67ff2aaf5dcb..b7a72d409334 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -432,7 +432,9 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                 }
                 break;
         case ICMPV6_PKT_TOOBIG:
-                mtu = be32_to_cpu(info) - offset;
+                mtu = be32_to_cpu(info) - offset - t->tun_hlen;
+                if (t->dev->type == ARPHRD_ETHER)
+                        mtu -= ETH_HLEN;
                 if (mtu < IPV6_MIN_MTU)
                         mtu = IPV6_MIN_MTU;
                 t->dev->mtu = mtu;
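The ip6gre_err() change accounts for the tunnel's own encapsulation overhead
before updating the device MTU from an ICMPV6_PKT_TOOBIG report. A standalone
sketch of the arithmetic with made-up numbers follows; the helper name and
inputs are hypothetical, ETH_HLEN is 14, and the IPv6 minimum MTU is 1280.

#include <stdio.h>

#define IPV6_MIN_MTU 1280
#define ETH_HLEN     14

/* Mirrors: mtu = be32_to_cpu(info) - offset - tun_hlen, minus ETH_HLEN
 * on Ethernet-type tunnel devices, then clamped to IPV6_MIN_MTU.
 */
static unsigned int gre6_adjusted_mtu(unsigned int reported, unsigned int offset,
                                      unsigned int tun_hlen, int is_ether)
{
        unsigned int mtu = reported - offset - tun_hlen;

        if (is_ether)
                mtu -= ETH_HLEN;
        if (mtu < IPV6_MIN_MTU)
                mtu = IPV6_MIN_MTU;
        return mtu;
}

int main(void)
{
        printf("%u\n", gre6_adjusted_mtu(1500, 0, 8, 0)); /* 1492 */
        printf("%u\n", gre6_adjusted_mtu(1300, 0, 8, 1)); /* 1278 -> clamped to 1280 */
        return 0;
}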
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 3a0ba2ae4b0f..10a693a19323 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1184,6 +1184,7 @@ route_lookup:
                 init_tel_txopt(&opt, encap_limit);
                 ipv6_push_frag_opts(skb, &opt.ops, &proto);
         }
+        hop_limit = hop_limit ? : ip6_dst_hoplimit(dst);
 
         /* Calculate max headroom for all the headers and adjust
          * needed_headroom if necessary.
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 54b1e75eded1..01bd3ee5ebc6 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -795,6 +795,7 @@ get_counters(const struct xt_table_info *t,
 
                         ADD_COUNTER(counters[i], bcnt, pcnt);
                         ++i;
+                        cond_resched();
                 }
         }
 }
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 38f76d8b231e..64d94afa427f 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1460,9 +1460,9 @@ process:
                 }
                 sock_hold(sk);
                 refcounted = true;
-                if (tcp_filter(sk, skb))
-                        goto discard_and_relse;
-                nsk = tcp_check_req(sk, skb, req, false);
+                nsk = NULL;
+                if (!tcp_filter(sk, skb))
+                        nsk = tcp_check_req(sk, skb, req, false);
                 if (!nsk) {
                         reqsk_put(req);
                         goto discard_and_relse;
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 2b36eff5d97e..2849a1fc41c5 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -245,10 +245,10 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
         ieee80211_tx_skb(sdata, skb);
 }
 
-void __ieee80211_start_rx_ba_session(struct sta_info *sta,
-                                     u8 dialog_token, u16 timeout,
-                                     u16 start_seq_num, u16 ba_policy, u16 tid,
-                                     u16 buf_size, bool tx, bool auto_seq)
+void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
+                                      u8 dialog_token, u16 timeout,
+                                      u16 start_seq_num, u16 ba_policy, u16 tid,
+                                      u16 buf_size, bool tx, bool auto_seq)
 {
         struct ieee80211_local *local = sta->sdata->local;
         struct tid_ampdu_rx *tid_agg_rx;
@@ -267,7 +267,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
                 ht_dbg(sta->sdata,
                        "STA %pM requests BA session on unsupported tid %d\n",
                        sta->sta.addr, tid);
-                goto end_no_lock;
+                goto end;
         }
 
         if (!sta->sta.ht_cap.ht_supported) {
@@ -275,14 +275,14 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
275 "STA %pM erroneously requests BA session on tid %d w/o QoS\n", 275 "STA %pM erroneously requests BA session on tid %d w/o QoS\n",
276 sta->sta.addr, tid); 276 sta->sta.addr, tid);
277 /* send a response anyway, it's an error case if we get here */ 277 /* send a response anyway, it's an error case if we get here */
278 goto end_no_lock; 278 goto end;
279 } 279 }
280 280
281 if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) { 281 if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
282 ht_dbg(sta->sdata, 282 ht_dbg(sta->sdata,
283 "Suspend in progress - Denying ADDBA request (%pM tid %d)\n", 283 "Suspend in progress - Denying ADDBA request (%pM tid %d)\n",
284 sta->sta.addr, tid); 284 sta->sta.addr, tid);
285 goto end_no_lock; 285 goto end;
286 } 286 }
287 287
288 /* sanity check for incoming parameters: 288 /* sanity check for incoming parameters:
@@ -296,7 +296,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
                 ht_dbg_ratelimited(sta->sdata,
                                    "AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n",
                                    sta->sta.addr, tid, ba_policy, buf_size);
-                goto end_no_lock;
+                goto end;
         }
         /* determine default buffer size */
         if (buf_size == 0)
@@ -311,7 +311,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
                buf_size, sta->sta.addr);
 
         /* examine state machine */
-        mutex_lock(&sta->ampdu_mlme.mtx);
+        lockdep_assert_held(&sta->ampdu_mlme.mtx);
 
         if (test_bit(tid, sta->ampdu_mlme.agg_session_valid)) {
                 if (sta->ampdu_mlme.tid_rx_token[tid] == dialog_token) {
@@ -415,15 +415,25 @@ end:
                 __clear_bit(tid, sta->ampdu_mlme.unexpected_agg);
                 sta->ampdu_mlme.tid_rx_token[tid] = dialog_token;
         }
-        mutex_unlock(&sta->ampdu_mlme.mtx);
 
-end_no_lock:
         if (tx)
                 ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid,
                                           dialog_token, status, 1, buf_size,
                                           timeout);
 }
 
+void __ieee80211_start_rx_ba_session(struct sta_info *sta,
+                                     u8 dialog_token, u16 timeout,
+                                     u16 start_seq_num, u16 ba_policy, u16 tid,
+                                     u16 buf_size, bool tx, bool auto_seq)
+{
+        mutex_lock(&sta->ampdu_mlme.mtx);
+        ___ieee80211_start_rx_ba_session(sta, dialog_token, timeout,
+                                         start_seq_num, ba_policy, tid,
+                                         buf_size, tx, auto_seq);
+        mutex_unlock(&sta->ampdu_mlme.mtx);
+}
+
 void ieee80211_process_addba_request(struct ieee80211_local *local,
                                      struct sta_info *sta,
                                      struct ieee80211_mgmt *mgmt,
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index cbd48762256c..bef516ec47f9 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -226,7 +226,11 @@ ieee80211_agg_start_txq(struct sta_info *sta, int tid, bool enable)
         clear_bit(IEEE80211_TXQ_AMPDU, &txqi->flags);
 
         clear_bit(IEEE80211_TXQ_STOP, &txqi->flags);
+        local_bh_disable();
+        rcu_read_lock();
         drv_wake_tx_queue(sta->sdata->local, txqi);
+        rcu_read_unlock();
+        local_bh_enable();
 }
 
 /*
@@ -436,7 +440,7 @@ static void sta_addba_resp_timer_expired(unsigned long data)
             test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
                 rcu_read_unlock();
                 ht_dbg(sta->sdata,
-                       "timer expired on %pM tid %d but we are not (or no longer) expecting addBA response there\n",
+                       "timer expired on %pM tid %d not expecting addBA response\n",
                        sta->sta.addr, tid);
                 return;
         }
@@ -639,7 +643,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
             time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
                         HT_AGG_RETRIES_PERIOD)) {
                 ht_dbg(sdata,
-                       "BA request denied - waiting a grace period after %d failed requests on %pM tid %u\n",
+                       "BA request denied - %d failed requests on %pM tid %u\n",
                        sta->ampdu_mlme.addba_req_num[tid], sta->sta.addr, tid);
                 ret = -EBUSY;
                 goto err_unlock_sta;
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index c92df492e898..d6d0b4201e40 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -300,6 +300,24 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
 
         /* stopping might queue the work again - so cancel only afterwards */
         cancel_work_sync(&sta->ampdu_mlme.work);
+
+        /*
+         * In case the tear down is part of a reconfigure due to HW restart
+         * request, it is possible that the low level driver requested to stop
+         * the BA session, so handle it to properly clean tid_tx data.
+         */
+        mutex_lock(&sta->ampdu_mlme.mtx);
+        for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
+                struct tid_ampdu_tx *tid_tx =
+                        rcu_dereference_protected_tid_tx(sta, i);
+
+                if (!tid_tx)
+                        continue;
+
+                if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state))
+                        ieee80211_stop_tx_ba_cb(sta, i, tid_tx);
+        }
+        mutex_unlock(&sta->ampdu_mlme.mtx);
 }
 
 void ieee80211_ba_session_work(struct work_struct *work)
@@ -333,9 +351,9 @@ void ieee80211_ba_session_work(struct work_struct *work)
 
                 if (test_and_clear_bit(tid,
                                        sta->ampdu_mlme.tid_rx_manage_offl))
-                        __ieee80211_start_rx_ba_session(sta, 0, 0, 0, 1, tid,
-                                                        IEEE80211_MAX_AMPDU_BUF,
-                                                        false, true);
+                        ___ieee80211_start_rx_ba_session(sta, 0, 0, 0, 1, tid,
+                                                         IEEE80211_MAX_AMPDU_BUF,
+                                                         false, true);
 
                 if (test_and_clear_bit(tid + IEEE80211_NUM_TIDS,
                                        sta->ampdu_mlme.tid_rx_manage_offl))
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 2197c62a0a6e..9675814f64db 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1760,6 +1760,10 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
                                      u8 dialog_token, u16 timeout,
                                      u16 start_seq_num, u16 ba_policy, u16 tid,
                                      u16 buf_size, bool tx, bool auto_seq);
+void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
+                                      u8 dialog_token, u16 timeout,
+                                      u16 start_seq_num, u16 ba_policy, u16 tid,
+                                      u16 buf_size, bool tx, bool auto_seq);
 void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
                                          enum ieee80211_agg_stop_reason reason);
 void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 9228ac73c429..f75029abf728 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -731,7 +731,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
             sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
             local->ops->wake_tx_queue) {
                 /* XXX: for AP_VLAN, actually track AP queues */
-                netif_tx_start_all_queues(dev);
+                if (dev)
+                        netif_tx_start_all_queues(dev);
         } else if (dev) {
                 unsigned long flags;
                 int n_acs = IEEE80211_NUM_ACS;
@@ -792,6 +793,7 @@ static int ieee80211_open(struct net_device *dev)
 static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                               bool going_down)
 {
+        struct ieee80211_sub_if_data *txq_sdata = sdata;
         struct ieee80211_local *local = sdata->local;
         struct fq *fq = &local->fq;
         unsigned long flags;
@@ -937,6 +939,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 
         switch (sdata->vif.type) {
         case NL80211_IFTYPE_AP_VLAN:
+                txq_sdata = container_of(sdata->bss,
+                                         struct ieee80211_sub_if_data, u.ap);
+
                 mutex_lock(&local->mtx);
                 list_del(&sdata->u.vlan.list);
                 mutex_unlock(&local->mtx);
@@ -1007,8 +1012,17 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
         }
         spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 
-        if (sdata->vif.txq) {
-                struct txq_info *txqi = to_txq_info(sdata->vif.txq);
+        if (txq_sdata->vif.txq) {
+                struct txq_info *txqi = to_txq_info(txq_sdata->vif.txq);
+
+                /*
+                 * FIXME FIXME
+                 *
+                 * We really shouldn't purge the *entire* txqi since that
+                 * contains frames for the other AP_VLANs (and possibly
+                 * the AP itself) as well, but there's no API in FQ now
+                 * to be able to filter.
+                 */
 
                 spin_lock_bh(&fq->lock);
                 ieee80211_txq_purge(local, txqi);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index b588e593b0ec..3b8e2709d8de 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3155,7 +3155,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
         if (len < 24 + 6)
                 return;
 
-        reassoc = ieee80211_is_reassoc_req(mgmt->frame_control);
+        reassoc = ieee80211_is_reassoc_resp(mgmt->frame_control);
         capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
         status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
         aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index f8e7a8bbc618..faf4f6055000 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -707,6 +707,8 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
         if (!cookie)
                 return -ENOENT;
 
+        flush_work(&local->hw_roc_start);
+
         mutex_lock(&local->mtx);
         list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
                 if (!mgmt_tx && roc->cookie != cookie)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 8858f4f185e9..94826680cf2b 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1276,11 +1276,6 @@ static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb)
         IEEE80211_SKB_CB(skb)->control.enqueue_time = codel_get_time();
 }
 
-static void ieee80211_set_skb_vif(struct sk_buff *skb, struct txq_info *txqi)
-{
-        IEEE80211_SKB_CB(skb)->control.vif = txqi->txq.vif;
-}
-
 static u32 codel_skb_len_func(const struct sk_buff *skb)
 {
         return skb->len;
@@ -3414,6 +3409,7 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
         struct ieee80211_tx_info *info;
         struct ieee80211_tx_data tx;
         ieee80211_tx_result r;
+        struct ieee80211_vif *vif;
 
         spin_lock_bh(&fq->lock);
 
@@ -3430,8 +3426,6 @@ begin:
         if (!skb)
                 goto out;
 
-        ieee80211_set_skb_vif(skb, txqi);
-
         hdr = (struct ieee80211_hdr *)skb->data;
         info = IEEE80211_SKB_CB(skb);
 
@@ -3488,6 +3482,34 @@ begin:
                 }
         }
 
+        switch (tx.sdata->vif.type) {
+        case NL80211_IFTYPE_MONITOR:
+                if (tx.sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
+                        vif = &tx.sdata->vif;
+                        break;
+                }
+                tx.sdata = rcu_dereference(local->monitor_sdata);
+                if (tx.sdata) {
+                        vif = &tx.sdata->vif;
+                        info->hw_queue =
+                                vif->hw_queue[skb_get_queue_mapping(skb)];
+                } else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
+                        ieee80211_free_txskb(&local->hw, skb);
+                        goto begin;
+                } else {
+                        vif = NULL;
+                }
+                break;
+        case NL80211_IFTYPE_AP_VLAN:
+                tx.sdata = container_of(tx.sdata->bss,
+                                        struct ieee80211_sub_if_data, u.ap);
+                /* fall through */
+        default:
+                vif = &tx.sdata->vif;
+                break;
+        }
+
+        IEEE80211_SKB_CB(skb)->control.vif = vif;
 out:
         spin_unlock_bh(&fq->lock);
 
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 259698de569f..6aef6793d052 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1436,7 +1436,7 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local,
                 WLAN_EID_SSID_LIST,
                 WLAN_EID_CHANNEL_USAGE,
                 WLAN_EID_INTERWORKING,
-                /* mesh ID can't happen here */
+                WLAN_EID_MESH_ID,
                 /* 60 GHz can't happen here right now */
         };
         noffset = ieee80211_ie_split(ie, ie_len,
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 04fe25abc5f6..52cd2901a097 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -215,7 +215,7 @@ static void *__nf_hook_entries_try_shrink(struct nf_hook_entries __rcu **pp)
         if (skip == hook_entries)
                 goto out_assign;
 
-        if (WARN_ON(skip == 0))
+        if (skip == 0)
                 return NULL;
 
         hook_entries -= skip;
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index e1efa446b305..57c8ee66491e 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -24,9 +24,13 @@ sctp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
                 if (sh) {
                         sch = skb_header_pointer(skb, iph->len + sizeof(_sctph),
                                                  sizeof(_schunkh), &_schunkh);
-                        if (sch && (sch->type == SCTP_CID_INIT ||
-                                    sysctl_sloppy_sctp(ipvs)))
+                        if (sch) {
+                                if (sch->type == SCTP_CID_ABORT ||
+                                    !(sysctl_sloppy_sctp(ipvs) ||
+                                      sch->type == SCTP_CID_INIT))
+                                        return 1;
                                 ports = &sh->source;
+                        }
                 }
         } else {
                 ports = skb_header_pointer(
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 40573aa6c133..f393a7086025 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -30,19 +30,17 @@
 #include <net/netfilter/nf_conntrack_zones.h>
 #include <linux/netfilter/nf_nat.h>
 
+static spinlock_t nf_nat_locks[CONNTRACK_LOCKS];
+
 static DEFINE_MUTEX(nf_nat_proto_mutex);
 static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
                                                 __read_mostly;
 static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
                                                 __read_mostly;
 
-struct nf_nat_conn_key {
-        const struct net *net;
-        const struct nf_conntrack_tuple *tuple;
-        const struct nf_conntrack_zone *zone;
-};
-
-static struct rhltable nf_nat_bysource_table;
+static struct hlist_head *nf_nat_bysource __read_mostly;
+static unsigned int nf_nat_htable_size __read_mostly;
+static unsigned int nf_nat_hash_rnd __read_mostly;
 
 inline const struct nf_nat_l3proto *
 __nf_nat_l3proto_find(u8 family)
@@ -118,17 +116,19 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
 EXPORT_SYMBOL(nf_xfrm_me_harder);
 #endif /* CONFIG_XFRM */
 
-static u32 nf_nat_bysource_hash(const void *data, u32 len, u32 seed)
+/* We keep an extra hash for each conntrack, for fast searching. */
+static unsigned int
+hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
 {
-        const struct nf_conntrack_tuple *t;
-        const struct nf_conn *ct = data;
+        unsigned int hash;
+
+        get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));
 
-        t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
         /* Original src, to ensure we map it consistently if poss. */
+        hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
+                      tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n));
 
-        seed ^= net_hash_mix(nf_ct_net(ct));
-        return jhash2((const u32 *)&t->src, sizeof(t->src) / sizeof(u32),
-                      t->dst.protonum ^ seed);
+        return reciprocal_scale(hash, nf_nat_htable_size);
 }
 
 /* Is this tuple already taken? (not by us) */
@@ -184,28 +184,6 @@ same_src(const struct nf_conn *ct,
                 t->src.u.all == tuple->src.u.all);
 }
 
-static int nf_nat_bysource_cmp(struct rhashtable_compare_arg *arg,
-                               const void *obj)
-{
-        const struct nf_nat_conn_key *key = arg->key;
-        const struct nf_conn *ct = obj;
-
-        if (!same_src(ct, key->tuple) ||
-            !net_eq(nf_ct_net(ct), key->net) ||
-            !nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL))
-                return 1;
-
-        return 0;
-}
-
-static struct rhashtable_params nf_nat_bysource_params = {
-        .head_offset = offsetof(struct nf_conn, nat_bysource),
-        .obj_hashfn = nf_nat_bysource_hash,
-        .obj_cmpfn = nf_nat_bysource_cmp,
-        .nelem_hint = 256,
-        .min_size = 1024,
-};
-
 /* Only called for SRC manip */
 static int
 find_appropriate_src(struct net *net,
@@ -216,26 +194,22 @@ find_appropriate_src(struct net *net,
                      struct nf_conntrack_tuple *result,
                      const struct nf_nat_range *range)
 {
+        unsigned int h = hash_by_src(net, tuple);
         const struct nf_conn *ct;
-        struct nf_nat_conn_key key = {
-                .net = net,
-                .tuple = tuple,
-                .zone = zone
-        };
-        struct rhlist_head *hl, *h;
-
-        hl = rhltable_lookup(&nf_nat_bysource_table, &key,
-                             nf_nat_bysource_params);
-
-        rhl_for_each_entry_rcu(ct, h, hl, nat_bysource) {
-                nf_ct_invert_tuplepr(result,
-                                     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
-                result->dst = tuple->dst;
 
-                if (in_range(l3proto, l4proto, result, range))
-                        return 1;
+        hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
+                if (same_src(ct, tuple) &&
+                    net_eq(net, nf_ct_net(ct)) &&
+                    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
+                        /* Copy source part from reply tuple. */
+                        nf_ct_invert_tuplepr(result,
+                                             &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+                        result->dst = tuple->dst;
+
+                        if (in_range(l3proto, l4proto, result, range))
+                                return 1;
+                }
         }
-
         return 0;
 }
 
@@ -408,6 +382,7 @@ nf_nat_setup_info(struct nf_conn *ct,
                   const struct nf_nat_range *range,
                   enum nf_nat_manip_type maniptype)
 {
+        struct net *net = nf_ct_net(ct);
         struct nf_conntrack_tuple curr_tuple, new_tuple;
 
         /* Can't setup nat info for confirmed ct. */
@@ -416,7 +391,9 @@ nf_nat_setup_info(struct nf_conn *ct,
 
         WARN_ON(maniptype != NF_NAT_MANIP_SRC &&
                 maniptype != NF_NAT_MANIP_DST);
-        BUG_ON(nf_nat_initialized(ct, maniptype));
+
+        if (WARN_ON(nf_nat_initialized(ct, maniptype)))
+                return NF_DROP;
 
         /* What we've got will look like inverse of reply. Normally
          * this is what is in the conntrack, except for prior
@@ -447,19 +424,16 @@ nf_nat_setup_info(struct nf_conn *ct,
         }
 
         if (maniptype == NF_NAT_MANIP_SRC) {
-                struct nf_nat_conn_key key = {
-                        .net = nf_ct_net(ct),
-                        .tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
-                        .zone = nf_ct_zone(ct),
-                };
-                int err;
-
-                err = rhltable_insert_key(&nf_nat_bysource_table,
-                                          &key,
-                                          &ct->nat_bysource,
-                                          nf_nat_bysource_params);
-                if (err)
-                        return NF_DROP;
+                unsigned int srchash;
+                spinlock_t *lock;
+
+                srchash = hash_by_src(net,
+                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+                lock = &nf_nat_locks[srchash % ARRAY_SIZE(nf_nat_locks)];
+                spin_lock_bh(lock);
+                hlist_add_head_rcu(&ct->nat_bysource,
+                                   &nf_nat_bysource[srchash]);
+                spin_unlock_bh(lock);
         }
 
         /* It's done. */
@@ -553,6 +527,16 @@ static int nf_nat_proto_remove(struct nf_conn *i, void *data)
         return i->status & IPS_NAT_MASK ? 1 : 0;
 }
 
+static void __nf_nat_cleanup_conntrack(struct nf_conn *ct)
+{
+        unsigned int h;
+
+        h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+        spin_lock_bh(&nf_nat_locks[h % ARRAY_SIZE(nf_nat_locks)]);
+        hlist_del_rcu(&ct->nat_bysource);
+        spin_unlock_bh(&nf_nat_locks[h % ARRAY_SIZE(nf_nat_locks)]);
+}
+
 static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
 {
         if (nf_nat_proto_remove(ct, data))
@@ -568,8 +552,7 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
          * will delete entry from already-freed table.
          */
         clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
-        rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
-                        nf_nat_bysource_params);
+        __nf_nat_cleanup_conntrack(ct);
 
         /* don't delete conntrack. Although that would make things a lot
          * simpler, we'd end up flushing all conntracks on nat rmmod.
@@ -698,8 +681,7 @@ EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);
 static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
 {
         if (ct->status & IPS_SRC_NAT_DONE)
-                rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
-                                nf_nat_bysource_params);
+                __nf_nat_cleanup_conntrack(ct);
 }
 
 static struct nf_ct_ext_type nat_extend __read_mostly = {
@@ -821,19 +803,27 @@ static struct nf_ct_helper_expectfn follow_master_nat = {
 
 static int __init nf_nat_init(void)
 {
-        int ret;
+        int ret, i;
 
-        ret = rhltable_init(&nf_nat_bysource_table, &nf_nat_bysource_params);
-        if (ret)
-                return ret;
+        /* Leave them the same for the moment. */
+        nf_nat_htable_size = nf_conntrack_htable_size;
+        if (nf_nat_htable_size < ARRAY_SIZE(nf_nat_locks))
+                nf_nat_htable_size = ARRAY_SIZE(nf_nat_locks);
+
+        nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
+        if (!nf_nat_bysource)
+                return -ENOMEM;
 
         ret = nf_ct_extend_register(&nat_extend);
         if (ret < 0) {
-                rhltable_destroy(&nf_nat_bysource_table);
+                nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
                 printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
                 return ret;
         }
 
+        for (i = 0; i < ARRAY_SIZE(nf_nat_locks); i++)
+                spin_lock_init(&nf_nat_locks[i]);
+
         nf_ct_helper_expectfn_register(&follow_master_nat);
 
         BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
@@ -863,8 +853,8 @@ static void __exit nf_nat_cleanup(void)
 
         for (i = 0; i < NFPROTO_NUMPROTO; i++)
                 kfree(nf_nat_l4protos[i]);
-
-        rhltable_destroy(&nf_nat_bysource_table);
+        synchronize_net();
+        nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
 }
 
 MODULE_LICENSE("GPL");
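The new hash_by_src() hashes the original source tuple with jhash2() and maps
the 32-bit result onto the bucket array with reciprocal_scale(), which avoids
a modulo by taking the high half of a 32x32->64 multiply and works for any
table size. A userspace illustration of that final step; the constant merely
stands in for a jhash2() output.

#include <stdint.h>
#include <stdio.h>

/* Same formula as the kernel's reciprocal_scale(): bucket in [0, ep_ro). */
static inline uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
        return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
        uint32_t htable_size = 16384;  /* stand-in for nf_nat_htable_size */
        uint32_t hash = 0x9e3779b9u;   /* pretend jhash2() result */

        printf("bucket %u of %u\n", reciprocal_scale(hash, htable_size),
               htable_size);
        return 0;
}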
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 10d48234f5f4..5da8746f7b88 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -35,6 +35,7 @@
 #include <linux/netfilter_ipv6/ip6_tables.h>
 #include <linux/netfilter/xt_hashlimit.h>
 #include <linux/mutex.h>
+#include <linux/kernel.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
@@ -279,7 +280,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
                 size = cfg->size;
         } else {
                 size = (totalram_pages << PAGE_SHIFT) / 16384 /
-                       sizeof(struct list_head);
+                       sizeof(struct hlist_head);
                 if (totalram_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
                         size = 8192;
                 if (size < 16)
@@ -287,7 +288,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
         }
         /* FIXME: don't use vmalloc() here or anywhere else -HW */
         hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) +
-                        sizeof(struct list_head) * size);
+                        sizeof(struct hlist_head) * size);
         if (hinfo == NULL)
                 return -ENOMEM;
         *out_hinfo = hinfo;
@@ -527,12 +528,12 @@ static u64 user2rate(u64 user)
         }
 }
 
-static u64 user2rate_bytes(u64 user)
+static u64 user2rate_bytes(u32 user)
 {
         u64 r;
 
-        r = user ? 0xFFFFFFFFULL / user : 0xFFFFFFFFULL;
-        r = (r - 1) << 4;
+        r = user ? U32_MAX / user : U32_MAX;
+        r = (r - 1) << XT_HASHLIMIT_BYTE_SHIFT;
         return r;
 }
 
@@ -588,7 +589,8 @@ static void rateinfo_init(struct dsthash_ent *dh,
         dh->rateinfo.prev_window = 0;
         dh->rateinfo.current_rate = 0;
         if (hinfo->cfg.mode & XT_HASHLIMIT_BYTES) {
-                dh->rateinfo.rate = user2rate_bytes(hinfo->cfg.avg);
+                dh->rateinfo.rate =
+                        user2rate_bytes((u32)hinfo->cfg.avg);
                 if (hinfo->cfg.burst)
                         dh->rateinfo.burst =
                                 hinfo->cfg.burst * dh->rateinfo.rate;
@@ -870,7 +872,7 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
 
         /* Check for overflow. */
         if (revision >= 3 && cfg->mode & XT_HASHLIMIT_RATE_MATCH) {
-                if (cfg->avg == 0) {
+                if (cfg->avg == 0 || cfg->avg > U32_MAX) {
                         pr_info("hashlimit invalid rate\n");
                         return -ERANGE;
                 }
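The build fix replaces a u64-by-u64 division, which needs a libgcc helper on
32-bit architectures, with a purely 32-bit one: user2rate_bytes() now takes a
u32 and divides U32_MAX by it, and hashlimit_mt_check_common() rejects
cfg->avg values above U32_MAX. A standalone sketch of the fixed math, assuming
XT_HASHLIMIT_BYTE_SHIFT is 4 as in this tree:

#include <stdint.h>
#include <stdio.h>

#define XT_HASHLIMIT_BYTE_SHIFT 4

static uint64_t user2rate_bytes(uint32_t user)
{
        uint64_t r;

        r = user ? UINT32_MAX / user : UINT32_MAX;  /* 32-bit division */
        r = (r - 1) << XT_HASHLIMIT_BYTE_SHIFT;
        return r;
}

int main(void)
{
        uint64_t avg = 1000000;

        if (avg == 0 || avg > UINT32_MAX)  /* the new overflow check */
                return 1;
        printf("rate = %llu\n",
               (unsigned long long)user2rate_bytes((uint32_t)avg));
        return 0;
}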
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 5acee49db90b..327807731b44 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -691,6 +691,9 @@ static void deferred_put_nlk_sk(struct rcu_head *head)
         struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
         struct sock *sk = &nlk->sk;
 
+        kfree(nlk->groups);
+        nlk->groups = NULL;
+
         if (!refcount_dec_and_test(&sk->sk_refcnt))
                 return;
 
@@ -769,9 +772,6 @@ static int netlink_release(struct socket *sock)
                 netlink_table_ungrab();
         }
 
-        kfree(nlk->groups);
-        nlk->groups = NULL;
-
         local_bh_disable();
         sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
         local_bh_enable();
@@ -955,7 +955,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
         struct net *net = sock_net(sk);
         struct netlink_sock *nlk = nlk_sk(sk);
         struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
-        int err;
+        int err = 0;
         long unsigned int groups = nladdr->nl_groups;
         bool bound;
 
@@ -983,6 +983,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
                         return -EINVAL;
         }
 
+        netlink_lock_table();
         if (nlk->netlink_bind && groups) {
                 int group;
 
@@ -993,7 +994,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
                         if (!err)
                                 continue;
                         netlink_undo_bind(group, groups, sk);
-                        return err;
+                        goto unlock;
                 }
         }
 
@@ -1006,12 +1007,13 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
                         netlink_autobind(sock);
                 if (err) {
                         netlink_undo_bind(nlk->ngroups, groups, sk);
-                        return err;
+                        goto unlock;
                 }
         }
 
         if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
-                return 0;
+                goto unlock;
+        netlink_unlock_table();
 
         netlink_table_grab();
         netlink_update_subscriptions(sk, nlk->subscriptions +
@@ -1022,6 +1024,10 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
         netlink_table_ungrab();
 
         return 0;
+
+unlock:
+        netlink_unlock_table();
+        return err;
 }
 
 static int netlink_connect(struct socket *sock, struct sockaddr *addr,
@@ -1079,7 +1085,9 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr,
                 nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
         } else {
                 nladdr->nl_pid = nlk->portid;
+                netlink_lock_table();
                 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
+                netlink_unlock_table();
         }
         return 0;
 }
diff --git a/net/rds/send.c b/net/rds/send.c
index 058a40743041..b52cdc8ae428 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -428,14 +428,18 @@ over_batch:
          * some work and we will skip our goto
          */
         if (ret == 0) {
+                bool raced;
+
                 smp_mb();
+                raced = send_gen != READ_ONCE(cp->cp_send_gen);
+
                 if ((test_bit(0, &conn->c_map_queued) ||
-                     !list_empty(&cp->cp_send_queue)) &&
-                    send_gen == READ_ONCE(cp->cp_send_gen)) {
-                        rds_stats_inc(s_send_lock_queue_raced);
+                     !list_empty(&cp->cp_send_queue)) && !raced) {
                         if (batch_count < send_batch_count)
                                 goto restart;
                         queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+                } else if (raced) {
+                        rds_stats_inc(s_send_lock_queue_raced);
                 }
         }
 out:
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index ea6c65fd5fc5..c743f03cfebd 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -182,7 +182,7 @@ static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
         list_add_tail(&chain->list, &block->chain_list);
         chain->block = block;
         chain->index = chain_index;
-        chain->refcnt = 1;
+        chain->refcnt = 0;
         return chain;
 }
 
@@ -217,15 +217,15 @@ struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
         struct tcf_chain *chain;
 
         list_for_each_entry(chain, &block->chain_list, list) {
-                if (chain->index == chain_index) {
-                        chain->refcnt++;
-                        return chain;
-                }
+                if (chain->index == chain_index)
+                        goto incref;
         }
-        if (create)
-                return tcf_chain_create(block, chain_index);
-        else
-                return NULL;
+        chain = create ? tcf_chain_create(block, chain_index) : NULL;
+
+incref:
+        if (chain)
+                chain->refcnt++;
+        return chain;
 }
 EXPORT_SYMBOL(tcf_chain_get);
 
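The tcf_chain_get() rework makes lookup and creation share one
reference-taking path: tcf_chain_create() now starts refcnt at 0 and the
caller always bumps it at the incref label. A userspace sketch of the same
shape follows; the list and names are simplified stand-ins, not the kernel
API.

#include <stdio.h>
#include <stdlib.h>

struct chain {
        struct chain *next;
        unsigned int index;
        unsigned int refcnt;
};

static struct chain *chain_list;

static struct chain *chain_create(unsigned int index)
{
        struct chain *c = calloc(1, sizeof(*c));

        if (!c)
                return NULL;
        c->index = index;
        c->refcnt = 0;          /* caller takes the reference below */
        c->next = chain_list;
        chain_list = c;
        return c;
}

static struct chain *chain_get(unsigned int index, int create)
{
        struct chain *c;

        for (c = chain_list; c; c = c->next) {
                if (c->index == index)
                        goto incref;
        }
        c = create ? chain_create(index) : NULL;

incref:
        if (c)
                c->refcnt++;
        return c;
}

int main(void)
{
        struct chain *a = chain_get(1, 1);  /* created, refcnt becomes 1 */
        struct chain *b = chain_get(1, 0);  /* found, refcnt becomes 2 */

        printf("refcnt=%u same=%d\n", a->refcnt, a == b);
        return 0;
}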
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index f31b28f788c0..2dd6c68ae91e 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -80,7 +80,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 
         if (ret & __NET_XMIT_BYPASS)
                 qdisc_qstats_drop(sch);
-        kfree_skb(skb);
+        __qdisc_drop(skb, to_free);
         return ret;
     }
 #endif
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index cd661a7f81e6..6ddfd4991108 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1215,7 +1215,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
         if (cl == NULL) {
                 if (err & __NET_XMIT_BYPASS)
                         qdisc_qstats_drop(sch);
-                kfree_skb(skb);
+                __qdisc_drop(skb, to_free);
                 return err;
         }
         pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 0225d62a869f..a71be33f3afe 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -265,7 +265,8 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
                 sctp_ulpq_clear_pd(ulpq);
 
         if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
-                sp->data_ready_signalled = 1;
+                if (!sock_owned_by_user(sk))
+                        sp->data_ready_signalled = 1;
                 sk->sk_data_ready(sk);
         }
         return 1;
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index ac1d66d7e1fd..47ec121574ce 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -637,7 +637,7 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
                 break;
         case NETDEV_UNREGISTER:
         case NETDEV_CHANGENAME:
-                bearer_disable(dev_net(dev), b);
+                bearer_disable(net, b);
                 break;
         }
         return NOTIFY_OK;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 8ce85420ecb0..0df8023f480b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3791,8 +3791,8 @@ static void nl80211_check_ap_rate_selectors(struct cfg80211_ap_settings *params,
 static void nl80211_calculate_ap_params(struct cfg80211_ap_settings *params)
 {
         const struct cfg80211_beacon_data *bcn = &params->beacon;
-        size_t ies_len = bcn->beacon_ies_len;
-        const u8 *ies = bcn->beacon_ies;
+        size_t ies_len = bcn->tail_len;
+        const u8 *ies = bcn->tail;
         const u8 *rates;
         const u8 *cap;
 
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 5fae296a6a58..6e94f6934a0e 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -4,6 +4,7 @@
  * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2008-2011 Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
  * Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright 2017 Intel Deutschland GmbH
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -1483,7 +1484,9 @@ static void reg_process_ht_flags_channel(struct wiphy *wiphy,
 {
         struct ieee80211_supported_band *sband = wiphy->bands[channel->band];
         struct ieee80211_channel *channel_before = NULL, *channel_after = NULL;
+        const struct ieee80211_regdomain *regd;
         unsigned int i;
+        u32 flags;
 
         if (!is_ht40_allowed(channel)) {
                 channel->flags |= IEEE80211_CHAN_NO_HT40;
@@ -1503,17 +1506,30 @@ static void reg_process_ht_flags_channel(struct wiphy *wiphy,
                         channel_after = c;
         }
 
+        flags = 0;
+        regd = get_wiphy_regdom(wiphy);
+        if (regd) {
+                const struct ieee80211_reg_rule *reg_rule =
+                        freq_reg_info_regd(MHZ_TO_KHZ(channel->center_freq),
+                                           regd, MHZ_TO_KHZ(20));
+
+                if (!IS_ERR(reg_rule))
+                        flags = reg_rule->flags;
+        }
+
         /*
          * Please note that this assumes target bandwidth is 20 MHz,
          * if that ever changes we also need to change the below logic
          * to include that as well.
          */
-        if (!is_ht40_allowed(channel_before))
+        if (!is_ht40_allowed(channel_before) ||
+            flags & NL80211_RRF_NO_HT40MINUS)
                 channel->flags |= IEEE80211_CHAN_NO_HT40MINUS;
         else
                 channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
 
-        if (!is_ht40_allowed(channel_after))
+        if (!is_ht40_allowed(channel_after) ||
+            flags & NL80211_RRF_NO_HT40PLUS)
                 channel->flags |= IEEE80211_CHAN_NO_HT40PLUS;
         else
                 channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;