about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-07-24 20:31:47 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-07-24 20:31:47 -0400
commit0723090656a03940c5ea536342f109e34b8d1257 (patch)
treee44648aec5b63bbdd7ab71501b6207d6431dc709 /net
parentf89ed2f880ccb117246ba095e12087d9c3df89c5 (diff)
parent03bc7cab7d7218088412a75e141696a89059ab00 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller: 1) Handle stations tied to AP_VLANs properly during mac80211 hw reconfig. From Manikanta Pubbisetty. 2) Fix jump stack depth validation in nf_tables, from Taehee Yoo. 3) Fix quota handling in aRFS flow expiration of mlx5 driver, from Eran Ben Elisha. 4) Exit path handling fix in powerpc64 BPF JIT, from Daniel Borkmann. 5) Use ptr_ring_consume_bh() in page pool code, from Tariq Toukan. 6) Fix cached netdev name leak in nf_tables, from Florian Westphal. 7) Fix memory leaks on chain rename, also from Florian Westphal. 8) Several fixes to DCTCP congestion control ACK handling, from Yuchung Cheng. 9) Missing rcu_read_unlock() in CAIF protocol code, from Yue Haibing. 10) Fix link local address handling with VRF, from David Ahern. 11) Don't clobber 'err' on a successful call to __skb_linearize() in skb_segment(). From Eric Dumazet. 12) Fix vxlan fdb notification races, from Roopa Prabhu. 13) Hash UDP fragments consistently, from Paolo Abeni. 14) If TCP receives lots of out of order tiny packets, we do really silly stuff. Make the out-of-order queue ending more robust to this kind of behavior, from Eric Dumazet. 15) Don't leak netlink dump state in nf_tables, from Florian Westphal. * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (76 commits) net: axienet: Fix double deregister of mdio qmi_wwan: fix interface number for DW5821e production firmware ip: in cmsg IP(V6)_ORIGDSTADDR call pskb_may_pull bnx2x: Fix invalid memory access in rss hash config path. 
net/mlx4_core: Save the qpn from the input modifier in RST2INIT wrapper r8169: restore previous behavior to accept BIOS WoL settings cfg80211: never ignore user regulatory hint sock: fix sg page frag coalescing in sk_alloc_sg netfilter: nf_tables: move dumper state allocation into ->start tcp: add tcp_ooo_try_coalesce() helper tcp: call tcp_drop() from tcp_data_queue_ofo() tcp: detect malicious patterns in tcp_collapse_ofo_queue() tcp: avoid collapses in tcp_prune_queue() if possible tcp: free batches of packets in tcp_prune_ofo_queue() ip: hash fragments consistently ipv6: use fib6_info_hold_safe() when necessary can: xilinx_can: fix power management handling can: xilinx_can: fix incorrect clear of non-processed interrupts can: xilinx_can: fix RX overflow interrupt not being enabled can: xilinx_can: keep only 1-2 frames in TX FIFO to fix TX accounting ...
Diffstat (limited to 'net')
-rw-r--r--net/caif/caif_dev.c4
-rw-r--r--net/core/page_pool.c2
-rw-r--r--net/core/rtnetlink.c9
-rw-r--r--net/core/skbuff.c10
-rw-r--r--net/core/sock.c6
-rw-r--r--net/ipv4/igmp.c3
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/ip_sockglue.c7
-rw-r--r--net/ipv4/tcp_dctcp.c52
-rw-r--r--net/ipv4/tcp_input.c65
-rw-r--r--net/ipv4/tcp_output.c32
-rw-r--r--net/ipv6/addrconf.c3
-rw-r--r--net/ipv6/datagram.c7
-rw-r--r--net/ipv6/icmp.c5
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/mcast.c3
-rw-r--r--net/ipv6/route.c41
-rw-r--r--net/ipv6/tcp_ipv6.c6
-rw-r--r--net/mac80211/rx.c5
-rw-r--r--net/mac80211/util.c3
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c8
-rw-r--r--net/netfilter/nf_tables_api.c304
-rw-r--r--net/netfilter/nft_immediate.c3
-rw-r--r--net/netfilter/nft_lookup.c13
-rw-r--r--net/netfilter/nft_set_hash.c1
-rw-r--r--net/netfilter/nft_set_rbtree.c7
-rw-r--r--net/tls/tls_sw.c3
-rw-r--r--net/wireless/nl80211.c25
-rw-r--r--net/wireless/reg.c28
-rw-r--r--net/wireless/trace.h18
30 files changed, 390 insertions, 287 deletions
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index e0adcd123f48..711d7156efd8 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -131,8 +131,10 @@ static void caif_flow_cb(struct sk_buff *skb)
131 caifd = caif_get(skb->dev); 131 caifd = caif_get(skb->dev);
132 132
133 WARN_ON(caifd == NULL); 133 WARN_ON(caifd == NULL);
134 if (caifd == NULL) 134 if (!caifd) {
135 rcu_read_unlock();
135 return; 136 return;
137 }
136 138
137 caifd_hold(caifd); 139 caifd_hold(caifd);
138 rcu_read_unlock(); 140 rcu_read_unlock();
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 68bf07206744..43a932cb609b 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -269,7 +269,7 @@ static void __page_pool_empty_ring(struct page_pool *pool)
269 struct page *page; 269 struct page *page;
270 270
271 /* Empty recycle ring */ 271 /* Empty recycle ring */
272 while ((page = ptr_ring_consume(&pool->ring))) { 272 while ((page = ptr_ring_consume_bh(&pool->ring))) {
273 /* Verify the refcnt invariant of cached pages */ 273 /* Verify the refcnt invariant of cached pages */
274 if (!(page_ref_count(page) == 1)) 274 if (!(page_ref_count(page) == 1))
275 pr_crit("%s() page_pool refcnt %d violation\n", 275 pr_crit("%s() page_pool refcnt %d violation\n",
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 5ef61222fdef..e3f743c141b3 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2759,9 +2759,12 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
2759 return err; 2759 return err;
2760 } 2760 }
2761 2761
2762 dev->rtnl_link_state = RTNL_LINK_INITIALIZED; 2762 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
2763 2763 __dev_notify_flags(dev, old_flags, 0U);
2764 __dev_notify_flags(dev, old_flags, ~0U); 2764 } else {
2765 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
2766 __dev_notify_flags(dev, old_flags, ~0U);
2767 }
2765 return 0; 2768 return 0;
2766} 2769}
2767EXPORT_SYMBOL(rtnl_configure_link); 2770EXPORT_SYMBOL(rtnl_configure_link);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8e51f8555e11..fb35b62af272 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3720,6 +3720,7 @@ normal:
3720 net_warn_ratelimited( 3720 net_warn_ratelimited(
3721 "skb_segment: too many frags: %u %u\n", 3721 "skb_segment: too many frags: %u %u\n",
3722 pos, mss); 3722 pos, mss);
3723 err = -EINVAL;
3723 goto err; 3724 goto err;
3724 } 3725 }
3725 3726
@@ -3753,11 +3754,10 @@ skip_fraglist:
3753 3754
3754perform_csum_check: 3755perform_csum_check:
3755 if (!csum) { 3756 if (!csum) {
3756 if (skb_has_shared_frag(nskb)) { 3757 if (skb_has_shared_frag(nskb) &&
3757 err = __skb_linearize(nskb); 3758 __skb_linearize(nskb))
3758 if (err) 3759 goto err;
3759 goto err; 3760
3760 }
3761 if (!nskb->remcsum_offload) 3761 if (!nskb->remcsum_offload)
3762 nskb->ip_summed = CHECKSUM_NONE; 3762 nskb->ip_summed = CHECKSUM_NONE;
3763 SKB_GSO_CB(nskb)->csum = 3763 SKB_GSO_CB(nskb)->csum =
diff --git a/net/core/sock.c b/net/core/sock.c
index 9e8f65585b81..bc2d7a37297f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2277,9 +2277,9 @@ int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
2277 pfrag->offset += use; 2277 pfrag->offset += use;
2278 2278
2279 sge = sg + sg_curr - 1; 2279 sge = sg + sg_curr - 1;
2280 if (sg_curr > first_coalesce && sg_page(sg) == pfrag->page && 2280 if (sg_curr > first_coalesce && sg_page(sge) == pfrag->page &&
2281 sg->offset + sg->length == orig_offset) { 2281 sge->offset + sge->length == orig_offset) {
2282 sg->length += use; 2282 sge->length += use;
2283 } else { 2283 } else {
2284 sge = sg + sg_curr; 2284 sge = sg + sg_curr;
2285 sg_unmark_end(sge); 2285 sg_unmark_end(sge);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index b3c899a630a0..28fef7d15959 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1200,8 +1200,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
1200 spin_lock_bh(&im->lock); 1200 spin_lock_bh(&im->lock);
1201 if (pmc) { 1201 if (pmc) {
1202 im->interface = pmc->interface; 1202 im->interface = pmc->interface;
1203 im->sfmode = pmc->sfmode; 1203 if (im->sfmode == MCAST_INCLUDE) {
1204 if (pmc->sfmode == MCAST_INCLUDE) {
1205 im->tomb = pmc->tomb; 1204 im->tomb = pmc->tomb;
1206 im->sources = pmc->sources; 1205 im->sources = pmc->sources;
1207 for (psf = im->sources; psf; psf = psf->sf_next) 1206 for (psf = im->sources; psf; psf = psf->sf_next)
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index b3308e9d9762..0e3edd25f881 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -523,6 +523,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
523 to->dev = from->dev; 523 to->dev = from->dev;
524 to->mark = from->mark; 524 to->mark = from->mark;
525 525
526 skb_copy_hash(to, from);
527
526 /* Copy the flags to each fragment. */ 528 /* Copy the flags to each fragment. */
527 IPCB(to)->flags = IPCB(from)->flags; 529 IPCB(to)->flags = IPCB(from)->flags;
528 530
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 64c76dcf7386..c0fe5ad996f2 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -150,15 +150,18 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
150{ 150{
151 struct sockaddr_in sin; 151 struct sockaddr_in sin;
152 const struct iphdr *iph = ip_hdr(skb); 152 const struct iphdr *iph = ip_hdr(skb);
153 __be16 *ports = (__be16 *)skb_transport_header(skb); 153 __be16 *ports;
154 int end;
154 155
155 if (skb_transport_offset(skb) + 4 > (int)skb->len) 156 end = skb_transport_offset(skb) + 4;
157 if (end > 0 && !pskb_may_pull(skb, end))
156 return; 158 return;
157 159
158 /* All current transport protocols have the port numbers in the 160 /* All current transport protocols have the port numbers in the
159 * first four bytes of the transport header and this function is 161 * first four bytes of the transport header and this function is
160 * written with this assumption in mind. 162 * written with this assumption in mind.
161 */ 163 */
164 ports = (__be16 *)skb_transport_header(skb);
162 165
163 sin.sin_family = AF_INET; 166 sin.sin_family = AF_INET;
164 sin.sin_addr.s_addr = iph->daddr; 167 sin.sin_addr.s_addr = iph->daddr;
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 5869f89ca656..8b637f9f23a2 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -129,24 +129,14 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
129 struct dctcp *ca = inet_csk_ca(sk); 129 struct dctcp *ca = inet_csk_ca(sk);
130 struct tcp_sock *tp = tcp_sk(sk); 130 struct tcp_sock *tp = tcp_sk(sk);
131 131
132 /* State has changed from CE=0 to CE=1 and delayed 132 if (!ca->ce_state) {
133 * ACK has not sent yet. 133 /* State has changed from CE=0 to CE=1, force an immediate
134 */ 134 * ACK to reflect the new CE state. If an ACK was delayed,
135 if (!ca->ce_state && 135 * send that first to reflect the prior CE state.
136 inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) { 136 */
137 u32 tmp_rcv_nxt; 137 if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
138 138 __tcp_send_ack(sk, ca->prior_rcv_nxt);
139 /* Save current rcv_nxt. */ 139 tcp_enter_quickack_mode(sk, 1);
140 tmp_rcv_nxt = tp->rcv_nxt;
141
142 /* Generate previous ack with CE=0. */
143 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
144 tp->rcv_nxt = ca->prior_rcv_nxt;
145
146 tcp_send_ack(sk);
147
148 /* Recover current rcv_nxt. */
149 tp->rcv_nxt = tmp_rcv_nxt;
150 } 140 }
151 141
152 ca->prior_rcv_nxt = tp->rcv_nxt; 142 ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -160,24 +150,14 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
160 struct dctcp *ca = inet_csk_ca(sk); 150 struct dctcp *ca = inet_csk_ca(sk);
161 struct tcp_sock *tp = tcp_sk(sk); 151 struct tcp_sock *tp = tcp_sk(sk);
162 152
163 /* State has changed from CE=1 to CE=0 and delayed 153 if (ca->ce_state) {
164 * ACK has not sent yet. 154 /* State has changed from CE=1 to CE=0, force an immediate
165 */ 155 * ACK to reflect the new CE state. If an ACK was delayed,
166 if (ca->ce_state && 156 * send that first to reflect the prior CE state.
167 inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) { 157 */
168 u32 tmp_rcv_nxt; 158 if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
169 159 __tcp_send_ack(sk, ca->prior_rcv_nxt);
170 /* Save current rcv_nxt. */ 160 tcp_enter_quickack_mode(sk, 1);
171 tmp_rcv_nxt = tp->rcv_nxt;
172
173 /* Generate previous ack with CE=1. */
174 tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
175 tp->rcv_nxt = ca->prior_rcv_nxt;
176
177 tcp_send_ack(sk);
178
179 /* Recover current rcv_nxt. */
180 tp->rcv_nxt = tmp_rcv_nxt;
181 } 161 }
182 162
183 ca->prior_rcv_nxt = tp->rcv_nxt; 163 ca->prior_rcv_nxt = tp->rcv_nxt;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8e5522c6833a..3bcd30a2ba06 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -215,7 +215,7 @@ static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
215 icsk->icsk_ack.quick = quickacks; 215 icsk->icsk_ack.quick = quickacks;
216} 216}
217 217
218static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks) 218void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
219{ 219{
220 struct inet_connection_sock *icsk = inet_csk(sk); 220 struct inet_connection_sock *icsk = inet_csk(sk);
221 221
@@ -223,6 +223,7 @@ static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
223 icsk->icsk_ack.pingpong = 0; 223 icsk->icsk_ack.pingpong = 0;
224 icsk->icsk_ack.ato = TCP_ATO_MIN; 224 icsk->icsk_ack.ato = TCP_ATO_MIN;
225} 225}
226EXPORT_SYMBOL(tcp_enter_quickack_mode);
226 227
227/* Send ACKs quickly, if "quick" count is not exhausted 228/* Send ACKs quickly, if "quick" count is not exhausted
228 * and the session is not interactive. 229 * and the session is not interactive.
@@ -4357,6 +4358,23 @@ static bool tcp_try_coalesce(struct sock *sk,
4357 return true; 4358 return true;
4358} 4359}
4359 4360
4361static bool tcp_ooo_try_coalesce(struct sock *sk,
4362 struct sk_buff *to,
4363 struct sk_buff *from,
4364 bool *fragstolen)
4365{
4366 bool res = tcp_try_coalesce(sk, to, from, fragstolen);
4367
4368 /* In case tcp_drop() is called later, update to->gso_segs */
4369 if (res) {
4370 u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
4371 max_t(u16, 1, skb_shinfo(from)->gso_segs);
4372
4373 skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
4374 }
4375 return res;
4376}
4377
4360static void tcp_drop(struct sock *sk, struct sk_buff *skb) 4378static void tcp_drop(struct sock *sk, struct sk_buff *skb)
4361{ 4379{
4362 sk_drops_add(sk, skb); 4380 sk_drops_add(sk, skb);
@@ -4480,8 +4498,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
4480 /* In the typical case, we are adding an skb to the end of the list. 4498 /* In the typical case, we are adding an skb to the end of the list.
4481 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup. 4499 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
4482 */ 4500 */
4483 if (tcp_try_coalesce(sk, tp->ooo_last_skb, 4501 if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
4484 skb, &fragstolen)) { 4502 skb, &fragstolen)) {
4485coalesce_done: 4503coalesce_done:
4486 tcp_grow_window(sk, skb); 4504 tcp_grow_window(sk, skb);
4487 kfree_skb_partial(skb, fragstolen); 4505 kfree_skb_partial(skb, fragstolen);
@@ -4509,7 +4527,7 @@ coalesce_done:
4509 /* All the bits are present. Drop. */ 4527 /* All the bits are present. Drop. */
4510 NET_INC_STATS(sock_net(sk), 4528 NET_INC_STATS(sock_net(sk),
4511 LINUX_MIB_TCPOFOMERGE); 4529 LINUX_MIB_TCPOFOMERGE);
4512 __kfree_skb(skb); 4530 tcp_drop(sk, skb);
4513 skb = NULL; 4531 skb = NULL;
4514 tcp_dsack_set(sk, seq, end_seq); 4532 tcp_dsack_set(sk, seq, end_seq);
4515 goto add_sack; 4533 goto add_sack;
@@ -4528,11 +4546,11 @@ coalesce_done:
4528 TCP_SKB_CB(skb1)->end_seq); 4546 TCP_SKB_CB(skb1)->end_seq);
4529 NET_INC_STATS(sock_net(sk), 4547 NET_INC_STATS(sock_net(sk),
4530 LINUX_MIB_TCPOFOMERGE); 4548 LINUX_MIB_TCPOFOMERGE);
4531 __kfree_skb(skb1); 4549 tcp_drop(sk, skb1);
4532 goto merge_right; 4550 goto merge_right;
4533 } 4551 }
4534 } else if (tcp_try_coalesce(sk, skb1, 4552 } else if (tcp_ooo_try_coalesce(sk, skb1,
4535 skb, &fragstolen)) { 4553 skb, &fragstolen)) {
4536 goto coalesce_done; 4554 goto coalesce_done;
4537 } 4555 }
4538 p = &parent->rb_right; 4556 p = &parent->rb_right;
@@ -4901,6 +4919,7 @@ end:
4901static void tcp_collapse_ofo_queue(struct sock *sk) 4919static void tcp_collapse_ofo_queue(struct sock *sk)
4902{ 4920{
4903 struct tcp_sock *tp = tcp_sk(sk); 4921 struct tcp_sock *tp = tcp_sk(sk);
4922 u32 range_truesize, sum_tiny = 0;
4904 struct sk_buff *skb, *head; 4923 struct sk_buff *skb, *head;
4905 u32 start, end; 4924 u32 start, end;
4906 4925
@@ -4912,6 +4931,7 @@ new_range:
4912 } 4931 }
4913 start = TCP_SKB_CB(skb)->seq; 4932 start = TCP_SKB_CB(skb)->seq;
4914 end = TCP_SKB_CB(skb)->end_seq; 4933 end = TCP_SKB_CB(skb)->end_seq;
4934 range_truesize = skb->truesize;
4915 4935
4916 for (head = skb;;) { 4936 for (head = skb;;) {
4917 skb = skb_rb_next(skb); 4937 skb = skb_rb_next(skb);
@@ -4922,11 +4942,20 @@ new_range:
4922 if (!skb || 4942 if (!skb ||
4923 after(TCP_SKB_CB(skb)->seq, end) || 4943 after(TCP_SKB_CB(skb)->seq, end) ||
4924 before(TCP_SKB_CB(skb)->end_seq, start)) { 4944 before(TCP_SKB_CB(skb)->end_seq, start)) {
4925 tcp_collapse(sk, NULL, &tp->out_of_order_queue, 4945 /* Do not attempt collapsing tiny skbs */
4926 head, skb, start, end); 4946 if (range_truesize != head->truesize ||
4947 end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
4948 tcp_collapse(sk, NULL, &tp->out_of_order_queue,
4949 head, skb, start, end);
4950 } else {
4951 sum_tiny += range_truesize;
4952 if (sum_tiny > sk->sk_rcvbuf >> 3)
4953 return;
4954 }
4927 goto new_range; 4955 goto new_range;
4928 } 4956 }
4929 4957
4958 range_truesize += skb->truesize;
4930 if (unlikely(before(TCP_SKB_CB(skb)->seq, start))) 4959 if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
4931 start = TCP_SKB_CB(skb)->seq; 4960 start = TCP_SKB_CB(skb)->seq;
4932 if (after(TCP_SKB_CB(skb)->end_seq, end)) 4961 if (after(TCP_SKB_CB(skb)->end_seq, end))
@@ -4941,6 +4970,7 @@ new_range:
4941 * 2) not add too big latencies if thousands of packets sit there. 4970 * 2) not add too big latencies if thousands of packets sit there.
4942 * (But if application shrinks SO_RCVBUF, we could still end up 4971 * (But if application shrinks SO_RCVBUF, we could still end up
4943 * freeing whole queue here) 4972 * freeing whole queue here)
4973 * 3) Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks.
4944 * 4974 *
4945 * Return true if queue has shrunk. 4975 * Return true if queue has shrunk.
4946 */ 4976 */
@@ -4948,20 +4978,26 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
4948{ 4978{
4949 struct tcp_sock *tp = tcp_sk(sk); 4979 struct tcp_sock *tp = tcp_sk(sk);
4950 struct rb_node *node, *prev; 4980 struct rb_node *node, *prev;
4981 int goal;
4951 4982
4952 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) 4983 if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
4953 return false; 4984 return false;
4954 4985
4955 NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED); 4986 NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
4987 goal = sk->sk_rcvbuf >> 3;
4956 node = &tp->ooo_last_skb->rbnode; 4988 node = &tp->ooo_last_skb->rbnode;
4957 do { 4989 do {
4958 prev = rb_prev(node); 4990 prev = rb_prev(node);
4959 rb_erase(node, &tp->out_of_order_queue); 4991 rb_erase(node, &tp->out_of_order_queue);
4992 goal -= rb_to_skb(node)->truesize;
4960 tcp_drop(sk, rb_to_skb(node)); 4993 tcp_drop(sk, rb_to_skb(node));
4961 sk_mem_reclaim(sk); 4994 if (!prev || goal <= 0) {
4962 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && 4995 sk_mem_reclaim(sk);
4963 !tcp_under_memory_pressure(sk)) 4996 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
4964 break; 4997 !tcp_under_memory_pressure(sk))
4998 break;
4999 goal = sk->sk_rcvbuf >> 3;
5000 }
4965 node = prev; 5001 node = prev;
4966 } while (node); 5002 } while (node);
4967 tp->ooo_last_skb = rb_to_skb(prev); 5003 tp->ooo_last_skb = rb_to_skb(prev);
@@ -4996,6 +5032,9 @@ static int tcp_prune_queue(struct sock *sk)
4996 else if (tcp_under_memory_pressure(sk)) 5032 else if (tcp_under_memory_pressure(sk))
4997 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); 5033 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
4998 5034
5035 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
5036 return 0;
5037
4999 tcp_collapse_ofo_queue(sk); 5038 tcp_collapse_ofo_queue(sk);
5000 if (!skb_queue_empty(&sk->sk_receive_queue)) 5039 if (!skb_queue_empty(&sk->sk_receive_queue))
5001 tcp_collapse(sk, &sk->sk_receive_queue, NULL, 5040 tcp_collapse(sk, &sk->sk_receive_queue, NULL,
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 00e5a300ddb9..c4172c1fb198 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -160,7 +160,8 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
160} 160}
161 161
162/* Account for an ACK we sent. */ 162/* Account for an ACK we sent. */
163static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) 163static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
164 u32 rcv_nxt)
164{ 165{
165 struct tcp_sock *tp = tcp_sk(sk); 166 struct tcp_sock *tp = tcp_sk(sk);
166 167
@@ -171,6 +172,9 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
171 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) 172 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
172 __sock_put(sk); 173 __sock_put(sk);
173 } 174 }
175
176 if (unlikely(rcv_nxt != tp->rcv_nxt))
177 return; /* Special ACK sent by DCTCP to reflect ECN */
174 tcp_dec_quickack_mode(sk, pkts); 178 tcp_dec_quickack_mode(sk, pkts);
175 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); 179 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
176} 180}
@@ -1023,8 +1027,8 @@ static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb)
1023 * We are working here with either a clone of the original 1027 * We are working here with either a clone of the original
1024 * SKB, or a fresh unique copy made by the retransmit engine. 1028 * SKB, or a fresh unique copy made by the retransmit engine.
1025 */ 1029 */
1026static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, 1030static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
1027 gfp_t gfp_mask) 1031 int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
1028{ 1032{
1029 const struct inet_connection_sock *icsk = inet_csk(sk); 1033 const struct inet_connection_sock *icsk = inet_csk(sk);
1030 struct inet_sock *inet; 1034 struct inet_sock *inet;
@@ -1100,7 +1104,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
1100 th->source = inet->inet_sport; 1104 th->source = inet->inet_sport;
1101 th->dest = inet->inet_dport; 1105 th->dest = inet->inet_dport;
1102 th->seq = htonl(tcb->seq); 1106 th->seq = htonl(tcb->seq);
1103 th->ack_seq = htonl(tp->rcv_nxt); 1107 th->ack_seq = htonl(rcv_nxt);
1104 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | 1108 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
1105 tcb->tcp_flags); 1109 tcb->tcp_flags);
1106 1110
@@ -1141,7 +1145,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
1141 icsk->icsk_af_ops->send_check(sk, skb); 1145 icsk->icsk_af_ops->send_check(sk, skb);
1142 1146
1143 if (likely(tcb->tcp_flags & TCPHDR_ACK)) 1147 if (likely(tcb->tcp_flags & TCPHDR_ACK))
1144 tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); 1148 tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
1145 1149
1146 if (skb->len != tcp_header_size) { 1150 if (skb->len != tcp_header_size) {
1147 tcp_event_data_sent(tp, sk); 1151 tcp_event_data_sent(tp, sk);
@@ -1178,6 +1182,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
1178 return err; 1182 return err;
1179} 1183}
1180 1184
1185static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
1186 gfp_t gfp_mask)
1187{
1188 return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
1189 tcp_sk(sk)->rcv_nxt);
1190}
1191
1181/* This routine just queues the buffer for sending. 1192/* This routine just queues the buffer for sending.
1182 * 1193 *
1183 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, 1194 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
@@ -3571,7 +3582,7 @@ void tcp_send_delayed_ack(struct sock *sk)
3571} 3582}
3572 3583
3573/* This routine sends an ack and also updates the window. */ 3584/* This routine sends an ack and also updates the window. */
3574void tcp_send_ack(struct sock *sk) 3585void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
3575{ 3586{
3576 struct sk_buff *buff; 3587 struct sk_buff *buff;
3577 3588
@@ -3604,9 +3615,14 @@ void tcp_send_ack(struct sock *sk)
3604 skb_set_tcp_pure_ack(buff); 3615 skb_set_tcp_pure_ack(buff);
3605 3616
3606 /* Send it off, this clears delayed acks for us. */ 3617 /* Send it off, this clears delayed acks for us. */
3607 tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0); 3618 __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
3619}
3620EXPORT_SYMBOL_GPL(__tcp_send_ack);
3621
3622void tcp_send_ack(struct sock *sk)
3623{
3624 __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
3608} 3625}
3609EXPORT_SYMBOL_GPL(tcp_send_ack);
3610 3626
3611/* This routine sends a packet with an out of date sequence 3627/* This routine sends a packet with an out of date sequence
3612 * number. It assumes the other end will try to ack it. 3628 * number. It assumes the other end will try to ack it.
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 91580c62bb86..f66a1cae3366 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2374,7 +2374,8 @@ static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
2374 continue; 2374 continue;
2375 if ((rt->fib6_flags & noflags) != 0) 2375 if ((rt->fib6_flags & noflags) != 0)
2376 continue; 2376 continue;
2377 fib6_info_hold(rt); 2377 if (!fib6_info_hold_safe(rt))
2378 continue;
2378 break; 2379 break;
2379 } 2380 }
2380out: 2381out:
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 2ee08b6a86a4..1a1f876f8e28 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -700,13 +700,16 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
700 } 700 }
701 if (np->rxopt.bits.rxorigdstaddr) { 701 if (np->rxopt.bits.rxorigdstaddr) {
702 struct sockaddr_in6 sin6; 702 struct sockaddr_in6 sin6;
703 __be16 *ports = (__be16 *) skb_transport_header(skb); 703 __be16 *ports;
704 int end;
704 705
705 if (skb_transport_offset(skb) + 4 <= (int)skb->len) { 706 end = skb_transport_offset(skb) + 4;
707 if (end <= 0 || pskb_may_pull(skb, end)) {
706 /* All current transport protocols have the port numbers in the 708 /* All current transport protocols have the port numbers in the
707 * first four bytes of the transport header and this function is 709 * first four bytes of the transport header and this function is
708 * written with this assumption in mind. 710 * written with this assumption in mind.
709 */ 711 */
712 ports = (__be16 *)skb_transport_header(skb);
710 713
711 sin6.sin6_family = AF_INET6; 714 sin6.sin6_family = AF_INET6;
712 sin6.sin6_addr = ipv6_hdr(skb)->daddr; 715 sin6.sin6_addr = ipv6_hdr(skb)->daddr;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index be491bf6ab6e..ef2505aefc15 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -402,9 +402,10 @@ static int icmp6_iif(const struct sk_buff *skb)
402 402
403 /* for local traffic to local address, skb dev is the loopback 403 /* for local traffic to local address, skb dev is the loopback
404 * device. Check if there is a dst attached to the skb and if so 404 * device. Check if there is a dst attached to the skb and if so
405 * get the real device index. 405 * get the real device index. Same is needed for replies to a link
406 * local address on a device enslaved to an L3 master device
406 */ 407 */
407 if (unlikely(iif == LOOPBACK_IFINDEX)) { 408 if (unlikely(iif == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) {
408 const struct rt6_info *rt6 = skb_rt6_info(skb); 409 const struct rt6_info *rt6 = skb_rt6_info(skb);
409 410
410 if (rt6) 411 if (rt6)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index a14fb4fcdf18..3168847c30d1 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -570,6 +570,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
570 to->dev = from->dev; 570 to->dev = from->dev;
571 to->mark = from->mark; 571 to->mark = from->mark;
572 572
573 skb_copy_hash(to, from);
574
573#ifdef CONFIG_NET_SCHED 575#ifdef CONFIG_NET_SCHED
574 to->tc_index = from->tc_index; 576 to->tc_index = from->tc_index;
575#endif 577#endif
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 2699be7202be..f60f310785fd 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -790,8 +790,7 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
790 spin_lock_bh(&im->mca_lock); 790 spin_lock_bh(&im->mca_lock);
791 if (pmc) { 791 if (pmc) {
792 im->idev = pmc->idev; 792 im->idev = pmc->idev;
793 im->mca_sfmode = pmc->mca_sfmode; 793 if (im->mca_sfmode == MCAST_INCLUDE) {
794 if (pmc->mca_sfmode == MCAST_INCLUDE) {
795 im->mca_tomb = pmc->mca_tomb; 794 im->mca_tomb = pmc->mca_tomb;
796 im->mca_sources = pmc->mca_sources; 795 im->mca_sources = pmc->mca_sources;
797 for (psf = im->mca_sources; psf; psf = psf->sf_next) 796 for (psf = im->mca_sources; psf; psf = psf->sf_next)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 2ce0bd17de4f..ec18b3ce8b6d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -972,10 +972,10 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
972 rt->dst.lastuse = jiffies; 972 rt->dst.lastuse = jiffies;
973} 973}
974 974
975/* Caller must already hold reference to @from */
975static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from) 976static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
976{ 977{
977 rt->rt6i_flags &= ~RTF_EXPIRES; 978 rt->rt6i_flags &= ~RTF_EXPIRES;
978 fib6_info_hold(from);
979 rcu_assign_pointer(rt->from, from); 979 rcu_assign_pointer(rt->from, from);
980 dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true); 980 dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
981 if (from->fib6_metrics != &dst_default_metrics) { 981 if (from->fib6_metrics != &dst_default_metrics) {
@@ -984,6 +984,7 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
984 } 984 }
985} 985}
986 986
987/* Caller must already hold reference to @ort */
987static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort) 988static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
988{ 989{
989 struct net_device *dev = fib6_info_nh_dev(ort); 990 struct net_device *dev = fib6_info_nh_dev(ort);
@@ -1044,9 +1045,14 @@ static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt)
1044 struct net_device *dev = rt->fib6_nh.nh_dev; 1045 struct net_device *dev = rt->fib6_nh.nh_dev;
1045 struct rt6_info *nrt; 1046 struct rt6_info *nrt;
1046 1047
1048 if (!fib6_info_hold_safe(rt))
1049 return NULL;
1050
1047 nrt = ip6_dst_alloc(dev_net(dev), dev, flags); 1051 nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
1048 if (nrt) 1052 if (nrt)
1049 ip6_rt_copy_init(nrt, rt); 1053 ip6_rt_copy_init(nrt, rt);
1054 else
1055 fib6_info_release(rt);
1050 1056
1051 return nrt; 1057 return nrt;
1052} 1058}
@@ -1178,10 +1184,15 @@ static struct rt6_info *ip6_rt_cache_alloc(struct fib6_info *ort,
1178 * Clone the route. 1184 * Clone the route.
1179 */ 1185 */
1180 1186
1187 if (!fib6_info_hold_safe(ort))
1188 return NULL;
1189
1181 dev = ip6_rt_get_dev_rcu(ort); 1190 dev = ip6_rt_get_dev_rcu(ort);
1182 rt = ip6_dst_alloc(dev_net(dev), dev, 0); 1191 rt = ip6_dst_alloc(dev_net(dev), dev, 0);
1183 if (!rt) 1192 if (!rt) {
1193 fib6_info_release(ort);
1184 return NULL; 1194 return NULL;
1195 }
1185 1196
1186 ip6_rt_copy_init(rt, ort); 1197 ip6_rt_copy_init(rt, ort);
1187 rt->rt6i_flags |= RTF_CACHE; 1198 rt->rt6i_flags |= RTF_CACHE;
@@ -1210,12 +1221,17 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct fib6_info *rt)
1210 struct net_device *dev; 1221 struct net_device *dev;
1211 struct rt6_info *pcpu_rt; 1222 struct rt6_info *pcpu_rt;
1212 1223
1224 if (!fib6_info_hold_safe(rt))
1225 return NULL;
1226
1213 rcu_read_lock(); 1227 rcu_read_lock();
1214 dev = ip6_rt_get_dev_rcu(rt); 1228 dev = ip6_rt_get_dev_rcu(rt);
1215 pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags); 1229 pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags);
1216 rcu_read_unlock(); 1230 rcu_read_unlock();
1217 if (!pcpu_rt) 1231 if (!pcpu_rt) {
1232 fib6_info_release(rt);
1218 return NULL; 1233 return NULL;
1234 }
1219 ip6_rt_copy_init(pcpu_rt, rt); 1235 ip6_rt_copy_init(pcpu_rt, rt);
1220 pcpu_rt->rt6i_flags |= RTF_PCPU; 1236 pcpu_rt->rt6i_flags |= RTF_PCPU;
1221 return pcpu_rt; 1237 return pcpu_rt;
@@ -2486,7 +2502,7 @@ restart:
2486 2502
2487out: 2503out:
2488 if (ret) 2504 if (ret)
2489 dst_hold(&ret->dst); 2505 ip6_hold_safe(net, &ret, true);
2490 else 2506 else
2491 ret = ip6_create_rt_rcu(rt); 2507 ret = ip6_create_rt_rcu(rt);
2492 2508
@@ -3303,7 +3319,8 @@ static int ip6_route_del(struct fib6_config *cfg,
3303 continue; 3319 continue;
3304 if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol) 3320 if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol)
3305 continue; 3321 continue;
3306 fib6_info_hold(rt); 3322 if (!fib6_info_hold_safe(rt))
3323 continue;
3307 rcu_read_unlock(); 3324 rcu_read_unlock();
3308 3325
3309 /* if gateway was specified only delete the one hop */ 3326 /* if gateway was specified only delete the one hop */
@@ -3409,6 +3426,9 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
3409 3426
3410 rcu_read_lock(); 3427 rcu_read_lock();
3411 from = rcu_dereference(rt->from); 3428 from = rcu_dereference(rt->from);
3429 /* This fib6_info_hold() is safe here because we hold reference to rt
3430 * and rt already holds reference to fib6_info.
3431 */
3412 fib6_info_hold(from); 3432 fib6_info_hold(from);
3413 rcu_read_unlock(); 3433 rcu_read_unlock();
3414 3434
@@ -3470,7 +3490,8 @@ static struct fib6_info *rt6_get_route_info(struct net *net,
3470 continue; 3490 continue;
3471 if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr)) 3491 if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr))
3472 continue; 3492 continue;
3473 fib6_info_hold(rt); 3493 if (!fib6_info_hold_safe(rt))
3494 continue;
3474 break; 3495 break;
3475 } 3496 }
3476out: 3497out:
@@ -3530,8 +3551,8 @@ struct fib6_info *rt6_get_dflt_router(struct net *net,
3530 ipv6_addr_equal(&rt->fib6_nh.nh_gw, addr)) 3551 ipv6_addr_equal(&rt->fib6_nh.nh_gw, addr))
3531 break; 3552 break;
3532 } 3553 }
3533 if (rt) 3554 if (rt && !fib6_info_hold_safe(rt))
3534 fib6_info_hold(rt); 3555 rt = NULL;
3535 rcu_read_unlock(); 3556 rcu_read_unlock();
3536 return rt; 3557 return rt;
3537} 3558}
@@ -3579,8 +3600,8 @@ restart:
3579 struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL; 3600 struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
3580 3601
3581 if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) && 3602 if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
3582 (!idev || idev->cnf.accept_ra != 2)) { 3603 (!idev || idev->cnf.accept_ra != 2) &&
3583 fib6_info_hold(rt); 3604 fib6_info_hold_safe(rt)) {
3584 rcu_read_unlock(); 3605 rcu_read_unlock();
3585 ip6_del_rt(net, rt); 3606 ip6_del_rt(net, rt);
3586 goto restart; 3607 goto restart;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7efa9fd7e109..03e6b7a2bc53 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -938,7 +938,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
938 &tcp_hashinfo, NULL, 0, 938 &tcp_hashinfo, NULL, 0,
939 &ipv6h->saddr, 939 &ipv6h->saddr,
940 th->source, &ipv6h->daddr, 940 th->source, &ipv6h->daddr,
941 ntohs(th->source), tcp_v6_iif(skb), 941 ntohs(th->source),
942 tcp_v6_iif_l3_slave(skb),
942 tcp_v6_sdif(skb)); 943 tcp_v6_sdif(skb));
943 if (!sk1) 944 if (!sk1)
944 goto out; 945 goto out;
@@ -1609,7 +1610,8 @@ do_time_wait:
1609 skb, __tcp_hdrlen(th), 1610 skb, __tcp_hdrlen(th),
1610 &ipv6_hdr(skb)->saddr, th->source, 1611 &ipv6_hdr(skb)->saddr, th->source,
1611 &ipv6_hdr(skb)->daddr, 1612 &ipv6_hdr(skb)->daddr,
1612 ntohs(th->dest), tcp_v6_iif(skb), 1613 ntohs(th->dest),
1614 tcp_v6_iif_l3_slave(skb),
1613 sdif); 1615 sdif);
1614 if (sk2) { 1616 if (sk2) {
1615 struct inet_timewait_sock *tw = inet_twsk(sk); 1617 struct inet_timewait_sock *tw = inet_twsk(sk);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 0a38cc1cbebc..932985ca4e66 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2254,11 +2254,8 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
2254 sdata->control_port_over_nl80211)) { 2254 sdata->control_port_over_nl80211)) {
2255 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2255 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2256 bool noencrypt = status->flag & RX_FLAG_DECRYPTED; 2256 bool noencrypt = status->flag & RX_FLAG_DECRYPTED;
2257 struct ethhdr *ehdr = eth_hdr(skb);
2258 2257
2259 cfg80211_rx_control_port(dev, skb->data, skb->len, 2258 cfg80211_rx_control_port(dev, skb, noencrypt);
2260 ehdr->h_source,
2261 be16_to_cpu(skb->protocol), noencrypt);
2262 dev_kfree_skb(skb); 2259 dev_kfree_skb(skb);
2263 } else { 2260 } else {
2264 /* deliver to local stack */ 2261 /* deliver to local stack */
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 5e2e511c4a6f..d02fbfec3783 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2111,7 +2111,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
2111 if (!sta->uploaded) 2111 if (!sta->uploaded)
2112 continue; 2112 continue;
2113 2113
2114 if (sta->sdata->vif.type != NL80211_IFTYPE_AP) 2114 if (sta->sdata->vif.type != NL80211_IFTYPE_AP &&
2115 sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
2115 continue; 2116 continue;
2116 2117
2117 for (state = IEEE80211_STA_NOTEXIST; 2118 for (state = IEEE80211_STA_NOTEXIST;
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index abe647d5b8c6..9ce6336d1e55 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -243,14 +243,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
243 * We currently ignore Sync packets 243 * We currently ignore Sync packets
244 * 244 *
245 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ 245 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
246 sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, 246 sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
247 }, 247 },
248 [DCCP_PKT_SYNCACK] = { 248 [DCCP_PKT_SYNCACK] = {
249 /* 249 /*
250 * We currently ignore SyncAck packets 250 * We currently ignore SyncAck packets
251 * 251 *
252 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ 252 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
253 sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, 253 sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
254 }, 254 },
255 }, 255 },
256 [CT_DCCP_ROLE_SERVER] = { 256 [CT_DCCP_ROLE_SERVER] = {
@@ -371,14 +371,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
371 * We currently ignore Sync packets 371 * We currently ignore Sync packets
372 * 372 *
373 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ 373 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
374 sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, 374 sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
375 }, 375 },
376 [DCCP_PKT_SYNCACK] = { 376 [DCCP_PKT_SYNCACK] = {
377 /* 377 /*
378 * We currently ignore SyncAck packets 378 * We currently ignore SyncAck packets
379 * 379 *
380 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ 380 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
381 sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, 381 sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
382 }, 382 },
383 }, 383 },
384}; 384};
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 896d4a36081d..f5745e4c6513 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -75,6 +75,7 @@ static void nft_ctx_init(struct nft_ctx *ctx,
75{ 75{
76 ctx->net = net; 76 ctx->net = net;
77 ctx->family = family; 77 ctx->family = family;
78 ctx->level = 0;
78 ctx->table = table; 79 ctx->table = table;
79 ctx->chain = chain; 80 ctx->chain = chain;
80 ctx->nla = nla; 81 ctx->nla = nla;
@@ -1597,7 +1598,6 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
1597 struct nft_base_chain *basechain; 1598 struct nft_base_chain *basechain;
1598 struct nft_stats *stats = NULL; 1599 struct nft_stats *stats = NULL;
1599 struct nft_chain_hook hook; 1600 struct nft_chain_hook hook;
1600 const struct nlattr *name;
1601 struct nf_hook_ops *ops; 1601 struct nf_hook_ops *ops;
1602 struct nft_trans *trans; 1602 struct nft_trans *trans;
1603 int err; 1603 int err;
@@ -1645,12 +1645,11 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
1645 return PTR_ERR(stats); 1645 return PTR_ERR(stats);
1646 } 1646 }
1647 1647
1648 err = -ENOMEM;
1648 trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN, 1649 trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN,
1649 sizeof(struct nft_trans_chain)); 1650 sizeof(struct nft_trans_chain));
1650 if (trans == NULL) { 1651 if (trans == NULL)
1651 free_percpu(stats); 1652 goto err;
1652 return -ENOMEM;
1653 }
1654 1653
1655 nft_trans_chain_stats(trans) = stats; 1654 nft_trans_chain_stats(trans) = stats;
1656 nft_trans_chain_update(trans) = true; 1655 nft_trans_chain_update(trans) = true;
@@ -1660,19 +1659,37 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
1660 else 1659 else
1661 nft_trans_chain_policy(trans) = -1; 1660 nft_trans_chain_policy(trans) = -1;
1662 1661
1663 name = nla[NFTA_CHAIN_NAME]; 1662 if (nla[NFTA_CHAIN_HANDLE] &&
1664 if (nla[NFTA_CHAIN_HANDLE] && name) { 1663 nla[NFTA_CHAIN_NAME]) {
1665 nft_trans_chain_name(trans) = 1664 struct nft_trans *tmp;
1666 nla_strdup(name, GFP_KERNEL); 1665 char *name;
1667 if (!nft_trans_chain_name(trans)) { 1666
1668 kfree(trans); 1667 err = -ENOMEM;
1669 free_percpu(stats); 1668 name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL);
1670 return -ENOMEM; 1669 if (!name)
1670 goto err;
1671
1672 err = -EEXIST;
1673 list_for_each_entry(tmp, &ctx->net->nft.commit_list, list) {
1674 if (tmp->msg_type == NFT_MSG_NEWCHAIN &&
1675 tmp->ctx.table == table &&
1676 nft_trans_chain_update(tmp) &&
1677 nft_trans_chain_name(tmp) &&
1678 strcmp(name, nft_trans_chain_name(tmp)) == 0) {
1679 kfree(name);
1680 goto err;
1681 }
1671 } 1682 }
1683
1684 nft_trans_chain_name(trans) = name;
1672 } 1685 }
1673 list_add_tail(&trans->list, &ctx->net->nft.commit_list); 1686 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
1674 1687
1675 return 0; 1688 return 0;
1689err:
1690 free_percpu(stats);
1691 kfree(trans);
1692 return err;
1676} 1693}
1677 1694
1678static int nf_tables_newchain(struct net *net, struct sock *nlsk, 1695static int nf_tables_newchain(struct net *net, struct sock *nlsk,
@@ -2254,6 +2271,39 @@ done:
2254 return skb->len; 2271 return skb->len;
2255} 2272}
2256 2273
2274static int nf_tables_dump_rules_start(struct netlink_callback *cb)
2275{
2276 const struct nlattr * const *nla = cb->data;
2277 struct nft_rule_dump_ctx *ctx = NULL;
2278
2279 if (nla[NFTA_RULE_TABLE] || nla[NFTA_RULE_CHAIN]) {
2280 ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
2281 if (!ctx)
2282 return -ENOMEM;
2283
2284 if (nla[NFTA_RULE_TABLE]) {
2285 ctx->table = nla_strdup(nla[NFTA_RULE_TABLE],
2286 GFP_ATOMIC);
2287 if (!ctx->table) {
2288 kfree(ctx);
2289 return -ENOMEM;
2290 }
2291 }
2292 if (nla[NFTA_RULE_CHAIN]) {
2293 ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN],
2294 GFP_ATOMIC);
2295 if (!ctx->chain) {
2296 kfree(ctx->table);
2297 kfree(ctx);
2298 return -ENOMEM;
2299 }
2300 }
2301 }
2302
2303 cb->data = ctx;
2304 return 0;
2305}
2306
2257static int nf_tables_dump_rules_done(struct netlink_callback *cb) 2307static int nf_tables_dump_rules_done(struct netlink_callback *cb)
2258{ 2308{
2259 struct nft_rule_dump_ctx *ctx = cb->data; 2309 struct nft_rule_dump_ctx *ctx = cb->data;
@@ -2283,38 +2333,13 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk,
2283 2333
2284 if (nlh->nlmsg_flags & NLM_F_DUMP) { 2334 if (nlh->nlmsg_flags & NLM_F_DUMP) {
2285 struct netlink_dump_control c = { 2335 struct netlink_dump_control c = {
2336 .start= nf_tables_dump_rules_start,
2286 .dump = nf_tables_dump_rules, 2337 .dump = nf_tables_dump_rules,
2287 .done = nf_tables_dump_rules_done, 2338 .done = nf_tables_dump_rules_done,
2288 .module = THIS_MODULE, 2339 .module = THIS_MODULE,
2340 .data = (void *)nla,
2289 }; 2341 };
2290 2342
2291 if (nla[NFTA_RULE_TABLE] || nla[NFTA_RULE_CHAIN]) {
2292 struct nft_rule_dump_ctx *ctx;
2293
2294 ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
2295 if (!ctx)
2296 return -ENOMEM;
2297
2298 if (nla[NFTA_RULE_TABLE]) {
2299 ctx->table = nla_strdup(nla[NFTA_RULE_TABLE],
2300 GFP_ATOMIC);
2301 if (!ctx->table) {
2302 kfree(ctx);
2303 return -ENOMEM;
2304 }
2305 }
2306 if (nla[NFTA_RULE_CHAIN]) {
2307 ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN],
2308 GFP_ATOMIC);
2309 if (!ctx->chain) {
2310 kfree(ctx->table);
2311 kfree(ctx);
2312 return -ENOMEM;
2313 }
2314 }
2315 c.data = ctx;
2316 }
2317
2318 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); 2343 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
2319 } 2344 }
2320 2345
@@ -2384,6 +2409,9 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
2384 struct nft_rule *rule; 2409 struct nft_rule *rule;
2385 int err; 2410 int err;
2386 2411
2412 if (ctx->level == NFT_JUMP_STACK_SIZE)
2413 return -EMLINK;
2414
2387 list_for_each_entry(rule, &chain->rules, list) { 2415 list_for_each_entry(rule, &chain->rules, list) {
2388 if (!nft_is_active_next(ctx->net, rule)) 2416 if (!nft_is_active_next(ctx->net, rule))
2389 continue; 2417 continue;
@@ -3161,6 +3189,18 @@ done:
3161 return skb->len; 3189 return skb->len;
3162} 3190}
3163 3191
3192static int nf_tables_dump_sets_start(struct netlink_callback *cb)
3193{
3194 struct nft_ctx *ctx_dump = NULL;
3195
3196 ctx_dump = kmemdup(cb->data, sizeof(*ctx_dump), GFP_ATOMIC);
3197 if (ctx_dump == NULL)
3198 return -ENOMEM;
3199
3200 cb->data = ctx_dump;
3201 return 0;
3202}
3203
3164static int nf_tables_dump_sets_done(struct netlink_callback *cb) 3204static int nf_tables_dump_sets_done(struct netlink_callback *cb)
3165{ 3205{
3166 kfree(cb->data); 3206 kfree(cb->data);
@@ -3188,18 +3228,12 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
3188 3228
3189 if (nlh->nlmsg_flags & NLM_F_DUMP) { 3229 if (nlh->nlmsg_flags & NLM_F_DUMP) {
3190 struct netlink_dump_control c = { 3230 struct netlink_dump_control c = {
3231 .start = nf_tables_dump_sets_start,
3191 .dump = nf_tables_dump_sets, 3232 .dump = nf_tables_dump_sets,
3192 .done = nf_tables_dump_sets_done, 3233 .done = nf_tables_dump_sets_done,
3234 .data = &ctx,
3193 .module = THIS_MODULE, 3235 .module = THIS_MODULE,
3194 }; 3236 };
3195 struct nft_ctx *ctx_dump;
3196
3197 ctx_dump = kmalloc(sizeof(*ctx_dump), GFP_ATOMIC);
3198 if (ctx_dump == NULL)
3199 return -ENOMEM;
3200
3201 *ctx_dump = ctx;
3202 c.data = ctx_dump;
3203 3237
3204 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); 3238 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
3205 } 3239 }
@@ -3849,6 +3883,15 @@ nla_put_failure:
3849 return -ENOSPC; 3883 return -ENOSPC;
3850} 3884}
3851 3885
3886static int nf_tables_dump_set_start(struct netlink_callback *cb)
3887{
3888 struct nft_set_dump_ctx *dump_ctx = cb->data;
3889
3890 cb->data = kmemdup(dump_ctx, sizeof(*dump_ctx), GFP_ATOMIC);
3891
3892 return cb->data ? 0 : -ENOMEM;
3893}
3894
3852static int nf_tables_dump_set_done(struct netlink_callback *cb) 3895static int nf_tables_dump_set_done(struct netlink_callback *cb)
3853{ 3896{
3854 kfree(cb->data); 3897 kfree(cb->data);
@@ -4002,20 +4045,17 @@ static int nf_tables_getsetelem(struct net *net, struct sock *nlsk,
4002 4045
4003 if (nlh->nlmsg_flags & NLM_F_DUMP) { 4046 if (nlh->nlmsg_flags & NLM_F_DUMP) {
4004 struct netlink_dump_control c = { 4047 struct netlink_dump_control c = {
4048 .start = nf_tables_dump_set_start,
4005 .dump = nf_tables_dump_set, 4049 .dump = nf_tables_dump_set,
4006 .done = nf_tables_dump_set_done, 4050 .done = nf_tables_dump_set_done,
4007 .module = THIS_MODULE, 4051 .module = THIS_MODULE,
4008 }; 4052 };
4009 struct nft_set_dump_ctx *dump_ctx; 4053 struct nft_set_dump_ctx dump_ctx = {
4010 4054 .set = set,
4011 dump_ctx = kmalloc(sizeof(*dump_ctx), GFP_ATOMIC); 4055 .ctx = ctx,
4012 if (!dump_ctx) 4056 };
4013 return -ENOMEM;
4014
4015 dump_ctx->set = set;
4016 dump_ctx->ctx = ctx;
4017 4057
4018 c.data = dump_ctx; 4058 c.data = &dump_ctx;
4019 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); 4059 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
4020 } 4060 }
4021 4061
@@ -4975,38 +5015,42 @@ done:
4975 return skb->len; 5015 return skb->len;
4976} 5016}
4977 5017
4978static int nf_tables_dump_obj_done(struct netlink_callback *cb) 5018static int nf_tables_dump_obj_start(struct netlink_callback *cb)
4979{ 5019{
4980 struct nft_obj_filter *filter = cb->data; 5020 const struct nlattr * const *nla = cb->data;
5021 struct nft_obj_filter *filter = NULL;
4981 5022
4982 if (filter) { 5023 if (nla[NFTA_OBJ_TABLE] || nla[NFTA_OBJ_TYPE]) {
4983 kfree(filter->table); 5024 filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
4984 kfree(filter); 5025 if (!filter)
5026 return -ENOMEM;
5027
5028 if (nla[NFTA_OBJ_TABLE]) {
5029 filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC);
5030 if (!filter->table) {
5031 kfree(filter);
5032 return -ENOMEM;
5033 }
5034 }
5035
5036 if (nla[NFTA_OBJ_TYPE])
5037 filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
4985 } 5038 }
4986 5039
5040 cb->data = filter;
4987 return 0; 5041 return 0;
4988} 5042}
4989 5043
4990static struct nft_obj_filter * 5044static int nf_tables_dump_obj_done(struct netlink_callback *cb)
4991nft_obj_filter_alloc(const struct nlattr * const nla[])
4992{ 5045{
4993 struct nft_obj_filter *filter; 5046 struct nft_obj_filter *filter = cb->data;
4994
4995 filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
4996 if (!filter)
4997 return ERR_PTR(-ENOMEM);
4998 5047
4999 if (nla[NFTA_OBJ_TABLE]) { 5048 if (filter) {
5000 filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC); 5049 kfree(filter->table);
5001 if (!filter->table) { 5050 kfree(filter);
5002 kfree(filter);
5003 return ERR_PTR(-ENOMEM);
5004 }
5005 } 5051 }
5006 if (nla[NFTA_OBJ_TYPE])
5007 filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
5008 5052
5009 return filter; 5053 return 0;
5010} 5054}
5011 5055
5012/* called with rcu_read_lock held */ 5056/* called with rcu_read_lock held */
@@ -5027,21 +5071,13 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk,
5027 5071
5028 if (nlh->nlmsg_flags & NLM_F_DUMP) { 5072 if (nlh->nlmsg_flags & NLM_F_DUMP) {
5029 struct netlink_dump_control c = { 5073 struct netlink_dump_control c = {
5074 .start = nf_tables_dump_obj_start,
5030 .dump = nf_tables_dump_obj, 5075 .dump = nf_tables_dump_obj,
5031 .done = nf_tables_dump_obj_done, 5076 .done = nf_tables_dump_obj_done,
5032 .module = THIS_MODULE, 5077 .module = THIS_MODULE,
5078 .data = (void *)nla,
5033 }; 5079 };
5034 5080
5035 if (nla[NFTA_OBJ_TABLE] ||
5036 nla[NFTA_OBJ_TYPE]) {
5037 struct nft_obj_filter *filter;
5038
5039 filter = nft_obj_filter_alloc(nla);
5040 if (IS_ERR(filter))
5041 return -ENOMEM;
5042
5043 c.data = filter;
5044 }
5045 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); 5081 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
5046 } 5082 }
5047 5083
@@ -5320,8 +5356,6 @@ static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx,
5320 flowtable->ops[i].priv = &flowtable->data; 5356 flowtable->ops[i].priv = &flowtable->data;
5321 flowtable->ops[i].hook = flowtable->data.type->hook; 5357 flowtable->ops[i].hook = flowtable->data.type->hook;
5322 flowtable->ops[i].dev = dev_array[i]; 5358 flowtable->ops[i].dev = dev_array[i];
5323 flowtable->dev_name[i] = kstrdup(dev_array[i]->name,
5324 GFP_KERNEL);
5325 } 5359 }
5326 5360
5327 return err; 5361 return err;
@@ -5479,10 +5513,8 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
5479err6: 5513err6:
5480 i = flowtable->ops_len; 5514 i = flowtable->ops_len;
5481err5: 5515err5:
5482 for (k = i - 1; k >= 0; k--) { 5516 for (k = i - 1; k >= 0; k--)
5483 kfree(flowtable->dev_name[k]);
5484 nf_unregister_net_hook(net, &flowtable->ops[k]); 5517 nf_unregister_net_hook(net, &flowtable->ops[k]);
5485 }
5486 5518
5487 kfree(flowtable->ops); 5519 kfree(flowtable->ops);
5488err4: 5520err4:
@@ -5581,9 +5613,10 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
5581 goto nla_put_failure; 5613 goto nla_put_failure;
5582 5614
5583 for (i = 0; i < flowtable->ops_len; i++) { 5615 for (i = 0; i < flowtable->ops_len; i++) {
5584 if (flowtable->dev_name[i][0] && 5616 const struct net_device *dev = READ_ONCE(flowtable->ops[i].dev);
5585 nla_put_string(skb, NFTA_DEVICE_NAME, 5617
5586 flowtable->dev_name[i])) 5618 if (dev &&
5619 nla_put_string(skb, NFTA_DEVICE_NAME, dev->name))
5587 goto nla_put_failure; 5620 goto nla_put_failure;
5588 } 5621 }
5589 nla_nest_end(skb, nest_devs); 5622 nla_nest_end(skb, nest_devs);
@@ -5650,37 +5683,39 @@ done:
5650 return skb->len; 5683 return skb->len;
5651} 5684}
5652 5685
5653static int nf_tables_dump_flowtable_done(struct netlink_callback *cb) 5686static int nf_tables_dump_flowtable_start(struct netlink_callback *cb)
5654{ 5687{
5655 struct nft_flowtable_filter *filter = cb->data; 5688 const struct nlattr * const *nla = cb->data;
5689 struct nft_flowtable_filter *filter = NULL;
5656 5690
5657 if (!filter) 5691 if (nla[NFTA_FLOWTABLE_TABLE]) {
5658 return 0; 5692 filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
5693 if (!filter)
5694 return -ENOMEM;
5659 5695
5660 kfree(filter->table); 5696 filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE],
5661 kfree(filter); 5697 GFP_ATOMIC);
5698 if (!filter->table) {
5699 kfree(filter);
5700 return -ENOMEM;
5701 }
5702 }
5662 5703
5704 cb->data = filter;
5663 return 0; 5705 return 0;
5664} 5706}
5665 5707
5666static struct nft_flowtable_filter * 5708static int nf_tables_dump_flowtable_done(struct netlink_callback *cb)
5667nft_flowtable_filter_alloc(const struct nlattr * const nla[])
5668{ 5709{
5669 struct nft_flowtable_filter *filter; 5710 struct nft_flowtable_filter *filter = cb->data;
5670 5711
5671 filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
5672 if (!filter) 5712 if (!filter)
5673 return ERR_PTR(-ENOMEM); 5713 return 0;
5674 5714
5675 if (nla[NFTA_FLOWTABLE_TABLE]) { 5715 kfree(filter->table);
5676 filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE], 5716 kfree(filter);
5677 GFP_ATOMIC); 5717
5678 if (!filter->table) { 5718 return 0;
5679 kfree(filter);
5680 return ERR_PTR(-ENOMEM);
5681 }
5682 }
5683 return filter;
5684} 5719}
5685 5720
5686/* called with rcu_read_lock held */ 5721/* called with rcu_read_lock held */
@@ -5700,20 +5735,13 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk,
5700 5735
5701 if (nlh->nlmsg_flags & NLM_F_DUMP) { 5736 if (nlh->nlmsg_flags & NLM_F_DUMP) {
5702 struct netlink_dump_control c = { 5737 struct netlink_dump_control c = {
5738 .start = nf_tables_dump_flowtable_start,
5703 .dump = nf_tables_dump_flowtable, 5739 .dump = nf_tables_dump_flowtable,
5704 .done = nf_tables_dump_flowtable_done, 5740 .done = nf_tables_dump_flowtable_done,
5705 .module = THIS_MODULE, 5741 .module = THIS_MODULE,
5742 .data = (void *)nla,
5706 }; 5743 };
5707 5744
5708 if (nla[NFTA_FLOWTABLE_TABLE]) {
5709 struct nft_flowtable_filter *filter;
5710
5711 filter = nft_flowtable_filter_alloc(nla);
5712 if (IS_ERR(filter))
5713 return -ENOMEM;
5714
5715 c.data = filter;
5716 }
5717 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); 5745 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
5718 } 5746 }
5719 5747
@@ -5783,6 +5811,7 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
5783 kfree(flowtable->name); 5811 kfree(flowtable->name);
5784 flowtable->data.type->free(&flowtable->data); 5812 flowtable->data.type->free(&flowtable->data);
5785 module_put(flowtable->data.type->owner); 5813 module_put(flowtable->data.type->owner);
5814 kfree(flowtable);
5786} 5815}
5787 5816
5788static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net, 5817static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
@@ -5825,7 +5854,6 @@ static void nft_flowtable_event(unsigned long event, struct net_device *dev,
5825 continue; 5854 continue;
5826 5855
5827 nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]); 5856 nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]);
5828 flowtable->dev_name[i][0] = '\0';
5829 flowtable->ops[i].dev = NULL; 5857 flowtable->ops[i].dev = NULL;
5830 break; 5858 break;
5831 } 5859 }
@@ -6086,6 +6114,9 @@ static void nft_commit_release(struct nft_trans *trans)
6086 case NFT_MSG_DELTABLE: 6114 case NFT_MSG_DELTABLE:
6087 nf_tables_table_destroy(&trans->ctx); 6115 nf_tables_table_destroy(&trans->ctx);
6088 break; 6116 break;
6117 case NFT_MSG_NEWCHAIN:
6118 kfree(nft_trans_chain_name(trans));
6119 break;
6089 case NFT_MSG_DELCHAIN: 6120 case NFT_MSG_DELCHAIN:
6090 nf_tables_chain_destroy(&trans->ctx); 6121 nf_tables_chain_destroy(&trans->ctx);
6091 break; 6122 break;
@@ -6315,13 +6346,15 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
6315 nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE); 6346 nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE);
6316 break; 6347 break;
6317 case NFT_MSG_NEWCHAIN: 6348 case NFT_MSG_NEWCHAIN:
6318 if (nft_trans_chain_update(trans)) 6349 if (nft_trans_chain_update(trans)) {
6319 nft_chain_commit_update(trans); 6350 nft_chain_commit_update(trans);
6320 else 6351 nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
6352 /* trans destroyed after rcu grace period */
6353 } else {
6321 nft_clear(net, trans->ctx.chain); 6354 nft_clear(net, trans->ctx.chain);
6322 6355 nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
6323 nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); 6356 nft_trans_destroy(trans);
6324 nft_trans_destroy(trans); 6357 }
6325 break; 6358 break;
6326 case NFT_MSG_DELCHAIN: 6359 case NFT_MSG_DELCHAIN:
6327 nft_chain_del(trans->ctx.chain); 6360 nft_chain_del(trans->ctx.chain);
@@ -6471,7 +6504,7 @@ static int __nf_tables_abort(struct net *net)
6471 case NFT_MSG_NEWCHAIN: 6504 case NFT_MSG_NEWCHAIN:
6472 if (nft_trans_chain_update(trans)) { 6505 if (nft_trans_chain_update(trans)) {
6473 free_percpu(nft_trans_chain_stats(trans)); 6506 free_percpu(nft_trans_chain_stats(trans));
6474 6507 kfree(nft_trans_chain_name(trans));
6475 nft_trans_destroy(trans); 6508 nft_trans_destroy(trans);
6476 } else { 6509 } else {
6477 trans->ctx.table->use--; 6510 trans->ctx.table->use--;
@@ -6837,13 +6870,6 @@ int nft_validate_register_store(const struct nft_ctx *ctx,
6837 err = nf_tables_check_loops(ctx, data->verdict.chain); 6870 err = nf_tables_check_loops(ctx, data->verdict.chain);
6838 if (err < 0) 6871 if (err < 0)
6839 return err; 6872 return err;
6840
6841 if (ctx->chain->level + 1 >
6842 data->verdict.chain->level) {
6843 if (ctx->chain->level + 1 == NFT_JUMP_STACK_SIZE)
6844 return -EMLINK;
6845 data->verdict.chain->level = ctx->chain->level + 1;
6846 }
6847 } 6873 }
6848 6874
6849 return 0; 6875 return 0;
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index 15adf8ca82c3..0777a93211e2 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -98,6 +98,7 @@ static int nft_immediate_validate(const struct nft_ctx *ctx,
98 const struct nft_data **d) 98 const struct nft_data **d)
99{ 99{
100 const struct nft_immediate_expr *priv = nft_expr_priv(expr); 100 const struct nft_immediate_expr *priv = nft_expr_priv(expr);
101 struct nft_ctx *pctx = (struct nft_ctx *)ctx;
101 const struct nft_data *data; 102 const struct nft_data *data;
102 int err; 103 int err;
103 104
@@ -109,9 +110,11 @@ static int nft_immediate_validate(const struct nft_ctx *ctx,
109 switch (data->verdict.code) { 110 switch (data->verdict.code) {
110 case NFT_JUMP: 111 case NFT_JUMP:
111 case NFT_GOTO: 112 case NFT_GOTO:
113 pctx->level++;
112 err = nft_chain_validate(ctx, data->verdict.chain); 114 err = nft_chain_validate(ctx, data->verdict.chain);
113 if (err < 0) 115 if (err < 0)
114 return err; 116 return err;
117 pctx->level--;
115 break; 118 break;
116 default: 119 default:
117 break; 120 break;
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 42e6fadf1417..c2a1d84cdfc4 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -155,7 +155,9 @@ static int nft_lookup_validate_setelem(const struct nft_ctx *ctx,
155 struct nft_set_elem *elem) 155 struct nft_set_elem *elem)
156{ 156{
157 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); 157 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
158 struct nft_ctx *pctx = (struct nft_ctx *)ctx;
158 const struct nft_data *data; 159 const struct nft_data *data;
160 int err;
159 161
160 if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) && 162 if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
161 *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END) 163 *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
@@ -165,10 +167,17 @@ static int nft_lookup_validate_setelem(const struct nft_ctx *ctx,
165 switch (data->verdict.code) { 167 switch (data->verdict.code) {
166 case NFT_JUMP: 168 case NFT_JUMP:
167 case NFT_GOTO: 169 case NFT_GOTO:
168 return nft_chain_validate(ctx, data->verdict.chain); 170 pctx->level++;
171 err = nft_chain_validate(ctx, data->verdict.chain);
172 if (err < 0)
173 return err;
174 pctx->level--;
175 break;
169 default: 176 default:
170 return 0; 177 break;
171 } 178 }
179
180 return 0;
172} 181}
173 182
174static int nft_lookup_validate(const struct nft_ctx *ctx, 183static int nft_lookup_validate(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 72ef35b51cac..90c3e7e6cacb 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -387,6 +387,7 @@ static void nft_rhash_destroy(const struct nft_set *set)
387 struct nft_rhash *priv = nft_set_priv(set); 387 struct nft_rhash *priv = nft_set_priv(set);
388 388
389 cancel_delayed_work_sync(&priv->gc_work); 389 cancel_delayed_work_sync(&priv->gc_work);
390 rcu_barrier();
390 rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy, 391 rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy,
391 (void *)set); 392 (void *)set);
392} 393}
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 1f8f257cb518..9873d734b494 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -381,7 +381,7 @@ static void nft_rbtree_gc(struct work_struct *work)
381 381
382 gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC); 382 gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
383 if (!gcb) 383 if (!gcb)
384 goto out; 384 break;
385 385
386 atomic_dec(&set->nelems); 386 atomic_dec(&set->nelems);
387 nft_set_gc_batch_add(gcb, rbe); 387 nft_set_gc_batch_add(gcb, rbe);
@@ -390,10 +390,12 @@ static void nft_rbtree_gc(struct work_struct *work)
390 rbe = rb_entry(prev, struct nft_rbtree_elem, node); 390 rbe = rb_entry(prev, struct nft_rbtree_elem, node);
391 atomic_dec(&set->nelems); 391 atomic_dec(&set->nelems);
392 nft_set_gc_batch_add(gcb, rbe); 392 nft_set_gc_batch_add(gcb, rbe);
393 prev = NULL;
393 } 394 }
394 node = rb_next(node); 395 node = rb_next(node);
396 if (!node)
397 break;
395 } 398 }
396out:
397 if (gcb) { 399 if (gcb) {
398 for (i = 0; i < gcb->head.cnt; i++) { 400 for (i = 0; i < gcb->head.cnt; i++) {
399 rbe = gcb->elems[i]; 401 rbe = gcb->elems[i];
@@ -440,6 +442,7 @@ static void nft_rbtree_destroy(const struct nft_set *set)
440 struct rb_node *node; 442 struct rb_node *node;
441 443
442 cancel_delayed_work_sync(&priv->gc_work); 444 cancel_delayed_work_sync(&priv->gc_work);
445 rcu_barrier();
443 while ((node = priv->root.rb_node) != NULL) { 446 while ((node = priv->root.rb_node) != NULL) {
444 rb_erase(node, &priv->root); 447 rb_erase(node, &priv->root);
445 rbe = rb_entry(node, struct nft_rbtree_elem, node); 448 rbe = rb_entry(node, struct nft_rbtree_elem, node);
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 4618f1c31137..1f3d9789af30 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -646,6 +646,9 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
646 return NULL; 646 return NULL;
647 } 647 }
648 648
649 if (sk->sk_shutdown & RCV_SHUTDOWN)
650 return NULL;
651
649 if (sock_flag(sk, SOCK_DONE)) 652 if (sock_flag(sk, SOCK_DONE))
650 return NULL; 653 return NULL;
651 654
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 4eece06be1e7..80bc986c79e5 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4409,6 +4409,7 @@ static int parse_station_flags(struct genl_info *info,
4409 params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) | 4409 params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) |
4410 BIT(NL80211_STA_FLAG_MFP) | 4410 BIT(NL80211_STA_FLAG_MFP) |
4411 BIT(NL80211_STA_FLAG_AUTHORIZED); 4411 BIT(NL80211_STA_FLAG_AUTHORIZED);
4412 break;
4412 default: 4413 default:
4413 return -EINVAL; 4414 return -EINVAL;
4414 } 4415 }
@@ -14923,20 +14924,24 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
14923EXPORT_SYMBOL(cfg80211_mgmt_tx_status); 14924EXPORT_SYMBOL(cfg80211_mgmt_tx_status);
14924 14925
14925static int __nl80211_rx_control_port(struct net_device *dev, 14926static int __nl80211_rx_control_port(struct net_device *dev,
14926 const u8 *buf, size_t len, 14927 struct sk_buff *skb,
14927 const u8 *addr, u16 proto,
14928 bool unencrypted, gfp_t gfp) 14928 bool unencrypted, gfp_t gfp)
14929{ 14929{
14930 struct wireless_dev *wdev = dev->ieee80211_ptr; 14930 struct wireless_dev *wdev = dev->ieee80211_ptr;
14931 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); 14931 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
14932 struct ethhdr *ehdr = eth_hdr(skb);
14933 const u8 *addr = ehdr->h_source;
14934 u16 proto = be16_to_cpu(skb->protocol);
14932 struct sk_buff *msg; 14935 struct sk_buff *msg;
14933 void *hdr; 14936 void *hdr;
14937 struct nlattr *frame;
14938
14934 u32 nlportid = READ_ONCE(wdev->conn_owner_nlportid); 14939 u32 nlportid = READ_ONCE(wdev->conn_owner_nlportid);
14935 14940
14936 if (!nlportid) 14941 if (!nlportid)
14937 return -ENOENT; 14942 return -ENOENT;
14938 14943
14939 msg = nlmsg_new(100 + len, gfp); 14944 msg = nlmsg_new(100 + skb->len, gfp);
14940 if (!msg) 14945 if (!msg)
14941 return -ENOMEM; 14946 return -ENOMEM;
14942 14947
@@ -14950,13 +14955,17 @@ static int __nl80211_rx_control_port(struct net_device *dev,
14950 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || 14955 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
14951 nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), 14956 nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
14952 NL80211_ATTR_PAD) || 14957 NL80211_ATTR_PAD) ||
14953 nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
14954 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) || 14958 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
14955 nla_put_u16(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE, proto) || 14959 nla_put_u16(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE, proto) ||
14956 (unencrypted && nla_put_flag(msg, 14960 (unencrypted && nla_put_flag(msg,
14957 NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT))) 14961 NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT)))
14958 goto nla_put_failure; 14962 goto nla_put_failure;
14959 14963
14964 frame = nla_reserve(msg, NL80211_ATTR_FRAME, skb->len);
14965 if (!frame)
14966 goto nla_put_failure;
14967
14968 skb_copy_bits(skb, 0, nla_data(frame), skb->len);
14960 genlmsg_end(msg, hdr); 14969 genlmsg_end(msg, hdr);
14961 14970
14962 return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid); 14971 return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid);
@@ -14967,14 +14976,12 @@ static int __nl80211_rx_control_port(struct net_device *dev,
14967} 14976}
14968 14977
14969bool cfg80211_rx_control_port(struct net_device *dev, 14978bool cfg80211_rx_control_port(struct net_device *dev,
14970 const u8 *buf, size_t len, 14979 struct sk_buff *skb, bool unencrypted)
14971 const u8 *addr, u16 proto, bool unencrypted)
14972{ 14980{
14973 int ret; 14981 int ret;
14974 14982
14975 trace_cfg80211_rx_control_port(dev, buf, len, addr, proto, unencrypted); 14983 trace_cfg80211_rx_control_port(dev, skb, unencrypted);
14976 ret = __nl80211_rx_control_port(dev, buf, len, addr, proto, 14984 ret = __nl80211_rx_control_port(dev, skb, unencrypted, GFP_ATOMIC);
14977 unencrypted, GFP_ATOMIC);
14978 trace_cfg80211_return_bool(ret == 0); 14985 trace_cfg80211_return_bool(ret == 0);
14979 return ret == 0; 14986 return ret == 0;
14980} 14987}
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index bbe6298e4bb9..4fc66a117b7d 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2240,7 +2240,9 @@ static void wiphy_update_regulatory(struct wiphy *wiphy,
2240 * as some drivers used this to restore its orig_* reg domain. 2240 * as some drivers used this to restore its orig_* reg domain.
2241 */ 2241 */
2242 if (initiator == NL80211_REGDOM_SET_BY_CORE && 2242 if (initiator == NL80211_REGDOM_SET_BY_CORE &&
2243 wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) 2243 wiphy->regulatory_flags & REGULATORY_CUSTOM_REG &&
2244 !(wiphy->regulatory_flags &
2245 REGULATORY_WIPHY_SELF_MANAGED))
2244 reg_call_notifier(wiphy, lr); 2246 reg_call_notifier(wiphy, lr);
2245 return; 2247 return;
2246 } 2248 }
@@ -2787,26 +2789,6 @@ static void notify_self_managed_wiphys(struct regulatory_request *request)
2787 } 2789 }
2788} 2790}
2789 2791
2790static bool reg_only_self_managed_wiphys(void)
2791{
2792 struct cfg80211_registered_device *rdev;
2793 struct wiphy *wiphy;
2794 bool self_managed_found = false;
2795
2796 ASSERT_RTNL();
2797
2798 list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
2799 wiphy = &rdev->wiphy;
2800 if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
2801 self_managed_found = true;
2802 else
2803 return false;
2804 }
2805
2806 /* make sure at least one self-managed wiphy exists */
2807 return self_managed_found;
2808}
2809
2810/* 2792/*
2811 * Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_* 2793 * Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_*
2812 * Regulatory hints come on a first come first serve basis and we 2794 * Regulatory hints come on a first come first serve basis and we
@@ -2839,10 +2821,6 @@ static void reg_process_pending_hints(void)
2839 spin_unlock(&reg_requests_lock); 2821 spin_unlock(&reg_requests_lock);
2840 2822
2841 notify_self_managed_wiphys(reg_request); 2823 notify_self_managed_wiphys(reg_request);
2842 if (reg_only_self_managed_wiphys()) {
2843 reg_free_request(reg_request);
2844 return;
2845 }
2846 2824
2847 reg_process_hint(reg_request); 2825 reg_process_hint(reg_request);
2848 2826
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 2b417a2fe63f..7c73510b161f 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2627,23 +2627,25 @@ TRACE_EVENT(cfg80211_mgmt_tx_status,
2627); 2627);
2628 2628
2629TRACE_EVENT(cfg80211_rx_control_port, 2629TRACE_EVENT(cfg80211_rx_control_port,
2630 TP_PROTO(struct net_device *netdev, const u8 *buf, size_t len, 2630 TP_PROTO(struct net_device *netdev, struct sk_buff *skb,
2631 const u8 *addr, u16 proto, bool unencrypted), 2631 bool unencrypted),
2632 TP_ARGS(netdev, buf, len, addr, proto, unencrypted), 2632 TP_ARGS(netdev, skb, unencrypted),
2633 TP_STRUCT__entry( 2633 TP_STRUCT__entry(
2634 NETDEV_ENTRY 2634 NETDEV_ENTRY
2635 MAC_ENTRY(addr) 2635 __field(int, len)
2636 MAC_ENTRY(from)
2636 __field(u16, proto) 2637 __field(u16, proto)
2637 __field(bool, unencrypted) 2638 __field(bool, unencrypted)
2638 ), 2639 ),
2639 TP_fast_assign( 2640 TP_fast_assign(
2640 NETDEV_ASSIGN; 2641 NETDEV_ASSIGN;
2641 MAC_ASSIGN(addr, addr); 2642 __entry->len = skb->len;
2642 __entry->proto = proto; 2643 MAC_ASSIGN(from, eth_hdr(skb)->h_source);
2644 __entry->proto = be16_to_cpu(skb->protocol);
2643 __entry->unencrypted = unencrypted; 2645 __entry->unencrypted = unencrypted;
2644 ), 2646 ),
2645 TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT " proto: 0x%x, unencrypted: %s", 2647 TP_printk(NETDEV_PR_FMT ", len=%d, " MAC_PR_FMT ", proto: 0x%x, unencrypted: %s",
2646 NETDEV_PR_ARG, MAC_PR_ARG(addr), 2648 NETDEV_PR_ARG, __entry->len, MAC_PR_ARG(from),
2647 __entry->proto, BOOL_TO_STR(__entry->unencrypted)) 2649 __entry->proto, BOOL_TO_STR(__entry->unencrypted))
2648); 2650);
2649 2651