commit     19725496da5602b401eae389736ab00d1817e264
tree       4c1a94bf0121769110f1b9c08ee337a55679a48a /net
parent     aea5f654e6b78a0c976f7a25950155932c77a53f
parent     9981b4fb8684883dcc0daf088891ff32260b9794
author     David S. Miller <davem@davemloft.net>  2018-07-24 22:21:58 -0400
committer  David S. Miller <davem@davemloft.net>  2018-07-24 22:21:58 -0400

    Merge ra.kernel.org:/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'net')
 -rw-r--r--  net/caif/caif_dev.c                      |   4
 -rw-r--r--  net/core/page_pool.c                     |   2
 -rw-r--r--  net/core/skbuff.c                        |  10
 -rw-r--r--  net/core/sock.c                          |   6
 -rw-r--r--  net/ipv4/igmp.c                          |   3
 -rw-r--r--  net/ipv4/ip_output.c                     |   2
 -rw-r--r--  net/ipv4/ip_sockglue.c                   |   7
 -rw-r--r--  net/ipv4/tcp_dctcp.c                     |  52
 -rw-r--r--  net/ipv4/tcp_input.c                     |  65
 -rw-r--r--  net/ipv4/tcp_output.c                    |  32
 -rw-r--r--  net/ipv6/addrconf.c                      |   3
 -rw-r--r--  net/ipv6/datagram.c                      |   7
 -rw-r--r--  net/ipv6/icmp.c                          |   5
 -rw-r--r--  net/ipv6/ip6_output.c                    |   2
 -rw-r--r--  net/ipv6/mcast.c                         |   3
 -rw-r--r--  net/ipv6/route.c                         |  41
 -rw-r--r--  net/ipv6/tcp_ipv6.c                      |   6
 -rw-r--r--  net/mac80211/rx.c                        |   5
 -rw-r--r--  net/mac80211/util.c                      |   3
 -rw-r--r--  net/netfilter/nf_conntrack_proto_dccp.c  |   8
 -rw-r--r--  net/netfilter/nf_tables_api.c            | 304
 -rw-r--r--  net/netfilter/nft_immediate.c            |   3
 -rw-r--r--  net/netfilter/nft_lookup.c               |  13
 -rw-r--r--  net/netfilter/nft_set_hash.c             |   1
 -rw-r--r--  net/netfilter/nft_set_rbtree.c           |   7
 -rw-r--r--  net/tls/tls_sw.c                         |   3
 -rw-r--r--  net/wireless/nl80211.c                   |  25
 -rw-r--r--  net/wireless/reg.c                       |  28
 -rw-r--r--  net/wireless/trace.h                     |  18
29 files changed, 384 insertions, 284 deletions
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index e0adcd123f48..711d7156efd8 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -131,8 +131,10 @@ static void caif_flow_cb(struct sk_buff *skb)
     caifd = caif_get(skb->dev);
 
     WARN_ON(caifd == NULL);
-    if (caifd == NULL)
+    if (!caifd) {
+        rcu_read_unlock();
         return;
+    }
 
     caifd_hold(caifd);
     rcu_read_unlock();
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 68bf07206744..43a932cb609b 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -269,7 +269,7 @@ static void __page_pool_empty_ring(struct page_pool *pool)
     struct page *page;
 
     /* Empty recycle ring */
-    while ((page = ptr_ring_consume(&pool->ring))) {
+    while ((page = ptr_ring_consume_bh(&pool->ring))) {
         /* Verify the refcnt invariant of cached pages */
         if (!(page_ref_count(page) == 1))
             pr_crit("%s() page_pool refcnt %d violation\n",
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 0c1a00672ba9..266b954f763e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3720,6 +3720,7 @@ normal:
                 net_warn_ratelimited(
                     "skb_segment: too many frags: %u %u\n",
                     pos, mss);
+                err = -EINVAL;
                 goto err;
             }
 
@@ -3753,11 +3754,10 @@ skip_fraglist:
 
 perform_csum_check:
     if (!csum) {
-        if (skb_has_shared_frag(nskb)) {
-            err = __skb_linearize(nskb);
-            if (err)
-                goto err;
-        }
+        if (skb_has_shared_frag(nskb) &&
+            __skb_linearize(nskb))
+            goto err;
+
         if (!nskb->remcsum_offload)
             nskb->ip_summed = CHECKSUM_NONE;
         SKB_GSO_CB(nskb)->csum =
diff --git a/net/core/sock.c b/net/core/sock.c
index 03fdea5b0f57..9c6ebbdfebf3 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2316,9 +2316,9 @@ int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
         pfrag->offset += use;
 
         sge = sg + sg_curr - 1;
-        if (sg_curr > first_coalesce && sg_page(sg) == pfrag->page &&
-            sg->offset + sg->length == orig_offset) {
-            sg->length += use;
+        if (sg_curr > first_coalesce && sg_page(sge) == pfrag->page &&
+            sge->offset + sge->length == orig_offset) {
+            sge->length += use;
         } else {
             sge = sg + sg_curr;
             sg_unmark_end(sge);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 598333b123b9..bae9096821be 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1200,8 +1200,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
     spin_lock_bh(&im->lock);
     if (pmc) {
         im->interface = pmc->interface;
-        im->sfmode = pmc->sfmode;
-        if (pmc->sfmode == MCAST_INCLUDE) {
+        if (im->sfmode == MCAST_INCLUDE) {
             im->tomb = pmc->tomb;
             im->sources = pmc->sources;
             for (psf = im->sources; psf; psf = psf->sf_next)
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index e2b6bd478afb..9c4e72e9c60a 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -524,6 +524,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
     to->dev = from->dev;
     to->mark = from->mark;
 
+    skb_copy_hash(to, from);
+
     /* Copy the flags to each fragment. */
     IPCB(to)->flags = IPCB(from)->flags;
 
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 64c76dcf7386..c0fe5ad996f2 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -150,15 +150,18 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 {
     struct sockaddr_in sin;
     const struct iphdr *iph = ip_hdr(skb);
-    __be16 *ports = (__be16 *)skb_transport_header(skb);
+    __be16 *ports;
+    int end;
 
-    if (skb_transport_offset(skb) + 4 > (int)skb->len)
+    end = skb_transport_offset(skb) + 4;
+    if (end > 0 && !pskb_may_pull(skb, end))
         return;
 
     /* All current transport protocols have the port numbers in the
      * first four bytes of the transport header and this function is
      * written with this assumption in mind.
      */
+    ports = (__be16 *)skb_transport_header(skb);
 
     sin.sin_family = AF_INET;
     sin.sin_addr.s_addr = iph->daddr;
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 5869f89ca656..8b637f9f23a2 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -129,24 +129,14 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
     struct dctcp *ca = inet_csk_ca(sk);
     struct tcp_sock *tp = tcp_sk(sk);
 
-    /* State has changed from CE=0 to CE=1 and delayed
-     * ACK has not sent yet.
-     */
-    if (!ca->ce_state &&
-        inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
-        u32 tmp_rcv_nxt;
-
-        /* Save current rcv_nxt. */
-        tmp_rcv_nxt = tp->rcv_nxt;
-
-        /* Generate previous ack with CE=0. */
-        tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
-        tp->rcv_nxt = ca->prior_rcv_nxt;
-
-        tcp_send_ack(sk);
-
-        /* Recover current rcv_nxt. */
-        tp->rcv_nxt = tmp_rcv_nxt;
+    if (!ca->ce_state) {
+        /* State has changed from CE=0 to CE=1, force an immediate
+         * ACK to reflect the new CE state. If an ACK was delayed,
+         * send that first to reflect the prior CE state.
+         */
+        if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+            __tcp_send_ack(sk, ca->prior_rcv_nxt);
+        tcp_enter_quickack_mode(sk, 1);
     }
 
     ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -160,24 +150,14 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
     struct dctcp *ca = inet_csk_ca(sk);
     struct tcp_sock *tp = tcp_sk(sk);
 
-    /* State has changed from CE=1 to CE=0 and delayed
-     * ACK has not sent yet.
-     */
-    if (ca->ce_state &&
-        inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
-        u32 tmp_rcv_nxt;
-
-        /* Save current rcv_nxt. */
-        tmp_rcv_nxt = tp->rcv_nxt;
-
-        /* Generate previous ack with CE=1. */
-        tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
-        tp->rcv_nxt = ca->prior_rcv_nxt;
-
-        tcp_send_ack(sk);
-
-        /* Recover current rcv_nxt. */
-        tp->rcv_nxt = tmp_rcv_nxt;
+    if (ca->ce_state) {
+        /* State has changed from CE=1 to CE=0, force an immediate
+         * ACK to reflect the new CE state. If an ACK was delayed,
+         * send that first to reflect the prior CE state.
+         */
+        if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+            __tcp_send_ack(sk, ca->prior_rcv_nxt);
+        tcp_enter_quickack_mode(sk, 1);
     }
 
     ca->prior_rcv_nxt = tp->rcv_nxt;
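
The two tcp_dctcp.c hunks replace the old trick of temporarily rewinding tp->rcv_nxt with an explicit ACK sequence number. A minimal sketch of the resulting CE-transition logic, using only identifiers that appear in the hunks above (surrounding code omitted, so this is illustrative rather than a verbatim excerpt of the file):

    if (!ca->ce_state) {
        /* If an ACK was being delayed, emit it now for the data received
         * under the previous CE state; prior_rcv_nxt is passed explicitly,
         * so tp->rcv_nxt itself is never rewritten and restored.
         */
        if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
            __tcp_send_ack(sk, ca->prior_rcv_nxt);
        /* Make sure the ACK reflecting the new CE state goes out at once. */
        tcp_enter_quickack_mode(sk, 1);
    }
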
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 91dbb9afb950..d51fa358b2b1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -216,7 +216,7 @@ static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
     icsk->icsk_ack.quick = quickacks;
 }
 
-static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
 {
     struct inet_connection_sock *icsk = inet_csk(sk);
 
@@ -224,6 +224,7 @@ static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
     icsk->icsk_ack.pingpong = 0;
     icsk->icsk_ack.ato = TCP_ATO_MIN;
 }
+EXPORT_SYMBOL(tcp_enter_quickack_mode);
 
 /* Send ACKs quickly, if "quick" count is not exhausted
  * and the session is not interactive.
@@ -4366,6 +4367,23 @@ static bool tcp_try_coalesce(struct sock *sk,
     return true;
 }
 
+static bool tcp_ooo_try_coalesce(struct sock *sk,
+                                 struct sk_buff *to,
+                                 struct sk_buff *from,
+                                 bool *fragstolen)
+{
+    bool res = tcp_try_coalesce(sk, to, from, fragstolen);
+
+    /* In case tcp_drop() is called later, update to->gso_segs */
+    if (res) {
+        u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
+                       max_t(u16, 1, skb_shinfo(from)->gso_segs);
+
+        skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
+    }
+    return res;
+}
+
 static void tcp_drop(struct sock *sk, struct sk_buff *skb)
 {
     sk_drops_add(sk, skb);
@@ -4489,8 +4507,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
     /* In the typical case, we are adding an skb to the end of the list.
      * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
      */
-    if (tcp_try_coalesce(sk, tp->ooo_last_skb,
-                         skb, &fragstolen)) {
+    if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
+                             skb, &fragstolen)) {
coalesce_done:
         tcp_grow_window(sk, skb);
         kfree_skb_partial(skb, fragstolen);
@@ -4518,7 +4536,7 @@ coalesce_done:
             /* All the bits are present. Drop. */
             NET_INC_STATS(sock_net(sk),
                           LINUX_MIB_TCPOFOMERGE);
-            __kfree_skb(skb);
+            tcp_drop(sk, skb);
             skb = NULL;
             tcp_dsack_set(sk, seq, end_seq);
             goto add_sack;
@@ -4537,11 +4555,11 @@ coalesce_done:
                          TCP_SKB_CB(skb1)->end_seq);
             NET_INC_STATS(sock_net(sk),
                           LINUX_MIB_TCPOFOMERGE);
-            __kfree_skb(skb1);
+            tcp_drop(sk, skb1);
             goto merge_right;
         }
-    } else if (tcp_try_coalesce(sk, skb1,
-                                skb, &fragstolen)) {
+    } else if (tcp_ooo_try_coalesce(sk, skb1,
+                                    skb, &fragstolen)) {
         goto coalesce_done;
     }
     p = &parent->rb_right;
@@ -4924,6 +4942,7 @@ end:
 static void tcp_collapse_ofo_queue(struct sock *sk)
 {
     struct tcp_sock *tp = tcp_sk(sk);
+    u32 range_truesize, sum_tiny = 0;
     struct sk_buff *skb, *head;
     u32 start, end;
 
@@ -4935,6 +4954,7 @@ new_range:
     }
     start = TCP_SKB_CB(skb)->seq;
     end = TCP_SKB_CB(skb)->end_seq;
+    range_truesize = skb->truesize;
 
     for (head = skb;;) {
         skb = skb_rb_next(skb);
@@ -4945,11 +4965,20 @@ new_range:
         if (!skb ||
             after(TCP_SKB_CB(skb)->seq, end) ||
             before(TCP_SKB_CB(skb)->end_seq, start)) {
-            tcp_collapse(sk, NULL, &tp->out_of_order_queue,
-                         head, skb, start, end);
+            /* Do not attempt collapsing tiny skbs */
+            if (range_truesize != head->truesize ||
+                end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
+                tcp_collapse(sk, NULL, &tp->out_of_order_queue,
+                             head, skb, start, end);
+            } else {
+                sum_tiny += range_truesize;
+                if (sum_tiny > sk->sk_rcvbuf >> 3)
+                    return;
+            }
             goto new_range;
         }
 
+        range_truesize += skb->truesize;
         if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
             start = TCP_SKB_CB(skb)->seq;
         if (after(TCP_SKB_CB(skb)->end_seq, end))
@@ -4964,6 +4993,7 @@ new_range:
  * 2) not add too big latencies if thousands of packets sit there.
  *    (But if application shrinks SO_RCVBUF, we could still end up
  *    freeing whole queue here)
+ * 3) Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks.
  *
  * Return true if queue has shrunk.
  */
@@ -4971,20 +5001,26 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
 {
     struct tcp_sock *tp = tcp_sk(sk);
     struct rb_node *node, *prev;
+    int goal;
 
     if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
         return false;
 
     NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
+    goal = sk->sk_rcvbuf >> 3;
     node = &tp->ooo_last_skb->rbnode;
     do {
         prev = rb_prev(node);
         rb_erase(node, &tp->out_of_order_queue);
+        goal -= rb_to_skb(node)->truesize;
         tcp_drop(sk, rb_to_skb(node));
-        sk_mem_reclaim(sk);
-        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
-            !tcp_under_memory_pressure(sk))
-            break;
+        if (!prev || goal <= 0) {
+            sk_mem_reclaim(sk);
+            if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+                !tcp_under_memory_pressure(sk))
+                break;
+            goal = sk->sk_rcvbuf >> 3;
+        }
         node = prev;
     } while (node);
     tp->ooo_last_skb = rb_to_skb(prev);
@@ -5019,6 +5055,9 @@ static int tcp_prune_queue(struct sock *sk)
     else if (tcp_under_memory_pressure(sk))
         tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
+    if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
+        return 0;
+
     tcp_collapse_ofo_queue(sk);
     if (!skb_queue_empty(&sk->sk_receive_queue))
         tcp_collapse(sk, &sk->sk_receive_queue, NULL,
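
The tcp_input.c pruning hunks all key off the same one-eighth-of-sk_rcvbuf threshold. A worked example with a hypothetical buffer size (the 1 MiB figure below is illustrative and not taken from the patch):

    /* Hypothetical: sk->sk_rcvbuf == 1 MiB */
    goal = sk->sk_rcvbuf >> 3;    /* 128 KiB */
    /* tcp_prune_ofo_queue() now calls sk_mem_reclaim() only after roughly
     * 128 KiB of truesize has been erased (or the queue is exhausted),
     * rather than after every skb, and tcp_collapse_ofo_queue() bails out
     * once more than 128 KiB worth of tiny ranges has been skipped.
     */
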
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 6cbab56e7407..490df62f26d4 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -160,7 +160,8 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
 }
 
 /* Account for an ACK we sent. */
-static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
+static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
+                                      u32 rcv_nxt)
 {
     struct tcp_sock *tp = tcp_sk(sk);
 
@@ -171,6 +172,9 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
         if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
             __sock_put(sk);
     }
+
+    if (unlikely(rcv_nxt != tp->rcv_nxt))
+        return;  /* Special ACK sent by DCTCP to reflect ECN */
     tcp_dec_quickack_mode(sk, pkts);
     inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 }
@@ -1009,8 +1013,8 @@ static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb)
  * We are working here with either a clone of the original
  * SKB, or a fresh unique copy made by the retransmit engine.
  */
-static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
-                            gfp_t gfp_mask)
+static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
+                              int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
 {
     const struct inet_connection_sock *icsk = inet_csk(sk);
     struct inet_sock *inet;
@@ -1086,7 +1090,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
     th->source = inet->inet_sport;
     th->dest = inet->inet_dport;
     th->seq = htonl(tcb->seq);
-    th->ack_seq = htonl(tp->rcv_nxt);
+    th->ack_seq = htonl(rcv_nxt);
     *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
                                   tcb->tcp_flags);
 
@@ -1127,7 +1131,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
     icsk->icsk_af_ops->send_check(sk, skb);
 
     if (likely(tcb->tcp_flags & TCPHDR_ACK))
-        tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
+        tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
 
     if (skb->len != tcp_header_size) {
         tcp_event_data_sent(tp, sk);
@@ -1164,6 +1168,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
     return err;
 }
 
+static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+                            gfp_t gfp_mask)
+{
+    return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
+                              tcp_sk(sk)->rcv_nxt);
+}
+
 /* This routine just queues the buffer for sending.
  *
  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
@@ -3557,7 +3568,7 @@ void tcp_send_delayed_ack(struct sock *sk)
 }
 
 /* This routine sends an ack and also updates the window. */
-void tcp_send_ack(struct sock *sk)
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
 {
     struct sk_buff *buff;
 
@@ -3590,9 +3601,14 @@ void tcp_send_ack(struct sock *sk)
     skb_set_tcp_pure_ack(buff);
 
     /* Send it off, this clears delayed acks for us. */
-    tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0);
+    __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
+}
+EXPORT_SYMBOL_GPL(__tcp_send_ack);
+
+void tcp_send_ack(struct sock *sk)
+{
+    __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
 }
-EXPORT_SYMBOL_GPL(tcp_send_ack);
 
 /* This routine sends a packet with an out of date sequence
  * number. It assumes the other end will try to ack it.
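
After the tcp_output.c hunks, the ordinary ACK path is a thin wrapper around the new helpers that carry the ACK sequence explicitly. A hedged sketch of the call chain, using only function names from the hunks above:

    /* Regular ACK: carries the current rcv_nxt. */
    tcp_send_ack(sk);                       /* __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt) */

    /* DCTCP "prior CE state" ACK: deliberately carries an older value. */
    __tcp_send_ack(sk, ca->prior_rcv_nxt);  /* rcv_nxt != tp->rcv_nxt */

    /* In tcp_event_ack_sent(), such a special ACK returns early, so it does
     * not consume quick-ACK credit or clear the delayed-ACK timer.
     */
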
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 1659a6b3cf42..2fac4ad74867 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2372,7 +2372,8 @@ static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
             continue;
         if ((rt->fib6_flags & noflags) != 0)
             continue;
-        fib6_info_hold(rt);
+        if (!fib6_info_hold_safe(rt))
+            continue;
         break;
     }
 out:
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 201306b9b5ea..5a094f58fe8a 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -700,13 +700,16 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
     }
     if (np->rxopt.bits.rxorigdstaddr) {
         struct sockaddr_in6 sin6;
-        __be16 *ports = (__be16 *) skb_transport_header(skb);
+        __be16 *ports;
+        int end;
 
-        if (skb_transport_offset(skb) + 4 <= (int)skb->len) {
+        end = skb_transport_offset(skb) + 4;
+        if (end <= 0 || pskb_may_pull(skb, end)) {
             /* All current transport protocols have the port numbers in the
              * first four bytes of the transport header and this function is
              * written with this assumption in mind.
              */
+            ports = (__be16 *)skb_transport_header(skb);
 
             sin6.sin6_family = AF_INET6;
             sin6.sin6_addr = ipv6_hdr(skb)->daddr;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 24611c8b0562..00d159d431dc 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -402,9 +402,10 @@ static int icmp6_iif(const struct sk_buff *skb)
 
     /* for local traffic to local address, skb dev is the loopback
      * device. Check if there is a dst attached to the skb and if so
-     * get the real device index.
+     * get the real device index. Same is needed for replies to a link
+     * local address on a device enslaved to an L3 master device
      */
-    if (unlikely(iif == LOOPBACK_IFINDEX)) {
+    if (unlikely(iif == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) {
         const struct rt6_info *rt6 = skb_rt6_info(skb);
 
         if (rt6)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 8047fd41ba88..16f200f06500 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -570,6 +570,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
     to->dev = from->dev;
     to->mark = from->mark;
 
+    skb_copy_hash(to, from);
+
 #ifdef CONFIG_NET_SCHED
     to->tc_index = from->tc_index;
 #endif
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 195ed2db2207..4ae54aaca373 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -790,8 +790,7 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
     spin_lock_bh(&im->mca_lock);
     if (pmc) {
         im->idev = pmc->idev;
-        im->mca_sfmode = pmc->mca_sfmode;
-        if (pmc->mca_sfmode == MCAST_INCLUDE) {
+        if (im->mca_sfmode == MCAST_INCLUDE) {
             im->mca_tomb = pmc->mca_tomb;
             im->mca_sources = pmc->mca_sources;
             for (psf = im->mca_sources; psf; psf = psf->sf_next)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 2ce0bd17de4f..ec18b3ce8b6d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -972,10 +972,10 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
     rt->dst.lastuse = jiffies;
 }
 
+/* Caller must already hold reference to @from */
 static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
 {
     rt->rt6i_flags &= ~RTF_EXPIRES;
-    fib6_info_hold(from);
     rcu_assign_pointer(rt->from, from);
     dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
     if (from->fib6_metrics != &dst_default_metrics) {
@@ -984,6 +984,7 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
     }
 }
 
+/* Caller must already hold reference to @ort */
 static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
 {
     struct net_device *dev = fib6_info_nh_dev(ort);
@@ -1044,9 +1045,14 @@ static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt)
     struct net_device *dev = rt->fib6_nh.nh_dev;
     struct rt6_info *nrt;
 
+    if (!fib6_info_hold_safe(rt))
+        return NULL;
+
     nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
     if (nrt)
         ip6_rt_copy_init(nrt, rt);
+    else
+        fib6_info_release(rt);
 
     return nrt;
 }
@@ -1178,10 +1184,15 @@ static struct rt6_info *ip6_rt_cache_alloc(struct fib6_info *ort,
      *  Clone the route.
      */
 
+    if (!fib6_info_hold_safe(ort))
+        return NULL;
+
     dev = ip6_rt_get_dev_rcu(ort);
     rt = ip6_dst_alloc(dev_net(dev), dev, 0);
-    if (!rt)
+    if (!rt) {
+        fib6_info_release(ort);
         return NULL;
+    }
 
     ip6_rt_copy_init(rt, ort);
     rt->rt6i_flags |= RTF_CACHE;
@@ -1210,12 +1221,17 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct fib6_info *rt)
     struct net_device *dev;
     struct rt6_info *pcpu_rt;
 
+    if (!fib6_info_hold_safe(rt))
+        return NULL;
+
     rcu_read_lock();
     dev = ip6_rt_get_dev_rcu(rt);
     pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags);
     rcu_read_unlock();
-    if (!pcpu_rt)
+    if (!pcpu_rt) {
+        fib6_info_release(rt);
         return NULL;
+    }
     ip6_rt_copy_init(pcpu_rt, rt);
     pcpu_rt->rt6i_flags |= RTF_PCPU;
     return pcpu_rt;
@@ -2486,7 +2502,7 @@ restart:
 
 out:
     if (ret)
-        dst_hold(&ret->dst);
+        ip6_hold_safe(net, &ret, true);
     else
         ret = ip6_create_rt_rcu(rt);
 
@@ -3303,7 +3319,8 @@ static int ip6_route_del(struct fib6_config *cfg,
             continue;
         if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol)
             continue;
-        fib6_info_hold(rt);
+        if (!fib6_info_hold_safe(rt))
+            continue;
         rcu_read_unlock();
 
         /* if gateway was specified only delete the one hop */
@@ -3409,6 +3426,9 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
 
     rcu_read_lock();
     from = rcu_dereference(rt->from);
+    /* This fib6_info_hold() is safe here because we hold reference to rt
+     * and rt already holds reference to fib6_info.
+     */
     fib6_info_hold(from);
     rcu_read_unlock();
 
@@ -3470,7 +3490,8 @@ static struct fib6_info *rt6_get_route_info(struct net *net,
             continue;
         if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr))
             continue;
-        fib6_info_hold(rt);
+        if (!fib6_info_hold_safe(rt))
+            continue;
         break;
     }
 out:
@@ -3530,8 +3551,8 @@ struct fib6_info *rt6_get_dflt_router(struct net *net,
             ipv6_addr_equal(&rt->fib6_nh.nh_gw, addr))
             break;
     }
-    if (rt)
-        fib6_info_hold(rt);
+    if (rt && !fib6_info_hold_safe(rt))
+        rt = NULL;
     rcu_read_unlock();
     return rt;
 }
@@ -3579,8 +3600,8 @@ restart:
         struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
 
         if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
-            (!idev || idev->cnf.accept_ra != 2)) {
-            fib6_info_hold(rt);
+            (!idev || idev->cnf.accept_ra != 2) &&
+            fib6_info_hold_safe(rt)) {
             rcu_read_unlock();
             ip6_del_rt(net, rt);
             goto restart;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7efa9fd7e109..03e6b7a2bc53 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -938,7 +938,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
                                    &tcp_hashinfo, NULL, 0,
                                    &ipv6h->saddr,
                                    th->source, &ipv6h->daddr,
-                                   ntohs(th->source), tcp_v6_iif(skb),
+                                   ntohs(th->source),
+                                   tcp_v6_iif_l3_slave(skb),
                                    tcp_v6_sdif(skb));
         if (!sk1)
             goto out;
@@ -1609,7 +1610,8 @@ do_time_wait:
                                     skb, __tcp_hdrlen(th),
                                     &ipv6_hdr(skb)->saddr, th->source,
                                     &ipv6_hdr(skb)->daddr,
-                                    ntohs(th->dest), tcp_v6_iif(skb),
+                                    ntohs(th->dest),
+                                    tcp_v6_iif_l3_slave(skb),
                                     sdif);
         if (sk2) {
             struct inet_timewait_sock *tw = inet_twsk(sk);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index a16ba568e2a3..64742f2765c4 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2370,11 +2370,8 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
              sdata->control_port_over_nl80211)) {
         struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
         bool noencrypt = status->flag & RX_FLAG_DECRYPTED;
-        struct ethhdr *ehdr = eth_hdr(skb);
 
-        cfg80211_rx_control_port(dev, skb->data, skb->len,
-                                 ehdr->h_source,
-                                 be16_to_cpu(skb->protocol), noencrypt);
+        cfg80211_rx_control_port(dev, skb, noencrypt);
         dev_kfree_skb(skb);
     } else {
         /* deliver to local stack */
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 3e68132a41fa..88efda7c9f8a 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2140,7 +2140,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
         if (!sta->uploaded)
             continue;
 
-        if (sta->sdata->vif.type != NL80211_IFTYPE_AP)
+        if (sta->sdata->vif.type != NL80211_IFTYPE_AP &&
+            sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
             continue;
 
         for (state = IEEE80211_STA_NOTEXIST;
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index f476d116c816..8c58f96b59e7 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -244,14 +244,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
          * We currently ignore Sync packets
          *
          *  sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
-            sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+            sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
         },
         [DCCP_PKT_SYNCACK] = {
             /*
              * We currently ignore SyncAck packets
              *
              *  sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
-            sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+            sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
         },
     },
     [CT_DCCP_ROLE_SERVER] = {
@@ -372,14 +372,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
          * We currently ignore Sync packets
          *
          *  sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
-            sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+            sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
         },
         [DCCP_PKT_SYNCACK] = {
             /*
              * We currently ignore SyncAck packets
              *
              *  sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
-            sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+            sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
         },
     },
 };
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index c0fb2bcd30fe..f18085639807 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -76,6 +76,7 @@ static void nft_ctx_init(struct nft_ctx *ctx, | |||
76 | { | 76 | { |
77 | ctx->net = net; | 77 | ctx->net = net; |
78 | ctx->family = family; | 78 | ctx->family = family; |
79 | ctx->level = 0; | ||
79 | ctx->table = table; | 80 | ctx->table = table; |
80 | ctx->chain = chain; | 81 | ctx->chain = chain; |
81 | ctx->nla = nla; | 82 | ctx->nla = nla; |
@@ -1651,7 +1652,6 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, | |||
1651 | struct nft_base_chain *basechain; | 1652 | struct nft_base_chain *basechain; |
1652 | struct nft_stats *stats = NULL; | 1653 | struct nft_stats *stats = NULL; |
1653 | struct nft_chain_hook hook; | 1654 | struct nft_chain_hook hook; |
1654 | const struct nlattr *name; | ||
1655 | struct nf_hook_ops *ops; | 1655 | struct nf_hook_ops *ops; |
1656 | struct nft_trans *trans; | 1656 | struct nft_trans *trans; |
1657 | int err; | 1657 | int err; |
@@ -1700,12 +1700,11 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, | |||
1700 | return PTR_ERR(stats); | 1700 | return PTR_ERR(stats); |
1701 | } | 1701 | } |
1702 | 1702 | ||
1703 | err = -ENOMEM; | ||
1703 | trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN, | 1704 | trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN, |
1704 | sizeof(struct nft_trans_chain)); | 1705 | sizeof(struct nft_trans_chain)); |
1705 | if (trans == NULL) { | 1706 | if (trans == NULL) |
1706 | free_percpu(stats); | 1707 | goto err; |
1707 | return -ENOMEM; | ||
1708 | } | ||
1709 | 1708 | ||
1710 | nft_trans_chain_stats(trans) = stats; | 1709 | nft_trans_chain_stats(trans) = stats; |
1711 | nft_trans_chain_update(trans) = true; | 1710 | nft_trans_chain_update(trans) = true; |
@@ -1715,19 +1714,37 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, | |||
1715 | else | 1714 | else |
1716 | nft_trans_chain_policy(trans) = -1; | 1715 | nft_trans_chain_policy(trans) = -1; |
1717 | 1716 | ||
1718 | name = nla[NFTA_CHAIN_NAME]; | 1717 | if (nla[NFTA_CHAIN_HANDLE] && |
1719 | if (nla[NFTA_CHAIN_HANDLE] && name) { | 1718 | nla[NFTA_CHAIN_NAME]) { |
1720 | nft_trans_chain_name(trans) = | 1719 | struct nft_trans *tmp; |
1721 | nla_strdup(name, GFP_KERNEL); | 1720 | char *name; |
1722 | if (!nft_trans_chain_name(trans)) { | 1721 | |
1723 | kfree(trans); | 1722 | err = -ENOMEM; |
1724 | free_percpu(stats); | 1723 | name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL); |
1725 | return -ENOMEM; | 1724 | if (!name) |
1725 | goto err; | ||
1726 | |||
1727 | err = -EEXIST; | ||
1728 | list_for_each_entry(tmp, &ctx->net->nft.commit_list, list) { | ||
1729 | if (tmp->msg_type == NFT_MSG_NEWCHAIN && | ||
1730 | tmp->ctx.table == table && | ||
1731 | nft_trans_chain_update(tmp) && | ||
1732 | nft_trans_chain_name(tmp) && | ||
1733 | strcmp(name, nft_trans_chain_name(tmp)) == 0) { | ||
1734 | kfree(name); | ||
1735 | goto err; | ||
1736 | } | ||
1726 | } | 1737 | } |
1738 | |||
1739 | nft_trans_chain_name(trans) = name; | ||
1727 | } | 1740 | } |
1728 | list_add_tail(&trans->list, &ctx->net->nft.commit_list); | 1741 | list_add_tail(&trans->list, &ctx->net->nft.commit_list); |
1729 | 1742 | ||
1730 | return 0; | 1743 | return 0; |
1744 | err: | ||
1745 | free_percpu(stats); | ||
1746 | kfree(trans); | ||
1747 | return err; | ||
1731 | } | 1748 | } |
1732 | 1749 | ||
1733 | static int nf_tables_newchain(struct net *net, struct sock *nlsk, | 1750 | static int nf_tables_newchain(struct net *net, struct sock *nlsk, |
@@ -2309,6 +2326,39 @@ done: | |||
2309 | return skb->len; | 2326 | return skb->len; |
2310 | } | 2327 | } |
2311 | 2328 | ||
2329 | static int nf_tables_dump_rules_start(struct netlink_callback *cb) | ||
2330 | { | ||
2331 | const struct nlattr * const *nla = cb->data; | ||
2332 | struct nft_rule_dump_ctx *ctx = NULL; | ||
2333 | |||
2334 | if (nla[NFTA_RULE_TABLE] || nla[NFTA_RULE_CHAIN]) { | ||
2335 | ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC); | ||
2336 | if (!ctx) | ||
2337 | return -ENOMEM; | ||
2338 | |||
2339 | if (nla[NFTA_RULE_TABLE]) { | ||
2340 | ctx->table = nla_strdup(nla[NFTA_RULE_TABLE], | ||
2341 | GFP_ATOMIC); | ||
2342 | if (!ctx->table) { | ||
2343 | kfree(ctx); | ||
2344 | return -ENOMEM; | ||
2345 | } | ||
2346 | } | ||
2347 | if (nla[NFTA_RULE_CHAIN]) { | ||
2348 | ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN], | ||
2349 | GFP_ATOMIC); | ||
2350 | if (!ctx->chain) { | ||
2351 | kfree(ctx->table); | ||
2352 | kfree(ctx); | ||
2353 | return -ENOMEM; | ||
2354 | } | ||
2355 | } | ||
2356 | } | ||
2357 | |||
2358 | cb->data = ctx; | ||
2359 | return 0; | ||
2360 | } | ||
2361 | |||
2312 | static int nf_tables_dump_rules_done(struct netlink_callback *cb) | 2362 | static int nf_tables_dump_rules_done(struct netlink_callback *cb) |
2313 | { | 2363 | { |
2314 | struct nft_rule_dump_ctx *ctx = cb->data; | 2364 | struct nft_rule_dump_ctx *ctx = cb->data; |
@@ -2338,38 +2388,13 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk, | |||
2338 | 2388 | ||
2339 | if (nlh->nlmsg_flags & NLM_F_DUMP) { | 2389 | if (nlh->nlmsg_flags & NLM_F_DUMP) { |
2340 | struct netlink_dump_control c = { | 2390 | struct netlink_dump_control c = { |
2391 | .start= nf_tables_dump_rules_start, | ||
2341 | .dump = nf_tables_dump_rules, | 2392 | .dump = nf_tables_dump_rules, |
2342 | .done = nf_tables_dump_rules_done, | 2393 | .done = nf_tables_dump_rules_done, |
2343 | .module = THIS_MODULE, | 2394 | .module = THIS_MODULE, |
2395 | .data = (void *)nla, | ||
2344 | }; | 2396 | }; |
2345 | 2397 | ||
2346 | if (nla[NFTA_RULE_TABLE] || nla[NFTA_RULE_CHAIN]) { | ||
2347 | struct nft_rule_dump_ctx *ctx; | ||
2348 | |||
2349 | ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC); | ||
2350 | if (!ctx) | ||
2351 | return -ENOMEM; | ||
2352 | |||
2353 | if (nla[NFTA_RULE_TABLE]) { | ||
2354 | ctx->table = nla_strdup(nla[NFTA_RULE_TABLE], | ||
2355 | GFP_ATOMIC); | ||
2356 | if (!ctx->table) { | ||
2357 | kfree(ctx); | ||
2358 | return -ENOMEM; | ||
2359 | } | ||
2360 | } | ||
2361 | if (nla[NFTA_RULE_CHAIN]) { | ||
2362 | ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN], | ||
2363 | GFP_ATOMIC); | ||
2364 | if (!ctx->chain) { | ||
2365 | kfree(ctx->table); | ||
2366 | kfree(ctx); | ||
2367 | return -ENOMEM; | ||
2368 | } | ||
2369 | } | ||
2370 | c.data = ctx; | ||
2371 | } | ||
2372 | |||
2373 | return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); | 2398 | return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); |
2374 | } | 2399 | } |
2375 | 2400 | ||
@@ -2440,6 +2465,9 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain) | |||
2440 | struct nft_rule *rule; | 2465 | struct nft_rule *rule; |
2441 | int err; | 2466 | int err; |
2442 | 2467 | ||
2468 | if (ctx->level == NFT_JUMP_STACK_SIZE) | ||
2469 | return -EMLINK; | ||
2470 | |||
2443 | list_for_each_entry(rule, &chain->rules, list) { | 2471 | list_for_each_entry(rule, &chain->rules, list) { |
2444 | if (!nft_is_active_next(ctx->net, rule)) | 2472 | if (!nft_is_active_next(ctx->net, rule)) |
2445 | continue; | 2473 | continue; |
@@ -3227,6 +3255,18 @@ done: | |||
3227 | return skb->len; | 3255 | return skb->len; |
3228 | } | 3256 | } |
3229 | 3257 | ||
3258 | static int nf_tables_dump_sets_start(struct netlink_callback *cb) | ||
3259 | { | ||
3260 | struct nft_ctx *ctx_dump = NULL; | ||
3261 | |||
3262 | ctx_dump = kmemdup(cb->data, sizeof(*ctx_dump), GFP_ATOMIC); | ||
3263 | if (ctx_dump == NULL) | ||
3264 | return -ENOMEM; | ||
3265 | |||
3266 | cb->data = ctx_dump; | ||
3267 | return 0; | ||
3268 | } | ||
3269 | |||
3230 | static int nf_tables_dump_sets_done(struct netlink_callback *cb) | 3270 | static int nf_tables_dump_sets_done(struct netlink_callback *cb) |
3231 | { | 3271 | { |
3232 | kfree(cb->data); | 3272 | kfree(cb->data); |
@@ -3254,18 +3294,12 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk, | |||
3254 | 3294 | ||
3255 | if (nlh->nlmsg_flags & NLM_F_DUMP) { | 3295 | if (nlh->nlmsg_flags & NLM_F_DUMP) { |
3256 | struct netlink_dump_control c = { | 3296 | struct netlink_dump_control c = { |
3297 | .start = nf_tables_dump_sets_start, | ||
3257 | .dump = nf_tables_dump_sets, | 3298 | .dump = nf_tables_dump_sets, |
3258 | .done = nf_tables_dump_sets_done, | 3299 | .done = nf_tables_dump_sets_done, |
3300 | .data = &ctx, | ||
3259 | .module = THIS_MODULE, | 3301 | .module = THIS_MODULE, |
3260 | }; | 3302 | }; |
3261 | struct nft_ctx *ctx_dump; | ||
3262 | |||
3263 | ctx_dump = kmalloc(sizeof(*ctx_dump), GFP_ATOMIC); | ||
3264 | if (ctx_dump == NULL) | ||
3265 | return -ENOMEM; | ||
3266 | |||
3267 | *ctx_dump = ctx; | ||
3268 | c.data = ctx_dump; | ||
3269 | 3303 | ||
3270 | return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); | 3304 | return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); |
3271 | } | 3305 | } |
@@ -3915,6 +3949,15 @@ nla_put_failure: | |||
3915 | return -ENOSPC; | 3949 | return -ENOSPC; |
3916 | } | 3950 | } |
3917 | 3951 | ||
3952 | static int nf_tables_dump_set_start(struct netlink_callback *cb) | ||
3953 | { | ||
3954 | struct nft_set_dump_ctx *dump_ctx = cb->data; | ||
3955 | |||
3956 | cb->data = kmemdup(dump_ctx, sizeof(*dump_ctx), GFP_ATOMIC); | ||
3957 | |||
3958 | return cb->data ? 0 : -ENOMEM; | ||
3959 | } | ||
3960 | |||
3918 | static int nf_tables_dump_set_done(struct netlink_callback *cb) | 3961 | static int nf_tables_dump_set_done(struct netlink_callback *cb) |
3919 | { | 3962 | { |
3920 | kfree(cb->data); | 3963 | kfree(cb->data); |
@@ -4068,20 +4111,17 @@ static int nf_tables_getsetelem(struct net *net, struct sock *nlsk, | |||
4068 | 4111 | ||
4069 | if (nlh->nlmsg_flags & NLM_F_DUMP) { | 4112 | if (nlh->nlmsg_flags & NLM_F_DUMP) { |
4070 | struct netlink_dump_control c = { | 4113 | struct netlink_dump_control c = { |
4114 | .start = nf_tables_dump_set_start, | ||
4071 | .dump = nf_tables_dump_set, | 4115 | .dump = nf_tables_dump_set, |
4072 | .done = nf_tables_dump_set_done, | 4116 | .done = nf_tables_dump_set_done, |
4073 | .module = THIS_MODULE, | 4117 | .module = THIS_MODULE, |
4074 | }; | 4118 | }; |
4075 | struct nft_set_dump_ctx *dump_ctx; | 4119 | struct nft_set_dump_ctx dump_ctx = { |
4076 | 4120 | .set = set, | |
4077 | dump_ctx = kmalloc(sizeof(*dump_ctx), GFP_ATOMIC); | 4121 | .ctx = ctx, |
4078 | if (!dump_ctx) | 4122 | }; |
4079 | return -ENOMEM; | ||
4080 | |||
4081 | dump_ctx->set = set; | ||
4082 | dump_ctx->ctx = ctx; | ||
4083 | 4123 | ||
4084 | c.data = dump_ctx; | 4124 | c.data = &dump_ctx; |
4085 | return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); | 4125 | return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); |
4086 | } | 4126 | } |
4087 | 4127 | ||
@@ -5041,38 +5081,42 @@ done: | |||
5041 | return skb->len; | 5081 | return skb->len; |
5042 | } | 5082 | } |
5043 | 5083 | ||
5044 | static int nf_tables_dump_obj_done(struct netlink_callback *cb) | 5084 | static int nf_tables_dump_obj_start(struct netlink_callback *cb) |
5045 | { | 5085 | { |
5046 | struct nft_obj_filter *filter = cb->data; | 5086 | const struct nlattr * const *nla = cb->data; |
5087 | struct nft_obj_filter *filter = NULL; | ||
5047 | 5088 | ||
5048 | if (filter) { | 5089 | if (nla[NFTA_OBJ_TABLE] || nla[NFTA_OBJ_TYPE]) { |
5049 | kfree(filter->table); | 5090 | filter = kzalloc(sizeof(*filter), GFP_ATOMIC); |
5050 | kfree(filter); | 5091 | if (!filter) |
5092 | return -ENOMEM; | ||
5093 | |||
5094 | if (nla[NFTA_OBJ_TABLE]) { | ||
5095 | filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC); | ||
5096 | if (!filter->table) { | ||
5097 | kfree(filter); | ||
5098 | return -ENOMEM; | ||
5099 | } | ||
5100 | } | ||
5101 | |||
5102 | if (nla[NFTA_OBJ_TYPE]) | ||
5103 | filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE])); | ||
5051 | } | 5104 | } |
5052 | 5105 | ||
5106 | cb->data = filter; | ||
5053 | return 0; | 5107 | return 0; |
5054 | } | 5108 | } |
5055 | 5109 | ||
5056 | static struct nft_obj_filter * | 5110 | static int nf_tables_dump_obj_done(struct netlink_callback *cb) |
5057 | nft_obj_filter_alloc(const struct nlattr * const nla[]) | ||
5058 | { | 5111 | { |
5059 | struct nft_obj_filter *filter; | 5112 | struct nft_obj_filter *filter = cb->data; |
5060 | |||
5061 | filter = kzalloc(sizeof(*filter), GFP_ATOMIC); | ||
5062 | if (!filter) | ||
5063 | return ERR_PTR(-ENOMEM); | ||
5064 | 5113 | ||
5065 | if (nla[NFTA_OBJ_TABLE]) { | 5114 | if (filter) { |
5066 | filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC); | 5115 | kfree(filter->table); |
5067 | if (!filter->table) { | 5116 | kfree(filter); |
5068 | kfree(filter); | ||
5069 | return ERR_PTR(-ENOMEM); | ||
5070 | } | ||
5071 | } | 5117 | } |
5072 | if (nla[NFTA_OBJ_TYPE]) | ||
5073 | filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE])); | ||
5074 | 5118 | ||
5075 | return filter; | 5119 | return 0; |
5076 | } | 5120 | } |
5077 | 5121 | ||
5078 | /* called with rcu_read_lock held */ | 5122 | /* called with rcu_read_lock held */ |
@@ -5093,21 +5137,13 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk, | |||
5093 | 5137 | ||
5094 | if (nlh->nlmsg_flags & NLM_F_DUMP) { | 5138 | if (nlh->nlmsg_flags & NLM_F_DUMP) { |
5095 | struct netlink_dump_control c = { | 5139 | struct netlink_dump_control c = { |
5140 | .start = nf_tables_dump_obj_start, | ||
5096 | .dump = nf_tables_dump_obj, | 5141 | .dump = nf_tables_dump_obj, |
5097 | .done = nf_tables_dump_obj_done, | 5142 | .done = nf_tables_dump_obj_done, |
5098 | .module = THIS_MODULE, | 5143 | .module = THIS_MODULE, |
5144 | .data = (void *)nla, | ||
5099 | }; | 5145 | }; |
5100 | 5146 | ||
5101 | if (nla[NFTA_OBJ_TABLE] || | ||
5102 | nla[NFTA_OBJ_TYPE]) { | ||
5103 | struct nft_obj_filter *filter; | ||
5104 | |||
5105 | filter = nft_obj_filter_alloc(nla); | ||
5106 | if (IS_ERR(filter)) | ||
5107 | return -ENOMEM; | ||
5108 | |||
5109 | c.data = filter; | ||
5110 | } | ||
5111 | return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); | 5147 | return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); |
5112 | } | 5148 | } |
5113 | 5149 | ||
@@ -5386,8 +5422,6 @@ static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx, | |||
5386 | flowtable->ops[i].priv = &flowtable->data; | 5422 | flowtable->ops[i].priv = &flowtable->data; |
5387 | flowtable->ops[i].hook = flowtable->data.type->hook; | 5423 | flowtable->ops[i].hook = flowtable->data.type->hook; |
5388 | flowtable->ops[i].dev = dev_array[i]; | 5424 | flowtable->ops[i].dev = dev_array[i]; |
5389 | flowtable->dev_name[i] = kstrdup(dev_array[i]->name, | ||
5390 | GFP_KERNEL); | ||
5391 | } | 5425 | } |
5392 | 5426 | ||
5393 | return err; | 5427 | return err; |
@@ -5545,10 +5579,8 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk, | |||
5545 | err6: | 5579 | err6: |
5546 | i = flowtable->ops_len; | 5580 | i = flowtable->ops_len; |
5547 | err5: | 5581 | err5: |
5548 | for (k = i - 1; k >= 0; k--) { | 5582 | for (k = i - 1; k >= 0; k--) |
5549 | kfree(flowtable->dev_name[k]); | ||
5550 | nf_unregister_net_hook(net, &flowtable->ops[k]); | 5583 | nf_unregister_net_hook(net, &flowtable->ops[k]); |
5551 | } | ||
5552 | 5584 | ||
5553 | kfree(flowtable->ops); | 5585 | kfree(flowtable->ops); |
5554 | err4: | 5586 | err4: |
@@ -5647,9 +5679,10 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net, | |||
5647 | goto nla_put_failure; | 5679 | goto nla_put_failure; |
5648 | 5680 | ||
5649 | for (i = 0; i < flowtable->ops_len; i++) { | 5681 | for (i = 0; i < flowtable->ops_len; i++) { |
5650 | if (flowtable->dev_name[i][0] && | 5682 | const struct net_device *dev = READ_ONCE(flowtable->ops[i].dev); |
5651 | nla_put_string(skb, NFTA_DEVICE_NAME, | 5683 | |
5652 | flowtable->dev_name[i])) | 5684 | if (dev && |
5685 | nla_put_string(skb, NFTA_DEVICE_NAME, dev->name)) | ||
5653 | goto nla_put_failure; | 5686 | goto nla_put_failure; |
5654 | } | 5687 | } |
5655 | nla_nest_end(skb, nest_devs); | 5688 | nla_nest_end(skb, nest_devs); |
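Rather than keeping the cached dev_name[] copies removed in the hunks above, the dump now loads the hook's device pointer once with READ_ONCE() and only dereferences that single copy if it is still non-NULL; the netdevice event handler later in this file clears ops[i].dev when the device goes away, and the dump itself runs under rcu_read_lock() per the comments in this diff. The sketch below models the load-once-then-check half of that in userspace, with relaxed C11 atomics standing in for READ_ONCE()/WRITE_ONCE(); struct hook and the function names are invented.

#include <stdatomic.h>
#include <stdio.h>

struct net_dev { const char *name; };

/* One hook slot; 'dev' may be cleared concurrently when the device goes away. */
struct hook { _Atomic(struct net_dev *) dev; };

/* Reader: load the pointer exactly once, then test and use only that copy. */
static void dump_hook(struct hook *h)
{
	struct net_dev *dev = atomic_load_explicit(&h->dev, memory_order_relaxed);

	if (dev)
		printf("device: %s\n", dev->name);
	else
		printf("device: (unregistered)\n");
}

/* Writer: publish NULL when the device disappears. */
static void drop_hook_dev(struct hook *h)
{
	atomic_store_explicit(&h->dev, NULL, memory_order_relaxed);
}

int main(void)
{
	struct net_dev eth0 = { "eth0" };
	struct hook h;

	atomic_init(&h.dev, &eth0);
	dump_hook(&h);		/* device: eth0 */
	drop_hook_dev(&h);
	dump_hook(&h);		/* device: (unregistered) */
	return 0;
}

In the kernel the pointed-to device additionally stays valid for the duration of the dump because the whole walk happens inside an RCU read-side section; the atomics above only stand in for the "read the pointer once, never re-load it between the check and the use" rule.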
@@ -5716,37 +5749,39 @@ done: | |||
5716 | return skb->len; | 5749 | return skb->len; |
5717 | } | 5750 | } |
5718 | 5751 | ||
5719 | static int nf_tables_dump_flowtable_done(struct netlink_callback *cb) | 5752 | static int nf_tables_dump_flowtable_start(struct netlink_callback *cb) |
5720 | { | 5753 | { |
5721 | struct nft_flowtable_filter *filter = cb->data; | 5754 | const struct nlattr * const *nla = cb->data; |
5755 | struct nft_flowtable_filter *filter = NULL; | ||
5722 | 5756 | ||
5723 | if (!filter) | 5757 | if (nla[NFTA_FLOWTABLE_TABLE]) { |
5724 | return 0; | 5758 | filter = kzalloc(sizeof(*filter), GFP_ATOMIC); |
5759 | if (!filter) | ||
5760 | return -ENOMEM; | ||
5725 | 5761 | ||
5726 | kfree(filter->table); | 5762 | filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE], |
5727 | kfree(filter); | 5763 | GFP_ATOMIC); |
5764 | if (!filter->table) { | ||
5765 | kfree(filter); | ||
5766 | return -ENOMEM; | ||
5767 | } | ||
5768 | } | ||
5728 | 5769 | ||
5770 | cb->data = filter; | ||
5729 | return 0; | 5771 | return 0; |
5730 | } | 5772 | } |
5731 | 5773 | ||
5732 | static struct nft_flowtable_filter * | 5774 | static int nf_tables_dump_flowtable_done(struct netlink_callback *cb) |
5733 | nft_flowtable_filter_alloc(const struct nlattr * const nla[]) | ||
5734 | { | 5775 | { |
5735 | struct nft_flowtable_filter *filter; | 5776 | struct nft_flowtable_filter *filter = cb->data; |
5736 | 5777 | ||
5737 | filter = kzalloc(sizeof(*filter), GFP_ATOMIC); | ||
5738 | if (!filter) | 5778 | if (!filter) |
5739 | return ERR_PTR(-ENOMEM); | 5779 | return 0; |
5740 | 5780 | ||
5741 | if (nla[NFTA_FLOWTABLE_TABLE]) { | 5781 | kfree(filter->table); |
5742 | filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE], | 5782 | kfree(filter); |
5743 | GFP_ATOMIC); | 5783 | |
5744 | if (!filter->table) { | 5784 | return 0; |
5745 | kfree(filter); | ||
5746 | return ERR_PTR(-ENOMEM); | ||
5747 | } | ||
5748 | } | ||
5749 | return filter; | ||
5750 | } | 5785 | } |
5751 | 5786 | ||
5752 | /* called with rcu_read_lock held */ | 5787 | /* called with rcu_read_lock held */ |
@@ -5766,20 +5801,13 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk, | |||
5766 | 5801 | ||
5767 | if (nlh->nlmsg_flags & NLM_F_DUMP) { | 5802 | if (nlh->nlmsg_flags & NLM_F_DUMP) { |
5768 | struct netlink_dump_control c = { | 5803 | struct netlink_dump_control c = { |
5804 | .start = nf_tables_dump_flowtable_start, | ||
5769 | .dump = nf_tables_dump_flowtable, | 5805 | .dump = nf_tables_dump_flowtable, |
5770 | .done = nf_tables_dump_flowtable_done, | 5806 | .done = nf_tables_dump_flowtable_done, |
5771 | .module = THIS_MODULE, | 5807 | .module = THIS_MODULE, |
5808 | .data = (void *)nla, | ||
5772 | }; | 5809 | }; |
5773 | 5810 | ||
5774 | if (nla[NFTA_FLOWTABLE_TABLE]) { | ||
5775 | struct nft_flowtable_filter *filter; | ||
5776 | |||
5777 | filter = nft_flowtable_filter_alloc(nla); | ||
5778 | if (IS_ERR(filter)) | ||
5779 | return -ENOMEM; | ||
5780 | |||
5781 | c.data = filter; | ||
5782 | } | ||
5783 | return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); | 5811 | return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); |
5784 | } | 5812 | } |
5785 | 5813 | ||
@@ -5849,6 +5877,7 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable) | |||
5849 | kfree(flowtable->name); | 5877 | kfree(flowtable->name); |
5850 | flowtable->data.type->free(&flowtable->data); | 5878 | flowtable->data.type->free(&flowtable->data); |
5851 | module_put(flowtable->data.type->owner); | 5879 | module_put(flowtable->data.type->owner); |
5880 | kfree(flowtable); | ||
5852 | } | 5881 | } |
5853 | 5882 | ||
5854 | static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net, | 5883 | static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net, |
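The one-line addition to nf_tables_flowtable_destroy() frees the flowtable object itself after its name, data and module reference are released, closing a small per-flowtable leak. A trivial userspace illustration of the same destroy-everything rule; struct flowtable and the helpers below are made up for the example.

#include <stdlib.h>
#include <string.h>

struct flowtable {
	char *name;
	int  *ops;
};

static struct flowtable *flowtable_alloc(const char *name, int nops)
{
	struct flowtable *ft = calloc(1, sizeof(*ft));

	if (!ft)
		return NULL;
	ft->name = strdup(name);
	ft->ops = calloc(nops, sizeof(*ft->ops));
	return ft;
}

/* Destroy must release every member *and* the container itself,
 * otherwise each destroyed flowtable leaks sizeof(*ft) bytes. */
static void flowtable_destroy(struct flowtable *ft)
{
	free(ft->name);
	free(ft->ops);
	free(ft);	/* counterpart of the kfree(flowtable) added above */
}

int main(void)
{
	struct flowtable *ft = flowtable_alloc("ft0", 4);

	if (ft)
		flowtable_destroy(ft);
	return 0;
}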
@@ -5891,7 +5920,6 @@ static void nft_flowtable_event(unsigned long event, struct net_device *dev, | |||
5891 | continue; | 5920 | continue; |
5892 | 5921 | ||
5893 | nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]); | 5922 | nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]); |
5894 | flowtable->dev_name[i][0] = '\0'; | ||
5895 | flowtable->ops[i].dev = NULL; | 5923 | flowtable->ops[i].dev = NULL; |
5896 | break; | 5924 | break; |
5897 | } | 5925 | } |
@@ -6152,6 +6180,9 @@ static void nft_commit_release(struct nft_trans *trans) | |||
6152 | case NFT_MSG_DELTABLE: | 6180 | case NFT_MSG_DELTABLE: |
6153 | nf_tables_table_destroy(&trans->ctx); | 6181 | nf_tables_table_destroy(&trans->ctx); |
6154 | break; | 6182 | break; |
6183 | case NFT_MSG_NEWCHAIN: | ||
6184 | kfree(nft_trans_chain_name(trans)); | ||
6185 | break; | ||
6155 | case NFT_MSG_DELCHAIN: | 6186 | case NFT_MSG_DELCHAIN: |
6156 | nf_tables_chain_destroy(&trans->ctx); | 6187 | nf_tables_chain_destroy(&trans->ctx); |
6157 | break; | 6188 | break; |
@@ -6381,13 +6412,15 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) | |||
6381 | nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE); | 6412 | nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE); |
6382 | break; | 6413 | break; |
6383 | case NFT_MSG_NEWCHAIN: | 6414 | case NFT_MSG_NEWCHAIN: |
6384 | if (nft_trans_chain_update(trans)) | 6415 | if (nft_trans_chain_update(trans)) { |
6385 | nft_chain_commit_update(trans); | 6416 | nft_chain_commit_update(trans); |
6386 | else | 6417 | nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); |
6418 | /* trans destroyed after rcu grace period */ | ||
6419 | } else { | ||
6387 | nft_clear(net, trans->ctx.chain); | 6420 | nft_clear(net, trans->ctx.chain); |
6388 | 6421 | nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); | |
6389 | nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); | 6422 | nft_trans_destroy(trans); |
6390 | nft_trans_destroy(trans); | 6423 | } |
6391 | break; | 6424 | break; |
6392 | case NFT_MSG_DELCHAIN: | 6425 | case NFT_MSG_DELCHAIN: |
6393 | nft_chain_del(trans->ctx.chain); | 6426 | nft_chain_del(trans->ctx.chain); |
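In the commit path above, a chain rename now only publishes the update and sends the notification; the comment notes the transaction is destroyed after the RCU grace period, and the nft_commit_release() hunk further up frees the name string at that point, while the abort path below can kfree() it right away, presumably because the pending name was never published to lockless readers. A minimal userspace sketch of that publish-then-defer-free idea, assuming liburcu (the userspace RCU library) with its kernel-like rcu_read_lock()/rcu_assign_pointer()/synchronize_rcu() API; struct chain and the helpers are illustrative, and synchronize_rcu() blocks for the grace period here where the kernel defers the release to nft_commit_release() instead.

#define _LGPL_SOURCE
#include <urcu.h>		/* liburcu; link with -lurcu */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct chain { char *name; };

static struct chain *chain;	/* read locklessly, updated by one writer */

/* Reader side: only valid inside an RCU read-side critical section. */
static void print_chain_name(void)
{
	rcu_read_lock();
	struct chain *c = rcu_dereference(chain);
	printf("chain: %s\n", c->name);
	rcu_read_unlock();
}

/* "Commit": publish the new name, then free the old one only after all
 * pre-existing readers are done (the grace period). */
static void commit_rename(const char *newname)
{
	struct chain *old = chain;
	struct chain *repl = malloc(sizeof(*repl));

	repl->name = strdup(newname);
	rcu_assign_pointer(chain, repl);
	synchronize_rcu();		/* wait out readers still using 'old' */
	free(old->name);
	free(old);
}

int main(void)
{
	rcu_register_thread();
	chain = malloc(sizeof(*chain));
	chain->name = strdup("input");

	print_chain_name();
	commit_rename("input_v2");
	print_chain_name();

	free(chain->name);
	free(chain);
	rcu_unregister_thread();
	return 0;
}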
@@ -6538,7 +6571,7 @@ static int __nf_tables_abort(struct net *net) | |||
6538 | case NFT_MSG_NEWCHAIN: | 6571 | case NFT_MSG_NEWCHAIN: |
6539 | if (nft_trans_chain_update(trans)) { | 6572 | if (nft_trans_chain_update(trans)) { |
6540 | free_percpu(nft_trans_chain_stats(trans)); | 6573 | free_percpu(nft_trans_chain_stats(trans)); |
6541 | 6574 | kfree(nft_trans_chain_name(trans)); | |
6542 | nft_trans_destroy(trans); | 6575 | nft_trans_destroy(trans); |
6543 | } else { | 6576 | } else { |
6544 | trans->ctx.table->use--; | 6577 | trans->ctx.table->use--; |
@@ -6918,13 +6951,6 @@ int nft_validate_register_store(const struct nft_ctx *ctx, | |||
6918 | err = nf_tables_check_loops(ctx, data->verdict.chain); | 6951 | err = nf_tables_check_loops(ctx, data->verdict.chain); |
6919 | if (err < 0) | 6952 | if (err < 0) |
6920 | return err; | 6953 | return err; |
6921 | |||
6922 | if (ctx->chain->level + 1 > | ||
6923 | data->verdict.chain->level) { | ||
6924 | if (ctx->chain->level + 1 == NFT_JUMP_STACK_SIZE) | ||
6925 | return -EMLINK; | ||
6926 | data->verdict.chain->level = ctx->chain->level + 1; | ||
6927 | } | ||
6928 | } | 6954 | } |
6929 | 6955 | ||
6930 | return 0; | 6956 | return 0; |
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index 15adf8ca82c3..0777a93211e2 100644 --- a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c | |||
@@ -98,6 +98,7 @@ static int nft_immediate_validate(const struct nft_ctx *ctx, | |||
98 | const struct nft_data **d) | 98 | const struct nft_data **d) |
99 | { | 99 | { |
100 | const struct nft_immediate_expr *priv = nft_expr_priv(expr); | 100 | const struct nft_immediate_expr *priv = nft_expr_priv(expr); |
101 | struct nft_ctx *pctx = (struct nft_ctx *)ctx; | ||
101 | const struct nft_data *data; | 102 | const struct nft_data *data; |
102 | int err; | 103 | int err; |
103 | 104 | ||
@@ -109,9 +110,11 @@ static int nft_immediate_validate(const struct nft_ctx *ctx, | |||
109 | switch (data->verdict.code) { | 110 | switch (data->verdict.code) { |
110 | case NFT_JUMP: | 111 | case NFT_JUMP: |
111 | case NFT_GOTO: | 112 | case NFT_GOTO: |
113 | pctx->level++; | ||
112 | err = nft_chain_validate(ctx, data->verdict.chain); | 114 | err = nft_chain_validate(ctx, data->verdict.chain); |
113 | if (err < 0) | 115 | if (err < 0) |
114 | return err; | 116 | return err; |
117 | pctx->level--; | ||
115 | break; | 118 | break; |
116 | default: | 119 | default: |
117 | break; | 120 | break; |
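This nft_immediate.c change (and the matching nft_lookup.c change in the next file) bumps ctx->level around the recursive nft_chain_validate() call, replacing the per-chain ->level bookkeeping that the nf_tables_api.c hunk above removed from nft_validate_register_store(), so jump depth is measured per validation walk rather than cached on the chains; the depth limit itself is presumably enforced inside nft_chain_validate(), which is not part of this diff. A self-contained model of that per-walk, depth-limited validation; struct chain, struct ctx and JUMP_STACK_SIZE are invented here.

#include <errno.h>
#include <stdio.h>

#define JUMP_STACK_SIZE 16	/* stand-in for NFT_JUMP_STACK_SIZE */

struct chain {
	struct chain *jump_target;	/* NULL if the chain has no jump */
};

struct ctx { unsigned int level; };

/* Validate a chain; every jump recurses with ctx->level incremented,
 * so the depth of the current walk bounds the rule-time jump stack. */
static int chain_validate(struct ctx *ctx, const struct chain *chain)
{
	if (ctx->level >= JUMP_STACK_SIZE)
		return -EMLINK;

	if (chain->jump_target) {
		int err;

		ctx->level++;
		err = chain_validate(ctx, chain->jump_target);
		ctx->level--;
		if (err < 0)
			return err;
	}
	return 0;
}

int main(void)
{
	/* Build a chain of jumps deeper than the allowed stack. */
	struct chain chains[JUMP_STACK_SIZE + 2];
	struct ctx ctx = { .level = 0 };

	for (int i = 0; i < JUMP_STACK_SIZE + 2; i++)
		chains[i].jump_target =
			(i + 1 < JUMP_STACK_SIZE + 2) ? &chains[i + 1] : NULL;

	printf("validate: %d (expect -EMLINK)\n",
	       chain_validate(&ctx, &chains[0]));
	return 0;
}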
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c index 42e6fadf1417..c2a1d84cdfc4 100644 --- a/net/netfilter/nft_lookup.c +++ b/net/netfilter/nft_lookup.c | |||
@@ -155,7 +155,9 @@ static int nft_lookup_validate_setelem(const struct nft_ctx *ctx, | |||
155 | struct nft_set_elem *elem) | 155 | struct nft_set_elem *elem) |
156 | { | 156 | { |
157 | const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); | 157 | const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); |
158 | struct nft_ctx *pctx = (struct nft_ctx *)ctx; | ||
158 | const struct nft_data *data; | 159 | const struct nft_data *data; |
160 | int err; | ||
159 | 161 | ||
160 | if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) && | 162 | if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) && |
161 | *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END) | 163 | *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END) |
@@ -165,10 +167,17 @@ static int nft_lookup_validate_setelem(const struct nft_ctx *ctx, | |||
165 | switch (data->verdict.code) { | 167 | switch (data->verdict.code) { |
166 | case NFT_JUMP: | 168 | case NFT_JUMP: |
167 | case NFT_GOTO: | 169 | case NFT_GOTO: |
168 | return nft_chain_validate(ctx, data->verdict.chain); | 170 | pctx->level++; |
171 | err = nft_chain_validate(ctx, data->verdict.chain); | ||
172 | if (err < 0) | ||
173 | return err; | ||
174 | pctx->level--; | ||
175 | break; | ||
169 | default: | 176 | default: |
170 | return 0; | 177 | break; |
171 | } | 178 | } |
179 | |||
180 | return 0; | ||
172 | } | 181 | } |
173 | 182 | ||
174 | static int nft_lookup_validate(const struct nft_ctx *ctx, | 183 | static int nft_lookup_validate(const struct nft_ctx *ctx, |
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 72ef35b51cac..90c3e7e6cacb 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c | |||
@@ -387,6 +387,7 @@ static void nft_rhash_destroy(const struct nft_set *set) | |||
387 | struct nft_rhash *priv = nft_set_priv(set); | 387 | struct nft_rhash *priv = nft_set_priv(set); |
388 | 388 | ||
389 | cancel_delayed_work_sync(&priv->gc_work); | 389 | cancel_delayed_work_sync(&priv->gc_work); |
390 | rcu_barrier(); | ||
390 | rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy, | 391 | rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy, |
391 | (void *)set); | 392 | (void *)set); |
392 | } | 393 | } |
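The rcu_barrier() added before rhashtable_free_and_destroy() (and before the rbtree teardown in the next file) makes the destroy path wait for call_rcu() callbacks that are still queued, such as element releases scheduled by the asynchronous GC, so none of them run after the set's backing structure is gone. A sketch of that "flush deferred callbacks before freeing the container" rule, again assuming liburcu's call_rcu()/rcu_barrier(); struct elem and elem_free_rcu are invented for the example.

#define _LGPL_SOURCE
#include <urcu.h>		/* liburcu; link with -lurcu */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct elem {
	int		key;
	struct rcu_head	rcu;	/* embedded head for deferred destruction */
};

static void elem_free_rcu(struct rcu_head *head)
{
	struct elem *e = (struct elem *)((char *)head - offsetof(struct elem, rcu));

	printf("freeing element %d\n", e->key);
	free(e);
}

int main(void)
{
	rcu_register_thread();

	/* GC-style deferred frees: queued now, executed after a grace period. */
	for (int i = 0; i < 3; i++) {
		struct elem *e = malloc(sizeof(*e));

		if (!e)
			break;
		e->key = i;
		call_rcu(&e->rcu, elem_free_rcu);
	}

	/* Destroy path: wait for every queued callback to run *before*
	 * tearing down anything those callbacks might still touch. */
	rcu_barrier();
	printf("all deferred frees completed; safe to free the table\n");

	rcu_unregister_thread();
	return 0;
}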
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 1f8f257cb518..9873d734b494 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c | |||
@@ -381,7 +381,7 @@ static void nft_rbtree_gc(struct work_struct *work) | |||
381 | 381 | ||
382 | gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC); | 382 | gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC); |
383 | if (!gcb) | 383 | if (!gcb) |
384 | goto out; | 384 | break; |
385 | 385 | ||
386 | atomic_dec(&set->nelems); | 386 | atomic_dec(&set->nelems); |
387 | nft_set_gc_batch_add(gcb, rbe); | 387 | nft_set_gc_batch_add(gcb, rbe); |
@@ -390,10 +390,12 @@ static void nft_rbtree_gc(struct work_struct *work) | |||
390 | rbe = rb_entry(prev, struct nft_rbtree_elem, node); | 390 | rbe = rb_entry(prev, struct nft_rbtree_elem, node); |
391 | atomic_dec(&set->nelems); | 391 | atomic_dec(&set->nelems); |
392 | nft_set_gc_batch_add(gcb, rbe); | 392 | nft_set_gc_batch_add(gcb, rbe); |
393 | prev = NULL; | ||
393 | } | 394 | } |
394 | node = rb_next(node); | 395 | node = rb_next(node); |
396 | if (!node) | ||
397 | break; | ||
395 | } | 398 | } |
396 | out: | ||
397 | if (gcb) { | 399 | if (gcb) { |
398 | for (i = 0; i < gcb->head.cnt; i++) { | 400 | for (i = 0; i < gcb->head.cnt; i++) { |
399 | rbe = gcb->elems[i]; | 401 | rbe = gcb->elems[i]; |
@@ -440,6 +442,7 @@ static void nft_rbtree_destroy(const struct nft_set *set) | |||
440 | struct rb_node *node; | 442 | struct rb_node *node; |
441 | 443 | ||
442 | cancel_delayed_work_sync(&priv->gc_work); | 444 | cancel_delayed_work_sync(&priv->gc_work); |
445 | rcu_barrier(); | ||
443 | while ((node = priv->root.rb_node) != NULL) { | 446 | while ((node = priv->root.rb_node) != NULL) { |
444 | rb_erase(node, &priv->root); | 447 | rb_erase(node, &priv->root); |
445 | rbe = rb_entry(node, struct nft_rbtree_elem, node); | 448 | rbe = rb_entry(node, struct nft_rbtree_elem, node); |
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 0c2d029c9d4c..03f1370f5db1 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c | |||
@@ -631,6 +631,9 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags, | |||
631 | return NULL; | 631 | return NULL; |
632 | } | 632 | } |
633 | 633 | ||
634 | if (sk->sk_shutdown & RCV_SHUTDOWN) | ||
635 | return NULL; | ||
636 | |||
634 | if (sock_flag(sk, SOCK_DONE)) | 637 | if (sock_flag(sk, SOCK_DONE)) |
635 | return NULL; | 638 | return NULL; |
636 | 639 | ||
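The tls_sw.c change makes tls_wait_data() give up once RCV_SHUTDOWN is set, returning NULL so the caller sees a 0-byte read instead of sleeping for records that can never arrive after the peer has shut down the connection. The same "check the shutdown flag inside the wait loop" rule in a plain pthreads sketch; the queue, the flag and the names are all illustrative.

/* build: cc -pthread example.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int  queued;		/* records available to the reader */
static bool rcv_shutdown;	/* peer has finished sending */

/* Wait for data, but never sleep past a receive-side shutdown. */
static int wait_data(void)
{
	int got = 0;

	pthread_mutex_lock(&lock);
	while (!queued && !rcv_shutdown)
		pthread_cond_wait(&cond, &lock);
	if (queued) {
		queued--;
		got = 1;
	}
	pthread_mutex_unlock(&lock);
	return got;		/* 0 means "no record": treated as clean EOF */
}

static void *peer(void *arg)
{
	(void)arg;
	sleep(1);
	pthread_mutex_lock(&lock);
	rcv_shutdown = true;	/* shutdown arrives without any new record */
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, peer, NULL);
	printf("read returned %d (0 == EOF after shutdown)\n", wait_data());
	pthread_join(t, NULL);
	return 0;
}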
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index e4e5f025d16b..5fb9b7dd9831 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -4466,6 +4466,7 @@ static int parse_station_flags(struct genl_info *info, | |||
4466 | params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) | | 4466 | params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) | |
4467 | BIT(NL80211_STA_FLAG_MFP) | | 4467 | BIT(NL80211_STA_FLAG_MFP) | |
4468 | BIT(NL80211_STA_FLAG_AUTHORIZED); | 4468 | BIT(NL80211_STA_FLAG_AUTHORIZED); |
4469 | break; | ||
4469 | default: | 4470 | default: |
4470 | return -EINVAL; | 4471 | return -EINVAL; |
4471 | } | 4472 | } |
@@ -15029,20 +15030,24 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie, | |||
15029 | EXPORT_SYMBOL(cfg80211_mgmt_tx_status); | 15030 | EXPORT_SYMBOL(cfg80211_mgmt_tx_status); |
15030 | 15031 | ||
15031 | static int __nl80211_rx_control_port(struct net_device *dev, | 15032 | static int __nl80211_rx_control_port(struct net_device *dev, |
15032 | const u8 *buf, size_t len, | 15033 | struct sk_buff *skb, |
15033 | const u8 *addr, u16 proto, | ||
15034 | bool unencrypted, gfp_t gfp) | 15034 | bool unencrypted, gfp_t gfp) |
15035 | { | 15035 | { |
15036 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 15036 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
15037 | struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); | 15037 | struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); |
15038 | struct ethhdr *ehdr = eth_hdr(skb); | ||
15039 | const u8 *addr = ehdr->h_source; | ||
15040 | u16 proto = be16_to_cpu(skb->protocol); | ||
15038 | struct sk_buff *msg; | 15041 | struct sk_buff *msg; |
15039 | void *hdr; | 15042 | void *hdr; |
15043 | struct nlattr *frame; | ||
15044 | |||
15040 | u32 nlportid = READ_ONCE(wdev->conn_owner_nlportid); | 15045 | u32 nlportid = READ_ONCE(wdev->conn_owner_nlportid); |
15041 | 15046 | ||
15042 | if (!nlportid) | 15047 | if (!nlportid) |
15043 | return -ENOENT; | 15048 | return -ENOENT; |
15044 | 15049 | ||
15045 | msg = nlmsg_new(100 + len, gfp); | 15050 | msg = nlmsg_new(100 + skb->len, gfp); |
15046 | if (!msg) | 15051 | if (!msg) |
15047 | return -ENOMEM; | 15052 | return -ENOMEM; |
15048 | 15053 | ||
@@ -15056,13 +15061,17 @@ static int __nl80211_rx_control_port(struct net_device *dev, | |||
15056 | nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || | 15061 | nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || |
15057 | nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), | 15062 | nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), |
15058 | NL80211_ATTR_PAD) || | 15063 | NL80211_ATTR_PAD) || |
15059 | nla_put(msg, NL80211_ATTR_FRAME, len, buf) || | ||
15060 | nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) || | 15064 | nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) || |
15061 | nla_put_u16(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE, proto) || | 15065 | nla_put_u16(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE, proto) || |
15062 | (unencrypted && nla_put_flag(msg, | 15066 | (unencrypted && nla_put_flag(msg, |
15063 | NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT))) | 15067 | NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT))) |
15064 | goto nla_put_failure; | 15068 | goto nla_put_failure; |
15065 | 15069 | ||
15070 | frame = nla_reserve(msg, NL80211_ATTR_FRAME, skb->len); | ||
15071 | if (!frame) | ||
15072 | goto nla_put_failure; | ||
15073 | |||
15074 | skb_copy_bits(skb, 0, nla_data(frame), skb->len); | ||
15066 | genlmsg_end(msg, hdr); | 15075 | genlmsg_end(msg, hdr); |
15067 | 15076 | ||
15068 | return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid); | 15077 | return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid); |
@@ -15073,14 +15082,12 @@ static int __nl80211_rx_control_port(struct net_device *dev, | |||
15073 | } | 15082 | } |
15074 | 15083 | ||
15075 | bool cfg80211_rx_control_port(struct net_device *dev, | 15084 | bool cfg80211_rx_control_port(struct net_device *dev, |
15076 | const u8 *buf, size_t len, | 15085 | struct sk_buff *skb, bool unencrypted) |
15077 | const u8 *addr, u16 proto, bool unencrypted) | ||
15078 | { | 15086 | { |
15079 | int ret; | 15087 | int ret; |
15080 | 15088 | ||
15081 | trace_cfg80211_rx_control_port(dev, buf, len, addr, proto, unencrypted); | 15089 | trace_cfg80211_rx_control_port(dev, skb, unencrypted); |
15082 | ret = __nl80211_rx_control_port(dev, buf, len, addr, proto, | 15090 | ret = __nl80211_rx_control_port(dev, skb, unencrypted, GFP_ATOMIC); |
15083 | unencrypted, GFP_ATOMIC); | ||
15084 | trace_cfg80211_return_bool(ret == 0); | 15091 | trace_cfg80211_return_bool(ret == 0); |
15085 | return ret == 0; | 15092 | return ret == 0; |
15086 | } | 15093 | } |
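Two independent fixes in nl80211.c: the added break; stops one switch case in parse_station_flags() from falling through into the default: return -EINVAL branch, and __nl80211_rx_control_port() now takes the skb itself, reserving the NL80211_ATTR_FRAME attribute with nla_reserve() and filling it via skb_copy_bits(), so the caller no longer has to supply a flat buf/len pair and non-linear frame data can be gathered directly into the message. The toy below mirrors only the reserve-then-gather-copy step; msg_reserve() and copy_frags() are invented stand-ins for nla_reserve()/skb_copy_bits().

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Toy message buffer with an nla_reserve()-like helper: make room for a
 * "frame" attribute first, fill it afterwards. */
struct msg { unsigned char buf[256]; size_t len; };

static void *msg_reserve(struct msg *m, size_t len)
{
	if (m->len + len > sizeof(m->buf))
		return NULL;
	void *p = m->buf + m->len;
	m->len += len;
	return p;
}

/* Gather-copy from a fragmented source into the reserved region. */
static void copy_frags(void *dst, const struct iovec *frags, int n)
{
	unsigned char *d = dst;

	for (int i = 0; i < n; i++) {
		memcpy(d, frags[i].iov_base, frags[i].iov_len);
		d += frags[i].iov_len;
	}
}

int main(void)
{
	struct iovec frags[] = {
		{ "EAPOL ", 6 },	/* pretend linear header part */
		{ "payload", 7 },	/* pretend paged fragment */
	};
	size_t total = frags[0].iov_len + frags[1].iov_len;
	struct msg m = { .len = 0 };

	void *frame = msg_reserve(&m, total);
	if (!frame)
		return 1;
	copy_frags(frame, frags, 2);

	printf("%zu-byte frame attribute: %.*s\n", total, (int)total, (char *)m.buf);
	return 0;
}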
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index bbe6298e4bb9..4fc66a117b7d 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -2240,7 +2240,9 @@ static void wiphy_update_regulatory(struct wiphy *wiphy, | |||
2240 | * as some drivers used this to restore its orig_* reg domain. | 2240 | * as some drivers used this to restore its orig_* reg domain. |
2241 | */ | 2241 | */ |
2242 | if (initiator == NL80211_REGDOM_SET_BY_CORE && | 2242 | if (initiator == NL80211_REGDOM_SET_BY_CORE && |
2243 | wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) | 2243 | wiphy->regulatory_flags & REGULATORY_CUSTOM_REG && |
2244 | !(wiphy->regulatory_flags & | ||
2245 | REGULATORY_WIPHY_SELF_MANAGED)) | ||
2244 | reg_call_notifier(wiphy, lr); | 2246 | reg_call_notifier(wiphy, lr); |
2245 | return; | 2247 | return; |
2246 | } | 2248 | } |
@@ -2787,26 +2789,6 @@ static void notify_self_managed_wiphys(struct regulatory_request *request) | |||
2787 | } | 2789 | } |
2788 | } | 2790 | } |
2789 | 2791 | ||
2790 | static bool reg_only_self_managed_wiphys(void) | ||
2791 | { | ||
2792 | struct cfg80211_registered_device *rdev; | ||
2793 | struct wiphy *wiphy; | ||
2794 | bool self_managed_found = false; | ||
2795 | |||
2796 | ASSERT_RTNL(); | ||
2797 | |||
2798 | list_for_each_entry(rdev, &cfg80211_rdev_list, list) { | ||
2799 | wiphy = &rdev->wiphy; | ||
2800 | if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) | ||
2801 | self_managed_found = true; | ||
2802 | else | ||
2803 | return false; | ||
2804 | } | ||
2805 | |||
2806 | /* make sure at least one self-managed wiphy exists */ | ||
2807 | return self_managed_found; | ||
2808 | } | ||
2809 | |||
2810 | /* | 2792 | /* |
2811 | * Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_* | 2793 | * Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_* |
2812 | * Regulatory hints come on a first come first serve basis and we | 2794 | * Regulatory hints come on a first come first serve basis and we |
@@ -2839,10 +2821,6 @@ static void reg_process_pending_hints(void) | |||
2839 | spin_unlock(®_requests_lock); | 2821 | spin_unlock(®_requests_lock); |
2840 | 2822 | ||
2841 | notify_self_managed_wiphys(reg_request); | 2823 | notify_self_managed_wiphys(reg_request); |
2842 | if (reg_only_self_managed_wiphys()) { | ||
2843 | reg_free_request(reg_request); | ||
2844 | return; | ||
2845 | } | ||
2846 | 2824 | ||
2847 | reg_process_hint(reg_request); | 2825 | reg_process_hint(reg_request); |
2848 | 2826 | ||
diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 2b417a2fe63f..7c73510b161f 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h | |||
@@ -2627,23 +2627,25 @@ TRACE_EVENT(cfg80211_mgmt_tx_status, | |||
2627 | ); | 2627 | ); |
2628 | 2628 | ||
2629 | TRACE_EVENT(cfg80211_rx_control_port, | 2629 | TRACE_EVENT(cfg80211_rx_control_port, |
2630 | TP_PROTO(struct net_device *netdev, const u8 *buf, size_t len, | 2630 | TP_PROTO(struct net_device *netdev, struct sk_buff *skb, |
2631 | const u8 *addr, u16 proto, bool unencrypted), | 2631 | bool unencrypted), |
2632 | TP_ARGS(netdev, buf, len, addr, proto, unencrypted), | 2632 | TP_ARGS(netdev, skb, unencrypted), |
2633 | TP_STRUCT__entry( | 2633 | TP_STRUCT__entry( |
2634 | NETDEV_ENTRY | 2634 | NETDEV_ENTRY |
2635 | MAC_ENTRY(addr) | 2635 | __field(int, len) |
2636 | MAC_ENTRY(from) | ||
2636 | __field(u16, proto) | 2637 | __field(u16, proto) |
2637 | __field(bool, unencrypted) | 2638 | __field(bool, unencrypted) |
2638 | ), | 2639 | ), |
2639 | TP_fast_assign( | 2640 | TP_fast_assign( |
2640 | NETDEV_ASSIGN; | 2641 | NETDEV_ASSIGN; |
2641 | MAC_ASSIGN(addr, addr); | 2642 | __entry->len = skb->len; |
2642 | __entry->proto = proto; | 2643 | MAC_ASSIGN(from, eth_hdr(skb)->h_source); |
2644 | __entry->proto = be16_to_cpu(skb->protocol); | ||
2643 | __entry->unencrypted = unencrypted; | 2645 | __entry->unencrypted = unencrypted; |
2644 | ), | 2646 | ), |
2645 | TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT " proto: 0x%x, unencrypted: %s", | 2647 | TP_printk(NETDEV_PR_FMT ", len=%d, " MAC_PR_FMT ", proto: 0x%x, unencrypted: %s", |
2646 | NETDEV_PR_ARG, MAC_PR_ARG(addr), | 2648 | NETDEV_PR_ARG, __entry->len, MAC_PR_ARG(from), |
2647 | __entry->proto, BOOL_TO_STR(__entry->unencrypted)) | 2649 | __entry->proto, BOOL_TO_STR(__entry->unencrypted)) |
2648 | ); | 2650 | ); |
2649 | 2651 | ||