author		David S. Miller <davem@davemloft.net>	2017-10-22 08:36:53 -0400
committer	David S. Miller <davem@davemloft.net>	2017-10-22 08:39:14 -0400
commit		f8ddadc4db6c7b7029b6d0e0d9af24f74ad27ca2
tree		0a6432aba336bae42313613f4c891bcfce02bd4e	/net/ipv4/tcp_ipv4.c
parent		bdd091bab8c631bd2801af838e344fad34566410
parent		b5ac3beb5a9f0ef0ea64cd85faf94c0dc4de0e42
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
There were quite a few overlapping sets of changes here.
Daniel's bug fix for off-by-ones in the new BPF branch instructions,
along with the added allowances for "data_end > ptr + x" forms
collided with the metadata additions.
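
(For illustration only; this snippet is not from the merged patches, and the
program, function and section names are made up.)  The two bounds-check shapes
in question look roughly like this in a minimal XDP-style C program:

#include <linux/bpf.h>
#include <linux/if_ether.h>

#define SEC(name) __attribute__((section(name), used))

SEC("xdp")
int xdp_len_check(struct xdp_md *ctx)
{
	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;

	/* Classic shape: "ptr + x > data_end" rejects a packet that is too
	 * short for the Ethernet header. */
	if (data + sizeof(*eth) > data_end)
		return XDP_DROP;

	/* The "data_end > ptr + x" shape: the same guarantee written with
	 * data_end on the left-hand side; eth can be dereferenced behind
	 * either form once the verifier accepts the comparison. */
	if (data_end > data + sizeof(*eth))
		return eth->h_proto ? XDP_PASS : XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

Either comparison proves the Ethernet header fits inside the packet; the second
simply puts data_end on the left of the comparison.
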
Along with those three changes came verifier test cases, which in
their final form I tried to group together properly. If I had just
trimmed GIT's conflict tags as-is, this would have split up the
meta tests unnecessarily.
In the socketmap code, a set of preemption disabling changes
overlapped with the rename of bpf_compute_data_end() to
bpf_compute_data_pointers().
Changes were made to the mv88e6060.c driver's set_addr method,
which got removed in net-next.
The hyperv transport socket layer had a locking change in 'net'
which overlapped with a change of socket state macro usage
in 'net-next'.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
 net/ipv4/tcp_ipv4.c | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 28ca4e177047..e22439f05e46 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -877,7 +877,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
 
 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
 					    ireq->ir_rmt_addr,
-					    ireq->opt);
+					    rcu_dereference(ireq->ireq_opt));
 		err = net_xmit_eval(err);
 	}
 
@@ -889,7 +889,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
  */
 static void tcp_v4_reqsk_destructor(struct request_sock *req)
 {
-	kfree(inet_rsk(req)->opt);
+	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
 }
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -1265,10 +1265,11 @@ static void tcp_v4_init_req(struct request_sock *req,
 			    struct sk_buff *skb)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
+	struct net *net = sock_net(sk_listener);
 
 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
-	ireq->opt = tcp_v4_save_options(sock_net(sk_listener), skb);
+	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
 }
 
 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
@@ -1355,10 +1356,9 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 	sk_daddr_set(newsk, ireq->ir_rmt_addr);
 	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
 	newsk->sk_bound_dev_if = ireq->ir_iif;
 	newinet->inet_saddr   = ireq->ir_loc_addr;
-	inet_opt              = ireq->opt;
-	rcu_assign_pointer(newinet->inet_opt, inet_opt);
-	ireq->opt             = NULL;
+	inet_opt              = rcu_dereference(ireq->ireq_opt);
+	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
 	newinet->mc_index     = inet_iif(skb);
 	newinet->mc_ttl       = ip_hdr(skb)->ttl;
 	newinet->rcv_tos      = ip_hdr(skb)->tos;
@@ -1403,9 +1403,12 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 	if (__inet_inherit_port(sk, newsk) < 0)
 		goto put_and_exit;
 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
-	if (*own_req)
+	if (likely(*own_req)) {
 		tcp_move_syn(newtp, req);
-
+		ireq->ireq_opt = NULL;
+	} else {
+		newinet->inet_opt = NULL;
+	}
 	return newsk;
 
 exit_overflow:
@@ -1416,6 +1419,7 @@ exit:
 	tcp_listendrop(sk);
 	return NULL;
 put_and_exit:
+	newinet->inet_opt = NULL;
 	inet_csk_prepare_forced_close(newsk);
 	tcp_done(newsk);
 	goto exit;
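
The hunks above follow the usual kernel pattern for an RCU-managed pointer:
RCU_INIT_POINTER() publishes into a request that is not yet visible to other
CPUs, rcu_dereference() reads it where the request may be shared, and
rcu_dereference_protected(..., 1) is used in the destructor where no concurrent
access remains. A minimal sketch of that pattern follows; the struct and
function names are hypothetical, only the accessors mirror the diff:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct opt_blob {
	int len;			/* stand-in for the real IP options blob */
};

struct req_ctx {
	struct opt_blob __rcu *opt;	/* plays the role of ireq->ireq_opt */
};

/* Publish: the context is not yet reachable by other CPUs, so no release
 * barrier is needed and RCU_INIT_POINTER() suffices (cf. tcp_v4_init_req()). */
static void req_ctx_set_opt(struct req_ctx *ctx, struct opt_blob *opt)
{
	RCU_INIT_POINTER(ctx->opt, opt);
}

/* Read while the context may be shared: rcu_dereference() pairs with the
 * publication above; the caller must be in an RCU read-side critical section
 * (cf. tcp_v4_send_synack()). */
static struct opt_blob *req_ctx_get_opt(struct req_ctx *ctx)
{
	return rcu_dereference(ctx->opt);
}

/* Teardown: no reader can still reach the context, so the "protected" variant
 * with condition 1 is enough before freeing (cf. tcp_v4_reqsk_destructor()). */
static void req_ctx_free_opt(struct req_ctx *ctx)
{
	kfree(rcu_dereference_protected(ctx->opt, 1));
}

Using RCU_INIT_POINTER() instead of rcu_assign_pointer() drops the release
barrier, which is safe only because nothing can observe the request before it
is published.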