author	Daniel Vetter <daniel.vetter@ffwll.ch>	2017-02-26 15:34:42 -0500
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2017-02-26 15:34:42 -0500
commit	8e22e1b3499a446df48c2b26667ca36c55bf864c (patch)
tree	5329f98b3eb3c95a9dcbab0fa4f9b6e62f0e788d /net/ipv6/tcp_ipv6.c
parent	00d3c14f14d51babd8aeafd5fa734ccf04f5ca3d (diff)
parent	64a577196d66b44e37384bc5c4d78c61f59d5b2a (diff)
Merge airlied/drm-next into drm-misc-next
Backmerge the main pull request to sync up with all the newly landed drivers. Otherwise we'll have chaos even before 4.12 started in earnest.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'net/ipv6/tcp_ipv6.c')
-rw-r--r--	net/ipv6/tcp_ipv6.c	28
1 file changed, 15 insertions(+), 13 deletions(-)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 73bc8fc68acd..eaad72c3d746 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -469,7 +469,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 		opt = ireq->ipv6_opt;
 		if (!opt)
 			opt = rcu_dereference(np->opt);
-		err = ip6_xmit(sk, skb, fl6, opt, np->tclass);
+		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
 		rcu_read_unlock();
 		err = net_xmit_eval(err);
 	}
@@ -840,7 +840,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
 	if (!IS_ERR(dst)) {
 		skb_dst_set(buff, dst);
-		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
+		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
 		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 		if (rst)
 			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
@@ -991,6 +991,16 @@ drop:
 	return 0; /* don't send reset */
 }
 
+static void tcp_v6_restore_cb(struct sk_buff *skb)
+{
+	/* We need to move header back to the beginning if xfrm6_policy_check()
+	 * and tcp_v6_fill_cb() are going to be called again.
+	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
+	 */
+	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+		sizeof(struct inet6_skb_parm));
+}
+
 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 					 struct request_sock *req,
 					 struct dst_entry *dst,
@@ -1182,8 +1192,10 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 						      sk_gfp_mask(sk, GFP_ATOMIC));
 			consume_skb(ireq->pktopts);
 			ireq->pktopts = NULL;
-			if (newnp->pktoptions)
+			if (newnp->pktoptions) {
+				tcp_v6_restore_cb(newnp->pktoptions);
 				skb_set_owner_r(newnp->pktoptions, newsk);
+			}
 		}
 	}
 
@@ -1198,16 +1210,6 @@ out:
 	return NULL;
 }
 
-static void tcp_v6_restore_cb(struct sk_buff *skb)
-{
-	/* We need to move header back to the beginning if xfrm6_policy_check()
-	 * and tcp_v6_fill_cb() are going to be called again.
-	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
-	 */
-	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
-		sizeof(struct inet6_skb_parm));
-}
-
 /* The socket must have it's spinlock held when we get
  * here, unless it is a TCP_LISTEN socket.
  *