Diffstat (limited to 'net/ipv6/tcp_ipv6.c')
-rw-r--r--  net/ipv6/tcp_ipv6.c | 174
1 file changed, 79 insertions(+), 95 deletions(-)
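The hunks below make two related conversions: the generic struct flowi flow key becomes the IPv6-specific struct flowi6 (with renamed members), and the open-coded ip6_dst_lookup() plus xfrm lookup sequence becomes a single ip6_dst_lookup_flow() call that returns either a dst_entry or an ERR_PTR() value. The following sketch is not part of the patch; it only restates that calling convention in one place. The helper name tcp_v6_route_sketch() is invented for illustration, while the structure fields and the functions it calls are the ones that appear in the diff itself.

#include <linux/in.h>
#include <linux/security.h>
#include <linux/string.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/flow.h>

/* Illustrative only -- not a function added by this patch. */
static struct dst_entry *tcp_v6_route_sketch(struct sock *sk,
					     const struct in6_addr *daddr,
					     const struct in6_addr *saddr,
					     __be16 dport, __be16 sport)
{
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;		/* was fl.proto */
	ipv6_addr_copy(&fl6.daddr, daddr);	/* was fl.fl6_dst */
	ipv6_addr_copy(&fl6.saddr, saddr);	/* was fl.fl6_src */
	fl6.flowi6_oif = sk->sk_bound_dev_if;	/* was fl.oif */
	fl6.flowi6_mark = sk->sk_mark;		/* was fl.mark */
	fl6.fl6_dport = dport;			/* was fl.fl_ip_dport */
	fl6.fl6_sport = sport;			/* was fl.fl_ip_sport */

	/* The LSM hooks still take a struct flowi, hence the wrapper. */
	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	/* Route and xfrm lookup in one call; returns a dst or an ERR_PTR(). */
	return ip6_dst_lookup_flow(sk, &fl6, NULL, false);
}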
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 20aa95e37359..4f49e5dd41bb 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -131,7 +131,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct in6_addr *saddr = NULL, *final_p, final;
 	struct rt6_info *rt;
-	struct flowi fl;
+	struct flowi6 fl6;
 	struct dst_entry *dst;
 	int addr_type;
 	int err;
@@ -142,14 +142,14 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	if (usin->sin6_family != AF_INET6)
 		return -EAFNOSUPPORT;
 
-	memset(&fl, 0, sizeof(fl));
+	memset(&fl6, 0, sizeof(fl6));
 
 	if (np->sndflow) {
-		fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
-		IP6_ECN_flow_init(fl.fl6_flowlabel);
-		if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
+		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
+		IP6_ECN_flow_init(fl6.flowlabel);
+		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
 			struct ip6_flowlabel *flowlabel;
-			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
+			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
 			if (flowlabel == NULL)
 				return -EINVAL;
 			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
@@ -195,7 +195,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	}
 
 	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
-	np->flow_label = fl.fl6_flowlabel;
+	np->flow_label = fl6.flowlabel;
 
 	/*
 	 *	TCP over IPv4
@@ -242,35 +242,27 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	if (!ipv6_addr_any(&np->rcv_saddr))
 		saddr = &np->rcv_saddr;
 
-	fl.proto = IPPROTO_TCP;
-	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
-	ipv6_addr_copy(&fl.fl6_src,
+	fl6.flowi6_proto = IPPROTO_TCP;
+	ipv6_addr_copy(&fl6.daddr, &np->daddr);
+	ipv6_addr_copy(&fl6.saddr,
 		       (saddr ? saddr : &np->saddr));
-	fl.oif = sk->sk_bound_dev_if;
-	fl.mark = sk->sk_mark;
-	fl.fl_ip_dport = usin->sin6_port;
-	fl.fl_ip_sport = inet->inet_sport;
+	fl6.flowi6_oif = sk->sk_bound_dev_if;
+	fl6.flowi6_mark = sk->sk_mark;
+	fl6.fl6_dport = usin->sin6_port;
+	fl6.fl6_sport = inet->inet_sport;
 
-	final_p = fl6_update_dst(&fl, np->opt, &final);
+	final_p = fl6_update_dst(&fl6, np->opt, &final);
 
-	security_sk_classify_flow(sk, &fl);
+	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
-	err = ip6_dst_lookup(sk, &dst, &fl);
-	if (err)
+	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
+	if (IS_ERR(dst)) {
+		err = PTR_ERR(dst);
 		goto failure;
-	if (final_p)
-		ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-	err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
-	if (err < 0) {
-		if (err == -EREMOTE)
-			err = ip6_dst_blackhole(sk, &dst, &fl);
-		if (err < 0)
-			goto failure;
 	}
 
 	if (saddr == NULL) {
-		saddr = &fl.fl6_src;
+		saddr = &fl6.saddr;
 		ipv6_addr_copy(&np->rcv_saddr, saddr);
 	}
 
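Not part of the diff: a reminder of the ERR_PTR() convention that the converted lookup relies on. The old code signalled failure through an int return value, while ip6_dst_lookup_flow() encodes the negative errno in the returned pointer itself, which is why the connect path above takes err from PTR_ERR(dst) directly and tcp_v6_err() below negates it before storing it in sk_err_soft. The helper name dst_errno() is invented for this example.

#include <linux/err.h>
#include <net/dst.h>

/* Illustrative only: decode a pointer-or-error return the way the
 * converted callers do. */
static int dst_errno(struct dst_entry *dst)
{
	if (IS_ERR(dst))		/* value lies in the reserved error range */
		return PTR_ERR(dst);	/* e.g. -ENETUNREACH, -EACCES */
	return 0;			/* a usable dst_entry */
}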
@@ -385,7 +377,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	np = inet6_sk(sk);
 
 	if (type == ICMPV6_PKT_TOOBIG) {
-		struct dst_entry *dst = NULL;
+		struct dst_entry *dst;
 
 		if (sock_owned_by_user(sk))
 			goto out;
@@ -397,29 +389,25 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
 		if (dst == NULL) {
 			struct inet_sock *inet = inet_sk(sk);
-			struct flowi fl;
+			struct flowi6 fl6;
 
 			/* BUGGG_FUTURE: Again, it is not clear how
 			   to handle rthdr case. Ignore this complexity
 			   for now.
 			 */
-			memset(&fl, 0, sizeof(fl));
-			fl.proto = IPPROTO_TCP;
-			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
-			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
-			fl.oif = sk->sk_bound_dev_if;
-			fl.mark = sk->sk_mark;
-			fl.fl_ip_dport = inet->inet_dport;
-			fl.fl_ip_sport = inet->inet_sport;
-			security_skb_classify_flow(skb, &fl);
+			memset(&fl6, 0, sizeof(fl6));
+			fl6.flowi6_proto = IPPROTO_TCP;
+			ipv6_addr_copy(&fl6.daddr, &np->daddr);
+			ipv6_addr_copy(&fl6.saddr, &np->saddr);
+			fl6.flowi6_oif = sk->sk_bound_dev_if;
+			fl6.flowi6_mark = sk->sk_mark;
+			fl6.fl6_dport = inet->inet_dport;
+			fl6.fl6_sport = inet->inet_sport;
+			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 
-			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
-				sk->sk_err_soft = -err;
-				goto out;
-			}
-
-			if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0) {
-				sk->sk_err_soft = -err;
+			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
+			if (IS_ERR(dst)) {
+				sk->sk_err_soft = -PTR_ERR(dst);
 				goto out;
 			}
 
@@ -494,38 +482,37 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
 	struct sk_buff * skb;
 	struct ipv6_txoptions *opt = NULL;
 	struct in6_addr * final_p, final;
-	struct flowi fl;
+	struct flowi6 fl6;
 	struct dst_entry *dst;
-	int err = -1;
+	int err;
 
-	memset(&fl, 0, sizeof(fl));
-	fl.proto = IPPROTO_TCP;
-	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
-	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
-	fl.fl6_flowlabel = 0;
-	fl.oif = treq->iif;
-	fl.mark = sk->sk_mark;
-	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
-	fl.fl_ip_sport = inet_rsk(req)->loc_port;
-	security_req_classify_flow(req, &fl);
+	memset(&fl6, 0, sizeof(fl6));
+	fl6.flowi6_proto = IPPROTO_TCP;
+	ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
+	ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
+	fl6.flowlabel = 0;
+	fl6.flowi6_oif = treq->iif;
+	fl6.flowi6_mark = sk->sk_mark;
+	fl6.fl6_dport = inet_rsk(req)->rmt_port;
+	fl6.fl6_sport = inet_rsk(req)->loc_port;
+	security_req_classify_flow(req, flowi6_to_flowi(&fl6));
 
 	opt = np->opt;
-	final_p = fl6_update_dst(&fl, opt, &final);
+	final_p = fl6_update_dst(&fl6, opt, &final);
 
-	err = ip6_dst_lookup(sk, &dst, &fl);
-	if (err)
+	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
+	if (IS_ERR(dst)) {
+		err = PTR_ERR(dst);
+		dst = NULL;
 		goto done;
-	if (final_p)
-		ipv6_addr_copy(&fl.fl6_dst, final_p);
-	if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
-		goto done;
-
+	}
 	skb = tcp_make_synack(sk, dst, req, rvp);
+	err = -ENOMEM;
 	if (skb) {
 		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 
-		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
-		err = ip6_xmit(sk, skb, &fl, opt);
+		ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
+		err = ip6_xmit(sk, skb, &fl6, opt);
 		err = net_xmit_eval(err);
 	}
 
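Two small changes in this hunk deserve a note: err is no longer pre-initialized to -1 but is set to PTR_ERR() on a failed lookup and to -ENOMEM before tcp_make_synack(), and dst is cleared on the error path. Both exist because the function bails out through its shared done: label. Roughly, the label looks like the sketch below; it is outside this diff, so this is a reconstruction from context, not patch content.

done:
	/* free opt only if it is not the socket's own np->opt */
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	/* dst_release() ignores NULL, so clearing dst after a failed
	 * ip6_dst_lookup_flow() keeps this single exit path safe. */
	dst_release(dst);
	return err;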
@@ -1006,7 +993,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 {
 	struct tcphdr *th = tcp_hdr(skb), *t1;
 	struct sk_buff *buff;
-	struct flowi fl;
+	struct flowi6 fl6;
 	struct net *net = dev_net(skb_dst(skb)->dev);
 	struct sock *ctl_sk = net->ipv6.tcp_sk;
 	unsigned int tot_len = sizeof(struct tcphdr);
@@ -1060,34 +1047,33 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 	}
 #endif
 
-	memset(&fl, 0, sizeof(fl));
-	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
-	ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
+	memset(&fl6, 0, sizeof(fl6));
+	ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
+	ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr);
 
 	buff->ip_summed = CHECKSUM_PARTIAL;
 	buff->csum = 0;
 
-	__tcp_v6_send_check(buff, &fl.fl6_src, &fl.fl6_dst);
+	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
 
-	fl.proto = IPPROTO_TCP;
-	fl.oif = inet6_iif(skb);
-	fl.fl_ip_dport = t1->dest;
-	fl.fl_ip_sport = t1->source;
-	security_skb_classify_flow(skb, &fl);
+	fl6.flowi6_proto = IPPROTO_TCP;
+	fl6.flowi6_oif = inet6_iif(skb);
+	fl6.fl6_dport = t1->dest;
+	fl6.fl6_sport = t1->source;
+	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 
 	/* Pass a socket to ip6_dst_lookup either it is for RST
 	 * Underlying function will use this to retrieve the network
 	 * namespace
 	 */
-	if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
-		if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
-			skb_dst_set(buff, dst);
-			ip6_xmit(ctl_sk, buff, &fl, NULL);
-			TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
-			if (rst)
-				TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
-			return;
-		}
+	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
+	if (!IS_ERR(dst)) {
+		skb_dst_set(buff, dst);
+		ip6_xmit(ctl_sk, buff, &fl6, NULL);
+		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+		if (rst)
+			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
+		return;
 	}
 
 	kfree_skb(buff);
@@ -1323,7 +1309,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	    tcp_death_row.sysctl_tw_recycle &&
 	    (dst = inet6_csk_route_req(sk, req)) != NULL &&
 	    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
-	    ipv6_addr_equal((struct in6_addr *)peer->daddr.a6,
+	    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
 			    &treq->rmt_addr)) {
 		inet_peer_refcheck(peer);
 		if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
@@ -1636,10 +1622,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 		opt_skb = skb_clone(skb, GFP_ATOMIC);
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
-		TCP_CHECK_TIMER(sk);
+		sock_rps_save_rxhash(sk, skb->rxhash);
 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
 			goto reset;
-		TCP_CHECK_TIMER(sk);
 		if (opt_skb)
 			goto ipv6_pktoptions;
 		return 0;
@@ -1665,12 +1650,11 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 				__kfree_skb(opt_skb);
 			return 0;
 		}
-	}
+	} else
+		sock_rps_save_rxhash(sk, skb->rxhash);
 
-	TCP_CHECK_TIMER(sk);
 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
 		goto reset;
-	TCP_CHECK_TIMER(sk);
 	if (opt_skb)
 		goto ipv6_pktoptions;
 	return 0;
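The last two hunks drop TCP_CHECK_TIMER(sk), which by this point was an empty debug macro, and add sock_rps_save_rxhash() so Receive Flow Steering can learn which CPU consumes this flow. The hash is recorded on the established fast path and, via the new "} else", in every non-listening state; the listening socket itself is skipped because the flow really belongs to the child socket created for the connection. Below is a rough sketch of that branch structure in tcp_v6_do_rcv(); the surrounding context is reconstructed for illustration and is not shown in the diff.

	if (sk->sk_state == TCP_ESTABLISHED) {		/* fast path */
		sock_rps_save_rxhash(sk, skb->rxhash);
		/* tcp_rcv_established(), then return */
	}

	if (sk->sk_state == TCP_LISTEN) {
		/* passive open handling; no rxhash saved on the listener */
	} else
		sock_rps_save_rxhash(sk, skb->rxhash);	/* all other states */

	/* tcp_rcv_state_process() handles the remaining states */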