path: root/net/ipv6/tcp_ipv6.c
Diffstat (limited to 'net/ipv6/tcp_ipv6.c')
-rw-r--r--  net/ipv6/tcp_ipv6.c | 72
1 file changed, 38 insertions(+), 34 deletions(-)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f443c6b0ce16..c4efaa97280c 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -234,7 +234,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	fl6.fl6_dport = usin->sin6_port;
 	fl6.fl6_sport = inet->inet_sport;
 
-	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
 	final_p = fl6_update_dst(&fl6, opt, &final);
 
 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
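
Note on the hunk above: sock_owned_by_user() only reports the "owned by a
process" flavor of the socket lock, while lockdep_sock_is_held() also
accepts the bh spinlock owner, so it is the more precise condition for
rcu_dereference_protected() here. A minimal sketch of the accessor pattern,
assuming the in-tree helpers; the wrapper name np_opt_locked() is invented
for illustration:

	/* Fetch np->opt on the update side. The caller must hold the
	 * socket lock; with CONFIG_PROVE_LOCKING, lockdep verifies it.
	 */
	static struct ipv6_txoptions *np_opt_locked(struct sock *sk)
	{
		struct ipv6_pinfo *np = inet6_sk(sk);

		return rcu_dereference_protected(np->opt,
						 lockdep_sock_is_held(sk));
	}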
@@ -336,8 +336,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 			skb->dev->ifindex);
 
 	if (!sk) {
-		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
-				   ICMP6_MIB_INERRORS);
+		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
+				  ICMP6_MIB_INERRORS);
 		return;
 	}
 
@@ -352,13 +352,13 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
-		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
 	if (sk->sk_state == TCP_CLOSE)
 		goto out;
 
 	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 		goto out;
 	}
 
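
Note on the renames above: this series replaces the _BH suffix with the
kernel's usual __ prefix for the variant that assumes BH is already
disabled, while the unprefixed name stays safe in process context. The
definitions follow the shape below (sketched from include/net/ip.h of that
period; possibly simplified):

	#define __NET_INC_STATS(net, field)	\
		__SNMP_INC_STATS((net)->mib.net_statistics, field)
	#define NET_INC_STATS(net, field)	\
		SNMP_INC_STATS((net)->mib.net_statistics, field)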
@@ -368,7 +368,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 	if (sk->sk_state != TCP_LISTEN &&
 	    !between(seq, snd_una, tp->snd_nxt)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}
 
@@ -439,7 +439,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 			      struct flowi *fl,
 			      struct request_sock *req,
 			      struct tcp_fastopen_cookie *foc,
-			      bool attach_req)
+			      enum tcp_synack_type synack_type)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -452,7 +452,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 			       IPPROTO_TCP)) == NULL)
 		goto done;
 
-	skb = tcp_make_synack(sk, dst, req, foc, attach_req);
+	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
 
 	if (skb) {
 		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
@@ -649,12 +649,12 @@ static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
 		return false;
 
 	if (hash_expected && !hash_location) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 		return true;
 	}
 
 	if (!hash_expected && hash_location) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 		return true;
 	}
 
@@ -830,9 +830,9 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
 	if (!IS_ERR(dst)) {
 		skb_dst_set(buff, dst);
 		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
-		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 		if (rst)
-			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
+			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
 		return;
 	}
 
@@ -863,6 +863,7 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
 		return;
 
 #ifdef CONFIG_TCP_MD5SIG
+	rcu_read_lock();
 	hash_location = tcp_parse_md5sig_option(th);
 	if (sk && sk_fullsock(sk)) {
 		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
@@ -880,16 +881,15 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
 					   th->source, &ipv6h->daddr,
 					   ntohs(th->source), tcp_v6_iif(skb));
 		if (!sk1)
-			return;
+			goto out;
 
-		rcu_read_lock();
 		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
 		if (!key)
-			goto release_sk1;
+			goto out;
 
 		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
-			goto release_sk1;
+			goto out;
 	}
 #endif
 
@@ -903,11 +903,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
 	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
 
 #ifdef CONFIG_TCP_MD5SIG
-release_sk1:
-	if (sk1) {
-		rcu_read_unlock();
-		sock_put(sk1);
-	}
+out:
+	rcu_read_unlock();
 #endif
 }
 
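
Note on the two hunks above: the listener lookup on this path no longer
takes a reference (the socket is freed via RCU instead), so the paired
rcu_read_unlock()/sock_put(sk1) teardown collapses into one RCU section
with a single exit label. A generic sketch of the shape, with hypothetical
obj_lookup_rcu()/obj_consume() helpers standing in for the MD5 details:

	static void use_obj_rcu(u32 key)
	{
		struct obj *o;

		rcu_read_lock();
		o = obj_lookup_rcu(key);	/* takes no reference */
		if (!o)
			goto out;
		obj_consume(o);			/* valid only inside RCU */
	out:
		rcu_read_unlock();
	}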
@@ -972,7 +969,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 				 &tcp_request_sock_ipv6_ops, sk, skb);
 
 drop:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+	tcp_listendrop(sk);
 	return 0; /* don't send reset */
 }
 
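
Note on the hunk above: tcp_listendrop() is a small helper added in this
series; from memory of include/net/tcp.h it both charges the drop to the
listener's sk_drops and bumps the SNMP counter, roughly:

	static inline void tcp_listendrop(const struct sock *sk)
	{
		atomic_inc(&((struct sock *)sk)->sk_drops);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
	}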
@@ -1173,11 +1170,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 	return newsk;
 
 out_overflow:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 out_nonewsk:
 	dst_release(dst);
 out:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+	tcp_listendrop(sk);
 	return NULL;
 }
 
@@ -1284,8 +1281,8 @@ discard:
 	kfree_skb(skb);
 	return 0;
 csum_err:
-	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
-	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
+	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
 	goto discard;
 
 
@@ -1356,6 +1353,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 {
 	const struct tcphdr *th;
 	const struct ipv6hdr *hdr;
+	bool refcounted;
 	struct sock *sk;
 	int ret;
 	struct net *net = dev_net(skb->dev);
@@ -1366,7 +1364,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 	/*
 	 * Count it even if it's bad.
 	 */
-	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
+	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
 
 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
 		goto discard_it;
@@ -1386,7 +1384,8 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 
 lookup:
 	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
-				th->source, th->dest, inet6_iif(skb));
+				th->source, th->dest, inet6_iif(skb),
+				&refcounted);
 	if (!sk)
 		goto no_tcp_socket;
 
@@ -1409,6 +1408,7 @@ process:
 			goto lookup;
 		}
 		sock_hold(sk);
+		refcounted = true;
 		nsk = tcp_check_req(sk, skb, req, false);
 		if (!nsk) {
 			reqsk_put(req);
@@ -1426,7 +1426,7 @@ process:
 		}
 	}
 	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 		goto discard_and_relse;
 	}
 
@@ -1459,13 +1459,14 @@ process:
 	} else if (unlikely(sk_add_backlog(sk, skb,
 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
 		bh_unlock_sock(sk);
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;
 	}
 	bh_unlock_sock(sk);
 
put_and_return:
-	sock_put(sk);
+	if (refcounted)
+		sock_put(sk);
 	return ret ? -1 : 0;
 
no_tcp_socket:
@@ -1476,9 +1477,9 @@ no_tcp_socket:
 
 	if (tcp_checksum_complete(skb)) {
csum_error:
-		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
+		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
-		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
+		__TCP_INC_STATS(net, TCP_MIB_INERRS);
 	} else {
 		tcp_v6_send_reset(NULL, skb);
 	}
@@ -1488,7 +1489,9 @@ discard_it:
 	return 0;
 
discard_and_relse:
-	sock_put(sk);
+	sk_drops_add(sk, skb);
+	if (refcounted)
+		sock_put(sk);
 	goto discard_it;
 
do_time_wait:
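
Note on the hunks above: once __inet6_lookup_skb() can return sockets it
did not take a reference on (RCU-freed listeners), every exit has to drop
the reference conditionally. A sketch of the exit discipline, with the
wrapper name drop_and_maybe_put() invented for illustration:

	static void drop_and_maybe_put(struct sock *sk, struct sk_buff *skb,
				       bool refcounted)
	{
		sk_drops_add(sk, skb);		/* account the drop */
		if (refcounted)
			sock_put(sk);		/* only if lookup took a ref */
		kfree_skb(skb);
	}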
@@ -1519,6 +1522,7 @@ do_time_wait:
 			inet_twsk_deschedule_put(tw);
 			sk = sk2;
 			tcp_v6_restore_cb(skb);
+			refcounted = false;
 			goto process;
 		}
 		/* Fall through to ACK */