author		Eric Dumazet <edumazet@google.com>	2012-08-06 01:09:33 -0400
committer	David S. Miller <davem@davemloft.net>	2012-08-06 16:33:21 -0400
commit		5d299f3d3c8a2fbc732b1bf03af36333ccec3130
tree		119701591725281d99ecad6f459166da3e6034a2 /net/ipv6
parent		b5497eeb37d7d4a5a61b91f64efedc90d1ad6fa3
net: ipv6: fix TCP early demux
IPv6 needs a cookie in the dst_check() call.

We need to add rx_dst_cookie and provide a family-independent
sk_rx_dst_set(sk, skb) method to properly support IPv6 TCP early demux.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
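
For context, the family-independent hook is a new callback in struct
inet_connection_sock_af_ops; its declaration and its callers live outside
net/ipv6 and are therefore not in the diffstat below. A minimal sketch of the
intended shape follows; the helper example_cache_rx_dst() is purely
hypothetical, and only the callback signature is taken from the patch itself:

/* Sketch only: the real struct is declared in
 * include/net/inet_connection_sock.h; only the new member is shown here.
 */
struct inet_connection_sock_af_ops {
	/* ... existing ops (queue_xmit, send_check, ...) ... */
	void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
	/* ... */
};

/* Hypothetical caller: generic TCP code can now cache the input route
 * without knowing the address family.
 */
static void example_cache_rx_dst(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* The IPv6 handler added below records rt6i_node->fn_sernum as a
	 * cookie so a later dst_check() can detect a stale cached route.
	 */
	icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
}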
Diffstat (limited to 'net/ipv6')
-rw-r--r--	net/ipv6/tcp_ipv6.c	27
1 file changed, 25 insertions(+), 2 deletions(-)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index c66b90f71c9b..5a439e9a4c01 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1447,7 +1447,17 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+		struct dst_entry *dst = sk->sk_rx_dst;
+
 		sock_rps_save_rxhash(sk, skb);
+		if (dst) {
+			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
+			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
+				dst_release(dst);
+				sk->sk_rx_dst = NULL;
+			}
+		}
+
 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
 			goto reset;
 		if (opt_skb)
@@ -1705,9 +1715,9 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
 			struct dst_entry *dst = sk->sk_rx_dst;
 			struct inet_sock *icsk = inet_sk(sk);
 			if (dst)
-				dst = dst_check(dst, 0);
+				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
 			if (dst &&
-			    icsk->rx_dst_ifindex == inet6_iif(skb))
+			    icsk->rx_dst_ifindex == skb->skb_iif)
 				skb_dst_set_noref(skb, dst);
 		}
 	}
@@ -1719,10 +1729,23 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = {
 	.twsk_destructor= tcp_twsk_destructor,
 };
 
+static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+	const struct rt6_info *rt = (const struct rt6_info *)dst;
+
+	dst_hold(dst);
+	sk->sk_rx_dst = dst;
+	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+	if (rt->rt6i_node)
+		inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
+}
+
 static const struct inet_connection_sock_af_ops ipv6_specific = {
 	.queue_xmit		= inet6_csk_xmit,
 	.send_check		= tcp_v6_send_check,
 	.rebuild_header		= inet6_sk_rebuild_header,
+	.sk_rx_dst_set		= inet6_sk_rx_dst_set,
 	.conn_request		= tcp_v6_conn_request,
 	.syn_recv_sock		= tcp_v6_syn_recv_sock,
 	.net_header_len		= sizeof(struct ipv6hdr),
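
For contrast, the IPv4 hook added by the same series does not need a validity
cookie, since the IPv4 dst_check() path does not rely on a caller-supplied
one. A rough sketch, assuming it simply mirrors inet6_sk_rx_dst_set() above
minus the cookie (the real code lives in net/ipv4/tcp_ipv4.c, outside this
net/ipv6-limited view):

/* Sketch of the IPv4 counterpart: cache the input route and the incoming
 * interface index only; no cookie is recorded.
 */
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
}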