author		David S. Miller <davem@davemloft.net>	2012-07-23 19:29:00 -0400
committer	David S. Miller <davem@davemloft.net>	2012-07-23 19:36:26 -0400
commit		92101b3b2e3178087127709a556b091dae314e9e (patch)
tree		06359f8823da3ed7617c5ea78e4a56bc5e958fea /net/ipv4/tcp_ipv4.c
parent		fe3edf45792a7d2f0edff4e2fcdd9a84c1a388a0 (diff)
ipv4: Prepare for change of rt->rt_iif encoding.
Use inet_iif() consistently, and for TCP record the input interface of
cached RX dst in inet sock.

rt->rt_iif is going to be encoded differently, so that we can
legitimately cache input routes in the FIB info more aggressively.

When the input interface is "use SKB device index" the rt->rt_iif will
be set to zero.

This forces us to move the TCP RX dst cache installation into the ipv4
specific code, and as well it should since doing the route caching for
ipv6 is pointless at the moment since it is not inspected in the ipv6
input paths yet.

Also, remove the unlikely on dst->obsolete, all ipv4 dsts have
obsolete set to a non-zero value to force invocation of the check
callback.

Signed-off-by: David S. Miller <davem@davemloft.net>
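In effect, the patch makes tcp_v4_do_rcv() validate the cached RX dst
on every established-state segment and repopulate it from the skb's
route when it has gone stale. A condensed sketch of that pattern,
drawn from the hunks below (names as in the patch; the surrounding
function context is elided):

	/* Condensed from the tcp_v4_do_rcv() hunk below: validate the
	 * cached input route, then re-cache it if it was dropped.
	 */
	if (sk->sk_rx_dst) {
		struct dst_entry *dst = sk->sk_rx_dst;

		/* All ipv4 dsts carry a non-zero ->obsolete, so ->check()
		 * is always invoked; a NULL return means the route is stale.
		 */
		if (dst->ops->check(dst, 0) == NULL) {
			dst_release(dst);
			sk->sk_rx_dst = NULL;
		}
	}
	if (unlikely(sk->sk_rx_dst == NULL)) {
		struct rtable *rt = skb_rtable(skb);

		/* Cache the route and record the input interface on the
		 * socket itself, since rt->rt_iif may soon read as zero
		 * ("use SKB device index").
		 */
		sk->sk_rx_dst = dst_clone(&rt->dst);
		inet_sk(sk)->rx_dst_ifindex = inet_iif(skb);
	}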
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--	net/ipv4/tcp_ipv4.c	24
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index bc5432e3c778..3e30548ac32a 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1618,6 +1618,20 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
 		sock_rps_save_rxhash(sk, skb);
+		if (sk->sk_rx_dst) {
+			struct dst_entry *dst = sk->sk_rx_dst;
+			if (dst->ops->check(dst, 0) == NULL) {
+				dst_release(dst);
+				sk->sk_rx_dst = NULL;
+			}
+		}
+		if (unlikely(sk->sk_rx_dst == NULL)) {
+			struct inet_sock *icsk = inet_sk(sk);
+			struct rtable *rt = skb_rtable(skb);
+
+			sk->sk_rx_dst = dst_clone(&rt->dst);
+			icsk->rx_dst_ifindex = inet_iif(skb);
+		}
 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
 			rsk = sk;
 			goto reset;
@@ -1700,14 +1714,12 @@ void tcp_v4_early_demux(struct sk_buff *skb)
 		skb->destructor = sock_edemux;
 		if (sk->sk_state != TCP_TIME_WAIT) {
 			struct dst_entry *dst = sk->sk_rx_dst;
+			struct inet_sock *icsk = inet_sk(sk);
 			if (dst)
 				dst = dst_check(dst, 0);
-			if (dst) {
-				struct rtable *rt = (struct rtable *) dst;
-
-				if (rt->rt_iif == dev->ifindex)
-					skb_dst_set_noref(skb, dst);
-			}
+			if (dst &&
+			    icsk->rx_dst_ifindex == dev->ifindex)
+				skb_dst_set_noref(skb, dst);
 		}
 	}
 }
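On the consumer side, tcp_v4_early_demux() now trusts the cached dst
only when it both survives dst_check() and was recorded from the same
device the packet just arrived on. A condensed restatement of the
post-patch logic from the second hunk:

	/* Post-patch early-demux check, condensed: reuse the cached dst
	 * only if it is still valid and was cached from this input device.
	 */
	struct dst_entry *dst = sk->sk_rx_dst;

	if (dst)
		dst = dst_check(dst, 0);
	if (dst && inet_sk(sk)->rx_dst_ifindex == dev->ifindex)
		/* Borrow the dst without taking a reference. */
		skb_dst_set_noref(skb, dst);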