| author | Ilpo Järvinen <ilpo.jarvinen@helsinki.fi> | 2009-03-14 10:23:05 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2009-03-15 23:09:54 -0400 |
| commit | 0c54b85f2828128274f319a1eb3ce7f604fe2a53 (patch) | |
| tree | 8ff84b9aa9cd4775f244c2421e11adfbd206a7bf /net/ipv4/tcp_output.c | |
| parent | 72211e90501f954f586481c25521c3724cda3cc7 (diff) | |
tcp: simplify tcp_current_mss
There's very little need for most of the callsites to have
tp->xmit_size_goal updated, yet keeping it current costs us a
divide as it is, so slice the function in two. Also, the only
users of tp->xmit_size_goal sit directly behind tcp_current_mss(),
so there's no need to store that variable in tcp_sock at all!
Dropping xmit_size_goal currently leaves a 16-bit hole, and some
reorganization would again be necessary to change that (but I'm
aiming to fill that hole with a u16 xmit_size_goal_segs that
caches the result of the remaining divide, to avoid a TSO
regression).
Bring the xmit_size_goal parts into tcp.c.
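The tcp.c side of that move is outside this view (the diffstat is limited to net/ipv4/tcp_output.c). As a sketch of the intended shape, reusing the logic deleted from tcp_current_mss() below; the helper names tcp_xmit_size_goal() and tcp_send_mss() are assumptions, not shown by this diff:

```c
/* Sketch (assumed names): the size-goal computation moves out of
 * tcp_current_mss() and behind the few send-side callers that need it,
 * so the common callers no longer pay for the divide.
 */
static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 xmit_size_goal = mss_now;

	if (large_allowed && sk_can_gso(sk)) {
		/* Same arithmetic this patch deletes from
		 * tcp_current_mss(), minus the store into tcp_sock. */
		xmit_size_goal = ((sk->sk_gso_max_size - 1) -
				  inet_csk(sk)->icsk_af_ops->net_header_len -
				  inet_csk(sk)->icsk_ext_hdr_len -
				  tp->tcp_header_len);

		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
		xmit_size_goal -= (xmit_size_goal % mss_now);
	}

	return xmit_size_goal;
}

/* Callers that previously read tp->xmit_size_goal would instead do
 * something along these lines: */
static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	int mss_now = tcp_current_mss(sk);

	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
	return mss_now;
}
```

This keeps the divide off the paths that only need the MSS and confines it to the senders that actually consume the size goal.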
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Cc: Evgeniy Polyakov <zbr@ioremap.net>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_output.c')
| -rw-r--r-- | net/ipv4/tcp_output.c | 41 |
1 file changed, 7 insertions(+), 34 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 325658039139..c1f259d2d33b 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -921,7 +921,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 	 * factor and mss.
 	 */
 	if (tcp_skb_pcount(skb) > 1)
-		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));
+		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk));
 
 	return 0;
 }
@@ -982,15 +982,6 @@ void tcp_mtup_init(struct sock *sk)
 	icsk->icsk_mtup.probe_size = 0;
 }
 
-/* Bound MSS / TSO packet size with the half of the window */
-static int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
-{
-	if (tp->max_window && pktsize > (tp->max_window >> 1))
-		return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
-	else
-		return pktsize;
-}
-
 /* This function synchronize snd mss to current pmtu/exthdr set.
 
    tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT counts
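tcp_bound_to_half_wnd() is deleted from tcp_output.c here, yet the size-goal computation that calls it now lives in tcp.c, so the helper presumably moves to a header visible to both files. A minimal sketch, assuming include/net/tcp.h and a switch to static inline; the body is exactly the code removed in this hunk:

```c
/* Assumed relocation target: include/net/tcp.h.
 * Bound MSS / TSO packet size with the half of the window.
 */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	if (tp->max_window && pktsize > (tp->max_window >> 1))
		return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
```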
@@ -1037,22 +1028,17 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
 /* Compute the current effective MSS, taking SACKs and IP options,
  * and even PMTU discovery events into account.
  */
-unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
+unsigned int tcp_current_mss(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct dst_entry *dst = __sk_dst_get(sk);
 	u32 mss_now;
-	u16 xmit_size_goal;
-	int doing_tso = 0;
 	unsigned header_len;
 	struct tcp_out_options opts;
 	struct tcp_md5sig_key *md5;
 
 	mss_now = tp->mss_cache;
 
-	if (large_allowed && sk_can_gso(sk))
-		doing_tso = 1;
-
 	if (dst) {
 		u32 mtu = dst_mtu(dst);
 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
@@ -1070,19 +1056,6 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
 		mss_now -= delta;
 	}
 
-	xmit_size_goal = mss_now;
-
-	if (doing_tso) {
-		xmit_size_goal = ((sk->sk_gso_max_size - 1) -
-				  inet_csk(sk)->icsk_af_ops->net_header_len -
-				  inet_csk(sk)->icsk_ext_hdr_len -
-				  tp->tcp_header_len);
-
-		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
-		xmit_size_goal -= (xmit_size_goal % mss_now);
-	}
-	tp->xmit_size_goal = xmit_size_goal;
-
 	return mss_now;
 }
 
@@ -1264,7 +1237,7 @@ int tcp_may_send_now(struct sock *sk)
 	struct sk_buff *skb = tcp_send_head(sk);
 
 	return (skb &&
-		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
+		tcp_snd_test(sk, skb, tcp_current_mss(sk),
 			     (tcp_skb_is_last(sk, skb) ?
 			      tp->nonagle : TCP_NAGLE_PUSH)));
 }
@@ -1421,7 +1394,7 @@ static int tcp_mtu_probe(struct sock *sk)
 		return -1;
 
 	/* Very simple search strategy: just double the MSS. */
-	mss_now = tcp_current_mss(sk, 0);
+	mss_now = tcp_current_mss(sk);
 	probe_size = 2 * tp->mss_cache;
 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
@@ -1903,7 +1876,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
 		return -EHOSTUNREACH; /* Routing failure or similar. */
 
-	cur_mss = tcp_current_mss(sk, 0);
+	cur_mss = tcp_current_mss(sk);
 
 	/* If receiver has shrunk his window, and skb is out of
 	 * new window, do not retransmit it. The exception is the
@@ -2111,7 +2084,7 @@ void tcp_send_fin(struct sock *sk)
 	 * unsent frames. But be careful about outgoing SACKS
 	 * and IP options.
 	 */
-	mss_now = tcp_current_mss(sk, 1);
+	mss_now = tcp_current_mss(sk);
 
 	if (tcp_send_head(sk) != NULL) {
 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
@@ -2523,7 +2496,7 @@ int tcp_write_wakeup(struct sock *sk)
 	if ((skb = tcp_send_head(sk)) != NULL &&
 	    before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
 		int err;
-		unsigned int mss = tcp_current_mss(sk, 0);
+		unsigned int mss = tcp_current_mss(sk);
 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
 
 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))