diff options
author | Ilpo Järvinen <ilpo.jarvinen@helsinki.fi> | 2009-03-14 10:23:05 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2009-03-15 23:09:54 -0400 |
commit | 0c54b85f2828128274f319a1eb3ce7f604fe2a53 (patch) | |
tree | 8ff84b9aa9cd4775f244c2421e11adfbd206a7bf /net/ipv4 | |
parent | 72211e90501f954f586481c25521c3724cda3cc7 (diff) |
tcp: simplify tcp_current_mss
There's very little need for most of the callsites to get
tp->xmit_size_goal updated. That will cost us a divide as is,
so slice the function in two. Also, the only users of
tp->xmit_size_goal are directly behind tcp_current_mss(),
so there's no need to store that variable in tcp_sock
at all! The drop of xmit_size_goal currently leaves a 16-bit
hole, and some reorganization would again be necessary to
change that (but I'm aiming to fill that hole with a u16
xmit_size_goal_segs to cache the result of the remaining
divide and address the TSO regression).
Bring the xmit_size_goal parts into tcp.c.
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Cc: Evgeniy Polyakov <zbr@ioremap.net>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r-- | net/ipv4/tcp.c | 43 | ||||
-rw-r--r-- | net/ipv4/tcp_input.c | 2 | ||||
-rw-r--r-- | net/ipv4/tcp_output.c | 41 |
3 files changed, 43 insertions, 43 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index d3f9beee74c0..886596ff0aae 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -661,6 +661,37 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp) | |||
661 | return NULL; | 661 | return NULL; |
662 | } | 662 | } |
663 | 663 | ||
664 | static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, | ||
665 | int large_allowed) | ||
666 | { | ||
667 | struct tcp_sock *tp = tcp_sk(sk); | ||
668 | u32 xmit_size_goal; | ||
669 | |||
670 | xmit_size_goal = mss_now; | ||
671 | |||
672 | if (large_allowed && sk_can_gso(sk)) { | ||
673 | xmit_size_goal = ((sk->sk_gso_max_size - 1) - | ||
674 | inet_csk(sk)->icsk_af_ops->net_header_len - | ||
675 | inet_csk(sk)->icsk_ext_hdr_len - | ||
676 | tp->tcp_header_len); | ||
677 | |||
678 | xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal); | ||
679 | xmit_size_goal -= (xmit_size_goal % mss_now); | ||
680 | } | ||
681 | |||
682 | return xmit_size_goal; | ||
683 | } | ||
684 | |||
685 | static int tcp_send_mss(struct sock *sk, int *size_goal, int flags) | ||
686 | { | ||
687 | int mss_now; | ||
688 | |||
689 | mss_now = tcp_current_mss(sk); | ||
690 | *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB)); | ||
691 | |||
692 | return mss_now; | ||
693 | } | ||
694 | |||
664 | static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, | 695 | static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, |
665 | size_t psize, int flags) | 696 | size_t psize, int flags) |
666 | { | 697 | { |
@@ -677,8 +708,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse | |||
677 | 708 | ||
678 | clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 709 | clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); |
679 | 710 | ||
680 | mss_now = tcp_current_mss(sk, !(flags&MSG_OOB)); | 711 | mss_now = tcp_send_mss(sk, &size_goal, flags); |
681 | size_goal = tp->xmit_size_goal; | ||
682 | copied = 0; | 712 | copied = 0; |
683 | 713 | ||
684 | err = -EPIPE; | 714 | err = -EPIPE; |
@@ -761,8 +791,7 @@ wait_for_memory: | |||
761 | if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) | 791 | if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) |
762 | goto do_error; | 792 | goto do_error; |
763 | 793 | ||
764 | mss_now = tcp_current_mss(sk, !(flags&MSG_OOB)); | 794 | mss_now = tcp_send_mss(sk, &size_goal, flags); |
765 | size_goal = tp->xmit_size_goal; | ||
766 | } | 795 | } |
767 | 796 | ||
768 | out: | 797 | out: |
@@ -844,8 +873,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
844 | /* This should be in poll */ | 873 | /* This should be in poll */ |
845 | clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 874 | clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); |
846 | 875 | ||
847 | mss_now = tcp_current_mss(sk, !(flags&MSG_OOB)); | 876 | mss_now = tcp_send_mss(sk, &size_goal, flags); |
848 | size_goal = tp->xmit_size_goal; | ||
849 | 877 | ||
850 | /* Ok commence sending. */ | 878 | /* Ok commence sending. */ |
851 | iovlen = msg->msg_iovlen; | 879 | iovlen = msg->msg_iovlen; |
@@ -1007,8 +1035,7 @@ wait_for_memory: | |||
1007 | if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) | 1035 | if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) |
1008 | goto do_error; | 1036 | goto do_error; |
1009 | 1037 | ||
1010 | mss_now = tcp_current_mss(sk, !(flags&MSG_OOB)); | 1038 | mss_now = tcp_send_mss(sk, &size_goal, flags); |
1011 | size_goal = tp->xmit_size_goal; | ||
1012 | } | 1039 | } |
1013 | } | 1040 | } |
1014 | 1041 | ||
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 311c30f73ee4..fae78e3eccc4 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -2864,7 +2864,7 @@ void tcp_simple_retransmit(struct sock *sk) | |||
2864 | const struct inet_connection_sock *icsk = inet_csk(sk); | 2864 | const struct inet_connection_sock *icsk = inet_csk(sk); |
2865 | struct tcp_sock *tp = tcp_sk(sk); | 2865 | struct tcp_sock *tp = tcp_sk(sk); |
2866 | struct sk_buff *skb; | 2866 | struct sk_buff *skb; |
2867 | unsigned int mss = tcp_current_mss(sk, 0); | 2867 | unsigned int mss = tcp_current_mss(sk); |
2868 | u32 prior_lost = tp->lost_out; | 2868 | u32 prior_lost = tp->lost_out; |
2869 | 2869 | ||
2870 | tcp_for_write_queue(skb, sk) { | 2870 | tcp_for_write_queue(skb, sk) { |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 325658039139..c1f259d2d33b 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -921,7 +921,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) | |||
921 | * factor and mss. | 921 | * factor and mss. |
922 | */ | 922 | */ |
923 | if (tcp_skb_pcount(skb) > 1) | 923 | if (tcp_skb_pcount(skb) > 1) |
924 | tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1)); | 924 | tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk)); |
925 | 925 | ||
926 | return 0; | 926 | return 0; |
927 | } | 927 | } |
@@ -982,15 +982,6 @@ void tcp_mtup_init(struct sock *sk) | |||
982 | icsk->icsk_mtup.probe_size = 0; | 982 | icsk->icsk_mtup.probe_size = 0; |
983 | } | 983 | } |
984 | 984 | ||
985 | /* Bound MSS / TSO packet size with the half of the window */ | ||
986 | static int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize) | ||
987 | { | ||
988 | if (tp->max_window && pktsize > (tp->max_window >> 1)) | ||
989 | return max(tp->max_window >> 1, 68U - tp->tcp_header_len); | ||
990 | else | ||
991 | return pktsize; | ||
992 | } | ||
993 | |||
994 | /* This function synchronize snd mss to current pmtu/exthdr set. | 985 | /* This function synchronize snd mss to current pmtu/exthdr set. |
995 | 986 | ||
996 | tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT counts | 987 | tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT counts |
@@ -1037,22 +1028,17 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) | |||
1037 | /* Compute the current effective MSS, taking SACKs and IP options, | 1028 | /* Compute the current effective MSS, taking SACKs and IP options, |
1038 | * and even PMTU discovery events into account. | 1029 | * and even PMTU discovery events into account. |
1039 | */ | 1030 | */ |
1040 | unsigned int tcp_current_mss(struct sock *sk, int large_allowed) | 1031 | unsigned int tcp_current_mss(struct sock *sk) |
1041 | { | 1032 | { |
1042 | struct tcp_sock *tp = tcp_sk(sk); | 1033 | struct tcp_sock *tp = tcp_sk(sk); |
1043 | struct dst_entry *dst = __sk_dst_get(sk); | 1034 | struct dst_entry *dst = __sk_dst_get(sk); |
1044 | u32 mss_now; | 1035 | u32 mss_now; |
1045 | u16 xmit_size_goal; | ||
1046 | int doing_tso = 0; | ||
1047 | unsigned header_len; | 1036 | unsigned header_len; |
1048 | struct tcp_out_options opts; | 1037 | struct tcp_out_options opts; |
1049 | struct tcp_md5sig_key *md5; | 1038 | struct tcp_md5sig_key *md5; |
1050 | 1039 | ||
1051 | mss_now = tp->mss_cache; | 1040 | mss_now = tp->mss_cache; |
1052 | 1041 | ||
1053 | if (large_allowed && sk_can_gso(sk)) | ||
1054 | doing_tso = 1; | ||
1055 | |||
1056 | if (dst) { | 1042 | if (dst) { |
1057 | u32 mtu = dst_mtu(dst); | 1043 | u32 mtu = dst_mtu(dst); |
1058 | if (mtu != inet_csk(sk)->icsk_pmtu_cookie) | 1044 | if (mtu != inet_csk(sk)->icsk_pmtu_cookie) |
@@ -1070,19 +1056,6 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed) | |||
1070 | mss_now -= delta; | 1056 | mss_now -= delta; |
1071 | } | 1057 | } |
1072 | 1058 | ||
1073 | xmit_size_goal = mss_now; | ||
1074 | |||
1075 | if (doing_tso) { | ||
1076 | xmit_size_goal = ((sk->sk_gso_max_size - 1) - | ||
1077 | inet_csk(sk)->icsk_af_ops->net_header_len - | ||
1078 | inet_csk(sk)->icsk_ext_hdr_len - | ||
1079 | tp->tcp_header_len); | ||
1080 | |||
1081 | xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal); | ||
1082 | xmit_size_goal -= (xmit_size_goal % mss_now); | ||
1083 | } | ||
1084 | tp->xmit_size_goal = xmit_size_goal; | ||
1085 | |||
1086 | return mss_now; | 1059 | return mss_now; |
1087 | } | 1060 | } |
1088 | 1061 | ||
@@ -1264,7 +1237,7 @@ int tcp_may_send_now(struct sock *sk) | |||
1264 | struct sk_buff *skb = tcp_send_head(sk); | 1237 | struct sk_buff *skb = tcp_send_head(sk); |
1265 | 1238 | ||
1266 | return (skb && | 1239 | return (skb && |
1267 | tcp_snd_test(sk, skb, tcp_current_mss(sk, 1), | 1240 | tcp_snd_test(sk, skb, tcp_current_mss(sk), |
1268 | (tcp_skb_is_last(sk, skb) ? | 1241 | (tcp_skb_is_last(sk, skb) ? |
1269 | tp->nonagle : TCP_NAGLE_PUSH))); | 1242 | tp->nonagle : TCP_NAGLE_PUSH))); |
1270 | } | 1243 | } |
@@ -1421,7 +1394,7 @@ static int tcp_mtu_probe(struct sock *sk) | |||
1421 | return -1; | 1394 | return -1; |
1422 | 1395 | ||
1423 | /* Very simple search strategy: just double the MSS. */ | 1396 | /* Very simple search strategy: just double the MSS. */ |
1424 | mss_now = tcp_current_mss(sk, 0); | 1397 | mss_now = tcp_current_mss(sk); |
1425 | probe_size = 2 * tp->mss_cache; | 1398 | probe_size = 2 * tp->mss_cache; |
1426 | size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; | 1399 | size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; |
1427 | if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) { | 1400 | if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) { |
@@ -1903,7 +1876,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | |||
1903 | if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) | 1876 | if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) |
1904 | return -EHOSTUNREACH; /* Routing failure or similar. */ | 1877 | return -EHOSTUNREACH; /* Routing failure or similar. */ |
1905 | 1878 | ||
1906 | cur_mss = tcp_current_mss(sk, 0); | 1879 | cur_mss = tcp_current_mss(sk); |
1907 | 1880 | ||
1908 | /* If receiver has shrunk his window, and skb is out of | 1881 | /* If receiver has shrunk his window, and skb is out of |
1909 | * new window, do not retransmit it. The exception is the | 1882 | * new window, do not retransmit it. The exception is the |
@@ -2111,7 +2084,7 @@ void tcp_send_fin(struct sock *sk) | |||
2111 | * unsent frames. But be careful about outgoing SACKS | 2084 | * unsent frames. But be careful about outgoing SACKS |
2112 | * and IP options. | 2085 | * and IP options. |
2113 | */ | 2086 | */ |
2114 | mss_now = tcp_current_mss(sk, 1); | 2087 | mss_now = tcp_current_mss(sk); |
2115 | 2088 | ||
2116 | if (tcp_send_head(sk) != NULL) { | 2089 | if (tcp_send_head(sk) != NULL) { |
2117 | TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN; | 2090 | TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN; |
@@ -2523,7 +2496,7 @@ int tcp_write_wakeup(struct sock *sk) | |||
2523 | if ((skb = tcp_send_head(sk)) != NULL && | 2496 | if ((skb = tcp_send_head(sk)) != NULL && |
2524 | before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { | 2497 | before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { |
2525 | int err; | 2498 | int err; |
2526 | unsigned int mss = tcp_current_mss(sk, 0); | 2499 | unsigned int mss = tcp_current_mss(sk); |
2527 | unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; | 2500 | unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; |
2528 | 2501 | ||
2529 | if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) | 2502 | if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) |