author		Nikolay Borisov <kernel@kyup.com>	2016-02-03 02:46:52 -0500
committer	David S. Miller <davem@davemloft.net>	2016-02-07 14:35:10 -0500
commit		1043e25ff96a1efc7bd34d11f5f32203a28a3bd7
tree		56eb979f398384e49864dfcfb40d0c48e7a7f622 /net/ipv4/tcp_input.c
parent		12ed8244ed8b31b023ea6d2851fd8b15f2999e9b
ipv4: Namespaceify tcp reordering sysctl knob
Signed-off-by: Nikolay Borisov <kernel@kyup.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_input.c')

 net/ipv4/tcp_input.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
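Because this diffstat view is limited to net/ipv4/tcp_input.c, only the read sites being converted are shown below. The commit as a whole also has to move the knob into struct netns_ipv4 and give every new namespace the old default. A minimal sketch of that companion plumbing, assuming the usual namespaceification pattern rather than quoting the hunks omitted here:

/* Sketch only: the per-namespace storage and its default value live
 * outside tcp_input.c and are therefore not part of the diff below.
 */

/* include/net/netns/ipv4.h -- the global sysctl becomes a netns field. */
struct netns_ipv4 {
	/* ... existing members ... */
	int sysctl_tcp_reordering;
	/* ... */
};

/* net/ipv4/tcp_ipv4.c -- each namespace starts from the historical
 * default, the fast-retransmit threshold of 3 duplicate ACKs.
 */
static int __net_init tcp_sk_init(struct net *net)
{
	/* ... */
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	/* ... */
	return 0;
}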
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b17aba42a368..5ee6fe0d152d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -80,9 +80,7 @@ int sysctl_tcp_timestamps __read_mostly = 1;
 int sysctl_tcp_window_scaling __read_mostly = 1;
 int sysctl_tcp_sack __read_mostly = 1;
 int sysctl_tcp_fack __read_mostly = 1;
-int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
 int sysctl_tcp_max_reordering __read_mostly = 300;
-EXPORT_SYMBOL(sysctl_tcp_reordering);
 int sysctl_tcp_dsack __read_mostly = 1;
 int sysctl_tcp_app_win __read_mostly = 31;
 int sysctl_tcp_adv_win_scale __read_mostly = 1;
@@ -1883,6 +1881,7 @@ void tcp_enter_loss(struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
+	struct net *net = sock_net(sk);
 	struct sk_buff *skb;
 	bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
 	bool is_reneg;	/* is receiver reneging on SACKs? */
@@ -1933,9 +1932,9 @@ void tcp_enter_loss(struct sock *sk)
 	 * suggests that the degree of reordering is over-estimated.
 	 */
 	if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
-	    tp->sacked_out >= sysctl_tcp_reordering)
+	    tp->sacked_out >= net->ipv4.sysctl_tcp_reordering)
 		tp->reordering = min_t(unsigned int, tp->reordering,
-				       sysctl_tcp_reordering);
+				       net->ipv4.sysctl_tcp_reordering);
 	tcp_set_ca_state(sk, TCP_CA_Loss);
 	tp->high_seq = tp->snd_nxt;
 	tcp_ecn_queue_cwr(tp);
@@ -2119,6 +2118,7 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 packets_out;
+	int tcp_reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
 
 	/* Trick#1: The loss is proven. */
 	if (tp->lost_out)
@@ -2133,7 +2133,7 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
 	 */
 	packets_out = tp->packets_out;
 	if (packets_out <= tp->reordering &&
-	    tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) &&
+	    tp->sacked_out >= max_t(__u32, packets_out/2, tcp_reordering) &&
 	    !tcp_may_send_now(sk)) {
 		/* We have nothing to send. This connection is limited
 		 * either by receiver window or by application.
@@ -3317,7 +3317,7 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 	 * new SACK or ECE mark may first advance cwnd here and later reduce
 	 * cwnd in tcp_fastretrans_alert() based on more states.
 	 */
-	if (tcp_sk(sk)->reordering > sysctl_tcp_reordering)
+	if (tcp_sk(sk)->reordering > sock_net(sk)->ipv4.sysctl_tcp_reordering)
 		return flag & FLAG_FORWARD_PROGRESS;
 
 	return flag & FLAG_DATA_ACKED;
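Also not shown in this diffstat: for each namespace to expose its own copy of the knob, the sysctl entry itself has to move from the global table into the per-netns one in net/ipv4/sysctl_net_ipv4.c. A hedged sketch of that entry, following the standard ipv4_net_table pattern (placement and handler assumed here, not quoted from the full commit):

/* net/ipv4/sysctl_net_ipv4.c (sketch): the tcp_reordering entry is dropped
 * from ipv4_table and re-added to ipv4_net_table, so that
 * /proc/sys/net/ipv4/tcp_reordering resolves per network namespace.
 */
static struct ctl_table ipv4_net_table[] = {
	/* ... */
	{
		.procname	= "tcp_reordering",
		.data		= &init_net.ipv4.sysctl_tcp_reordering,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	/* ... */
};

With that in place, a container or test namespace can tune net.ipv4.tcp_reordering without affecting other namespaces, while the tcp_input.c paths above pick the value up through sock_net(sk).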