path: root/include/net/tcp.h
Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--  include/net/tcp.h  38
1 file changed, 28 insertions, 10 deletions
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 34f5cc24d903..75be5a28815d 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -196,6 +196,9 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
 #define TCP_NAGLE_CORK		2	/* Socket is corked */
 #define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */
 
+/* TCP thin-stream limits */
+#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */
+
 extern struct inet_timewait_death_row tcp_death_row;
 
 /* sysctl variables for tcp */
@@ -241,6 +244,8 @@ extern int sysctl_tcp_workaround_signed_windows;
 extern int sysctl_tcp_slow_start_after_idle;
 extern int sysctl_tcp_max_ssthresh;
 extern int sysctl_tcp_cookie_size;
+extern int sysctl_tcp_thin_linear_timeouts;
+extern int sysctl_tcp_thin_dupack;
 
 extern atomic_t tcp_memory_allocated;
 extern struct percpu_counter tcp_sockets_allocated;
@@ -400,6 +405,8 @@ extern int compat_tcp_setsockopt(struct sock *sk,
 			int level, int optname,
 			char __user *optval, unsigned int optlen);
 extern void tcp_set_keepalive(struct sock *sk, int val);
+extern void tcp_syn_ack_timeout(struct sock *sk,
+				struct request_sock *req);
 extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
 		       struct msghdr *msg,
 		       size_t len, int nonblock,
@@ -856,13 +863,6 @@ static inline void tcp_check_probe_timer(struct sock *sk)
 					  icsk->icsk_rto, TCP_RTO_MAX);
 }
 
-static inline void tcp_push_pending_frames(struct sock *sk)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
-}
-
 static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
 {
 	tp->snd_wl1 = seq;
@@ -939,7 +939,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 
 		tp->ucopy.memory = 0;
 	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
-		wake_up_interruptible_poll(sk->sk_sleep,
+		wake_up_interruptible_sync_poll(sk->sk_sleep,
 				   POLLIN | POLLRDNORM | POLLRDBAND);
 		if (!inet_csk_ack_scheduled(sk))
 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
@@ -972,7 +972,8 @@ static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
 /* Determine a window scaling and initial window to offer. */
 extern void tcp_select_initial_window(int __space, __u32 mss,
 				      __u32 *rcv_wnd, __u32 *window_clamp,
-				      int wscale_ok, __u8 *rcv_wscale);
+				      int wscale_ok, __u8 *rcv_wscale,
+				      __u32 init_rcv_wnd);
 
 static inline int tcp_win_from_space(int space)
 {
@@ -1193,7 +1194,7 @@ extern int tcp_v4_md5_do_del(struct sock *sk,
 #define tcp_twsk_md5_key(twsk)	NULL
 #endif
 
-extern struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *);
+extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *);
 extern void tcp_free_md5sig_pool(void);
 
 extern struct tcp_md5sig_pool	*__tcp_get_md5sig_pool(int cpu);
@@ -1342,6 +1343,15 @@ static inline int tcp_write_queue_empty(struct sock *sk)
 	return skb_queue_empty(&sk->sk_write_queue);
 }
 
+static inline void tcp_push_pending_frames(struct sock *sk)
+{
+	if (tcp_send_head(sk)) {
+		struct tcp_sock *tp = tcp_sk(sk);
+
+		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
+	}
+}
+
 /* Start sequence of the highest skb with SACKed bit, valid only if
  * sacked > 0 or when the caller has ensured validity by itself.
  */
@@ -1381,6 +1391,14 @@ static inline void tcp_highest_sack_combine(struct sock *sk,
 	tcp_sk(sk)->highest_sack = new;
 }
 
+/* Determines whether this is a thin stream (which may suffer from
+ * increased latency). Used to trigger latency-reducing mechanisms.
+ */
+static inline unsigned int tcp_stream_is_thin(struct tcp_sock *tp)
+{
+	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
+}
+
 /* /proc */
 enum tcp_seq_states {
 	TCP_SEQ_STATE_LISTENING,
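
For context, the thin-stream pieces introduced above (TCP_THIN_LINEAR_RETRIES, the two sysctls, and tcp_stream_is_thin()) are consumed by the retransmit and dupack paths elsewhere in this series. Below is a minimal userspace sketch of opting a single socket into the behaviour instead of enabling it system-wide; the per-socket TCP_THIN_LINEAR_TIMEOUTS and TCP_THIN_DUPACK options, and the fallback values 16/17, come from the companion patches rather than from this header, so treat them as assumptions here.

/*
 * Hedged sketch (not part of this diff): enable thin-stream heuristics
 * on one TCP socket.  The socket options are added by the companion
 * patches in the series; the fallback values are assumptions for
 * userspace headers that predate them.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_THIN_LINEAR_TIMEOUTS
#define TCP_THIN_LINEAR_TIMEOUTS 16	/* assumed value from the series */
#endif
#ifndef TCP_THIN_DUPACK
#define TCP_THIN_DUPACK 17		/* assumed value from the series */
#endif

int main(void)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Keep retransmit timeouts linear (no exponential backoff) while
	 * the stream is thin, up to TCP_THIN_LINEAR_RETRIES attempts. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_THIN_LINEAR_TIMEOUTS,
		       &one, sizeof(one)) < 0)
		perror("TCP_THIN_LINEAR_TIMEOUTS");

	/* Trigger fast retransmit after a single duplicate ACK on thin streams. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_THIN_DUPACK,
		       &one, sizeof(one)) < 0)
		perror("TCP_THIN_DUPACK");

	/* ... connect(), send(), etc. ... */
	close(fd);
	return 0;
}

On the kernel side, per the series' changelogs, the retransmit timer skips exponential backoff for up to TCP_THIN_LINEAR_RETRIES attempts when tcp_stream_is_thin() is true and either the corresponding sysctl or the per-socket flag is set.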