Diffstat (limited to 'include/linux/tcp.h')
-rw-r--r--	include/linux/tcp.h	78
1 file changed, 23 insertions(+), 55 deletions(-)
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 14a55e3e3a50..dfd93d03f5d2 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -127,6 +127,7 @@ enum {
 #define TCP_WINDOW_CLAMP	10	/* Bound advertised window */
 #define TCP_INFO		11	/* Information about this connection. */
 #define TCP_QUICKACK		12	/* Block/reenable quick acks */
+#define TCP_CONGESTION		13	/* Congestion control algorithm */
 
 #define TCPI_OPT_TIMESTAMPS	1
 #define TCPI_OPT_SACK		2
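
The new TCP_CONGESTION option lets an application choose the congestion control algorithm per socket by name. A minimal userspace sketch; the fallback define mirrors the value added above, and "reno" is only an example of a name that may be available on a given kernel:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_CONGESTION
#define TCP_CONGESTION	13	/* not yet exported by older libc headers */
#endif

/* Select an algorithm on a TCP socket, then read back the name
 * actually in effect. */
static int pick_congestion_control(int fd)
{
	char name[16] = "reno";
	socklen_t len = sizeof(name);

	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, strlen(name)) < 0)
		return -1;
	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len) < 0)
		return -1;

	printf("congestion control: %.*s\n", (int)len, name);
	return 0;
}
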
@@ -203,13 +204,6 @@ struct tcp_sack_block {
 	__u32	end_seq;
 };
 
-enum tcp_congestion_algo {
-	TCP_RENO=0,
-	TCP_VEGAS,
-	TCP_WESTWOOD,
-	TCP_BIC,
-};
-
 struct tcp_options_received {
 /*	PAWS/RTTM data	*/
 	long	ts_recent_stamp;/* Time we stored ts_recent (for aging) */
@@ -230,6 +224,17 @@ struct tcp_options_received {
 	__u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
 };
 
+struct tcp_request_sock {
+	struct inet_request_sock req;
+	__u32			 rcv_isn;
+	__u32			 snt_isn;
+};
+
+static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
+{
+	return (struct tcp_request_sock *)req;
+}
+
 struct tcp_sock {
 	/* inet_sock has to be the first member of tcp_sock */
 	struct inet_sock	inet;
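
Because struct inet_request_sock sits first in struct tcp_request_sock, a generic struct request_sock pointer can be downcast with tcp_rsk() to reach the TCP-specific fields. A short sketch; the caller record_isns() is hypothetical, only tcp_rsk() and the two ISN fields come from this diff:

/* Hypothetical helper: stash both initial sequence numbers on a
 * pending connection request. */
static void record_isns(struct request_sock *req, u32 peer_isn, u32 local_isn)
{
	struct tcp_request_sock *treq = tcp_rsk(req);

	treq->rcv_isn = peer_isn;	/* ISN received in the peer's SYN */
	treq->snt_isn = local_isn;	/* ISN sent in our SYN-ACK */
}
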
@@ -294,7 +299,7 @@ struct tcp_sock {
 	__u8	reordering;	/* Packet reordering metric. */
 	__u8	frto_counter;	/* Number of new acks after RTO */
 
-	__u8	adv_cong;	/* Using Vegas, Westwood, or BIC */
+	__u8	unused;
 	__u8	defer_accept;	/* User waits for some data after accept() */
 
 /* RTT measurement */
@@ -368,22 +373,7 @@ struct tcp_sock {
 
 	__u32	total_retrans;	/* Total retransmits for entire connection */
 
-	/* The syn_wait_lock is necessary only to avoid proc interface having
-	 * to grab the main lock sock while browsing the listening hash
-	 * (otherwise it's deadlock prone).
-	 * This lock is acquired in read mode only from listening_get_next()
-	 * and it's acquired in write mode _only_ from code that is actively
-	 * changing the syn_wait_queue. All readers that are holding
-	 * the master sock lock don't need to grab this lock in read mode
-	 * too as the syn_wait_queue writes are always protected from
-	 * the main sock lock.
-	 */
-	rwlock_t		syn_wait_lock;
-	struct tcp_listen_opt	*listen_opt;
-
-	/* FIFO of established children */
-	struct open_request	*accept_queue;
-	struct open_request	*accept_queue_tail;
+	struct request_sock_queue accept_queue; /* FIFO of established children */
 
 	unsigned int		keepalive_time;	  /* time before keep alive takes place */
 	unsigned int		keepalive_intvl;  /* time interval between keep alive probes */
@@ -405,37 +395,10 @@ struct tcp_sock {
 		__u32	time;
 	} rcvq_space;
 
-/* TCP Westwood structure */
-	struct {
-		__u32	bw_ns_est;	/* first bandwidth estimation..not too smoothed 8) */
-		__u32	bw_est;		/* bandwidth estimate */
-		__u32	rtt_win_sx;	/* here starts a new evaluation... */
-		__u32	bk;
-		__u32	snd_una;	/* used for evaluating the number of acked bytes */
-		__u32	cumul_ack;
-		__u32	accounted;
-		__u32	rtt;
-		__u32	rtt_min;	/* minimum observed RTT */
-	} westwood;
-
-/* Vegas variables */
-	struct {
-		__u32	beg_snd_nxt;	/* right edge during last RTT */
-		__u32	beg_snd_una;	/* left edge during last RTT */
-		__u32	beg_snd_cwnd;	/* saves the size of the cwnd */
-		__u8	doing_vegas_now;/* if true, do vegas for this RTT */
-		__u16	cntRTT;		/* # of RTTs measured within last RTT */
-		__u32	minRTT;		/* min of RTTs measured within last RTT (in usec) */
-		__u32	baseRTT;	/* the min of all Vegas RTT measurements seen (in usec) */
-	} vegas;
-
-	/* BI TCP Parameters */
-	struct {
-		__u32	cnt;		/* increase cwnd by 1 after this number of ACKs */
-		__u32	last_max_cwnd;	/* last maximium snd_cwnd */
-		__u32	last_cwnd;	/* the last snd_cwnd */
-		__u32	last_stamp;	/* time when updated last_cwnd */
-	} bictcp;
+	/* Pluggable TCP congestion control hook */
+	struct tcp_congestion_ops *ca_ops;
+	u32	ca_priv[16];
+#define TCP_CA_PRIV_SIZE	(16*sizeof(u32))
 };
 
 static inline struct tcp_sock *tcp_sk(const struct sock *sk)
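
Instead of carrying Westwood, Vegas and BIC state unconditionally, every tcp_sock now holds a pointer to the active algorithm's hook table plus a fixed 16-word scratch area for that algorithm's private per-connection state. A sketch of how an algorithm would use the scratch area through the tcp_ca() accessor added at the end of this file; struct demo_ca and demo_ca_init() are made-up names, and only ca_priv, TCP_CA_PRIV_SIZE and tcp_ca() come from this diff:

#include <linux/kernel.h>
#include <linux/tcp.h>

/* Hypothetical private state; it must fit in the ca_priv[] scratch area. */
struct demo_ca {
	u32	cnt;		/* ACKs counted toward the next cwnd increase */
	u32	last_max_cwnd;	/* snd_cwnd seen before the last loss event */
};

static void demo_ca_init(struct tcp_sock *tp)
{
	struct demo_ca *ca = tcp_ca(tp);

	BUILD_BUG_ON(sizeof(struct demo_ca) > TCP_CA_PRIV_SIZE);

	ca->cnt = 0;
	ca->last_max_cwnd = 0;
}
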
@@ -443,6 +406,11 @@ static inline struct tcp_sock *tcp_sk(const struct sock *sk)
 	return (struct tcp_sock *)sk;
 }
 
+static inline void *tcp_ca(const struct tcp_sock *tp)
+{
+	return (void *) tp->ca_priv;
+}
+
 #endif
 
 #endif	/* _LINUX_TCP_H */
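
tcp_ca() hands an algorithm its slice of the scratch area as an anonymous pointer. A rough sketch of how an algorithm module would plug itself in; tcp_register_congestion_control(), tcp_unregister_congestion_control() and the .name/.owner/.init/.ssthresh members of struct tcp_congestion_ops belong to the rest of this series (net/tcp.h, net/ipv4/tcp_cong.c), not to this header diff, so treat the prototypes as assumptions, and note that the real interface defines further hooks (e.g. cong_avoid) omitted here:

#include <linux/module.h>
#include <linux/string.h>
#include <net/tcp.h>

static void demo_start(struct tcp_sock *tp)
{
	memset(tcp_ca(tp), 0, TCP_CA_PRIV_SIZE);	/* clear private scratch */
}

static u32 demo_ssthresh(struct tcp_sock *tp)
{
	return max(tp->snd_cwnd >> 1U, 2U);		/* halve cwnd, Reno-style */
}

static struct tcp_congestion_ops demo_ops = {
	.name		= "demo",
	.owner		= THIS_MODULE,
	.init		= demo_start,
	.ssthresh	= demo_ssthresh,
};

static int __init demo_register(void)
{
	return tcp_register_congestion_control(&demo_ops);
}

static void __exit demo_unregister(void)
{
	tcp_unregister_congestion_control(&demo_ops);
}

module_init(demo_register);
module_exit(demo_unregister);
MODULE_LICENSE("GPL");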