Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--	include/net/tcp.h	222
1 file changed, 111 insertions, 111 deletions
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cf8e664176ad..a943c79c88b0 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -19,10 +19,11 @@
 #define _TCP_H
 
 #define TCP_DEBUG 1
+#define INET_CSK_DEBUG 1
 #define FASTRETRANS_DEBUG 1
 
 /* Cancel timers, when they are not required. */
-#undef TCP_CLEAR_TIMERS
+#undef INET_CSK_CLEAR_TIMERS
 
 #include <linux/config.h>
 #include <linux/list.h>
@@ -205,10 +206,10 @@ extern void tcp_tw_deschedule(struct inet_timewait_sock *tw);
 #define TCPOLEN_SACK_BASE_ALIGNED	4
 #define TCPOLEN_SACK_PERBLOCK		8
 
-#define TCP_TIME_RETRANS	1	/* Retransmit timer */
-#define TCP_TIME_DACK		2	/* Delayed ack timer */
-#define TCP_TIME_PROBE0		3	/* Zero window probe timer */
-#define TCP_TIME_KEEPOPEN	4	/* Keepalive timer */
+#define ICSK_TIME_RETRANS	1	/* Retransmit timer */
+#define ICSK_TIME_DACK		2	/* Delayed ack timer */
+#define ICSK_TIME_PROBE0	3	/* Zero window probe timer */
+#define ICSK_TIME_KEEPOPEN	4	/* Keepalive timer */
 
 /* Flags in tp->nonagle */
 #define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
@@ -257,9 +258,9 @@ extern atomic_t tcp_sockets_allocated;
 extern int tcp_memory_pressure;
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-#define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
+#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
 #else
-#define TCP_INET_FAMILY(fam) 1
+#define AF_INET_FAMILY(fam) 1
 #endif
 
 /*
@@ -372,41 +373,42 @@ extern int tcp_rcv_established(struct sock *sk,
 
 extern void tcp_rcv_space_adjust(struct sock *sk);
 
-enum tcp_ack_state_t
-{
-	TCP_ACK_SCHED = 1,
-	TCP_ACK_TIMER = 2,
-	TCP_ACK_PUSHED= 4
+enum inet_csk_ack_state_t {
+	ICSK_ACK_SCHED = 1,
+	ICSK_ACK_TIMER = 2,
+	ICSK_ACK_PUSHED = 4
 };
 
-static inline void tcp_schedule_ack(struct tcp_sock *tp)
+static inline void inet_csk_schedule_ack(struct sock *sk)
 {
-	tp->ack.pending |= TCP_ACK_SCHED;
+	inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED;
 }
 
-static inline int tcp_ack_scheduled(struct tcp_sock *tp)
+static inline int inet_csk_ack_scheduled(const struct sock *sk)
 {
-	return tp->ack.pending&TCP_ACK_SCHED;
+	return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED;
 }
 
-static __inline__ void tcp_dec_quickack_mode(struct tcp_sock *tp, unsigned int pkts)
+static inline void tcp_dec_quickack_mode(struct sock *sk,
+					 const unsigned int pkts)
 {
-	if (tp->ack.quick) {
-		if (pkts >= tp->ack.quick) {
-			tp->ack.quick = 0;
+	struct inet_connection_sock *icsk = inet_csk(sk);
 
+	if (icsk->icsk_ack.quick) {
+		if (pkts >= icsk->icsk_ack.quick) {
+			icsk->icsk_ack.quick = 0;
 			/* Leaving quickack mode we deflate ATO. */
-			tp->ack.ato = TCP_ATO_MIN;
+			icsk->icsk_ack.ato = TCP_ATO_MIN;
 		} else
-			tp->ack.quick -= pkts;
+			icsk->icsk_ack.quick -= pkts;
 	}
 }
 
-extern void tcp_enter_quickack_mode(struct tcp_sock *tp);
+extern void tcp_enter_quickack_mode(struct sock *sk);
 
-static __inline__ void tcp_delack_init(struct tcp_sock *tp)
+static inline void inet_csk_delack_init(struct sock *sk)
 {
-	memset(&tp->ack, 0, sizeof(tp->ack));
+	memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
 }
 
 static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
@@ -440,7 +442,7 @@ extern void tcp_update_metrics(struct sock *sk);
 
 extern void tcp_close(struct sock *sk,
		       long timeout);
-extern struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
+extern struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
 extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
 
 extern int tcp_getsockopt(struct sock *sk, int level,
@@ -534,15 +536,18 @@ extern void tcp_cwnd_application_limited(struct sock *sk);
 
 /* tcp_timer.c */
 extern void tcp_init_xmit_timers(struct sock *);
-extern void tcp_clear_xmit_timers(struct sock *);
+static inline void tcp_clear_xmit_timers(struct sock *sk)
+{
+	inet_csk_clear_xmit_timers(sk);
+}
 
-extern void tcp_delete_keepalive_timer(struct sock *);
-extern void tcp_reset_keepalive_timer(struct sock *, unsigned long);
+extern void inet_csk_delete_keepalive_timer(struct sock *sk);
+extern void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
 extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
 extern unsigned int tcp_current_mss(struct sock *sk, int large);
 
-#ifdef TCP_DEBUG
-extern const char tcp_timer_bug_msg[];
+#ifdef INET_CSK_DEBUG
+extern const char inet_csk_timer_bug_msg[];
 #endif
 
 /* tcp_diag.c */
@@ -554,70 +559,58 @@ typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
 extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			  sk_read_actor_t recv_actor);
 
-static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
+static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct inet_connection_sock *icsk = inet_csk(sk);
 
-	switch (what) {
-	case TCP_TIME_RETRANS:
-	case TCP_TIME_PROBE0:
-		tp->pending = 0;
-
-#ifdef TCP_CLEAR_TIMERS
-		sk_stop_timer(sk, &tp->retransmit_timer);
+	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
+		icsk->icsk_pending = 0;
+#ifdef INET_CSK_CLEAR_TIMERS
+		sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
 #endif
-		break;
-	case TCP_TIME_DACK:
-		tp->ack.blocked = 0;
-		tp->ack.pending = 0;
-
-#ifdef TCP_CLEAR_TIMERS
-		sk_stop_timer(sk, &tp->delack_timer);
+	} else if (what == ICSK_TIME_DACK) {
+		icsk->icsk_ack.blocked = icsk->icsk_ack.pending = 0;
+#ifdef INET_CSK_CLEAR_TIMERS
+		sk_stop_timer(sk, &icsk->icsk_delack_timer);
 #endif
-		break;
-	default:
-#ifdef TCP_DEBUG
-		printk(tcp_timer_bug_msg);
+	}
+#ifdef INET_CSK_DEBUG
+	else {
+		pr_debug(inet_csk_timer_bug_msg);
+	}
 #endif
-		return;
-	};
-
 }
 
 /*
  * Reset the retransmission timer
  */
-static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
+static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
+					     unsigned long when)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct inet_connection_sock *icsk = inet_csk(sk);
 
 	if (when > TCP_RTO_MAX) {
-#ifdef TCP_DEBUG
-		printk(KERN_DEBUG "reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, current_text_addr());
+#ifdef INET_CSK_DEBUG
+		pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n",
+			 sk, what, when, current_text_addr());
 #endif
 		when = TCP_RTO_MAX;
 	}
 
-	switch (what) {
-	case TCP_TIME_RETRANS:
-	case TCP_TIME_PROBE0:
-		tp->pending = what;
-		tp->timeout = jiffies+when;
-		sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout);
-		break;
-
-	case TCP_TIME_DACK:
-		tp->ack.pending |= TCP_ACK_TIMER;
-		tp->ack.timeout = jiffies+when;
-		sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout);
-		break;
-
-	default:
-#ifdef TCP_DEBUG
-		printk(tcp_timer_bug_msg);
+	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
+		icsk->icsk_pending = what;
+		icsk->icsk_timeout = jiffies + when;
+		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
+	} else if (what == ICSK_TIME_DACK) {
+		icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
+		icsk->icsk_ack.timeout = jiffies + when;
+		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
+	}
+#ifdef INET_CSK_DEBUG
+	else {
+		pr_debug(inet_csk_timer_bug_msg);
+	}
 #endif
-		return;
-	};
 }
 
 /* Initialize RCV_MSS value.
@@ -637,7 +630,7 @@ static inline void tcp_initialize_rcv_mss(struct sock *sk)
 	hint = min(hint, TCP_MIN_RCVMSS);
 	hint = max(hint, TCP_MIN_MSS);
 
-	tp->ack.rcv_mss = hint;
+	inet_csk(sk)->icsk_ack.rcv_mss = hint;
 }
 
 static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
@@ -772,7 +765,7 @@ static inline void tcp_packets_out_inc(struct sock *sk,
 
 	tp->packets_out += tcp_skb_pcount(skb);
 	if (!orig)
-		tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
 }
 
 static inline void tcp_packets_out_dec(struct tcp_sock *tp,
@@ -939,8 +932,9 @@ static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss,
 
 static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
 {
-	if (!tp->packets_out && !tp->pending)
-		tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
+	if (!tp->packets_out && !icsk->icsk_pending)
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, icsk->icsk_rto);
 }
 
 static __inline__ void tcp_push_pending_frames(struct sock *sk,
@@ -1021,8 +1015,9 @@ static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 			tp->ucopy.memory = 0;
 		} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
 			wake_up_interruptible(sk->sk_sleep);
-			if (!tcp_ack_scheduled(tp))
-				tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4);
+			if (!inet_csk_ack_scheduled(sk))
+				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+							  (3 * TCP_RTO_MIN) / 4);
 		}
 		return 1;
 	}
@@ -1055,7 +1050,7 @@ static __inline__ void tcp_set_state(struct sock *sk, int state)
 			TCP_INC_STATS(TCP_MIB_ESTABRESETS);
 
 		sk->sk_prot->unhash(sk);
-		if (inet_sk(sk)->bind_hash &&
+		if (inet_csk(sk)->icsk_bind_hash &&
 		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
 			inet_put_port(&tcp_hashinfo, sk);
 		/* fall through */
@@ -1186,51 +1181,55 @@ static inline int tcp_full_space(const struct sock *sk)
 	return tcp_win_from_space(sk->sk_rcvbuf);
 }
 
-static inline void tcp_acceptq_queue(struct sock *sk, struct request_sock *req,
-					 struct sock *child)
+static inline void inet_csk_reqsk_queue_add(struct sock *sk,
+					    struct request_sock *req,
+					    struct sock *child)
 {
-	reqsk_queue_add(&tcp_sk(sk)->accept_queue, req, sk, child);
+	reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
 }
 
-static inline void
-tcp_synq_removed(struct sock *sk, struct request_sock *req)
+static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
+						struct request_sock *req)
 {
-	if (reqsk_queue_removed(&tcp_sk(sk)->accept_queue, req) == 0)
-		tcp_delete_keepalive_timer(sk);
+	if (reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req) == 0)
+		inet_csk_delete_keepalive_timer(sk);
 }
 
-static inline void tcp_synq_added(struct sock *sk)
+static inline void inet_csk_reqsk_queue_added(struct sock *sk,
+					      const unsigned long timeout)
 {
-	if (reqsk_queue_added(&tcp_sk(sk)->accept_queue) == 0)
-		tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
+	if (reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue) == 0)
+		inet_csk_reset_keepalive_timer(sk, timeout);
 }
 
-static inline int tcp_synq_len(struct sock *sk)
+static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
 {
-	return reqsk_queue_len(&tcp_sk(sk)->accept_queue);
+	return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
 }
 
-static inline int tcp_synq_young(struct sock *sk)
+static inline int inet_csk_reqsk_queue_young(const struct sock *sk)
 {
-	return reqsk_queue_len_young(&tcp_sk(sk)->accept_queue);
+	return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue);
 }
 
-static inline int tcp_synq_is_full(struct sock *sk)
+static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
 {
-	return reqsk_queue_is_full(&tcp_sk(sk)->accept_queue);
+	return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
 }
 
-static inline void tcp_synq_unlink(struct tcp_sock *tp, struct request_sock *req,
-				   struct request_sock **prev)
+static inline void inet_csk_reqsk_queue_unlink(struct sock *sk,
+					       struct request_sock *req,
+					       struct request_sock **prev)
 {
-	reqsk_queue_unlink(&tp->accept_queue, req, prev);
+	reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req, prev);
 }
 
-static inline void tcp_synq_drop(struct sock *sk, struct request_sock *req,
-				 struct request_sock **prev)
+static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
+					     struct request_sock *req,
+					     struct request_sock **prev)
 {
-	tcp_synq_unlink(tcp_sk(sk), req, prev);
-	tcp_synq_removed(sk, req);
+	inet_csk_reqsk_queue_unlink(sk, req, prev);
+	inet_csk_reqsk_queue_removed(sk, req);
 	reqsk_free(req);
 }
 
@@ -1265,12 +1264,13 @@ static inline int keepalive_time_when(const struct tcp_sock *tp)
 	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
 }
 
-static inline int tcp_fin_time(const struct tcp_sock *tp)
+static inline int tcp_fin_time(const struct sock *sk)
 {
-	int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout;
+	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
+	const int rto = inet_csk(sk)->icsk_rto;
 
-	if (fin_timeout < (tp->rto<<2) - (tp->rto>>1))
-		fin_timeout = (tp->rto<<2) - (tp->rto>>1);
+	if (fin_timeout < (rto << 2) - (rto >> 1))
+		fin_timeout = (rto << 2) - (rto >> 1);
 
 	return fin_timeout;
 }
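
Note (not part of the patch): a minimal, hypothetical sketch of how a call site migrates from the old tcp_* timer helpers to the renamed inet_csk_* ones declared above. The function name tcp_example_rearm_rto() is invented for illustration only; the snippet assumes the declarations from this header as changed by the diff.

/* Illustrative sketch only -- not from this diff.  The old idiom
 *	tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
 * becomes the inet_connection_sock form shown below. */
#include <net/tcp.h>

/* hypothetical helper, invented for illustration */
static inline void tcp_example_rearm_rto(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	/* arm the retransmit timer using the RTO now kept in the icsk */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto);
}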