Diffstat (limited to 'include/net')
 include/net/inet_connection_sock.h | 152
 include/net/tcp.h                  | 160
 2 files changed, 160 insertions(+), 152 deletions(-)
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index ef609396e41b..97e002001c1a 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -16,9 +16,15 @@
 #define _INET_CONNECTION_SOCK_H
 
 #include <linux/ip.h>
+#include <linux/string.h>
 #include <linux/timer.h>
 #include <net/request_sock.h>
 
+#define INET_CSK_DEBUG 1
+
+/* Cancel timers, when they are not required. */
+#undef INET_CSK_CLEAR_TIMERS
+
 struct inet_bind_bucket;
 struct inet_hashinfo;
 
@@ -61,17 +67,107 @@ struct inet_connection_sock {
 	} icsk_ack;
 };
 
+#define ICSK_TIME_RETRANS	1	/* Retransmit timer */
+#define ICSK_TIME_DACK		2	/* Delayed ack timer */
+#define ICSK_TIME_PROBE0	3	/* Zero window probe timer */
+#define ICSK_TIME_KEEPOPEN	4	/* Keepalive timer */
+
 static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
 {
 	return (struct inet_connection_sock *)sk;
 }
 
+enum inet_csk_ack_state_t {
+	ICSK_ACK_SCHED	= 1,
+	ICSK_ACK_TIMER	= 2,
+	ICSK_ACK_PUSHED = 4
+};
+
 extern void inet_csk_init_xmit_timers(struct sock *sk,
 				      void (*retransmit_handler)(unsigned long),
 				      void (*delack_handler)(unsigned long),
 				      void (*keepalive_handler)(unsigned long));
 extern void inet_csk_clear_xmit_timers(struct sock *sk);
 
+static inline void inet_csk_schedule_ack(struct sock *sk)
+{
+	inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED;
+}
+
+static inline int inet_csk_ack_scheduled(const struct sock *sk)
+{
+	return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED;
+}
+
+static inline void inet_csk_delack_init(struct sock *sk)
+{
+	memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
+}
+
+extern void inet_csk_delete_keepalive_timer(struct sock *sk);
+extern void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
+
+#ifdef INET_CSK_DEBUG
+extern const char inet_csk_timer_bug_msg[];
+#endif
+
+static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
+{
+	struct inet_connection_sock *icsk = inet_csk(sk);
+
+	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
+		icsk->icsk_pending = 0;
+#ifdef INET_CSK_CLEAR_TIMERS
+		sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
+#endif
+	} else if (what == ICSK_TIME_DACK) {
+		icsk->icsk_ack.blocked = icsk->icsk_ack.pending = 0;
+#ifdef INET_CSK_CLEAR_TIMERS
+		sk_stop_timer(sk, &icsk->icsk_delack_timer);
+#endif
+	}
+#ifdef INET_CSK_DEBUG
+	else {
+		pr_debug(inet_csk_timer_bug_msg);
+	}
+#endif
+}
+
+/*
+ *	Reset the retransmission timer
+ */
+static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
+					     unsigned long when,
+					     const unsigned long max_when)
+{
+	struct inet_connection_sock *icsk = inet_csk(sk);
+
+	if (when > max_when) {
+#ifdef INET_CSK_DEBUG
+		pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n",
+			 sk, what, when, current_text_addr());
+#endif
+		when = max_when;
+	}
+
+	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
+		icsk->icsk_pending = what;
+		icsk->icsk_timeout = jiffies + when;
+		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
+	} else if (what == ICSK_TIME_DACK) {
+		icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
+		icsk->icsk_ack.timeout = jiffies + when;
+		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
+	}
+#ifdef INET_CSK_DEBUG
+	else {
+		pr_debug(inet_csk_timer_bug_msg);
+	}
+#endif
+}
+
+extern struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
+
 extern struct request_sock *inet_csk_search_req(const struct sock *sk,
 						struct request_sock ***prevp,
 						const __u16 rport,
@@ -83,4 +179,60 @@ extern int inet_csk_get_port(struct inet_hashinfo *hashinfo,
 extern struct dst_entry* inet_csk_route_req(struct sock *sk,
 					    const struct request_sock *req);
 
+static inline void inet_csk_reqsk_queue_add(struct sock *sk,
+					    struct request_sock *req,
+					    struct sock *child)
+{
+	reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
+}
+
+extern void inet_csk_reqsk_queue_hash_add(struct sock *sk,
+					  struct request_sock *req,
+					  const unsigned timeout);
+
+static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
+						struct request_sock *req)
+{
+	if (reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req) == 0)
+		inet_csk_delete_keepalive_timer(sk);
+}
+
+static inline void inet_csk_reqsk_queue_added(struct sock *sk,
+					      const unsigned long timeout)
+{
+	if (reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue) == 0)
+		inet_csk_reset_keepalive_timer(sk, timeout);
+}
+
+static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
+{
+	return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
+}
+
+static inline int inet_csk_reqsk_queue_young(const struct sock *sk)
+{
+	return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue);
+}
+
+static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
+{
+	return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
+}
+
+static inline void inet_csk_reqsk_queue_unlink(struct sock *sk,
+					       struct request_sock *req,
+					       struct request_sock **prev)
+{
+	reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req, prev);
+}
+
+static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
+					     struct request_sock *req,
+					     struct request_sock **prev)
+{
+	inet_csk_reqsk_queue_unlink(sk, req, prev);
+	inet_csk_reqsk_queue_removed(sk, req);
+	reqsk_free(req);
+}
+
 #endif /* _INET_CONNECTION_SOCK_H */
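
The block below is illustrative only and not part of the patch: a minimal sketch of how a connection-oriented protocol whose socket embeds struct inet_connection_sock (as TCP's does) might drive the timer helpers declared above. The example_* function names are made up; inet_csk_reset_xmit_timer(), inet_csk_clear_xmit_timer(), icsk_rto and the ICSK_TIME_* constants come from this header, and TCP_RTO_MAX is assumed to be available from <net/tcp.h>.

#include <net/inet_connection_sock.h>
#include <net/tcp.h>

/* Hypothetical caller: (re)arm the retransmit timer for one RTO, with the
 * upper bound now passed explicitly via the new max_when argument. */
static void example_rearm_rto(struct sock *sk)
{
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
}

/* Hypothetical caller: once an ACK has been sent, clear the delayed-ACK
 * state (and, with INET_CSK_CLEAR_TIMERS, the pending timer as well). */
static void example_ack_sent(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}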
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a943c79c88b0..dd9a5a288f88 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -19,18 +19,16 @@
 #define _TCP_H
 
 #define TCP_DEBUG 1
-#define INET_CSK_DEBUG 1
 #define FASTRETRANS_DEBUG 1
 
-/* Cancel timers, when they are not required. */
-#undef INET_CSK_CLEAR_TIMERS
-
 #include <linux/config.h>
 #include <linux/list.h>
 #include <linux/tcp.h>
 #include <linux/slab.h>
 #include <linux/cache.h>
 #include <linux/percpu.h>
+
+#include <net/inet_connection_sock.h>
 #include <net/inet_hashtables.h>
 #include <net/checksum.h>
 #include <net/request_sock.h>
@@ -206,11 +204,6 @@ extern void tcp_tw_deschedule(struct inet_timewait_sock *tw);
 #define TCPOLEN_SACK_BASE_ALIGNED	4
 #define TCPOLEN_SACK_PERBLOCK		8
 
-#define ICSK_TIME_RETRANS	1	/* Retransmit timer */
-#define ICSK_TIME_DACK		2	/* Delayed ack timer */
-#define ICSK_TIME_PROBE0	3	/* Zero window probe timer */
-#define ICSK_TIME_KEEPOPEN	4	/* Keepalive timer */
-
 /* Flags in tp->nonagle */
 #define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
 #define TCP_NAGLE_CORK		2	/* Socket is corked */
@@ -257,12 +250,6 @@ extern atomic_t tcp_memory_allocated;
 extern atomic_t tcp_sockets_allocated;
 extern int tcp_memory_pressure;
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
-#else
-#define AF_INET_FAMILY(fam) 1
-#endif
-
 /*
  * Pointers to address related TCP functions
  * (i.e. things that depend on the address family)
@@ -373,22 +360,6 @@ extern int tcp_rcv_established(struct sock *sk,
 
 extern void tcp_rcv_space_adjust(struct sock *sk);
 
-enum inet_csk_ack_state_t {
-	ICSK_ACK_SCHED	= 1,
-	ICSK_ACK_TIMER	= 2,
-	ICSK_ACK_PUSHED = 4
-};
-
-static inline void inet_csk_schedule_ack(struct sock *sk)
-{
-	inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED;
-}
-
-static inline int inet_csk_ack_scheduled(const struct sock *sk)
-{
-	return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED;
-}
-
 static inline void tcp_dec_quickack_mode(struct sock *sk,
 					 const unsigned int pkts)
 {
@@ -406,11 +377,6 @@ static inline void tcp_dec_quickack_mode(struct sock *sk,
 
 extern void tcp_enter_quickack_mode(struct sock *sk);
 
-static inline void inet_csk_delack_init(struct sock *sk)
-{
-	memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
-}
-
 static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
 {
 	rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
@@ -442,7 +408,6 @@ extern void tcp_update_metrics(struct sock *sk);
 
 extern void tcp_close(struct sock *sk,
 		      long timeout);
-extern struct sock * inet_csk_accept(struct sock *sk, int flags, int *err);
 extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
 
 extern int tcp_getsockopt(struct sock *sk, int level,
@@ -541,15 +506,9 @@ static inline void tcp_clear_xmit_timers(struct sock *sk)
 	inet_csk_clear_xmit_timers(sk);
 }
 
-extern void inet_csk_delete_keepalive_timer(struct sock *sk);
-extern void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
 extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
 extern unsigned int tcp_current_mss(struct sock *sk, int large);
 
-#ifdef INET_CSK_DEBUG
-extern const char inet_csk_timer_bug_msg[];
-#endif
-
 /* tcp_diag.c */
 extern void tcp_get_info(struct sock *, struct tcp_info *);
 
@@ -559,60 +518,6 @@ typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
 extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 			 sk_read_actor_t recv_actor);
 
-static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
-{
-	struct inet_connection_sock *icsk = inet_csk(sk);
-
-	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
-		icsk->icsk_pending = 0;
-#ifdef INET_CSK_CLEAR_TIMERS
-		sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
-#endif
-	} else if (what == ICSK_TIME_DACK) {
-		icsk->icsk_ack.blocked = icsk->icsk_ack.pending = 0;
-#ifdef INET_CSK_CLEAR_TIMERS
-		sk_stop_timer(sk, &icsk->icsk_delack_timer);
-#endif
-	}
-#ifdef INET_CSK_DEBUG
-	else {
-		pr_debug(inet_csk_timer_bug_msg);
-	}
-#endif
-}
-
-/*
- *	Reset the retransmission timer
- */
-static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
-					     unsigned long when)
-{
-	struct inet_connection_sock *icsk = inet_csk(sk);
-
-	if (when > TCP_RTO_MAX) {
-#ifdef INET_CSK_DEBUG
-		pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n",
-			 sk, what, when, current_text_addr());
-#endif
-		when = TCP_RTO_MAX;
-	}
-
-	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
-		icsk->icsk_pending = what;
-		icsk->icsk_timeout = jiffies + when;
-		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
-	} else if (what == ICSK_TIME_DACK) {
-		icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
-		icsk->icsk_ack.timeout = jiffies + when;
-		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
-	}
-#ifdef INET_CSK_DEBUG
-	else {
-		pr_debug(inet_csk_timer_bug_msg);
-	}
-#endif
-}
-
 /* Initialize RCV_MSS value.
  * RCV_MSS is an our guess about MSS used by the peer.
  * We haven't any direct information about the MSS.
@@ -765,7 +670,8 @@ static inline void tcp_packets_out_inc(struct sock *sk,
 
 	tp->packets_out += tcp_skb_pcount(skb);
 	if (!orig)
-		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
 }
 
 static inline void tcp_packets_out_dec(struct tcp_sock *tp,
@@ -934,7 +840,8 @@ static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *t
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	if (!tp->packets_out && !icsk->icsk_pending)
-		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, icsk->icsk_rto);
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+					  icsk->icsk_rto, TCP_RTO_MAX);
 }
 
 static __inline__ void tcp_push_pending_frames(struct sock *sk,
@@ -1017,7 +924,8 @@ static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 			wake_up_interruptible(sk->sk_sleep);
 		if (!inet_csk_ack_scheduled(sk))
 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
-						  (3 * TCP_RTO_MIN) / 4);
+						  (3 * TCP_RTO_MIN) / 4,
+						  TCP_RTO_MAX);
 		}
 		return 1;
 	}
@@ -1181,58 +1089,6 @@ static inline int tcp_full_space(const struct sock *sk)
 	return tcp_win_from_space(sk->sk_rcvbuf);
 }
 
-static inline void inet_csk_reqsk_queue_add(struct sock *sk,
-					    struct request_sock *req,
-					    struct sock *child)
-{
-	reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
-}
-
-static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
-						struct request_sock *req)
-{
-	if (reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req) == 0)
-		inet_csk_delete_keepalive_timer(sk);
-}
-
-static inline void inet_csk_reqsk_queue_added(struct sock *sk,
-					      const unsigned long timeout)
-{
-	if (reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue) == 0)
-		inet_csk_reset_keepalive_timer(sk, timeout);
-}
-
-static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
-{
-	return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
-}
-
-static inline int inet_csk_reqsk_queue_young(const struct sock *sk)
-{
-	return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue);
-}
-
-static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
-{
-	return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
-}
-
-static inline void inet_csk_reqsk_queue_unlink(struct sock *sk,
-					       struct request_sock *req,
-					       struct request_sock **prev)
-{
-	reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req, prev);
-}
-
-static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
-					     struct request_sock *req,
-					     struct request_sock **prev)
-{
-	inet_csk_reqsk_queue_unlink(sk, req, prev);
-	inet_csk_reqsk_queue_removed(sk, req);
-	reqsk_free(req);
-}
-
 static __inline__ void tcp_openreq_init(struct request_sock *req,
 					struct tcp_options_received *rx_opt,
 					struct sk_buff *skb)
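
Illustrative only, not part of the patch: a rough sketch of a listener-side connection-request path using the request_sock queue wrappers that now live in <net/inet_connection_sock.h>. example_conn_request is a made-up name, and TCP_TIMEOUT_INIT from <net/tcp.h> is assumed to be the SYN-ACK timeout a TCP caller would pass.

#include <linux/errno.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>

/* Hypothetical listener-side path for an incoming connection request. */
static int example_conn_request(struct sock *sk, struct request_sock *req)
{
	/* Refuse new requests once the SYN backlog is full. */
	if (inet_csk_reqsk_queue_is_full(sk))
		return -ENOBUFS;

	/* Hash the request into the listener's SYN queue with the
	 * initial SYN-ACK timeout. */
	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;
}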