Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--   include/net/tcp.h   2022
1 file changed, 2022 insertions, 0 deletions

diff --git a/include/net/tcp.h b/include/net/tcp.h
new file mode 100644
index 000000000000..503810a70e21
--- /dev/null
+++ b/include/net/tcp.h
@@ -0,0 +1,2022 @@
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1

/* Cancel timers when they are not required. */
#undef TCP_CLEAR_TIMERS

#include <linux/config.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <net/checksum.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <linux/ipv6.h>
#endif
#include <linux/seq_file.h>

/* This is for all connections with a full identity, no wildcards.
 * New scheme: half the table is for TIME_WAIT, the other half is
 * for the rest.  I'll experiment with dynamic table growth later.
 */
struct tcp_ehash_bucket {
	rwlock_t	  lock;
	struct hlist_head chain;
} __attribute__((__aligned__(8)));

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define TCP_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit: if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all,
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (i.e. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an FTP server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-) -DaveM
 */
struct tcp_bind_bucket {
	unsigned short		port;
	signed short		fastreuse;
	struct hlist_node	node;
	struct hlist_head	owners;
};
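
/* A minimal sketch of how the fastreuse flag described above is meant
 * to short-circuit a bind attempt.  This is not the actual
 * tcp_v4_get_port() logic, just the shape of the O(1) fast path;
 * walk_owner_list() is a hypothetical stand-in for the full
 * owner-list conflict walk.
 */
#if 0
static int bind_fast_path(struct sock *sk, struct tcp_bind_bucket *tb)
{
	/* Every owner so far passed (sk_reuse && state != TCP_LISTEN),
	 * so tb->fastreuse is set and a reusing, non-listening socket
	 * can share the port without walking tb->owners at all.
	 */
	if (tb->fastreuse > 0 && sk->sk_reuse && sk->sk_state != TCP_LISTEN)
		return 0;			/* ok to share the port */
	return walk_owner_list(sk, tb);		/* slow path */
}
#endif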
91 | |||
92 | #define tb_for_each(tb, node, head) hlist_for_each_entry(tb, node, head, node) | ||
93 | |||
94 | struct tcp_bind_hashbucket { | ||
95 | spinlock_t lock; | ||
96 | struct hlist_head chain; | ||
97 | }; | ||
98 | |||
99 | static inline struct tcp_bind_bucket *__tb_head(struct tcp_bind_hashbucket *head) | ||
100 | { | ||
101 | return hlist_entry(head->chain.first, struct tcp_bind_bucket, node); | ||
102 | } | ||
103 | |||
104 | static inline struct tcp_bind_bucket *tb_head(struct tcp_bind_hashbucket *head) | ||
105 | { | ||
106 | return hlist_empty(&head->chain) ? NULL : __tb_head(head); | ||
107 | } | ||
108 | |||
109 | extern struct tcp_hashinfo { | ||
110 | /* This is for sockets with full identity only. Sockets here will | ||
111 | * always be without wildcards and will have the following invariant: | ||
112 | * | ||
113 | * TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE | ||
114 | * | ||
115 | * First half of the table is for sockets not in TIME_WAIT, second half | ||
116 | * is for TIME_WAIT sockets only. | ||
117 | */ | ||
118 | struct tcp_ehash_bucket *__tcp_ehash; | ||
119 | |||
120 | /* Ok, let's try this, I give up, we do need a local binding | ||
121 | * TCP hash as well as the others for fast bind/connect. | ||
122 | */ | ||
123 | struct tcp_bind_hashbucket *__tcp_bhash; | ||
124 | |||
125 | int __tcp_bhash_size; | ||
126 | int __tcp_ehash_size; | ||
127 | |||
128 | /* All sockets in TCP_LISTEN state will be in here. This is the only | ||
129 | * table where wildcard'd TCP sockets can exist. Hash function here | ||
130 | * is just local port number. | ||
131 | */ | ||
132 | struct hlist_head __tcp_listening_hash[TCP_LHTABLE_SIZE]; | ||
133 | |||
134 | /* All the above members are written once at bootup and | ||
135 | * never written again _or_ are predominantly read-access. | ||
136 | * | ||
137 | * Now align to a new cache line as all the following members | ||
138 | * are often dirty. | ||
139 | */ | ||
140 | rwlock_t __tcp_lhash_lock ____cacheline_aligned; | ||
141 | atomic_t __tcp_lhash_users; | ||
142 | wait_queue_head_t __tcp_lhash_wait; | ||
143 | spinlock_t __tcp_portalloc_lock; | ||
144 | } tcp_hashinfo; | ||
145 | |||
146 | #define tcp_ehash (tcp_hashinfo.__tcp_ehash) | ||
147 | #define tcp_bhash (tcp_hashinfo.__tcp_bhash) | ||
148 | #define tcp_ehash_size (tcp_hashinfo.__tcp_ehash_size) | ||
149 | #define tcp_bhash_size (tcp_hashinfo.__tcp_bhash_size) | ||
150 | #define tcp_listening_hash (tcp_hashinfo.__tcp_listening_hash) | ||
151 | #define tcp_lhash_lock (tcp_hashinfo.__tcp_lhash_lock) | ||
152 | #define tcp_lhash_users (tcp_hashinfo.__tcp_lhash_users) | ||
153 | #define tcp_lhash_wait (tcp_hashinfo.__tcp_lhash_wait) | ||
154 | #define tcp_portalloc_lock (tcp_hashinfo.__tcp_portalloc_lock) | ||
155 | |||
156 | extern kmem_cache_t *tcp_bucket_cachep; | ||
157 | extern struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head, | ||
158 | unsigned short snum); | ||
159 | extern void tcp_bucket_destroy(struct tcp_bind_bucket *tb); | ||
160 | extern void tcp_bucket_unlock(struct sock *sk); | ||
161 | extern int tcp_port_rover; | ||
162 | |||
163 | /* These are AF independent. */ | ||
164 | static __inline__ int tcp_bhashfn(__u16 lport) | ||
165 | { | ||
166 | return (lport & (tcp_bhash_size - 1)); | ||
167 | } | ||
168 | |||
169 | extern void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb, | ||
170 | unsigned short snum); | ||
171 | |||
172 | #if (BITS_PER_LONG == 64) | ||
173 | #define TCP_ADDRCMP_ALIGN_BYTES 8 | ||
174 | #else | ||
175 | #define TCP_ADDRCMP_ALIGN_BYTES 4 | ||
176 | #endif | ||
177 | |||
178 | /* This is a TIME_WAIT bucket. It works around the memory consumption | ||
179 | * problems of sockets in such a state on heavily loaded servers, but | ||
180 | * without violating the protocol specification. | ||
181 | */ | ||
182 | struct tcp_tw_bucket { | ||
183 | /* | ||
184 | * Now struct sock also uses sock_common, so please just | ||
185 | * don't add nothing before this first member (__tw_common) --acme | ||
186 | */ | ||
187 | struct sock_common __tw_common; | ||
188 | #define tw_family __tw_common.skc_family | ||
189 | #define tw_state __tw_common.skc_state | ||
190 | #define tw_reuse __tw_common.skc_reuse | ||
191 | #define tw_bound_dev_if __tw_common.skc_bound_dev_if | ||
192 | #define tw_node __tw_common.skc_node | ||
193 | #define tw_bind_node __tw_common.skc_bind_node | ||
194 | #define tw_refcnt __tw_common.skc_refcnt | ||
195 | volatile unsigned char tw_substate; | ||
196 | unsigned char tw_rcv_wscale; | ||
197 | __u16 tw_sport; | ||
198 | /* Socket demultiplex comparisons on incoming packets. */ | ||
199 | /* these five are in inet_sock */ | ||
200 | __u32 tw_daddr | ||
201 | __attribute__((aligned(TCP_ADDRCMP_ALIGN_BYTES))); | ||
202 | __u32 tw_rcv_saddr; | ||
203 | __u16 tw_dport; | ||
204 | __u16 tw_num; | ||
205 | /* And these are ours. */ | ||
206 | int tw_hashent; | ||
207 | int tw_timeout; | ||
208 | __u32 tw_rcv_nxt; | ||
209 | __u32 tw_snd_nxt; | ||
210 | __u32 tw_rcv_wnd; | ||
211 | __u32 tw_ts_recent; | ||
212 | long tw_ts_recent_stamp; | ||
213 | unsigned long tw_ttd; | ||
214 | struct tcp_bind_bucket *tw_tb; | ||
215 | struct hlist_node tw_death_node; | ||
216 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
217 | struct in6_addr tw_v6_daddr; | ||
218 | struct in6_addr tw_v6_rcv_saddr; | ||
219 | int tw_v6_ipv6only; | ||
220 | #endif | ||
221 | }; | ||
222 | |||
223 | static __inline__ void tw_add_node(struct tcp_tw_bucket *tw, | ||
224 | struct hlist_head *list) | ||
225 | { | ||
226 | hlist_add_head(&tw->tw_node, list); | ||
227 | } | ||
228 | |||
229 | static __inline__ void tw_add_bind_node(struct tcp_tw_bucket *tw, | ||
230 | struct hlist_head *list) | ||
231 | { | ||
232 | hlist_add_head(&tw->tw_bind_node, list); | ||
233 | } | ||
234 | |||
235 | static inline int tw_dead_hashed(struct tcp_tw_bucket *tw) | ||
236 | { | ||
237 | return tw->tw_death_node.pprev != NULL; | ||
238 | } | ||
239 | |||
240 | static __inline__ void tw_dead_node_init(struct tcp_tw_bucket *tw) | ||
241 | { | ||
242 | tw->tw_death_node.pprev = NULL; | ||
243 | } | ||
244 | |||
245 | static __inline__ void __tw_del_dead_node(struct tcp_tw_bucket *tw) | ||
246 | { | ||
247 | __hlist_del(&tw->tw_death_node); | ||
248 | tw_dead_node_init(tw); | ||
249 | } | ||
250 | |||
251 | static __inline__ int tw_del_dead_node(struct tcp_tw_bucket *tw) | ||
252 | { | ||
253 | if (tw_dead_hashed(tw)) { | ||
254 | __tw_del_dead_node(tw); | ||
255 | return 1; | ||
256 | } | ||
257 | return 0; | ||
258 | } | ||
259 | |||
260 | #define tw_for_each(tw, node, head) \ | ||
261 | hlist_for_each_entry(tw, node, head, tw_node) | ||
262 | |||
263 | #define tw_for_each_inmate(tw, node, jail) \ | ||
264 | hlist_for_each_entry(tw, node, jail, tw_death_node) | ||
265 | |||
266 | #define tw_for_each_inmate_safe(tw, node, safe, jail) \ | ||
267 | hlist_for_each_entry_safe(tw, node, safe, jail, tw_death_node) | ||
268 | |||
269 | #define tcptw_sk(__sk) ((struct tcp_tw_bucket *)(__sk)) | ||
270 | |||
271 | static inline u32 tcp_v4_rcv_saddr(const struct sock *sk) | ||
272 | { | ||
273 | return likely(sk->sk_state != TCP_TIME_WAIT) ? | ||
274 | inet_sk(sk)->rcv_saddr : tcptw_sk(sk)->tw_rcv_saddr; | ||
275 | } | ||
276 | |||
277 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
278 | static inline struct in6_addr *__tcp_v6_rcv_saddr(const struct sock *sk) | ||
279 | { | ||
280 | return likely(sk->sk_state != TCP_TIME_WAIT) ? | ||
281 | &inet6_sk(sk)->rcv_saddr : &tcptw_sk(sk)->tw_v6_rcv_saddr; | ||
282 | } | ||
283 | |||
284 | static inline struct in6_addr *tcp_v6_rcv_saddr(const struct sock *sk) | ||
285 | { | ||
286 | return sk->sk_family == AF_INET6 ? __tcp_v6_rcv_saddr(sk) : NULL; | ||
287 | } | ||
288 | |||
289 | #define tcptw_sk_ipv6only(__sk) (tcptw_sk(__sk)->tw_v6_ipv6only) | ||
290 | |||
291 | static inline int tcp_v6_ipv6only(const struct sock *sk) | ||
292 | { | ||
293 | return likely(sk->sk_state != TCP_TIME_WAIT) ? | ||
294 | ipv6_only_sock(sk) : tcptw_sk_ipv6only(sk); | ||
295 | } | ||
296 | #else | ||
297 | # define __tcp_v6_rcv_saddr(__sk) NULL | ||
298 | # define tcp_v6_rcv_saddr(__sk) NULL | ||
299 | # define tcptw_sk_ipv6only(__sk) 0 | ||
300 | # define tcp_v6_ipv6only(__sk) 0 | ||
301 | #endif | ||
302 | |||
303 | extern kmem_cache_t *tcp_timewait_cachep; | ||
304 | |||
305 | static inline void tcp_tw_put(struct tcp_tw_bucket *tw) | ||
306 | { | ||
307 | if (atomic_dec_and_test(&tw->tw_refcnt)) { | ||
308 | #ifdef INET_REFCNT_DEBUG | ||
309 | printk(KERN_DEBUG "tw_bucket %p released\n", tw); | ||
310 | #endif | ||
311 | kmem_cache_free(tcp_timewait_cachep, tw); | ||
312 | } | ||
313 | } | ||
314 | |||
315 | extern atomic_t tcp_orphan_count; | ||
316 | extern int tcp_tw_count; | ||
317 | extern void tcp_time_wait(struct sock *sk, int state, int timeo); | ||
318 | extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw); | ||
319 | |||
320 | |||
321 | /* Socket demux engine toys. */ | ||
322 | #ifdef __BIG_ENDIAN | ||
323 | #define TCP_COMBINED_PORTS(__sport, __dport) \ | ||
324 | (((__u32)(__sport)<<16) | (__u32)(__dport)) | ||
325 | #else /* __LITTLE_ENDIAN */ | ||
326 | #define TCP_COMBINED_PORTS(__sport, __dport) \ | ||
327 | (((__u32)(__dport)<<16) | (__u32)(__sport)) | ||
328 | #endif | ||
329 | |||
330 | #if (BITS_PER_LONG == 64) | ||
331 | #ifdef __BIG_ENDIAN | ||
332 | #define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \ | ||
333 | __u64 __name = (((__u64)(__saddr))<<32)|((__u64)(__daddr)); | ||
334 | #else /* __LITTLE_ENDIAN */ | ||
335 | #define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \ | ||
336 | __u64 __name = (((__u64)(__daddr))<<32)|((__u64)(__saddr)); | ||
337 | #endif /* __BIG_ENDIAN */ | ||
338 | #define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\ | ||
339 | (((*((__u64 *)&(inet_sk(__sk)->daddr)))== (__cookie)) && \ | ||
340 | ((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \ | ||
341 | (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) | ||
342 | #define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\ | ||
343 | (((*((__u64 *)&(tcptw_sk(__sk)->tw_daddr))) == (__cookie)) && \ | ||
344 | ((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports)) && \ | ||
345 | (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) | ||
346 | #else /* 32-bit arch */ | ||
347 | #define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) | ||
348 | #define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\ | ||
349 | ((inet_sk(__sk)->daddr == (__saddr)) && \ | ||
350 | (inet_sk(__sk)->rcv_saddr == (__daddr)) && \ | ||
351 | ((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \ | ||
352 | (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) | ||
353 | #define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\ | ||
354 | ((tcptw_sk(__sk)->tw_daddr == (__saddr)) && \ | ||
355 | (tcptw_sk(__sk)->tw_rcv_saddr == (__daddr)) && \ | ||
356 | ((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports)) && \ | ||
357 | (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) | ||
358 | #endif /* 64-bit arch */ | ||
359 | |||
360 | #define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \ | ||
361 | (((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \ | ||
362 | ((__sk)->sk_family == AF_INET6) && \ | ||
363 | ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \ | ||
364 | ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \ | ||
365 | (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) | ||
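
/* A minimal sketch of how the match macros above are meant to be used
 * in an established-hash lookup (cf. __tcp_v4_lookup_established() in
 * tcp_ipv4.c); head, saddr, daddr, sport, hnum and dif are assumed to
 * come from the incoming segment.  On 64-bit machines the cookie lets
 * one compare cover both addresses; on 32-bit machines
 * TCP_V4_ADDR_COOKIE expands to nothing and TCP_IPV4_MATCH falls back
 * to two 32-bit compares.
 */
#if 0
	const __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	struct hlist_node *node;
	TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)

	sk_for_each(sk, node, &head->chain) {
		if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
			return sk;	/* full identity match */
	}
#endif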
366 | |||
367 | /* These can have wildcards, don't try too hard. */ | ||
368 | static __inline__ int tcp_lhashfn(unsigned short num) | ||
369 | { | ||
370 | return num & (TCP_LHTABLE_SIZE - 1); | ||
371 | } | ||
372 | |||
373 | static __inline__ int tcp_sk_listen_hashfn(struct sock *sk) | ||
374 | { | ||
375 | return tcp_lhashfn(inet_sk(sk)->num); | ||
376 | } | ||
377 | |||
378 | #define MAX_TCP_HEADER (128 + MAX_HEADER) | ||
379 | |||
380 | /* | ||
381 | * Never offer a window over 32767 without using window scaling. Some | ||
382 | * poor stacks do signed 16bit maths! | ||
383 | */ | ||
384 | #define MAX_TCP_WINDOW 32767U | ||
385 | |||
386 | /* Minimal accepted MSS. It is (60+60+8) - (20+20). */ | ||
387 | #define TCP_MIN_MSS 88U | ||
388 | |||
389 | /* Minimal RCV_MSS. */ | ||
390 | #define TCP_MIN_RCVMSS 536U | ||
391 | |||
392 | /* After receiving this amount of duplicate ACKs fast retransmit starts. */ | ||
393 | #define TCP_FASTRETRANS_THRESH 3 | ||
394 | |||
395 | /* Maximal reordering. */ | ||
396 | #define TCP_MAX_REORDERING 127 | ||
397 | |||
398 | /* Maximal number of ACKs sent quickly to accelerate slow-start. */ | ||
399 | #define TCP_MAX_QUICKACKS 16U | ||
400 | |||
401 | /* urg_data states */ | ||
402 | #define TCP_URG_VALID 0x0100 | ||
403 | #define TCP_URG_NOTYET 0x0200 | ||
404 | #define TCP_URG_READ 0x0400 | ||
405 | |||
406 | #define TCP_RETR1 3 /* | ||
407 | * This is how many retries it does before it | ||
408 | * tries to figure out if the gateway is | ||
409 | * down. Minimal RFC value is 3; it corresponds | ||
410 | * to ~3sec-8min depending on RTO. | ||
411 | */ | ||
412 | |||
413 | #define TCP_RETR2 15 /* | ||
414 | * This should take at least | ||
415 | * 90 minutes to time out. | ||
416 | * RFC1122 says that the limit is 100 sec. | ||
417 | * 15 is ~13-30min depending on RTO. | ||
418 | */ | ||
419 | |||
420 | #define TCP_SYN_RETRIES 5 /* number of times to retry active opening a | ||
421 | * connection: ~180sec is RFC minumum */ | ||
422 | |||
423 | #define TCP_SYNACK_RETRIES 5 /* number of times to retry passive opening a | ||
424 | * connection: ~180sec is RFC minumum */ | ||
425 | |||
426 | |||
427 | #define TCP_ORPHAN_RETRIES 7 /* number of times to retry on an orphaned | ||
428 | * socket. 7 is ~50sec-16min. | ||
429 | */ | ||
430 | |||
431 | |||
432 | #define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT | ||
433 | * state, about 60 seconds */ | ||
434 | #define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN | ||
435 | /* BSD style FIN_WAIT2 deadlock breaker. | ||
436 | * It used to be 3min, new value is 60sec, | ||
437 | * to combine FIN-WAIT-2 timeout with | ||
438 | * TIME-WAIT timer. | ||
439 | */ | ||
440 | |||
441 | #define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */ | ||
442 | #if HZ >= 100 | ||
443 | #define TCP_DELACK_MIN ((unsigned)(HZ/25)) /* minimal time to delay before sending an ACK */ | ||
444 | #define TCP_ATO_MIN ((unsigned)(HZ/25)) | ||
445 | #else | ||
446 | #define TCP_DELACK_MIN 4U | ||
447 | #define TCP_ATO_MIN 4U | ||
448 | #endif | ||
449 | #define TCP_RTO_MAX ((unsigned)(120*HZ)) | ||
450 | #define TCP_RTO_MIN ((unsigned)(HZ/5)) | ||
451 | #define TCP_TIMEOUT_INIT ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value */ | ||
452 | |||
453 | #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes | ||
454 | * for local resources. | ||
455 | */ | ||
456 | |||
457 | #define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */ | ||
458 | #define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */ | ||
459 | #define TCP_KEEPALIVE_INTVL (75*HZ) | ||
460 | |||
461 | #define MAX_TCP_KEEPIDLE 32767 | ||
462 | #define MAX_TCP_KEEPINTVL 32767 | ||
463 | #define MAX_TCP_KEEPCNT 127 | ||
464 | #define MAX_TCP_SYNCNT 127 | ||
465 | |||
466 | #define TCP_SYNQ_INTERVAL (HZ/5) /* Period of SYNACK timer */ | ||
467 | #define TCP_SYNQ_HSIZE 512 /* Size of SYNACK hash table */ | ||
468 | |||
469 | #define TCP_PAWS_24DAYS (60 * 60 * 24 * 24) | ||
470 | #define TCP_PAWS_MSL 60 /* Per-host timestamps are invalidated | ||
471 | * after this time. It should be equal | ||
472 | * (or greater than) TCP_TIMEWAIT_LEN | ||
473 | * to provide reliability equal to one | ||
474 | * provided by timewait state. | ||
475 | */ | ||
476 | #define TCP_PAWS_WINDOW 1 /* Replay window for per-host | ||
477 | * timestamps. It must be less than | ||
478 | * minimal timewait lifetime. | ||
479 | */ | ||
480 | |||
481 | #define TCP_TW_RECYCLE_SLOTS_LOG 5 | ||
482 | #define TCP_TW_RECYCLE_SLOTS (1<<TCP_TW_RECYCLE_SLOTS_LOG) | ||
483 | |||
484 | /* If time > 4sec, it is "slow" path, no recycling is required, | ||
485 | so that we select tick to get range about 4 seconds. | ||
486 | */ | ||
487 | |||
488 | #if HZ <= 16 || HZ > 4096 | ||
489 | # error Unsupported: HZ <= 16 or HZ > 4096 | ||
490 | #elif HZ <= 32 | ||
491 | # define TCP_TW_RECYCLE_TICK (5+2-TCP_TW_RECYCLE_SLOTS_LOG) | ||
492 | #elif HZ <= 64 | ||
493 | # define TCP_TW_RECYCLE_TICK (6+2-TCP_TW_RECYCLE_SLOTS_LOG) | ||
494 | #elif HZ <= 128 | ||
495 | # define TCP_TW_RECYCLE_TICK (7+2-TCP_TW_RECYCLE_SLOTS_LOG) | ||
496 | #elif HZ <= 256 | ||
497 | # define TCP_TW_RECYCLE_TICK (8+2-TCP_TW_RECYCLE_SLOTS_LOG) | ||
498 | #elif HZ <= 512 | ||
499 | # define TCP_TW_RECYCLE_TICK (9+2-TCP_TW_RECYCLE_SLOTS_LOG) | ||
500 | #elif HZ <= 1024 | ||
501 | # define TCP_TW_RECYCLE_TICK (10+2-TCP_TW_RECYCLE_SLOTS_LOG) | ||
502 | #elif HZ <= 2048 | ||
503 | # define TCP_TW_RECYCLE_TICK (11+2-TCP_TW_RECYCLE_SLOTS_LOG) | ||
504 | #else | ||
505 | # define TCP_TW_RECYCLE_TICK (12+2-TCP_TW_RECYCLE_SLOTS_LOG) | ||
506 | #endif | ||
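
/* Worked example: with HZ == 1000 the ladder above picks
 * TCP_TW_RECYCLE_TICK = 10 + 2 - TCP_TW_RECYCLE_SLOTS_LOG = 7, so one
 * recycle slot covers 2^7 = 128 jiffies (~128ms) and all
 * TCP_TW_RECYCLE_SLOTS = 32 slots together span 4096 jiffies, i.e.
 * the ~4 second range asked for above.
 */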
507 | |||
508 | #define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation | ||
509 | * max_cwnd = snd_cwnd * beta | ||
510 | */ | ||
511 | #define BICTCP_MAX_INCREMENT 32 /* | ||
512 | * Limit on the amount of | ||
513 | * increment allowed during | ||
514 | * binary search. | ||
515 | */ | ||
516 | #define BICTCP_FUNC_OF_MIN_INCR 11 /* | ||
517 | * log(B/Smin)/log(B/(B-1))+1, | ||
518 | * Smin:min increment | ||
519 | * B:log factor | ||
520 | */ | ||
521 | #define BICTCP_B 4 /* | ||
522 | * In binary search, | ||
523 | * go to point (max+min)/N | ||
524 | */ | ||
525 | |||
526 | /* | ||
527 | * TCP option | ||
528 | */ | ||
529 | |||
530 | #define TCPOPT_NOP 1 /* Padding */ | ||
531 | #define TCPOPT_EOL 0 /* End of options */ | ||
532 | #define TCPOPT_MSS 2 /* Segment size negotiating */ | ||
533 | #define TCPOPT_WINDOW 3 /* Window scaling */ | ||
534 | #define TCPOPT_SACK_PERM 4 /* SACK Permitted */ | ||
535 | #define TCPOPT_SACK 5 /* SACK Block */ | ||
536 | #define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */ | ||
537 | |||
538 | /* | ||
539 | * TCP option lengths | ||
540 | */ | ||
541 | |||
542 | #define TCPOLEN_MSS 4 | ||
543 | #define TCPOLEN_WINDOW 3 | ||
544 | #define TCPOLEN_SACK_PERM 2 | ||
545 | #define TCPOLEN_TIMESTAMP 10 | ||
546 | |||
547 | /* But this is what stacks really send out. */ | ||
548 | #define TCPOLEN_TSTAMP_ALIGNED 12 | ||
549 | #define TCPOLEN_WSCALE_ALIGNED 4 | ||
550 | #define TCPOLEN_SACKPERM_ALIGNED 4 | ||
551 | #define TCPOLEN_SACK_BASE 2 | ||
552 | #define TCPOLEN_SACK_BASE_ALIGNED 4 | ||
553 | #define TCPOLEN_SACK_PERBLOCK 8 | ||
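
/* As an illustration of the "aligned" lengths: the TCPOLEN_TSTAMP_ALIGNED
 * == 12 bytes actually emitted for the timestamp option are the 10-byte
 * option padded to a 32-bit boundary with two leading NOPs, written as
 * three words.  A sketch of that layout (cf. tcp_output.c), assuming ptr
 * points into the option area and tstamp/ts_recent hold the values to
 * send:
 */
#if 0
	*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
		       (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
	*ptr++ = htonl(tstamp);		/* TSval */
	*ptr++ = htonl(ts_recent);	/* TSecr */
#endif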
554 | |||
555 | #define TCP_TIME_RETRANS 1 /* Retransmit timer */ | ||
556 | #define TCP_TIME_DACK 2 /* Delayed ack timer */ | ||
557 | #define TCP_TIME_PROBE0 3 /* Zero window probe timer */ | ||
558 | #define TCP_TIME_KEEPOPEN 4 /* Keepalive timer */ | ||
559 | |||
560 | /* Flags in tp->nonagle */ | ||
561 | #define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */ | ||
562 | #define TCP_NAGLE_CORK 2 /* Socket is corked */ | ||
563 | #define TCP_NAGLE_PUSH 4 /* Cork is overriden for already queued data */ | ||
564 | |||
565 | /* sysctl variables for tcp */ | ||
566 | extern int sysctl_max_syn_backlog; | ||
567 | extern int sysctl_tcp_timestamps; | ||
568 | extern int sysctl_tcp_window_scaling; | ||
569 | extern int sysctl_tcp_sack; | ||
570 | extern int sysctl_tcp_fin_timeout; | ||
571 | extern int sysctl_tcp_tw_recycle; | ||
572 | extern int sysctl_tcp_keepalive_time; | ||
573 | extern int sysctl_tcp_keepalive_probes; | ||
574 | extern int sysctl_tcp_keepalive_intvl; | ||
575 | extern int sysctl_tcp_syn_retries; | ||
576 | extern int sysctl_tcp_synack_retries; | ||
577 | extern int sysctl_tcp_retries1; | ||
578 | extern int sysctl_tcp_retries2; | ||
579 | extern int sysctl_tcp_orphan_retries; | ||
580 | extern int sysctl_tcp_syncookies; | ||
581 | extern int sysctl_tcp_retrans_collapse; | ||
582 | extern int sysctl_tcp_stdurg; | ||
583 | extern int sysctl_tcp_rfc1337; | ||
584 | extern int sysctl_tcp_abort_on_overflow; | ||
585 | extern int sysctl_tcp_max_orphans; | ||
586 | extern int sysctl_tcp_max_tw_buckets; | ||
587 | extern int sysctl_tcp_fack; | ||
588 | extern int sysctl_tcp_reordering; | ||
589 | extern int sysctl_tcp_ecn; | ||
590 | extern int sysctl_tcp_dsack; | ||
591 | extern int sysctl_tcp_mem[3]; | ||
592 | extern int sysctl_tcp_wmem[3]; | ||
593 | extern int sysctl_tcp_rmem[3]; | ||
594 | extern int sysctl_tcp_app_win; | ||
595 | extern int sysctl_tcp_adv_win_scale; | ||
596 | extern int sysctl_tcp_tw_reuse; | ||
597 | extern int sysctl_tcp_frto; | ||
598 | extern int sysctl_tcp_low_latency; | ||
599 | extern int sysctl_tcp_westwood; | ||
600 | extern int sysctl_tcp_vegas_cong_avoid; | ||
601 | extern int sysctl_tcp_vegas_alpha; | ||
602 | extern int sysctl_tcp_vegas_beta; | ||
603 | extern int sysctl_tcp_vegas_gamma; | ||
604 | extern int sysctl_tcp_nometrics_save; | ||
605 | extern int sysctl_tcp_bic; | ||
606 | extern int sysctl_tcp_bic_fast_convergence; | ||
607 | extern int sysctl_tcp_bic_low_window; | ||
608 | extern int sysctl_tcp_bic_beta; | ||
609 | extern int sysctl_tcp_moderate_rcvbuf; | ||
610 | extern int sysctl_tcp_tso_win_divisor; | ||
611 | |||
612 | extern atomic_t tcp_memory_allocated; | ||
613 | extern atomic_t tcp_sockets_allocated; | ||
614 | extern int tcp_memory_pressure; | ||
615 | |||
616 | struct open_request; | ||
617 | |||
618 | struct or_calltable { | ||
619 | int family; | ||
620 | int (*rtx_syn_ack) (struct sock *sk, struct open_request *req, struct dst_entry*); | ||
621 | void (*send_ack) (struct sk_buff *skb, struct open_request *req); | ||
622 | void (*destructor) (struct open_request *req); | ||
623 | void (*send_reset) (struct sk_buff *skb); | ||
624 | }; | ||
625 | |||
626 | struct tcp_v4_open_req { | ||
627 | __u32 loc_addr; | ||
628 | __u32 rmt_addr; | ||
629 | struct ip_options *opt; | ||
630 | }; | ||
631 | |||
632 | #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) | ||
633 | struct tcp_v6_open_req { | ||
634 | struct in6_addr loc_addr; | ||
635 | struct in6_addr rmt_addr; | ||
636 | struct sk_buff *pktopts; | ||
637 | int iif; | ||
638 | }; | ||
639 | #endif | ||
640 | |||
641 | /* this structure is too big */ | ||
642 | struct open_request { | ||
643 | struct open_request *dl_next; /* Must be first member! */ | ||
644 | __u32 rcv_isn; | ||
645 | __u32 snt_isn; | ||
646 | __u16 rmt_port; | ||
647 | __u16 mss; | ||
648 | __u8 retrans; | ||
649 | __u8 __pad; | ||
650 | __u16 snd_wscale : 4, | ||
651 | rcv_wscale : 4, | ||
652 | tstamp_ok : 1, | ||
653 | sack_ok : 1, | ||
654 | wscale_ok : 1, | ||
655 | ecn_ok : 1, | ||
656 | acked : 1; | ||
657 | /* The following two fields can be easily recomputed I think -AK */ | ||
658 | __u32 window_clamp; /* window clamp at creation time */ | ||
659 | __u32 rcv_wnd; /* rcv_wnd offered first time */ | ||
660 | __u32 ts_recent; | ||
661 | unsigned long expires; | ||
662 | struct or_calltable *class; | ||
663 | struct sock *sk; | ||
664 | union { | ||
665 | struct tcp_v4_open_req v4_req; | ||
666 | #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) | ||
667 | struct tcp_v6_open_req v6_req; | ||
668 | #endif | ||
669 | } af; | ||
670 | }; | ||
671 | |||
672 | /* SLAB cache for open requests. */ | ||
673 | extern kmem_cache_t *tcp_openreq_cachep; | ||
674 | |||
675 | #define tcp_openreq_alloc() kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC) | ||
676 | #define tcp_openreq_fastfree(req) kmem_cache_free(tcp_openreq_cachep, req) | ||
677 | |||
678 | static inline void tcp_openreq_free(struct open_request *req) | ||
679 | { | ||
680 | req->class->destructor(req); | ||
681 | tcp_openreq_fastfree(req); | ||
682 | } | ||
683 | |||
684 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
685 | #define TCP_INET_FAMILY(fam) ((fam) == AF_INET) | ||
686 | #else | ||
687 | #define TCP_INET_FAMILY(fam) 1 | ||
688 | #endif | ||
689 | |||
690 | /* | ||
691 | * Pointers to address related TCP functions | ||
692 | * (i.e. things that depend on the address family) | ||
693 | */ | ||
694 | |||
695 | struct tcp_func { | ||
696 | int (*queue_xmit) (struct sk_buff *skb, | ||
697 | int ipfragok); | ||
698 | |||
699 | void (*send_check) (struct sock *sk, | ||
700 | struct tcphdr *th, | ||
701 | int len, | ||
702 | struct sk_buff *skb); | ||
703 | |||
704 | int (*rebuild_header) (struct sock *sk); | ||
705 | |||
706 | int (*conn_request) (struct sock *sk, | ||
707 | struct sk_buff *skb); | ||
708 | |||
709 | struct sock * (*syn_recv_sock) (struct sock *sk, | ||
710 | struct sk_buff *skb, | ||
711 | struct open_request *req, | ||
712 | struct dst_entry *dst); | ||
713 | |||
714 | int (*remember_stamp) (struct sock *sk); | ||
715 | |||
716 | __u16 net_header_len; | ||
717 | |||
718 | int (*setsockopt) (struct sock *sk, | ||
719 | int level, | ||
720 | int optname, | ||
721 | char __user *optval, | ||
722 | int optlen); | ||
723 | |||
724 | int (*getsockopt) (struct sock *sk, | ||
725 | int level, | ||
726 | int optname, | ||
727 | char __user *optval, | ||
728 | int __user *optlen); | ||
729 | |||
730 | |||
731 | void (*addr2sockaddr) (struct sock *sk, | ||
732 | struct sockaddr *); | ||
733 | |||
734 | int sockaddr_len; | ||
735 | }; | ||
736 | |||
737 | /* | ||
738 | * The next routines deal with comparing 32 bit unsigned ints | ||
739 | * and worry about wraparound (automatic with unsigned arithmetic). | ||
740 | */ | ||
741 | |||
742 | static inline int before(__u32 seq1, __u32 seq2) | ||
743 | { | ||
744 | return (__s32)(seq1-seq2) < 0; | ||
745 | } | ||
746 | |||
747 | static inline int after(__u32 seq1, __u32 seq2) | ||
748 | { | ||
749 | return (__s32)(seq2-seq1) < 0; | ||
750 | } | ||
751 | |||
752 | |||
753 | /* is s2<=s1<=s3 ? */ | ||
754 | static inline int between(__u32 seq1, __u32 seq2, __u32 seq3) | ||
755 | { | ||
756 | return seq3 - seq2 >= seq1 - seq2; | ||
757 | } | ||
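
/* Wraparound example: for seq1 = 1 and seq2 = 0xffffffff the unsigned
 * difference seq2 - seq1 is 0xfffffffe, which is negative as an __s32,
 * so after(1, 0xffffffff) is true: sequence 1 really is two bytes
 * "after" 0xffffffff once the sequence space wraps.  A naive
 * seq1 > seq2 comparison would get this wrong.
 */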
758 | |||
759 | |||
760 | extern struct proto tcp_prot; | ||
761 | |||
762 | DECLARE_SNMP_STAT(struct tcp_mib, tcp_statistics); | ||
763 | #define TCP_INC_STATS(field) SNMP_INC_STATS(tcp_statistics, field) | ||
764 | #define TCP_INC_STATS_BH(field) SNMP_INC_STATS_BH(tcp_statistics, field) | ||
765 | #define TCP_INC_STATS_USER(field) SNMP_INC_STATS_USER(tcp_statistics, field) | ||
766 | #define TCP_DEC_STATS(field) SNMP_DEC_STATS(tcp_statistics, field) | ||
767 | #define TCP_ADD_STATS_BH(field, val) SNMP_ADD_STATS_BH(tcp_statistics, field, val) | ||
768 | #define TCP_ADD_STATS_USER(field, val) SNMP_ADD_STATS_USER(tcp_statistics, field, val) | ||
769 | |||
770 | extern void tcp_put_port(struct sock *sk); | ||
771 | extern void tcp_inherit_port(struct sock *sk, struct sock *child); | ||
772 | |||
773 | extern void tcp_v4_err(struct sk_buff *skb, u32); | ||
774 | |||
775 | extern void tcp_shutdown (struct sock *sk, int how); | ||
776 | |||
777 | extern int tcp_v4_rcv(struct sk_buff *skb); | ||
778 | |||
779 | extern int tcp_v4_remember_stamp(struct sock *sk); | ||
780 | |||
781 | extern int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw); | ||
782 | |||
783 | extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, | ||
784 | struct msghdr *msg, size_t size); | ||
785 | extern ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); | ||
786 | |||
787 | extern int tcp_ioctl(struct sock *sk, | ||
788 | int cmd, | ||
789 | unsigned long arg); | ||
790 | |||
791 | extern int tcp_rcv_state_process(struct sock *sk, | ||
792 | struct sk_buff *skb, | ||
793 | struct tcphdr *th, | ||
794 | unsigned len); | ||
795 | |||
796 | extern int tcp_rcv_established(struct sock *sk, | ||
797 | struct sk_buff *skb, | ||
798 | struct tcphdr *th, | ||
799 | unsigned len); | ||
800 | |||
801 | extern void tcp_rcv_space_adjust(struct sock *sk); | ||
802 | |||
803 | enum tcp_ack_state_t | ||
804 | { | ||
805 | TCP_ACK_SCHED = 1, | ||
806 | TCP_ACK_TIMER = 2, | ||
807 | TCP_ACK_PUSHED= 4 | ||
808 | }; | ||
809 | |||
810 | static inline void tcp_schedule_ack(struct tcp_sock *tp) | ||
811 | { | ||
812 | tp->ack.pending |= TCP_ACK_SCHED; | ||
813 | } | ||
814 | |||
815 | static inline int tcp_ack_scheduled(struct tcp_sock *tp) | ||
816 | { | ||
817 | return tp->ack.pending&TCP_ACK_SCHED; | ||
818 | } | ||
819 | |||
820 | static __inline__ void tcp_dec_quickack_mode(struct tcp_sock *tp) | ||
821 | { | ||
822 | if (tp->ack.quick && --tp->ack.quick == 0) { | ||
823 | /* Leaving quickack mode we deflate ATO. */ | ||
824 | tp->ack.ato = TCP_ATO_MIN; | ||
825 | } | ||
826 | } | ||
827 | |||
828 | extern void tcp_enter_quickack_mode(struct tcp_sock *tp); | ||
829 | |||
830 | static __inline__ void tcp_delack_init(struct tcp_sock *tp) | ||
831 | { | ||
832 | memset(&tp->ack, 0, sizeof(tp->ack)); | ||
833 | } | ||
834 | |||
835 | static inline void tcp_clear_options(struct tcp_options_received *rx_opt) | ||
836 | { | ||
837 | rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0; | ||
838 | } | ||
839 | |||
840 | enum tcp_tw_status | ||
841 | { | ||
842 | TCP_TW_SUCCESS = 0, | ||
843 | TCP_TW_RST = 1, | ||
844 | TCP_TW_ACK = 2, | ||
845 | TCP_TW_SYN = 3 | ||
846 | }; | ||
847 | |||
848 | |||
849 | extern enum tcp_tw_status tcp_timewait_state_process(struct tcp_tw_bucket *tw, | ||
850 | struct sk_buff *skb, | ||
851 | struct tcphdr *th, | ||
852 | unsigned len); | ||
853 | |||
854 | extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb, | ||
855 | struct open_request *req, | ||
856 | struct open_request **prev); | ||
857 | extern int tcp_child_process(struct sock *parent, | ||
858 | struct sock *child, | ||
859 | struct sk_buff *skb); | ||
860 | extern void tcp_enter_frto(struct sock *sk); | ||
861 | extern void tcp_enter_loss(struct sock *sk, int how); | ||
862 | extern void tcp_clear_retrans(struct tcp_sock *tp); | ||
863 | extern void tcp_update_metrics(struct sock *sk); | ||
864 | |||
865 | extern void tcp_close(struct sock *sk, | ||
866 | long timeout); | ||
867 | extern struct sock * tcp_accept(struct sock *sk, int flags, int *err); | ||
868 | extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait); | ||
869 | |||
870 | extern int tcp_getsockopt(struct sock *sk, int level, | ||
871 | int optname, | ||
872 | char __user *optval, | ||
873 | int __user *optlen); | ||
874 | extern int tcp_setsockopt(struct sock *sk, int level, | ||
875 | int optname, char __user *optval, | ||
876 | int optlen); | ||
877 | extern void tcp_set_keepalive(struct sock *sk, int val); | ||
878 | extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, | ||
879 | struct msghdr *msg, | ||
880 | size_t len, int nonblock, | ||
881 | int flags, int *addr_len); | ||
882 | |||
883 | extern int tcp_listen_start(struct sock *sk); | ||
884 | |||
885 | extern void tcp_parse_options(struct sk_buff *skb, | ||
886 | struct tcp_options_received *opt_rx, | ||
887 | int estab); | ||
888 | |||
889 | /* | ||
890 | * TCP v4 functions exported for the inet6 API | ||
891 | */ | ||
892 | |||
893 | extern int tcp_v4_rebuild_header(struct sock *sk); | ||
894 | |||
895 | extern int tcp_v4_build_header(struct sock *sk, | ||
896 | struct sk_buff *skb); | ||
897 | |||
898 | extern void tcp_v4_send_check(struct sock *sk, | ||
899 | struct tcphdr *th, int len, | ||
900 | struct sk_buff *skb); | ||
901 | |||
902 | extern int tcp_v4_conn_request(struct sock *sk, | ||
903 | struct sk_buff *skb); | ||
904 | |||
905 | extern struct sock * tcp_create_openreq_child(struct sock *sk, | ||
906 | struct open_request *req, | ||
907 | struct sk_buff *skb); | ||
908 | |||
909 | extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, | ||
910 | struct sk_buff *skb, | ||
911 | struct open_request *req, | ||
912 | struct dst_entry *dst); | ||
913 | |||
914 | extern int tcp_v4_do_rcv(struct sock *sk, | ||
915 | struct sk_buff *skb); | ||
916 | |||
917 | extern int tcp_v4_connect(struct sock *sk, | ||
918 | struct sockaddr *uaddr, | ||
919 | int addr_len); | ||
920 | |||
921 | extern int tcp_connect(struct sock *sk); | ||
922 | |||
923 | extern struct sk_buff * tcp_make_synack(struct sock *sk, | ||
924 | struct dst_entry *dst, | ||
925 | struct open_request *req); | ||
926 | |||
927 | extern int tcp_disconnect(struct sock *sk, int flags); | ||
928 | |||
929 | extern void tcp_unhash(struct sock *sk); | ||
930 | |||
931 | extern int tcp_v4_hash_connecting(struct sock *sk); | ||
932 | |||
933 | |||
934 | /* From syncookies.c */ | ||
935 | extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | ||
936 | struct ip_options *opt); | ||
937 | extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, | ||
938 | __u16 *mss); | ||
939 | |||
940 | /* tcp_output.c */ | ||
941 | |||
942 | extern int tcp_write_xmit(struct sock *, int nonagle); | ||
943 | extern int tcp_retransmit_skb(struct sock *, struct sk_buff *); | ||
944 | extern void tcp_xmit_retransmit_queue(struct sock *); | ||
945 | extern void tcp_simple_retransmit(struct sock *); | ||
946 | extern int tcp_trim_head(struct sock *, struct sk_buff *, u32); | ||
947 | |||
948 | extern void tcp_send_probe0(struct sock *); | ||
949 | extern void tcp_send_partial(struct sock *); | ||
950 | extern int tcp_write_wakeup(struct sock *); | ||
951 | extern void tcp_send_fin(struct sock *sk); | ||
952 | extern void tcp_send_active_reset(struct sock *sk, int priority); | ||
953 | extern int tcp_send_synack(struct sock *); | ||
954 | extern void tcp_push_one(struct sock *, unsigned mss_now); | ||
955 | extern void tcp_send_ack(struct sock *sk); | ||
956 | extern void tcp_send_delayed_ack(struct sock *sk); | ||
957 | |||
958 | /* tcp_timer.c */ | ||
959 | extern void tcp_init_xmit_timers(struct sock *); | ||
960 | extern void tcp_clear_xmit_timers(struct sock *); | ||
961 | |||
962 | extern void tcp_delete_keepalive_timer(struct sock *); | ||
963 | extern void tcp_reset_keepalive_timer(struct sock *, unsigned long); | ||
964 | extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu); | ||
965 | extern unsigned int tcp_current_mss(struct sock *sk, int large); | ||
966 | |||
967 | #ifdef TCP_DEBUG | ||
968 | extern const char tcp_timer_bug_msg[]; | ||
969 | #endif | ||
970 | |||
971 | /* tcp_diag.c */ | ||
972 | extern void tcp_get_info(struct sock *, struct tcp_info *); | ||
973 | |||
974 | /* Read 'sendfile()'-style from a TCP socket */ | ||
975 | typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, | ||
976 | unsigned int, size_t); | ||
977 | extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, | ||
978 | sk_read_actor_t recv_actor); | ||
979 | |||
980 | static inline void tcp_clear_xmit_timer(struct sock *sk, int what) | ||
981 | { | ||
982 | struct tcp_sock *tp = tcp_sk(sk); | ||
983 | |||
984 | switch (what) { | ||
985 | case TCP_TIME_RETRANS: | ||
986 | case TCP_TIME_PROBE0: | ||
987 | tp->pending = 0; | ||
988 | |||
989 | #ifdef TCP_CLEAR_TIMERS | ||
990 | sk_stop_timer(sk, &tp->retransmit_timer); | ||
991 | #endif | ||
992 | break; | ||
993 | case TCP_TIME_DACK: | ||
994 | tp->ack.blocked = 0; | ||
995 | tp->ack.pending = 0; | ||
996 | |||
997 | #ifdef TCP_CLEAR_TIMERS | ||
998 | sk_stop_timer(sk, &tp->delack_timer); | ||
999 | #endif | ||
1000 | break; | ||
1001 | default: | ||
1002 | #ifdef TCP_DEBUG | ||
1003 | printk(tcp_timer_bug_msg); | ||
1004 | #endif | ||
1005 | return; | ||
1006 | }; | ||
1007 | |||
1008 | } | ||
1009 | |||
1010 | /* | ||
1011 | * Reset the retransmission timer | ||
1012 | */ | ||
1013 | static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when) | ||
1014 | { | ||
1015 | struct tcp_sock *tp = tcp_sk(sk); | ||
1016 | |||
1017 | if (when > TCP_RTO_MAX) { | ||
1018 | #ifdef TCP_DEBUG | ||
1019 | printk(KERN_DEBUG "reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, current_text_addr()); | ||
1020 | #endif | ||
1021 | when = TCP_RTO_MAX; | ||
1022 | } | ||
1023 | |||
1024 | switch (what) { | ||
1025 | case TCP_TIME_RETRANS: | ||
1026 | case TCP_TIME_PROBE0: | ||
1027 | tp->pending = what; | ||
1028 | tp->timeout = jiffies+when; | ||
1029 | sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout); | ||
1030 | break; | ||
1031 | |||
1032 | case TCP_TIME_DACK: | ||
1033 | tp->ack.pending |= TCP_ACK_TIMER; | ||
1034 | tp->ack.timeout = jiffies+when; | ||
1035 | sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout); | ||
1036 | break; | ||
1037 | |||
1038 | default: | ||
1039 | #ifdef TCP_DEBUG | ||
1040 | printk(tcp_timer_bug_msg); | ||
1041 | #endif | ||
1042 | return; | ||
1043 | }; | ||
1044 | } | ||
1045 | |||
1046 | /* Initialize RCV_MSS value. | ||
1047 | * RCV_MSS is an our guess about MSS used by the peer. | ||
1048 | * We haven't any direct information about the MSS. | ||
1049 | * It's better to underestimate the RCV_MSS rather than overestimate. | ||
1050 | * Overestimations make us ACKing less frequently than needed. | ||
1051 | * Underestimations are more easy to detect and fix by tcp_measure_rcv_mss(). | ||
1052 | */ | ||
1053 | |||
1054 | static inline void tcp_initialize_rcv_mss(struct sock *sk) | ||
1055 | { | ||
1056 | struct tcp_sock *tp = tcp_sk(sk); | ||
1057 | unsigned int hint = min(tp->advmss, tp->mss_cache_std); | ||
1058 | |||
1059 | hint = min(hint, tp->rcv_wnd/2); | ||
1060 | hint = min(hint, TCP_MIN_RCVMSS); | ||
1061 | hint = max(hint, TCP_MIN_MSS); | ||
1062 | |||
1063 | tp->ack.rcv_mss = hint; | ||
1064 | } | ||
1065 | |||
1066 | static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd) | ||
1067 | { | ||
1068 | tp->pred_flags = htonl((tp->tcp_header_len << 26) | | ||
1069 | ntohl(TCP_FLAG_ACK) | | ||
1070 | snd_wnd); | ||
1071 | } | ||
1072 | |||
1073 | static __inline__ void tcp_fast_path_on(struct tcp_sock *tp) | ||
1074 | { | ||
1075 | __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale); | ||
1076 | } | ||
1077 | |||
1078 | static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp) | ||
1079 | { | ||
1080 | if (skb_queue_len(&tp->out_of_order_queue) == 0 && | ||
1081 | tp->rcv_wnd && | ||
1082 | atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && | ||
1083 | !tp->urg_data) | ||
1084 | tcp_fast_path_on(tp); | ||
1085 | } | ||
1086 | |||
1087 | /* Compute the actual receive window we are currently advertising. | ||
1088 | * Rcv_nxt can be after the window if our peer push more data | ||
1089 | * than the offered window. | ||
1090 | */ | ||
1091 | static __inline__ u32 tcp_receive_window(const struct tcp_sock *tp) | ||
1092 | { | ||
1093 | s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt; | ||
1094 | |||
1095 | if (win < 0) | ||
1096 | win = 0; | ||
1097 | return (u32) win; | ||
1098 | } | ||
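
/* Example: rcv_wup = 1000 and rcv_wnd = 500 advertised a right edge of
 * 1500; with rcv_nxt = 1300 we still advertise 200 bytes.  Had the peer
 * pushed through to rcv_nxt = 1600, the raw result would be -100 and we
 * advertise 0 instead of a wrapped huge window.
 */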
1099 | |||
1100 | /* Choose a new window, without checks for shrinking, and without | ||
1101 | * scaling applied to the result. The caller does these things | ||
1102 | * if necessary. This is a "raw" window selection. | ||
1103 | */ | ||
1104 | extern u32 __tcp_select_window(struct sock *sk); | ||
1105 | |||
1106 | /* TCP timestamps are only 32-bits, this causes a slight | ||
1107 | * complication on 64-bit systems since we store a snapshot | ||
1108 | * of jiffies in the buffer control blocks below. We decidely | ||
1109 | * only use of the low 32-bits of jiffies and hide the ugly | ||
1110 | * casts with the following macro. | ||
1111 | */ | ||
1112 | #define tcp_time_stamp ((__u32)(jiffies)) | ||
1113 | |||
1114 | /* This is what the send packet queueing engine uses to pass | ||
1115 | * TCP per-packet control information to the transmission | ||
1116 | * code. We also store the host-order sequence numbers in | ||
1117 | * here too. This is 36 bytes on 32-bit architectures, | ||
1118 | * 40 bytes on 64-bit machines, if this grows please adjust | ||
1119 | * skbuff.h:skbuff->cb[xxx] size appropriately. | ||
1120 | */ | ||
1121 | struct tcp_skb_cb { | ||
1122 | union { | ||
1123 | struct inet_skb_parm h4; | ||
1124 | #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) | ||
1125 | struct inet6_skb_parm h6; | ||
1126 | #endif | ||
1127 | } header; /* For incoming frames */ | ||
1128 | __u32 seq; /* Starting sequence number */ | ||
1129 | __u32 end_seq; /* SEQ + FIN + SYN + datalen */ | ||
1130 | __u32 when; /* used to compute rtt's */ | ||
1131 | __u8 flags; /* TCP header flags. */ | ||
1132 | |||
1133 | /* NOTE: These must match up to the flags byte in a | ||
1134 | * real TCP header. | ||
1135 | */ | ||
1136 | #define TCPCB_FLAG_FIN 0x01 | ||
1137 | #define TCPCB_FLAG_SYN 0x02 | ||
1138 | #define TCPCB_FLAG_RST 0x04 | ||
1139 | #define TCPCB_FLAG_PSH 0x08 | ||
1140 | #define TCPCB_FLAG_ACK 0x10 | ||
1141 | #define TCPCB_FLAG_URG 0x20 | ||
1142 | #define TCPCB_FLAG_ECE 0x40 | ||
1143 | #define TCPCB_FLAG_CWR 0x80 | ||
1144 | |||
1145 | __u8 sacked; /* State flags for SACK/FACK. */ | ||
1146 | #define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */ | ||
1147 | #define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */ | ||
1148 | #define TCPCB_LOST 0x04 /* SKB is lost */ | ||
1149 | #define TCPCB_TAGBITS 0x07 /* All tag bits */ | ||
1150 | |||
1151 | #define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */ | ||
1152 | #define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS) | ||
1153 | |||
1154 | #define TCPCB_URG 0x20 /* Urgent pointer advenced here */ | ||
1155 | |||
1156 | #define TCPCB_AT_TAIL (TCPCB_URG) | ||
1157 | |||
1158 | __u16 urg_ptr; /* Valid w/URG flags is set. */ | ||
1159 | __u32 ack_seq; /* Sequence number ACK'd */ | ||
1160 | }; | ||
1161 | |||
1162 | #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0])) | ||
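
/* Typical use of the control block when a segment is queued for
 * transmit (a condensed, illustrative sketch of what the sendmsg-side
 * and transmit-side code does, not a verbatim excerpt; copy stands in
 * for the number of bytes placed in this skb):
 */
#if 0
	TCP_SKB_CB(skb)->seq     = tp->write_seq;
	TCP_SKB_CB(skb)->end_seq = tp->write_seq + copy;
	TCP_SKB_CB(skb)->flags   = TCPCB_FLAG_ACK;
	TCP_SKB_CB(skb)->sacked  = 0;
	TCP_SKB_CB(skb)->when    = tcp_time_stamp;	/* read back for RTT */
#endif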
1163 | |||
1164 | #include <net/tcp_ecn.h> | ||
1165 | |||
1166 | /* Due to TSO, an SKB can be composed of multiple actual | ||
1167 | * packets. To keep these tracked properly, we use this. | ||
1168 | */ | ||
1169 | static inline int tcp_skb_pcount(const struct sk_buff *skb) | ||
1170 | { | ||
1171 | return skb_shinfo(skb)->tso_segs; | ||
1172 | } | ||
1173 | |||
1174 | /* This is valid iff tcp_skb_pcount() > 1. */ | ||
1175 | static inline int tcp_skb_mss(const struct sk_buff *skb) | ||
1176 | { | ||
1177 | return skb_shinfo(skb)->tso_size; | ||
1178 | } | ||
1179 | |||
1180 | static inline void tcp_dec_pcount_approx(__u32 *count, | ||
1181 | const struct sk_buff *skb) | ||
1182 | { | ||
1183 | if (*count) { | ||
1184 | *count -= tcp_skb_pcount(skb); | ||
1185 | if ((int)*count < 0) | ||
1186 | *count = 0; | ||
1187 | } | ||
1188 | } | ||
1189 | |||
1190 | static inline void tcp_packets_out_inc(struct sock *sk, | ||
1191 | struct tcp_sock *tp, | ||
1192 | const struct sk_buff *skb) | ||
1193 | { | ||
1194 | int orig = tp->packets_out; | ||
1195 | |||
1196 | tp->packets_out += tcp_skb_pcount(skb); | ||
1197 | if (!orig) | ||
1198 | tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); | ||
1199 | } | ||
1200 | |||
1201 | static inline void tcp_packets_out_dec(struct tcp_sock *tp, | ||
1202 | const struct sk_buff *skb) | ||
1203 | { | ||
1204 | tp->packets_out -= tcp_skb_pcount(skb); | ||
1205 | } | ||
1206 | |||
1207 | /* This determines how many packets are "in the network" to the best | ||
1208 | * of our knowledge. In many cases it is conservative, but where | ||
1209 | * detailed information is available from the receiver (via SACK | ||
1210 | * blocks etc.) we can make more aggressive calculations. | ||
1211 | * | ||
1212 | * Use this for decisions involving congestion control, use just | ||
1213 | * tp->packets_out to determine if the send queue is empty or not. | ||
1214 | * | ||
1215 | * Read this equation as: | ||
1216 | * | ||
1217 | * "Packets sent once on transmission queue" MINUS | ||
1218 | * "Packets left network, but not honestly ACKed yet" PLUS | ||
1219 | * "Packets fast retransmitted" | ||
1220 | */ | ||
1221 | static __inline__ unsigned int tcp_packets_in_flight(const struct tcp_sock *tp) | ||
1222 | { | ||
1223 | return (tp->packets_out - tp->left_out + tp->retrans_out); | ||
1224 | } | ||
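
/* Example: with packets_out = 10, left_out = 4 (say 3 SACKed + 1 marked
 * lost) and retrans_out = 1, we believe 10 - 4 + 1 = 7 packets are in
 * the network: the SACKed and lost segments have left it, and the one
 * fast retransmission has re-entered it.
 */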
1225 | |||
1226 | /* | ||
1227 | * Which congestion algorithim is in use on the connection. | ||
1228 | */ | ||
1229 | #define tcp_is_vegas(__tp) ((__tp)->adv_cong == TCP_VEGAS) | ||
1230 | #define tcp_is_westwood(__tp) ((__tp)->adv_cong == TCP_WESTWOOD) | ||
1231 | #define tcp_is_bic(__tp) ((__tp)->adv_cong == TCP_BIC) | ||
1232 | |||
1233 | /* Recalculate snd_ssthresh, we want to set it to: | ||
1234 | * | ||
1235 | * Reno: | ||
1236 | * one half the current congestion window, but no | ||
1237 | * less than two segments | ||
1238 | * | ||
1239 | * BIC: | ||
1240 | * behave like Reno until low_window is reached, | ||
1241 | * then increase congestion window slowly | ||
1242 | */ | ||
1243 | static inline __u32 tcp_recalc_ssthresh(struct tcp_sock *tp) | ||
1244 | { | ||
1245 | if (tcp_is_bic(tp)) { | ||
1246 | if (sysctl_tcp_bic_fast_convergence && | ||
1247 | tp->snd_cwnd < tp->bictcp.last_max_cwnd) | ||
1248 | tp->bictcp.last_max_cwnd = (tp->snd_cwnd * | ||
1249 | (BICTCP_BETA_SCALE | ||
1250 | + sysctl_tcp_bic_beta)) | ||
1251 | / (2 * BICTCP_BETA_SCALE); | ||
1252 | else | ||
1253 | tp->bictcp.last_max_cwnd = tp->snd_cwnd; | ||
1254 | |||
1255 | if (tp->snd_cwnd > sysctl_tcp_bic_low_window) | ||
1256 | return max((tp->snd_cwnd * sysctl_tcp_bic_beta) | ||
1257 | / BICTCP_BETA_SCALE, 2U); | ||
1258 | } | ||
1259 | |||
1260 | return max(tp->snd_cwnd >> 1U, 2U); | ||
1261 | } | ||
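
/* Example: a Reno flow at snd_cwnd = 100 backs off to max(100 >> 1, 2)
 * = 50.  A BIC flow above sysctl_tcp_bic_low_window with the shipped
 * default sysctl_tcp_bic_beta = 819 backs off to 100 * 819 / 1024 = 79,
 * i.e. roughly a 20% reduction instead of Reno's 50%.
 */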
1262 | |||
1263 | /* Stop taking Vegas samples for now. */ | ||
1264 | #define tcp_vegas_disable(__tp) ((__tp)->vegas.doing_vegas_now = 0) | ||
1265 | |||
1266 | static inline void tcp_vegas_enable(struct tcp_sock *tp) | ||
1267 | { | ||
1268 | /* There are several situations when we must "re-start" Vegas: | ||
1269 | * | ||
1270 | * o when a connection is established | ||
1271 | * o after an RTO | ||
1272 | * o after fast recovery | ||
1273 | * o when we send a packet and there is no outstanding | ||
1274 | * unacknowledged data (restarting an idle connection) | ||
1275 | * | ||
1276 | * In these circumstances we cannot do a Vegas calculation at the | ||
1277 | * end of the first RTT, because any calculation we do is using | ||
1278 | * stale info -- both the saved cwnd and congestion feedback are | ||
1279 | * stale. | ||
1280 | * | ||
1281 | * Instead we must wait until the completion of an RTT during | ||
1282 | * which we actually receive ACKs. | ||
1283 | */ | ||
1284 | |||
1285 | /* Begin taking Vegas samples next time we send something. */ | ||
1286 | tp->vegas.doing_vegas_now = 1; | ||
1287 | |||
1288 | /* Set the beginning of the next send window. */ | ||
1289 | tp->vegas.beg_snd_nxt = tp->snd_nxt; | ||
1290 | |||
1291 | tp->vegas.cntRTT = 0; | ||
1292 | tp->vegas.minRTT = 0x7fffffff; | ||
1293 | } | ||
1294 | |||
1295 | /* Should we be taking Vegas samples right now? */ | ||
1296 | #define tcp_vegas_enabled(__tp) ((__tp)->vegas.doing_vegas_now) | ||
1297 | |||
1298 | extern void tcp_ca_init(struct tcp_sock *tp); | ||
1299 | |||
1300 | static inline void tcp_set_ca_state(struct tcp_sock *tp, u8 ca_state) | ||
1301 | { | ||
1302 | if (tcp_is_vegas(tp)) { | ||
1303 | if (ca_state == TCP_CA_Open) | ||
1304 | tcp_vegas_enable(tp); | ||
1305 | else | ||
1306 | tcp_vegas_disable(tp); | ||
1307 | } | ||
1308 | tp->ca_state = ca_state; | ||
1309 | } | ||
1310 | |||
1311 | /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd. | ||
1312 | * The exception is rate halving phase, when cwnd is decreasing towards | ||
1313 | * ssthresh. | ||
1314 | */ | ||
1315 | static inline __u32 tcp_current_ssthresh(struct tcp_sock *tp) | ||
1316 | { | ||
1317 | if ((1<<tp->ca_state)&(TCPF_CA_CWR|TCPF_CA_Recovery)) | ||
1318 | return tp->snd_ssthresh; | ||
1319 | else | ||
1320 | return max(tp->snd_ssthresh, | ||
1321 | ((tp->snd_cwnd >> 1) + | ||
1322 | (tp->snd_cwnd >> 2))); | ||
1323 | } | ||
1324 | |||
1325 | static inline void tcp_sync_left_out(struct tcp_sock *tp) | ||
1326 | { | ||
1327 | if (tp->rx_opt.sack_ok && | ||
1328 | (tp->sacked_out >= tp->packets_out - tp->lost_out)) | ||
1329 | tp->sacked_out = tp->packets_out - tp->lost_out; | ||
1330 | tp->left_out = tp->sacked_out + tp->lost_out; | ||
1331 | } | ||
1332 | |||
1333 | extern void tcp_cwnd_application_limited(struct sock *sk); | ||
1334 | |||
1335 | /* Congestion window validation. (RFC2861) */ | ||
1336 | |||
1337 | static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp) | ||
1338 | { | ||
1339 | __u32 packets_out = tp->packets_out; | ||
1340 | |||
1341 | if (packets_out >= tp->snd_cwnd) { | ||
1342 | /* Network is feed fully. */ | ||
1343 | tp->snd_cwnd_used = 0; | ||
1344 | tp->snd_cwnd_stamp = tcp_time_stamp; | ||
1345 | } else { | ||
1346 | /* Network starves. */ | ||
1347 | if (tp->packets_out > tp->snd_cwnd_used) | ||
1348 | tp->snd_cwnd_used = tp->packets_out; | ||
1349 | |||
1350 | if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto) | ||
1351 | tcp_cwnd_application_limited(sk); | ||
1352 | } | ||
1353 | } | ||
1354 | |||
1355 | /* Set slow start threshould and cwnd not falling to slow start */ | ||
1356 | static inline void __tcp_enter_cwr(struct tcp_sock *tp) | ||
1357 | { | ||
1358 | tp->undo_marker = 0; | ||
1359 | tp->snd_ssthresh = tcp_recalc_ssthresh(tp); | ||
1360 | tp->snd_cwnd = min(tp->snd_cwnd, | ||
1361 | tcp_packets_in_flight(tp) + 1U); | ||
1362 | tp->snd_cwnd_cnt = 0; | ||
1363 | tp->high_seq = tp->snd_nxt; | ||
1364 | tp->snd_cwnd_stamp = tcp_time_stamp; | ||
1365 | TCP_ECN_queue_cwr(tp); | ||
1366 | } | ||
1367 | |||
1368 | static inline void tcp_enter_cwr(struct tcp_sock *tp) | ||
1369 | { | ||
1370 | tp->prior_ssthresh = 0; | ||
1371 | if (tp->ca_state < TCP_CA_CWR) { | ||
1372 | __tcp_enter_cwr(tp); | ||
1373 | tcp_set_ca_state(tp, TCP_CA_CWR); | ||
1374 | } | ||
1375 | } | ||
1376 | |||
1377 | extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst); | ||
1378 | |||
1379 | /* Slow start with delack produces 3 packets of burst, so that | ||
1380 | * it is safe "de facto". | ||
1381 | */ | ||
1382 | static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp) | ||
1383 | { | ||
1384 | return 3; | ||
1385 | } | ||
1386 | |||
1387 | static __inline__ int tcp_minshall_check(const struct tcp_sock *tp) | ||
1388 | { | ||
1389 | return after(tp->snd_sml,tp->snd_una) && | ||
1390 | !after(tp->snd_sml, tp->snd_nxt); | ||
1391 | } | ||
1392 | |||
1393 | static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss, | ||
1394 | const struct sk_buff *skb) | ||
1395 | { | ||
1396 | if (skb->len < mss) | ||
1397 | tp->snd_sml = TCP_SKB_CB(skb)->end_seq; | ||
1398 | } | ||
1399 | |||
1400 | /* Return 0, if packet can be sent now without violation Nagle's rules: | ||
1401 | 1. It is full sized. | ||
1402 | 2. Or it contains FIN. | ||
1403 | 3. Or TCP_NODELAY was set. | ||
1404 | 4. Or TCP_CORK is not set, and all sent packets are ACKed. | ||
1405 | With Minshall's modification: all sent small packets are ACKed. | ||
1406 | */ | ||
1407 | |||
1408 | static __inline__ int | ||
1409 | tcp_nagle_check(const struct tcp_sock *tp, const struct sk_buff *skb, | ||
1410 | unsigned mss_now, int nonagle) | ||
1411 | { | ||
1412 | return (skb->len < mss_now && | ||
1413 | !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) && | ||
1414 | ((nonagle&TCP_NAGLE_CORK) || | ||
1415 | (!nonagle && | ||
1416 | tp->packets_out && | ||
1417 | tcp_minshall_check(tp)))); | ||
1418 | } | ||
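
/* Example: a 100-byte segment with mss_now = 1460 and no FIN is held
 * back (nonzero return) when nonagle == 0 and an earlier small segment
 * is still unacknowledged (tcp_minshall_check()), or whenever the
 * socket is corked (TCP_NAGLE_CORK).  With TCP_NODELAY set, nonagle ==
 * TCP_NAGLE_OFF makes both halves of the inner || false, so the
 * function returns 0 and the small segment may go out at once.
 */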
1419 | |||
1420 | extern void tcp_set_skb_tso_segs(struct sk_buff *, unsigned int); | ||
1421 | |||
1422 | /* This checks if the data bearing packet SKB (usually sk->sk_send_head) | ||
1423 | * should be put on the wire right now. | ||
1424 | */ | ||
1425 | static __inline__ int tcp_snd_test(const struct tcp_sock *tp, | ||
1426 | struct sk_buff *skb, | ||
1427 | unsigned cur_mss, int nonagle) | ||
1428 | { | ||
1429 | int pkts = tcp_skb_pcount(skb); | ||
1430 | |||
1431 | if (!pkts) { | ||
1432 | tcp_set_skb_tso_segs(skb, tp->mss_cache_std); | ||
1433 | pkts = tcp_skb_pcount(skb); | ||
1434 | } | ||
1435 | |||
	/* RFC 1122 - section 4.2.3.4
	 *
	 * We must queue if
	 *
	 * a) The right edge of this frame exceeds the window
	 * b) There are packets in flight and we have a small segment
	 *    [SWS avoidance and Nagle algorithm]
	 *    (part of SWS is done on packetization)
	 *    Minshall's version reads: there are no _small_
	 *    segments in flight. (tcp_nagle_check)
	 * c) We have too many packets 'in flight'
	 *
	 * Don't use the Nagle rule for urgent data (or
	 * for the final FIN -DaveM).
	 *
	 * Also, the Nagle rule does not apply to frames which
	 * sit in the middle of the queue (they have no chance
	 * of getting new data) or when the room at the tail of
	 * the skb is not enough to save anything serious (<32 for now).
	 */

	/* Don't be strict about the congestion window for the
	 * final FIN frame. -DaveM
	 */
	return (((nonagle & TCP_NAGLE_PUSH) || tp->urg_mode
		 || !tcp_nagle_check(tp, skb, cur_mss, nonagle)) &&
		(((tcp_packets_in_flight(tp) + (pkts-1)) < tp->snd_cwnd) ||
		 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) &&
		!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd));
}

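/* Worked example (an editorial sketch, not from the original source):
 * suppose snd_cwnd == 4, two packets are in flight, skb counts as one
 * segment (pkts == 1), nonagle == TCP_NAGLE_PUSH, and skb->end_seq is
 * inside snd_una + snd_wnd.  Then the Nagle clause passes (PUSH), the
 * congestion clause passes (2 + 0 < 4), the window clause passes, and
 * tcp_snd_test() returns nonzero: the segment may go out now.  Growing
 * in-flight to 4 would fail the cwnd clause unless skb carries a FIN.
 */
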
static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
{
	if (!tp->packets_out && !tp->pending)
		tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
}

static __inline__ int tcp_skb_is_last(const struct sock *sk,
				      const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
}

/* Push out any pending frames which were held back due to
 * TCP_CORK or an attempt to coalesce tiny packets.
 * The socket must be locked by the caller.
 */
static __inline__ void __tcp_push_pending_frames(struct sock *sk,
						 struct tcp_sock *tp,
						 unsigned cur_mss,
						 int nonagle)
{
	struct sk_buff *skb = sk->sk_send_head;

	if (skb) {
		if (!tcp_skb_is_last(sk, skb))
			nonagle = TCP_NAGLE_PUSH;
		if (!tcp_snd_test(tp, skb, cur_mss, nonagle) ||
		    tcp_write_xmit(sk, nonagle))
			tcp_check_probe_timer(sk, tp);
	}
	tcp_cwnd_validate(sk, tp);
}

static __inline__ void tcp_push_pending_frames(struct sock *sk,
					       struct tcp_sock *tp)
{
	__tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
}

static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
{
	struct sk_buff *skb = sk->sk_send_head;

	return (skb &&
		tcp_snd_test(tp, skb, tcp_current_mss(sk, 1),
			     tcp_skb_is_last(sk, skb) ? TCP_NAGLE_PUSH : tp->nonagle));
}

/* snd_wl1 records the sequence number of the segment that last updated
 * the send window; the `ack` argument is currently unused.
 */
static __inline__ void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
	tp->snd_wl1 = seq;
}

static __inline__ void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
	tp->snd_wl1 = seq;
}

extern void tcp_destroy_sock(struct sock *sk);


/*
 * Calculate (or check) the TCP checksum
 */
static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
				   unsigned long saddr, unsigned long daddr,
				   unsigned long base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

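/* Usage sketch (editorial, not from the original source): on transmit,
 * `base` is typically the checksum over the TCP header and payload, and
 * csum_tcpudp_magic() folds in the IPv4 pseudo-header (saddr, daddr,
 * protocol, length) to produce the value stored in th->check.  On
 * receive, feeding the checksum of the whole segment as `base` should
 * yield 0 for an intact packet.
 */
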
static __inline__ int __tcp_checksum_complete(struct sk_buff *skb)
{
	return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
}

static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static __inline__ void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
}

/* Packets are added to the VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see why it failed. 8)8) --ANK
 *
 * NOTE: is this not too big to inline?
 */
static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!sysctl_tcp_low_latency && tp->ucopy.task) {
		__skb_queue_tail(&tp->ucopy.prequeue, skb);
		tp->ucopy.memory += skb->truesize;
		if (tp->ucopy.memory > sk->sk_rcvbuf) {
			struct sk_buff *skb1;

			BUG_ON(sock_owned_by_user(sk));

			while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
				sk->sk_backlog_rcv(sk, skb1);
				NET_INC_STATS_BH(LINUX_MIB_TCPPREQUEUEDROPPED);
			}

			tp->ucopy.memory = 0;
		} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
			wake_up_interruptible(sk->sk_sleep);
			if (!tcp_ack_scheduled(tp))
				tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3 * TCP_RTO_MIN) / 4);
		}
		return 1;
	}
	return 0;
}

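/* Call-site sketch (editorial; the exact caller lives in the IPv4
 * receive path, tcp_ipv4.c): softirq processing tries the prequeue
 * before falling back to full receive handling, roughly:
 *
 *	if (!sock_owned_by_user(sk)) {
 *		if (!tcp_prequeue(sk, skb))
 *			ret = tcp_v4_do_rcv(sk, skb);
 *	} else
 *		sk_add_backlog(sk, skb);
 *
 * so a segment queued here is checksummed and copied to user space
 * later, in the context of the reading task.
 */
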

#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
	"Unused", "Established", "Syn Sent", "Syn Recv",
	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
	"Close Wait", "Last ACK", "Listen", "Closing"
};
#endif

static __inline__ void tcp_set_state(struct sock *sk, int state)
{
	int oldstate = sk->sk_state;

	switch (state) {
	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			TCP_INC_STATS(TCP_MIB_CURRESTAB);
		break;

	case TCP_CLOSE:
		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
			TCP_INC_STATS(TCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (tcp_sk(sk)->bind_hash &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			tcp_put_port(sk);
		/* fall through */
	default:
		if (oldstate == TCP_ESTABLISHED)
			TCP_DEC_STATS(TCP_MIB_CURRESTAB);
	}

	/* Change state AFTER the socket is unhashed to avoid a closed
	 * socket sitting in the hash tables.
	 */
	sk->sk_state = state;

#ifdef STATE_TRACE
	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
#endif
}

static __inline__ void tcp_done(struct sock *sk)
{
	tcp_set_state(sk, TCP_CLOSE);
	tcp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		tcp_destroy_sock(sk);
}

static __inline__ void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->eff_sacks = 0;
	rx_opt->num_sacks = 0;
}

static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp, __u32 tstamp)
{
	if (tp->rx_opt.tstamp_ok) {
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_TIMESTAMP << 8) |
					  TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);
		*ptr++ = htonl(tp->rx_opt.ts_recent);
	}
	if (tp->rx_opt.eff_sacks) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_SACK << 8) |
					  (TCPOLEN_SACK_BASE +
					   (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK)));
		for (this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}
		if (tp->rx_opt.dsack) {
			tp->rx_opt.dsack = 0;
			tp->rx_opt.eff_sacks--;
		}
	}
}

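/* Wire-format sketch (editorial): with timestamps negotiated, the first
 * word above encodes NOP(1), NOP(1), kind TCPOPT_TIMESTAMP(8), length
 * TCPOLEN_TIMESTAMP(10), and the next two words carry TSval and TSecr,
 * for 12 option bytes in every data segment.  A SACK block with one
 * range adds NOP, NOP, kind TCPOPT_SACK(5), length
 * TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK, then the 32-bit start and
 * end sequence numbers of the range.
 */
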
/* Construct a TCP options header for a SYN or SYN-ACK packet.
 * If this is ever changed, make sure to update the definition of
 * MAX_SYN_SIZE to match the new maximum number of options that you
 * can generate.
 */
static inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
					 int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent)
{
	/* We always get an MSS option.
	 * If timestamps are in use, the option bytes that will appear in
	 * normal data packets must be accounted for in the advertised MSS.
	 * But we subtract them from tp->mss_cache so that calculations in
	 * tcp_sendmsg are simpler etc.  So account for this fact here if
	 * necessary.  If we don't do this correctly, as a receiver we won't
	 * recognize data packets as being full sized when we should, and
	 * thus we won't abide by the delayed ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of
	 * those going out.
	 */
	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
	if (ts) {
		if (sack)
			*ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		else
			*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);		/* TSVAL */
		*ptr++ = htonl(ts_recent);	/* TSECR */
	} else if (sack)
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
					  (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
	if (offer_wscale)
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
}

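/* Example layout (editorial): a SYN offering mss = 1460, timestamps,
 * SACK and wscale = 7 emits, in order: MSS (kind 2, len 4, 1460);
 * SACK-permitted (kind 4, len 2) packed with TIMESTAMP (kind 8, len 10);
 * TSval; TSecr; then NOP + WINDOW (kind 3, len 3, shift 7) -- five
 * 32-bit words, i.e. 20 bytes of options.
 */
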
/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale <= 0 ?
		(space >> (-sysctl_tcp_adv_win_scale)) :
		space - (space >> sysctl_tcp_adv_win_scale);
}

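/* Worked example (editorial): with the default sysctl_tcp_adv_win_scale
 * of 2 and space = 65536 bytes, the advertised window is
 * 65536 - (65536 >> 2) = 49152 bytes; the remaining quarter is reserved
 * as overhead for skb metadata.  A negative scale flips the split, e.g.
 * -2 advertises only space >> 2 = 16384 bytes.
 */
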
/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}

static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
				     struct sock *child)
{
	struct tcp_sock *tp = tcp_sk(sk);

	req->sk = child;
	sk_acceptq_added(sk);

	if (!tp->accept_queue_tail) {
		tp->accept_queue = req;
	} else {
		tp->accept_queue_tail->dl_next = req;
	}
	tp->accept_queue_tail = req;
	req->dl_next = NULL;
}

struct tcp_listen_opt
{
	u8 max_qlen_log;	/* log_2 of maximal queued SYNs */
	int qlen;		/* pending open_requests in the SYN queue */
	int qlen_young;		/* requests that have never been retransmitted */
	int clock_hand;		/* where the SYN-ACK timer resumes its scan */
	u32 hash_rnd;		/* random seed for hashing into syn_table */
	struct open_request *syn_table[TCP_SYNQ_HSIZE];
};

static inline void
tcp_synq_removed(struct sock *sk, struct open_request *req)
{
	struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;

	if (--lopt->qlen == 0)
		tcp_delete_keepalive_timer(sk);
	if (req->retrans == 0)
		lopt->qlen_young--;
}

static inline void tcp_synq_added(struct sock *sk)
{
	struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;

	if (lopt->qlen++ == 0)
		tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
	lopt->qlen_young++;
}

static inline int tcp_synq_len(struct sock *sk)
{
	return tcp_sk(sk)->listen_opt->qlen;
}

static inline int tcp_synq_young(struct sock *sk)
{
	return tcp_sk(sk)->listen_opt->qlen_young;
}

static inline int tcp_synq_is_full(struct sock *sk)
{
	/* Nonzero exactly when qlen >= 2^max_qlen_log, i.e. the queue
	 * has reached its configured capacity.
	 */
	return tcp_synq_len(sk) >> tcp_sk(sk)->listen_opt->max_qlen_log;
}

static inline void tcp_synq_unlink(struct tcp_sock *tp, struct open_request *req,
				   struct open_request **prev)
{
	write_lock(&tp->syn_wait_lock);
	*prev = req->dl_next;
	write_unlock(&tp->syn_wait_lock);
}

static inline void tcp_synq_drop(struct sock *sk, struct open_request *req,
				 struct open_request **prev)
{
	tcp_synq_unlink(tcp_sk(sk), req, prev);
	tcp_synq_removed(sk, req);
	tcp_openreq_free(req);
}

static __inline__ void tcp_openreq_init(struct open_request *req,
					struct tcp_options_received *rx_opt,
					struct sk_buff *skb)
{
	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	req->tstamp_ok = rx_opt->tstamp_ok;
	req->sack_ok = rx_opt->sack_ok;
	req->snd_wscale = rx_opt->snd_wscale;
	req->wscale_ok = rx_opt->wscale_ok;
	req->acked = 0;
	req->ecn_ok = 0;
	req->rmt_port = skb->h.th->source;
}

extern void tcp_enter_memory_pressure(void);

extern void tcp_listen_wlock(void);

/* - We may sleep inside this lock.
 * - If sleeping is not required (or called from BH),
 *   use plain read_(un)lock(&tcp_lhash_lock).
 */

static inline void tcp_listen_lock(void)
{
	/* The read_lock synchronizes us with candidate writers. */
	read_lock(&tcp_lhash_lock);
	atomic_inc(&tcp_lhash_users);
	read_unlock(&tcp_lhash_lock);
}

static inline void tcp_listen_unlock(void)
{
	if (atomic_dec_and_test(&tcp_lhash_users))
		wake_up(&tcp_lhash_wait);
}

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

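/* Note (editorial): `a ? : b` is the GNU C shorthand for `a ? a : b`,
 * so a per-socket value set via the TCP_KEEPINTVL or TCP_KEEPIDLE
 * socket options wins, and the sysctl supplies the default otherwise.
 */
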
static inline int tcp_fin_time(const struct tcp_sock *tp)
{
	int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout;

	/* Never wait less than 3.5 * RTO: (rto << 2) - (rto >> 1). */
	if (fin_timeout < (tp->rto << 2) - (tp->rto >> 1))
		fin_timeout = (tp->rto << 2) - (tp->rto >> 1);

	return fin_timeout;
}

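/* Worked example (editorial): with tp->rto at, say, 200 jiffies, the
 * floor is (200 << 2) - (200 >> 1) = 800 - 100 = 700 jiffies, i.e.
 * 3.5 * RTO, so a small linger2/sysctl_tcp_fin_timeout cannot shrink
 * the FIN-WAIT-2 lifetime below a few retransmission timeouts.
 */
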
static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int rst)
{
	if ((s32)(rx_opt->rcv_tsval - rx_opt->ts_recent) >= 0)
		return 0;
	if (xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)
		return 0;

	/* RST segments are not recommended to carry a timestamp,
	 * and, if they do, it is recommended to ignore PAWS because
	 * "their cleanup function should take precedence over timestamps."
	 * Certainly, this is a mistake. It is necessary to understand the
	 * reasons for this constraint before relaxing it: if the peer
	 * reboots, its clock may go out-of-sync and half-open connections
	 * will not be reset. Actually, the problem would not exist if all
	 * implementations followed the draft about maintaining clock
	 * monotonicity across reboots. Linux-2.2 DOES NOT!
	 *
	 * However, we can relax the time bounds for RST segments to MSL.
	 */
	if (rst && xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}

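/* Semantics note (editorial): a nonzero return here means the segment
 * looks like a PAWS violation and the caller will treat it as a
 * candidate for rejection.  Example: if ts_recent is 1000 and an old
 * duplicate arrives with rcv_tsval 900 less than 24 days after
 * ts_recent was stamped, both early-return tests fail and, unless the
 * RST/MSL escape applies, the function returns 1.
 */
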
static inline void tcp_v4_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_TSO) {
		if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
			sk->sk_route_caps &= ~NETIF_F_TSO;
	}
}

#define TCP_CHECK_TIMER(sk) do { } while (0)

static inline int tcp_use_frto(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* F-RTO must be activated in sysctl and there must be some
	 * unsent new data, and the advertised window should allow
	 * sending it.
	 */
	return (sysctl_tcp_frto && sk->sk_send_head &&
		!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
		       tp->snd_una + tp->snd_wnd));
}

static inline void tcp_mib_init(void)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(TCP_MIB_MAXCONN, -1);
}

/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_OPENREQ,
	TCP_SEQ_STATE_ESTABLISHED,
	TCP_SEQ_STATE_TIME_WAIT,
};

struct tcp_seq_afinfo {
	struct module		*owner;
	char			*name;
	sa_family_t		family;
	int			(*seq_show) (struct seq_file *m, void *v);
	struct file_operations	*seq_fops;
};

struct tcp_iter_state {
	sa_family_t		family;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, sbucket, num, uid;
	struct seq_operations	seq_ops;
};

extern int tcp_proc_register(struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo);

/* TCP Westwood functions and constants */

#define TCP_WESTWOOD_INIT_RTT	(20*HZ)	/* maybe too conservative?! */
#define TCP_WESTWOOD_RTT_MIN	(HZ/20)	/* 50ms */

static inline void tcp_westwood_update_rtt(struct tcp_sock *tp, __u32 rtt_seq)
{
	if (tcp_is_westwood(tp))
		tp->westwood.rtt = rtt_seq;
}

static inline __u32 __tcp_westwood_bw_rttmin(const struct tcp_sock *tp)
{
	return max((tp->westwood.bw_est) * (tp->westwood.rtt_min) /
		   (__u32) (tp->mss_cache_std),
		   2U);
}

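/* Worked example (editorial): bw_est * rtt_min is a bandwidth-delay
 * product in bytes; dividing by the MSS converts it into a window
 * measured in segments.  E.g. a 2,920,000-byte product with
 * mss_cache_std = 1460 gives 2000 segments; the max() clamps the
 * result below at 2 so the sender can always keep an ACK clock going.
 */
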
static inline __u32 tcp_westwood_bw_rttmin(const struct tcp_sock *tp)
{
	return tcp_is_westwood(tp) ? __tcp_westwood_bw_rttmin(tp) : 0;
}

static inline int tcp_westwood_ssthresh(struct tcp_sock *tp)
{
	__u32 ssthresh = 0;

	if (tcp_is_westwood(tp)) {
		ssthresh = __tcp_westwood_bw_rttmin(tp);
		if (ssthresh)
			tp->snd_ssthresh = ssthresh;
	}

	return (ssthresh != 0);
}

static inline int tcp_westwood_cwnd(struct tcp_sock *tp)
{
	__u32 cwnd = 0;

	if (tcp_is_westwood(tp)) {
		cwnd = __tcp_westwood_bw_rttmin(tp);
		if (cwnd)
			tp->snd_cwnd = cwnd;
	}

	return (cwnd != 0);
}
#endif	/* _TCP_H */