aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorArnaldo Carvalho de Melo <acme@ghostprotocols.net>2005-08-09 23:10:42 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2005-08-29 18:43:19 -0400
commit463c84b97f24010a67cd871746d6a7e4c925a5f9 (patch)
tree48df67ede4ebb5d12b3c0ae55d72531574bd51a6 /include
parent87d11ceb9deb7a3f13fdee6e89d9bb6be7d27a71 (diff)
[NET]: Introduce inet_connection_sock
This creates struct inet_connection_sock, moving members out of struct tcp_sock that are shareable with other INET connection oriented protocols, such as DCCP, which in my private tree already uses most of these members. The functions that operate on these members were renamed, using an inet_csk_ prefix while not being moved yet to a new file, so as to ease the review of these changes. Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include')
-rw-r--r--include/linux/ip.h2
-rw-r--r--include/linux/ipv6.h8
-rw-r--r--include/linux/tcp.h39
-rw-r--r--include/net/inet_connection_sock.h86
-rw-r--r--include/net/inet_hashtables.h6
-rw-r--r--include/net/request_sock.h6
-rw-r--r--include/net/sock.h3
-rw-r--r--include/net/tcp.h222
-rw-r--r--include/net/tcp_ecn.h2
9 files changed, 215 insertions, 159 deletions
diff --git a/include/linux/ip.h b/include/linux/ip.h
index 2c54bbd3da76..33e8a19a1a0f 100644
--- a/include/linux/ip.h
+++ b/include/linux/ip.h
@@ -128,7 +128,6 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
128 return (struct inet_request_sock *)sk; 128 return (struct inet_request_sock *)sk;
129} 129}
130 130
131struct inet_bind_bucket;
132struct ipv6_pinfo; 131struct ipv6_pinfo;
133 132
134struct inet_sock { 133struct inet_sock {
@@ -158,7 +157,6 @@ struct inet_sock {
158 int mc_index; /* Multicast device index */ 157 int mc_index; /* Multicast device index */
159 __u32 mc_addr; 158 __u32 mc_addr;
160 struct ip_mc_socklist *mc_list; /* Group array */ 159 struct ip_mc_socklist *mc_list; /* Group array */
161 struct inet_bind_bucket *bind_hash;
162 /* 160 /*
163 * Following members are used to retain the infomation to build 161 * Following members are used to retain the infomation to build
164 * an ip header on each ip fragmentation while the socket is corked. 162 * an ip header on each ip fragmentation while the socket is corked.
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 98fa32316e40..88591913c94f 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -333,15 +333,15 @@ static inline struct in6_addr *tcp_v6_rcv_saddr(const struct sock *sk)
333 return sk->sk_family == AF_INET6 ? __tcp_v6_rcv_saddr(sk) : NULL; 333 return sk->sk_family == AF_INET6 ? __tcp_v6_rcv_saddr(sk) : NULL;
334} 334}
335 335
336static inline int tcp_twsk_ipv6only(const struct sock *sk) 336static inline int inet_twsk_ipv6only(const struct sock *sk)
337{ 337{
338 return inet_twsk(sk)->tw_ipv6only; 338 return inet_twsk(sk)->tw_ipv6only;
339} 339}
340 340
341static inline int tcp_v6_ipv6only(const struct sock *sk) 341static inline int inet_v6_ipv6only(const struct sock *sk)
342{ 342{
343 return likely(sk->sk_state != TCP_TIME_WAIT) ? 343 return likely(sk->sk_state != TCP_TIME_WAIT) ?
344 ipv6_only_sock(sk) : tcp_twsk_ipv6only(sk); 344 ipv6_only_sock(sk) : inet_twsk_ipv6only(sk);
345} 345}
346#else 346#else
347#define __ipv6_only_sock(sk) 0 347#define __ipv6_only_sock(sk) 0
@@ -360,7 +360,7 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
360#define __tcp_v6_rcv_saddr(__sk) NULL 360#define __tcp_v6_rcv_saddr(__sk) NULL
361#define tcp_v6_rcv_saddr(__sk) NULL 361#define tcp_v6_rcv_saddr(__sk) NULL
362#define tcp_twsk_ipv6only(__sk) 0 362#define tcp_twsk_ipv6only(__sk) 0
363#define tcp_v6_ipv6only(__sk) 0 363#define inet_v6_ipv6only(__sk) 0
364#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ 364#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
365 365
366#define INET6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \ 366#define INET6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 5d295b1b3de7..800930fac388 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -177,8 +177,8 @@ struct tcp_info
177 177
178#include <linux/config.h> 178#include <linux/config.h>
179#include <linux/skbuff.h> 179#include <linux/skbuff.h>
180#include <linux/ip.h>
181#include <net/sock.h> 180#include <net/sock.h>
181#include <net/inet_connection_sock.h>
182#include <net/inet_timewait_sock.h> 182#include <net/inet_timewait_sock.h>
183 183
184/* This defines a selective acknowledgement block. */ 184/* This defines a selective acknowledgement block. */
@@ -219,8 +219,8 @@ static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
219} 219}
220 220
221struct tcp_sock { 221struct tcp_sock {
222 /* inet_sock has to be the first member of tcp_sock */ 222 /* inet_connection_sock has to be the first member of tcp_sock */
223 struct inet_sock inet; 223 struct inet_connection_sock inet_conn;
224 int tcp_header_len; /* Bytes of tcp header to send */ 224 int tcp_header_len; /* Bytes of tcp header to send */
225 225
226/* 226/*
@@ -241,18 +241,6 @@ struct tcp_sock {
241 __u32 snd_sml; /* Last byte of the most recently transmitted small packet */ 241 __u32 snd_sml; /* Last byte of the most recently transmitted small packet */
242 __u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ 242 __u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
243 __u32 lsndtime; /* timestamp of last sent data packet (for restart window) */ 243 __u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
244 /* Delayed ACK control data */
245 struct {
246 __u8 pending; /* ACK is pending */
247 __u8 quick; /* Scheduled number of quick acks */
248 __u8 pingpong; /* The session is interactive */
249 __u8 blocked; /* Delayed ACK was blocked by socket lock*/
250 __u32 ato; /* Predicted tick of soft clock */
251 unsigned long timeout; /* Currently scheduled timeout */
252 __u32 lrcvtime; /* timestamp of last received data packet*/
253 __u16 last_seg_size; /* Size of last incoming segment */
254 __u16 rcv_mss; /* MSS used for delayed ACK decisions */
255 } ack;
256 244
257 /* Data for direct copy to user */ 245 /* Data for direct copy to user */
258 struct { 246 struct {
@@ -271,8 +259,8 @@ struct tcp_sock {
271 __u16 xmit_size_goal; /* Goal for segmenting output packets */ 259 __u16 xmit_size_goal; /* Goal for segmenting output packets */
272 __u16 ext_header_len; /* Network protocol overhead (IP/IPv6 options) */ 260 __u16 ext_header_len; /* Network protocol overhead (IP/IPv6 options) */
273 __u8 ca_state; /* State of fast-retransmit machine */ 261 __u8 ca_state; /* State of fast-retransmit machine */
274 __u8 retransmits; /* Number of unrecovered RTO timeouts. */
275 262
263 __u8 keepalive_probes; /* num of allowed keep alive probes */
276 __u16 advmss; /* Advertised MSS */ 264 __u16 advmss; /* Advertised MSS */
277 __u32 window_clamp; /* Maximal window to advertise */ 265 __u32 window_clamp; /* Maximal window to advertise */
278 __u32 rcv_ssthresh; /* Current window clamp */ 266 __u32 rcv_ssthresh; /* Current window clamp */
@@ -281,7 +269,7 @@ struct tcp_sock {
281 __u8 reordering; /* Packet reordering metric. */ 269 __u8 reordering; /* Packet reordering metric. */
282 __u8 frto_counter; /* Number of new acks after RTO */ 270 __u8 frto_counter; /* Number of new acks after RTO */
283 271
284 __u8 unused; 272 __u8 nonagle; /* Disable Nagle algorithm? */
285 __u8 defer_accept; /* User waits for some data after accept() */ 273 __u8 defer_accept; /* User waits for some data after accept() */
286 274
287/* RTT measurement */ 275/* RTT measurement */
@@ -290,19 +278,13 @@ struct tcp_sock {
290 __u32 mdev_max; /* maximal mdev for the last rtt period */ 278 __u32 mdev_max; /* maximal mdev for the last rtt period */
291 __u32 rttvar; /* smoothed mdev_max */ 279 __u32 rttvar; /* smoothed mdev_max */
292 __u32 rtt_seq; /* sequence number to update rttvar */ 280 __u32 rtt_seq; /* sequence number to update rttvar */
293 __u32 rto; /* retransmit timeout */
294 281
295 __u32 packets_out; /* Packets which are "in flight" */ 282 __u32 packets_out; /* Packets which are "in flight" */
296 __u32 left_out; /* Packets which leaved network */ 283 __u32 left_out; /* Packets which leaved network */
297 __u32 retrans_out; /* Retransmitted packets out */ 284 __u32 retrans_out; /* Retransmitted packets out */
298 __u8 backoff; /* backoff */
299/* 285/*
300 * Options received (usually on last packet, some only on SYN packets). 286 * Options received (usually on last packet, some only on SYN packets).
301 */ 287 */
302 __u8 nonagle; /* Disable Nagle algorithm? */
303 __u8 keepalive_probes; /* num of allowed keep alive probes */
304
305 __u8 probes_out; /* unanswered 0 window probes */
306 struct tcp_options_received rx_opt; 288 struct tcp_options_received rx_opt;
307 289
308/* 290/*
@@ -315,11 +297,6 @@ struct tcp_sock {
315 __u32 snd_cwnd_used; 297 __u32 snd_cwnd_used;
316 __u32 snd_cwnd_stamp; 298 __u32 snd_cwnd_stamp;
317 299
318 /* Two commonly used timers in both sender and receiver paths. */
319 unsigned long timeout;
320 struct timer_list retransmit_timer; /* Resend (no ack) */
321 struct timer_list delack_timer; /* Ack delay */
322
323 struct sk_buff_head out_of_order_queue; /* Out of order segments go here */ 300 struct sk_buff_head out_of_order_queue; /* Out of order segments go here */
324 301
325 struct tcp_func *af_specific; /* Operations which are AF_INET{4,6} specific */ 302 struct tcp_func *af_specific; /* Operations which are AF_INET{4,6} specific */
@@ -334,7 +311,7 @@ struct tcp_sock {
334 struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */ 311 struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
335 struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/ 312 struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/
336 313
337 __u8 syn_retries; /* num of allowed syn retries */ 314 __u8 probes_out; /* unanswered 0 window probes */
338 __u8 ecn_flags; /* ECN status bits. */ 315 __u8 ecn_flags; /* ECN status bits. */
339 __u16 prior_ssthresh; /* ssthresh saved at recovery start */ 316 __u16 prior_ssthresh; /* ssthresh saved at recovery start */
340 __u32 lost_out; /* Lost packets */ 317 __u32 lost_out; /* Lost packets */
@@ -349,14 +326,12 @@ struct tcp_sock {
349 int undo_retrans; /* number of undoable retransmissions. */ 326 int undo_retrans; /* number of undoable retransmissions. */
350 __u32 urg_seq; /* Seq of received urgent pointer */ 327 __u32 urg_seq; /* Seq of received urgent pointer */
351 __u16 urg_data; /* Saved octet of OOB data and control flags */ 328 __u16 urg_data; /* Saved octet of OOB data and control flags */
352 __u8 pending; /* Scheduled timer event */
353 __u8 urg_mode; /* In urgent mode */ 329 __u8 urg_mode; /* In urgent mode */
330 /* ONE BYTE HOLE, TRY TO PACK! */
354 __u32 snd_up; /* Urgent pointer */ 331 __u32 snd_up; /* Urgent pointer */
355 332
356 __u32 total_retrans; /* Total retransmits for entire connection */ 333 __u32 total_retrans; /* Total retransmits for entire connection */
357 334
358 struct request_sock_queue accept_queue; /* FIFO of established children */
359
360 unsigned int keepalive_time; /* time before keep alive takes place */ 335 unsigned int keepalive_time; /* time before keep alive takes place */
361 unsigned int keepalive_intvl; /* time interval between keep alive probes */ 336 unsigned int keepalive_intvl; /* time interval between keep alive probes */
362 int linger2; 337 int linger2;
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
new file mode 100644
index 000000000000..ef609396e41b
--- /dev/null
+++ b/include/net/inet_connection_sock.h
@@ -0,0 +1,86 @@
1/*
2 * NET Generic infrastructure for INET connection oriented protocols.
3 *
4 * Definitions for inet_connection_sock
5 *
6 * Authors: Many people, see the TCP sources
7 *
8 * From code originally in TCP
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15#ifndef _INET_CONNECTION_SOCK_H
16#define _INET_CONNECTION_SOCK_H
17
18#include <linux/ip.h>
19#include <linux/timer.h>
20#include <net/request_sock.h>
21
22struct inet_bind_bucket;
23struct inet_hashinfo;
24
25/** inet_connection_sock - INET connection oriented sock
26 *
27 * @icsk_accept_queue: FIFO of established children
28 * @icsk_bind_hash: Bind node
29 * @icsk_timeout: Timeout
30 * @icsk_retransmit_timer: Resend (no ack)
31 * @icsk_rto: Retransmit timeout
32 * @icsk_retransmits: Number of unrecovered [RTO] timeouts
33 * @icsk_pending: Scheduled timer event
34 * @icsk_backoff: Backoff
35 * @icsk_syn_retries: Number of allowed SYN (or equivalent) retries
36 * @icsk_ack: Delayed ACK control data
37 */
38struct inet_connection_sock {
39 /* inet_sock has to be the first member! */
40 struct inet_sock icsk_inet;
41 struct request_sock_queue icsk_accept_queue;
42 struct inet_bind_bucket *icsk_bind_hash;
43 unsigned long icsk_timeout;
44 struct timer_list icsk_retransmit_timer;
45 struct timer_list icsk_delack_timer;
46 __u32 icsk_rto;
47 __u8 icsk_retransmits;
48 __u8 icsk_pending;
49 __u8 icsk_backoff;
50 __u8 icsk_syn_retries;
51 struct {
52 __u8 pending; /* ACK is pending */
53 __u8 quick; /* Scheduled number of quick acks */
54 __u8 pingpong; /* The session is interactive */
55 __u8 blocked; /* Delayed ACK was blocked by socket lock */
56 __u32 ato; /* Predicted tick of soft clock */
57 unsigned long timeout; /* Currently scheduled timeout */
58 __u32 lrcvtime; /* timestamp of last received data packet */
59 __u16 last_seg_size; /* Size of last incoming segment */
60 __u16 rcv_mss; /* MSS used for delayed ACK decisions */
61 } icsk_ack;
62};
63
64static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
65{
66 return (struct inet_connection_sock *)sk;
67}
68
69extern void inet_csk_init_xmit_timers(struct sock *sk,
70 void (*retransmit_handler)(unsigned long),
71 void (*delack_handler)(unsigned long),
72 void (*keepalive_handler)(unsigned long));
73extern void inet_csk_clear_xmit_timers(struct sock *sk);
74
75extern struct request_sock *inet_csk_search_req(const struct sock *sk,
76 struct request_sock ***prevp,
77 const __u16 rport,
78 const __u32 raddr,
79 const __u32 laddr);
80extern int inet_csk_get_port(struct inet_hashinfo *hashinfo,
81 struct sock *sk, unsigned short snum);
82
83extern struct dst_entry* inet_csk_route_req(struct sock *sk,
84 const struct request_sock *req);
85
86#endif /* _INET_CONNECTION_SOCK_H */
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index b5c0d64ea741..f0c21c07f894 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -17,7 +17,6 @@
17#include <linux/config.h> 17#include <linux/config.h>
18 18
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/ip.h>
21#include <linux/ipv6.h> 20#include <linux/ipv6.h>
22#include <linux/list.h> 21#include <linux/list.h>
23#include <linux/slab.h> 22#include <linux/slab.h>
@@ -26,6 +25,7 @@
26#include <linux/types.h> 25#include <linux/types.h>
27#include <linux/wait.h> 26#include <linux/wait.h>
28 27
28#include <net/inet_connection_sock.h>
29#include <net/sock.h> 29#include <net/sock.h>
30#include <net/tcp_states.h> 30#include <net/tcp_states.h>
31 31
@@ -185,9 +185,9 @@ static inline void __inet_inherit_port(struct inet_hashinfo *table,
185 struct inet_bind_bucket *tb; 185 struct inet_bind_bucket *tb;
186 186
187 spin_lock(&head->lock); 187 spin_lock(&head->lock);
188 tb = inet_sk(sk)->bind_hash; 188 tb = inet_csk(sk)->icsk_bind_hash;
189 sk_add_bind_node(child, &tb->owners); 189 sk_add_bind_node(child, &tb->owners);
190 inet_sk(child)->bind_hash = tb; 190 inet_csk(child)->icsk_bind_hash = tb;
191 spin_unlock(&head->lock); 191 spin_unlock(&head->lock);
192} 192}
193 193
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 334717bf9ef6..b7c7eecbe64d 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -224,17 +224,17 @@ static inline int reqsk_queue_added(struct request_sock_queue *queue)
224 return prev_qlen; 224 return prev_qlen;
225} 225}
226 226
227static inline int reqsk_queue_len(struct request_sock_queue *queue) 227static inline int reqsk_queue_len(const struct request_sock_queue *queue)
228{ 228{
229 return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0; 229 return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
230} 230}
231 231
232static inline int reqsk_queue_len_young(struct request_sock_queue *queue) 232static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
233{ 233{
234 return queue->listen_opt->qlen_young; 234 return queue->listen_opt->qlen_young;
235} 235}
236 236
237static inline int reqsk_queue_is_full(struct request_sock_queue *queue) 237static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
238{ 238{
239 return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log; 239 return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
240} 240}
diff --git a/include/net/sock.h b/include/net/sock.h
index 828dc082fcb7..48cc337a6566 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -493,9 +493,6 @@ extern int sk_wait_data(struct sock *sk, long *timeo);
493 493
494struct request_sock_ops; 494struct request_sock_ops;
495 495
496/* Here is the right place to enable sock refcounting debugging */
497//#define SOCK_REFCNT_DEBUG
498
499/* Networking protocol blocks we attach to sockets. 496/* Networking protocol blocks we attach to sockets.
500 * socket layer -> transport layer interface 497 * socket layer -> transport layer interface
501 * transport -> network interface is defined by struct inet_proto 498 * transport -> network interface is defined by struct inet_proto
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cf8e664176ad..a943c79c88b0 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -19,10 +19,11 @@
19#define _TCP_H 19#define _TCP_H
20 20
21#define TCP_DEBUG 1 21#define TCP_DEBUG 1
22#define INET_CSK_DEBUG 1
22#define FASTRETRANS_DEBUG 1 23#define FASTRETRANS_DEBUG 1
23 24
24/* Cancel timers, when they are not required. */ 25/* Cancel timers, when they are not required. */
25#undef TCP_CLEAR_TIMERS 26#undef INET_CSK_CLEAR_TIMERS
26 27
27#include <linux/config.h> 28#include <linux/config.h>
28#include <linux/list.h> 29#include <linux/list.h>
@@ -205,10 +206,10 @@ extern void tcp_tw_deschedule(struct inet_timewait_sock *tw);
205#define TCPOLEN_SACK_BASE_ALIGNED 4 206#define TCPOLEN_SACK_BASE_ALIGNED 4
206#define TCPOLEN_SACK_PERBLOCK 8 207#define TCPOLEN_SACK_PERBLOCK 8
207 208
208#define TCP_TIME_RETRANS 1 /* Retransmit timer */ 209#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
209#define TCP_TIME_DACK 2 /* Delayed ack timer */ 210#define ICSK_TIME_DACK 2 /* Delayed ack timer */
210#define TCP_TIME_PROBE0 3 /* Zero window probe timer */ 211#define ICSK_TIME_PROBE0 3 /* Zero window probe timer */
211#define TCP_TIME_KEEPOPEN 4 /* Keepalive timer */ 212#define ICSK_TIME_KEEPOPEN 4 /* Keepalive timer */
212 213
213/* Flags in tp->nonagle */ 214/* Flags in tp->nonagle */
214#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */ 215#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
@@ -257,9 +258,9 @@ extern atomic_t tcp_sockets_allocated;
257extern int tcp_memory_pressure; 258extern int tcp_memory_pressure;
258 259
259#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 260#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
260#define TCP_INET_FAMILY(fam) ((fam) == AF_INET) 261#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
261#else 262#else
262#define TCP_INET_FAMILY(fam) 1 263#define AF_INET_FAMILY(fam) 1
263#endif 264#endif
264 265
265/* 266/*
@@ -372,41 +373,42 @@ extern int tcp_rcv_established(struct sock *sk,
372 373
373extern void tcp_rcv_space_adjust(struct sock *sk); 374extern void tcp_rcv_space_adjust(struct sock *sk);
374 375
375enum tcp_ack_state_t 376enum inet_csk_ack_state_t {
376{ 377 ICSK_ACK_SCHED = 1,
377 TCP_ACK_SCHED = 1, 378 ICSK_ACK_TIMER = 2,
378 TCP_ACK_TIMER = 2, 379 ICSK_ACK_PUSHED = 4
379 TCP_ACK_PUSHED= 4
380}; 380};
381 381
382static inline void tcp_schedule_ack(struct tcp_sock *tp) 382static inline void inet_csk_schedule_ack(struct sock *sk)
383{ 383{
384 tp->ack.pending |= TCP_ACK_SCHED; 384 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED;
385} 385}
386 386
387static inline int tcp_ack_scheduled(struct tcp_sock *tp) 387static inline int inet_csk_ack_scheduled(const struct sock *sk)
388{ 388{
389 return tp->ack.pending&TCP_ACK_SCHED; 389 return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED;
390} 390}
391 391
392static __inline__ void tcp_dec_quickack_mode(struct tcp_sock *tp, unsigned int pkts) 392static inline void tcp_dec_quickack_mode(struct sock *sk,
393 const unsigned int pkts)
393{ 394{
394 if (tp->ack.quick) { 395 struct inet_connection_sock *icsk = inet_csk(sk);
395 if (pkts >= tp->ack.quick) {
396 tp->ack.quick = 0;
397 396
397 if (icsk->icsk_ack.quick) {
398 if (pkts >= icsk->icsk_ack.quick) {
399 icsk->icsk_ack.quick = 0;
398 /* Leaving quickack mode we deflate ATO. */ 400 /* Leaving quickack mode we deflate ATO. */
399 tp->ack.ato = TCP_ATO_MIN; 401 icsk->icsk_ack.ato = TCP_ATO_MIN;
400 } else 402 } else
401 tp->ack.quick -= pkts; 403 icsk->icsk_ack.quick -= pkts;
402 } 404 }
403} 405}
404 406
405extern void tcp_enter_quickack_mode(struct tcp_sock *tp); 407extern void tcp_enter_quickack_mode(struct sock *sk);
406 408
407static __inline__ void tcp_delack_init(struct tcp_sock *tp) 409static inline void inet_csk_delack_init(struct sock *sk)
408{ 410{
409 memset(&tp->ack, 0, sizeof(tp->ack)); 411 memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
410} 412}
411 413
412static inline void tcp_clear_options(struct tcp_options_received *rx_opt) 414static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
@@ -440,7 +442,7 @@ extern void tcp_update_metrics(struct sock *sk);
440 442
441extern void tcp_close(struct sock *sk, 443extern void tcp_close(struct sock *sk,
442 long timeout); 444 long timeout);
443extern struct sock * tcp_accept(struct sock *sk, int flags, int *err); 445extern struct sock * inet_csk_accept(struct sock *sk, int flags, int *err);
444extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait); 446extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
445 447
446extern int tcp_getsockopt(struct sock *sk, int level, 448extern int tcp_getsockopt(struct sock *sk, int level,
@@ -534,15 +536,18 @@ extern void tcp_cwnd_application_limited(struct sock *sk);
534 536
535/* tcp_timer.c */ 537/* tcp_timer.c */
536extern void tcp_init_xmit_timers(struct sock *); 538extern void tcp_init_xmit_timers(struct sock *);
537extern void tcp_clear_xmit_timers(struct sock *); 539static inline void tcp_clear_xmit_timers(struct sock *sk)
540{
541 inet_csk_clear_xmit_timers(sk);
542}
538 543
539extern void tcp_delete_keepalive_timer(struct sock *); 544extern void inet_csk_delete_keepalive_timer(struct sock *sk);
540extern void tcp_reset_keepalive_timer(struct sock *, unsigned long); 545extern void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
541extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu); 546extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
542extern unsigned int tcp_current_mss(struct sock *sk, int large); 547extern unsigned int tcp_current_mss(struct sock *sk, int large);
543 548
544#ifdef TCP_DEBUG 549#ifdef INET_CSK_DEBUG
545extern const char tcp_timer_bug_msg[]; 550extern const char inet_csk_timer_bug_msg[];
546#endif 551#endif
547 552
548/* tcp_diag.c */ 553/* tcp_diag.c */
@@ -554,70 +559,58 @@ typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
554extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, 559extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
555 sk_read_actor_t recv_actor); 560 sk_read_actor_t recv_actor);
556 561
557static inline void tcp_clear_xmit_timer(struct sock *sk, int what) 562static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
558{ 563{
559 struct tcp_sock *tp = tcp_sk(sk); 564 struct inet_connection_sock *icsk = inet_csk(sk);
560 565
561 switch (what) { 566 if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
562 case TCP_TIME_RETRANS: 567 icsk->icsk_pending = 0;
563 case TCP_TIME_PROBE0: 568#ifdef INET_CSK_CLEAR_TIMERS
564 tp->pending = 0; 569 sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
565
566#ifdef TCP_CLEAR_TIMERS
567 sk_stop_timer(sk, &tp->retransmit_timer);
568#endif 570#endif
569 break; 571 } else if (what == ICSK_TIME_DACK) {
570 case TCP_TIME_DACK: 572 icsk->icsk_ack.blocked = icsk->icsk_ack.pending = 0;
571 tp->ack.blocked = 0; 573#ifdef INET_CSK_CLEAR_TIMERS
572 tp->ack.pending = 0; 574 sk_stop_timer(sk, &icsk->icsk_delack_timer);
573
574#ifdef TCP_CLEAR_TIMERS
575 sk_stop_timer(sk, &tp->delack_timer);
576#endif 575#endif
577 break; 576 }
578 default: 577#ifdef INET_CSK_DEBUG
579#ifdef TCP_DEBUG 578 else {
580 printk(tcp_timer_bug_msg); 579 pr_debug(inet_csk_timer_bug_msg);
580 }
581#endif 581#endif
582 return;
583 };
584
585} 582}
586 583
587/* 584/*
588 * Reset the retransmission timer 585 * Reset the retransmission timer
589 */ 586 */
590static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when) 587static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
588 unsigned long when)
591{ 589{
592 struct tcp_sock *tp = tcp_sk(sk); 590 struct inet_connection_sock *icsk = inet_csk(sk);
593 591
594 if (when > TCP_RTO_MAX) { 592 if (when > TCP_RTO_MAX) {
595#ifdef TCP_DEBUG 593#ifdef INET_CSK_DEBUG
596 printk(KERN_DEBUG "reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, current_text_addr()); 594 pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n",
595 sk, what, when, current_text_addr());
597#endif 596#endif
598 when = TCP_RTO_MAX; 597 when = TCP_RTO_MAX;
599 } 598 }
600 599
601 switch (what) { 600 if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
602 case TCP_TIME_RETRANS: 601 icsk->icsk_pending = what;
603 case TCP_TIME_PROBE0: 602 icsk->icsk_timeout = jiffies + when;
604 tp->pending = what; 603 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
605 tp->timeout = jiffies+when; 604 } else if (what == ICSK_TIME_DACK) {
606 sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout); 605 icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
607 break; 606 icsk->icsk_ack.timeout = jiffies + when;
608 607 sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
609 case TCP_TIME_DACK: 608 }
610 tp->ack.pending |= TCP_ACK_TIMER; 609#ifdef INET_CSK_DEBUG
611 tp->ack.timeout = jiffies+when; 610 else {
612 sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout); 611 pr_debug(inet_csk_timer_bug_msg);
613 break; 612 }
614
615 default:
616#ifdef TCP_DEBUG
617 printk(tcp_timer_bug_msg);
618#endif 613#endif
619 return;
620 };
621} 614}
622 615
623/* Initialize RCV_MSS value. 616/* Initialize RCV_MSS value.
@@ -637,7 +630,7 @@ static inline void tcp_initialize_rcv_mss(struct sock *sk)
637 hint = min(hint, TCP_MIN_RCVMSS); 630 hint = min(hint, TCP_MIN_RCVMSS);
638 hint = max(hint, TCP_MIN_MSS); 631 hint = max(hint, TCP_MIN_MSS);
639 632
640 tp->ack.rcv_mss = hint; 633 inet_csk(sk)->icsk_ack.rcv_mss = hint;
641} 634}
642 635
643static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd) 636static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
@@ -772,7 +765,7 @@ static inline void tcp_packets_out_inc(struct sock *sk,
772 765
773 tp->packets_out += tcp_skb_pcount(skb); 766 tp->packets_out += tcp_skb_pcount(skb);
774 if (!orig) 767 if (!orig)
775 tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); 768 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
776} 769}
777 770
778static inline void tcp_packets_out_dec(struct tcp_sock *tp, 771static inline void tcp_packets_out_dec(struct tcp_sock *tp,
@@ -939,8 +932,9 @@ static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss,
939 932
940static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp) 933static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
941{ 934{
942 if (!tp->packets_out && !tp->pending) 935 const struct inet_connection_sock *icsk = inet_csk(sk);
943 tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto); 936 if (!tp->packets_out && !icsk->icsk_pending)
937 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, icsk->icsk_rto);
944} 938}
945 939
946static __inline__ void tcp_push_pending_frames(struct sock *sk, 940static __inline__ void tcp_push_pending_frames(struct sock *sk,
@@ -1021,8 +1015,9 @@ static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1021 tp->ucopy.memory = 0; 1015 tp->ucopy.memory = 0;
1022 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) { 1016 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1023 wake_up_interruptible(sk->sk_sleep); 1017 wake_up_interruptible(sk->sk_sleep);
1024 if (!tcp_ack_scheduled(tp)) 1018 if (!inet_csk_ack_scheduled(sk))
1025 tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4); 1019 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1020 (3 * TCP_RTO_MIN) / 4);
1026 } 1021 }
1027 return 1; 1022 return 1;
1028 } 1023 }
@@ -1055,7 +1050,7 @@ static __inline__ void tcp_set_state(struct sock *sk, int state)
1055 TCP_INC_STATS(TCP_MIB_ESTABRESETS); 1050 TCP_INC_STATS(TCP_MIB_ESTABRESETS);
1056 1051
1057 sk->sk_prot->unhash(sk); 1052 sk->sk_prot->unhash(sk);
1058 if (inet_sk(sk)->bind_hash && 1053 if (inet_csk(sk)->icsk_bind_hash &&
1059 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) 1054 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
1060 inet_put_port(&tcp_hashinfo, sk); 1055 inet_put_port(&tcp_hashinfo, sk);
1061 /* fall through */ 1056 /* fall through */
@@ -1186,51 +1181,55 @@ static inline int tcp_full_space(const struct sock *sk)
1186 return tcp_win_from_space(sk->sk_rcvbuf); 1181 return tcp_win_from_space(sk->sk_rcvbuf);
1187} 1182}
1188 1183
1189static inline void tcp_acceptq_queue(struct sock *sk, struct request_sock *req, 1184static inline void inet_csk_reqsk_queue_add(struct sock *sk,
1190 struct sock *child) 1185 struct request_sock *req,
1186 struct sock *child)
1191{ 1187{
1192 reqsk_queue_add(&tcp_sk(sk)->accept_queue, req, sk, child); 1188 reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
1193} 1189}
1194 1190
1195static inline void 1191static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
1196tcp_synq_removed(struct sock *sk, struct request_sock *req) 1192 struct request_sock *req)
1197{ 1193{
1198 if (reqsk_queue_removed(&tcp_sk(sk)->accept_queue, req) == 0) 1194 if (reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req) == 0)
1199 tcp_delete_keepalive_timer(sk); 1195 inet_csk_delete_keepalive_timer(sk);
1200} 1196}
1201 1197
1202static inline void tcp_synq_added(struct sock *sk) 1198static inline void inet_csk_reqsk_queue_added(struct sock *sk,
1199 const unsigned long timeout)
1203{ 1200{
1204 if (reqsk_queue_added(&tcp_sk(sk)->accept_queue) == 0) 1201 if (reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue) == 0)
1205 tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT); 1202 inet_csk_reset_keepalive_timer(sk, timeout);
1206} 1203}
1207 1204
1208static inline int tcp_synq_len(struct sock *sk) 1205static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
1209{ 1206{
1210 return reqsk_queue_len(&tcp_sk(sk)->accept_queue); 1207 return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
1211} 1208}
1212 1209
1213static inline int tcp_synq_young(struct sock *sk) 1210static inline int inet_csk_reqsk_queue_young(const struct sock *sk)
1214{ 1211{
1215 return reqsk_queue_len_young(&tcp_sk(sk)->accept_queue); 1212 return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue);
1216} 1213}
1217 1214
1218static inline int tcp_synq_is_full(struct sock *sk) 1215static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
1219{ 1216{
1220 return reqsk_queue_is_full(&tcp_sk(sk)->accept_queue); 1217 return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
1221} 1218}
1222 1219
1223static inline void tcp_synq_unlink(struct tcp_sock *tp, struct request_sock *req, 1220static inline void inet_csk_reqsk_queue_unlink(struct sock *sk,
1224 struct request_sock **prev) 1221 struct request_sock *req,
1222 struct request_sock **prev)
1225{ 1223{
1226 reqsk_queue_unlink(&tp->accept_queue, req, prev); 1224 reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req, prev);
1227} 1225}
1228 1226
1229static inline void tcp_synq_drop(struct sock *sk, struct request_sock *req, 1227static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
1230 struct request_sock **prev) 1228 struct request_sock *req,
1229 struct request_sock **prev)
1231{ 1230{
1232 tcp_synq_unlink(tcp_sk(sk), req, prev); 1231 inet_csk_reqsk_queue_unlink(sk, req, prev);
1233 tcp_synq_removed(sk, req); 1232 inet_csk_reqsk_queue_removed(sk, req);
1234 reqsk_free(req); 1233 reqsk_free(req);
1235} 1234}
1236 1235
@@ -1265,12 +1264,13 @@ static inline int keepalive_time_when(const struct tcp_sock *tp)
1265 return tp->keepalive_time ? : sysctl_tcp_keepalive_time; 1264 return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
1266} 1265}
1267 1266
1268static inline int tcp_fin_time(const struct tcp_sock *tp) 1267static inline int tcp_fin_time(const struct sock *sk)
1269{ 1268{
1270 int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout; 1269 int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
1270 const int rto = inet_csk(sk)->icsk_rto;
1271 1271
1272 if (fin_timeout < (tp->rto<<2) - (tp->rto>>1)) 1272 if (fin_timeout < (rto << 2) - (rto >> 1))
1273 fin_timeout = (tp->rto<<2) - (tp->rto>>1); 1273 fin_timeout = (rto << 2) - (rto >> 1);
1274 1274
1275 return fin_timeout; 1275 return fin_timeout;
1276} 1276}
diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h
index 64980ee8c92a..c6b84397448d 100644
--- a/include/net/tcp_ecn.h
+++ b/include/net/tcp_ecn.h
@@ -88,7 +88,7 @@ static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)
88 * it is surely retransmit. It is not in ECN RFC, 88 * it is surely retransmit. It is not in ECN RFC,
89 * but Linux follows this rule. */ 89 * but Linux follows this rule. */
90 else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags))) 90 else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags)))
91 tcp_enter_quickack_mode(tp); 91 tcp_enter_quickack_mode((struct sock *)tp);
92 } 92 }
93} 93}
94 94