 include/linux/ip.h                 |   2
 include/linux/ipv6.h               |   8
 include/linux/tcp.h                |  39
 include/net/inet_connection_sock.h |  86
 include/net/inet_hashtables.h      |   6
 include/net/request_sock.h         |   6
 include/net/sock.h                 |   3
 include/net/tcp.h                  | 222
 include/net/tcp_ecn.h              |   2
 net/ipv4/inet_hashtables.c         |  15
 net/ipv4/inet_timewait_sock.c      |   5
 net/ipv4/syncookies.c              |   2
 net/ipv4/tcp.c                     |  90
 net/ipv4/tcp_diag.c                |  21
 net/ipv4/tcp_input.c               | 266
 net/ipv4/tcp_ipv4.c                | 158
 net/ipv4/tcp_minisocks.c           |  28
 net/ipv4/tcp_output.c              |  86
 net/ipv4/tcp_timer.c               | 165
 net/ipv6/addrconf.c                |   2
 net/ipv6/tcp_ipv6.c                |  54
 21 files changed, 685 insertions(+), 581 deletions(-)
diff --git a/include/linux/ip.h b/include/linux/ip.h
index 2c54bbd3da76..33e8a19a1a0f 100644
--- a/include/linux/ip.h
+++ b/include/linux/ip.h
@@ -128,7 +128,6 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
 	return (struct inet_request_sock *)sk;
 }
 
-struct inet_bind_bucket;
 struct ipv6_pinfo;
 
 struct inet_sock {
@@ -158,7 +157,6 @@ struct inet_sock {
 	int			mc_index;	/* Multicast device index */
 	__u32			mc_addr;
 	struct ip_mc_socklist	*mc_list;	/* Group array */
-	struct inet_bind_bucket	*bind_hash;
 	/*
 	 * Following members are used to retain the infomation to build
 	 * an ip header on each ip fragmentation while the socket is corked.
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 98fa32316e40..88591913c94f 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -333,15 +333,15 @@ static inline struct in6_addr *tcp_v6_rcv_saddr(const struct sock *sk)
 	return sk->sk_family == AF_INET6 ? __tcp_v6_rcv_saddr(sk) : NULL;
 }
 
-static inline int tcp_twsk_ipv6only(const struct sock *sk)
+static inline int inet_twsk_ipv6only(const struct sock *sk)
 {
 	return inet_twsk(sk)->tw_ipv6only;
 }
 
-static inline int tcp_v6_ipv6only(const struct sock *sk)
+static inline int inet_v6_ipv6only(const struct sock *sk)
 {
 	return likely(sk->sk_state != TCP_TIME_WAIT) ?
-		ipv6_only_sock(sk) : tcp_twsk_ipv6only(sk);
+		ipv6_only_sock(sk) : inet_twsk_ipv6only(sk);
 }
 #else
 #define __ipv6_only_sock(sk)	0
@@ -360,7 +360,7 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
 #define __tcp_v6_rcv_saddr(__sk)	NULL
 #define tcp_v6_rcv_saddr(__sk)		NULL
 #define tcp_twsk_ipv6only(__sk)		0
-#define tcp_v6_ipv6only(__sk)		0
+#define inet_v6_ipv6only(__sk)		0
 #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
 
 #define INET6_MATCH(__sk, __saddr, __daddr, __ports, __dif)	\
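A note on the rename above: a TIME_WAIT connection is represented by the slimmed-down inet_timewait_sock, which carries no full ipv6_pinfo, so an only-IPv6 test has to branch on socket state before touching either structure. A minimal sketch of a caller, assuming only the helpers shown in this hunk (the function itself is hypothetical, not part of the patch):

static int example_saddr_match(const struct sock *sk, int addr_is_v4_mapped)
{
	/* An IPv6-only socket must never match an IPv4(-mapped) peer.
	 * inet_v6_ipv6only() reads ipv6_only_sock() for full sockets and
	 * tw_ipv6only for TIME_WAIT mini-sockets, so it is safe to call
	 * on either kind of socket during a lookup. */
	if (addr_is_v4_mapped && inet_v6_ipv6only(sk))
		return 0;
	return 1;
}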
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 5d295b1b3de7..800930fac388 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -177,8 +177,8 @@ struct tcp_info
 
 #include <linux/config.h>
 #include <linux/skbuff.h>
-#include <linux/ip.h>
 #include <net/sock.h>
+#include <net/inet_connection_sock.h>
 #include <net/inet_timewait_sock.h>
 
 /* This defines a selective acknowledgement block. */
@@ -219,8 +219,8 @@ static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
 }
 
 struct tcp_sock {
-	/* inet_sock has to be the first member of tcp_sock */
-	struct inet_sock	inet;
+	/* inet_connection_sock has to be the first member of tcp_sock */
+	struct inet_connection_sock	inet_conn;
 	int	tcp_header_len;	/* Bytes of tcp header to send		*/
 
 	/*
@@ -241,18 +241,6 @@ struct tcp_sock {
 	__u32	snd_sml;	/* Last byte of the most recently transmitted small packet */
 	__u32	rcv_tstamp;	/* timestamp of last received ACK (for keepalives) */
 	__u32	lsndtime;	/* timestamp of last sent data packet (for restart window) */
-	/* Delayed ACK control data */
-	struct {
-		__u8	pending;	/* ACK is pending */
-		__u8	quick;		/* Scheduled number of quick acks */
-		__u8	pingpong;	/* The session is interactive */
-		__u8	blocked;	/* Delayed ACK was blocked by socket lock */
-		__u32	ato;		/* Predicted tick of soft clock */
-		unsigned long timeout;	/* Currently scheduled timeout */
-		__u32	lrcvtime;	/* timestamp of last received data packet */
-		__u16	last_seg_size;	/* Size of last incoming segment */
-		__u16	rcv_mss;	/* MSS used for delayed ACK decisions */
-	} ack;
 
 	/* Data for direct copy to user */
 	struct {
@@ -271,8 +259,8 @@ struct tcp_sock {
 	__u16	xmit_size_goal;	/* Goal for segmenting output packets */
 	__u16	ext_header_len;	/* Network protocol overhead (IP/IPv6 options) */
 	__u8	ca_state;	/* State of fast-retransmit machine */
-	__u8	retransmits;	/* Number of unrecovered RTO timeouts. */
 
+	__u8	keepalive_probes; /* num of allowed keep alive probes */
 	__u16	advmss;		/* Advertised MSS */
 	__u32	window_clamp;	/* Maximal window to advertise */
 	__u32	rcv_ssthresh;	/* Current window clamp */
@@ -281,7 +269,7 @@ struct tcp_sock {
 	__u8	reordering;	/* Packet reordering metric. */
 	__u8	frto_counter;	/* Number of new acks after RTO */
 
-	__u8	unused;
+	__u8	nonagle;	/* Disable Nagle algorithm? */
 	__u8	defer_accept;	/* User waits for some data after accept() */
 
 	/* RTT measurement */
@@ -290,19 +278,13 @@ struct tcp_sock {
 	__u32	mdev_max;	/* maximal mdev for the last rtt period */
 	__u32	rttvar;		/* smoothed mdev_max */
 	__u32	rtt_seq;	/* sequence number to update rttvar */
-	__u32	rto;		/* retransmit timeout */
 
 	__u32	packets_out;	/* Packets which are "in flight" */
 	__u32	left_out;	/* Packets which leaved network */
 	__u32	retrans_out;	/* Retransmitted packets out */
-	__u8	backoff;	/* backoff */
 	/*
 	 *	Options received (usually on last packet, some only on SYN packets).
 	 */
-	__u8	nonagle;	/* Disable Nagle algorithm? */
-	__u8	keepalive_probes; /* num of allowed keep alive probes */
-
-	__u8	probes_out;	/* unanswered 0 window probes */
 	struct tcp_options_received rx_opt;
 
 	/*
@@ -315,11 +297,6 @@ struct tcp_sock {
 	__u32	snd_cwnd_used;
 	__u32	snd_cwnd_stamp;
 
-	/* Two commonly used timers in both sender and receiver paths. */
-	unsigned long		timeout;
-	struct timer_list	retransmit_timer;	/* Resend (no ack) */
-	struct timer_list	delack_timer;		/* Ack delay */
-
 	struct sk_buff_head	out_of_order_queue; /* Out of order segments go here */
 
 	struct tcp_func		*af_specific;	/* Operations which are AF_INET{4,6} specific */
@@ -334,7 +311,7 @@ struct tcp_sock {
 	struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
 	struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/
 
-	__u8	syn_retries;	/* num of allowed syn retries */
+	__u8	probes_out;	/* unanswered 0 window probes */
 	__u8	ecn_flags;	/* ECN status bits. */
 	__u16	prior_ssthresh; /* ssthresh saved at recovery start */
 	__u32	lost_out;	/* Lost packets */
@@ -349,14 +326,12 @@ struct tcp_sock {
 	int	undo_retrans;	/* number of undoable retransmissions. */
 	__u32	urg_seq;	/* Seq of received urgent pointer */
 	__u16	urg_data;	/* Saved octet of OOB data and control flags */
-	__u8	pending;	/* Scheduled timer event */
 	__u8	urg_mode;	/* In urgent mode */
+	/* ONE BYTE HOLE, TRY TO PACK! */
 	__u32	snd_up;		/* Urgent pointer */
 
 	__u32	total_retrans;	/* Total retransmits for entire connection */
 
-	struct request_sock_queue accept_queue; /* FIFO of established children */
-
 	unsigned int		keepalive_time;	  /* time before keep alive takes place */
 	unsigned int		keepalive_intvl;  /* time interval between keep alive probes */
 	int			linger2;
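The reshuffle above relies on struct layout as the inheritance mechanism: tcp_sock now begins with inet_connection_sock, which itself begins with inet_sock, so tcp_sk(), inet_csk() and inet_sk() are all casts of one and the same struct sock pointer. A self-contained sketch of that invariant, using stand-in types rather than the kernel structs:

#include <stddef.h>

struct inet_sock_x            { int num; };                      /* stands in for inet_sock */
struct inet_connection_sock_x { struct inet_sock_x icsk_inet; }; /* inet_sock is first */
struct tcp_sock_x             { struct inet_connection_sock_x inet_conn; }; /* icsk is first */

/* Both offsets being zero is what keeps inet_sk()/inet_csk()/tcp_sk()
 * valid as casts of the same pointer; the negative array size makes
 * this fail to compile if the layout were ever broken. */
typedef char icsk_layout_ok[offsetof(struct inet_connection_sock_x, icsk_inet) == 0 ? 1 : -1];
typedef char tcp_layout_ok[offsetof(struct tcp_sock_x, inet_conn) == 0 ? 1 : -1];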
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
new file mode 100644
index 000000000000..ef609396e41b
--- /dev/null
+++ b/include/net/inet_connection_sock.h
@@ -0,0 +1,86 @@
+/*
+ * NET		Generic infrastructure for INET connection oriented protocols.
+ *
+ *		Definitions for inet_connection_sock
+ *
+ * Authors:	Many people, see the TCP sources
+ *
+ *		From code originally in TCP
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ */
+#ifndef _INET_CONNECTION_SOCK_H
+#define _INET_CONNECTION_SOCK_H
+
+#include <linux/ip.h>
+#include <linux/timer.h>
+#include <net/request_sock.h>
+
+struct inet_bind_bucket;
+struct inet_hashinfo;
+
+/** inet_connection_sock - INET connection oriented sock
+ *
+ * @icsk_accept_queue:	   FIFO of established children
+ * @icsk_bind_hash:	   Bind node
+ * @icsk_timeout:	   Timeout
+ * @icsk_retransmit_timer: Resend (no ack)
+ * @icsk_rto:		   Retransmit timeout
+ * @icsk_retransmits:	   Number of unrecovered [RTO] timeouts
+ * @icsk_pending:	   Scheduled timer event
+ * @icsk_backoff:	   Backoff
+ * @icsk_syn_retries:	   Number of allowed SYN (or equivalent) retries
+ * @icsk_ack:		   Delayed ACK control data
+ */
+struct inet_connection_sock {
+	/* inet_sock has to be the first member! */
+	struct inet_sock	  icsk_inet;
+	struct request_sock_queue icsk_accept_queue;
+	struct inet_bind_bucket	  *icsk_bind_hash;
+	unsigned long		  icsk_timeout;
+	struct timer_list	  icsk_retransmit_timer;
+	struct timer_list	  icsk_delack_timer;
+	__u32			  icsk_rto;
+	__u8			  icsk_retransmits;
+	__u8			  icsk_pending;
+	__u8			  icsk_backoff;
+	__u8			  icsk_syn_retries;
+	struct {
+		__u8		  pending;	 /* ACK is pending */
+		__u8		  quick;	 /* Scheduled number of quick acks */
+		__u8		  pingpong;	 /* The session is interactive */
+		__u8		  blocked;	 /* Delayed ACK was blocked by socket lock */
+		__u32		  ato;		 /* Predicted tick of soft clock */
+		unsigned long	  timeout;	 /* Currently scheduled timeout */
+		__u32		  lrcvtime;	 /* timestamp of last received data packet */
+		__u16		  last_seg_size; /* Size of last incoming segment */
+		__u16		  rcv_mss;	 /* MSS used for delayed ACK decisions */
+	} icsk_ack;
+};
+
+static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
+{
+	return (struct inet_connection_sock *)sk;
+}
+
+extern void inet_csk_init_xmit_timers(struct sock *sk,
+				      void (*retransmit_handler)(unsigned long),
+				      void (*delack_handler)(unsigned long),
+				      void (*keepalive_handler)(unsigned long));
+extern void inet_csk_clear_xmit_timers(struct sock *sk);
+
+extern struct request_sock *inet_csk_search_req(const struct sock *sk,
+						struct request_sock ***prevp,
+						const __u16 rport,
+						const __u32 raddr,
+						const __u32 laddr);
+extern int inet_csk_get_port(struct inet_hashinfo *hashinfo,
+			     struct sock *sk, unsigned short snum);
+
+extern struct dst_entry* inet_csk_route_req(struct sock *sk,
+					    const struct request_sock *req);
+
+#endif /* _INET_CONNECTION_SOCK_H */
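To see how a connection-oriented protocol is meant to hook into this new header, here is a hedged sketch of the timer wiring; the my_* handlers below are hypothetical stand-ins (TCP's real handlers stay in net/ipv4/tcp_timer.c):

static void my_retransmit_handler(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	/* resend unacknowledged data, scaling icsk_rto by icsk_backoff */
}

static void my_delack_handler(unsigned long data)
{
	/* flush a delayed ACK that could not be piggy-backed */
}

static void my_keepalive_handler(unsigned long data)
{
	/* probe an idle peer */
}

static void my_proto_init_sock(struct sock *sk)
{
	/* attach all three timers to sk; each handler receives sk as 'data' */
	inet_csk_init_xmit_timers(sk, my_retransmit_handler,
				  my_delack_handler, my_keepalive_handler);
}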
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index b5c0d64ea741..f0c21c07f894 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -17,7 +17,6 @@
 #include <linux/config.h>
 
 #include <linux/interrupt.h>
-#include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/list.h>
 #include <linux/slab.h>
@@ -26,6 +25,7 @@
 #include <linux/types.h>
 #include <linux/wait.h>
 
+#include <net/inet_connection_sock.h>
 #include <net/sock.h>
 #include <net/tcp_states.h>
 
@@ -185,9 +185,9 @@ static inline void __inet_inherit_port(struct inet_hashinfo *table,
 	struct inet_bind_bucket *tb;
 
 	spin_lock(&head->lock);
-	tb = inet_sk(sk)->bind_hash;
+	tb = inet_csk(sk)->icsk_bind_hash;
 	sk_add_bind_node(child, &tb->owners);
-	inet_sk(child)->bind_hash = tb;
+	inet_csk(child)->icsk_bind_hash = tb;
 	spin_unlock(&head->lock);
 }
 
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 334717bf9ef6..b7c7eecbe64d 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -224,17 +224,17 @@ static inline int reqsk_queue_added(struct request_sock_queue *queue)
 	return prev_qlen;
 }
 
-static inline int reqsk_queue_len(struct request_sock_queue *queue)
+static inline int reqsk_queue_len(const struct request_sock_queue *queue)
 {
 	return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
 }
 
-static inline int reqsk_queue_len_young(struct request_sock_queue *queue)
+static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
 {
 	return queue->listen_opt->qlen_young;
 }
 
-static inline int reqsk_queue_is_full(struct request_sock_queue *queue)
+static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
 {
 	return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
 }
diff --git a/include/net/sock.h b/include/net/sock.h
index 828dc082fcb7..48cc337a6566 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -493,9 +493,6 @@ extern int sk_wait_data(struct sock *sk, long *timeo);
 
 struct request_sock_ops;
 
-/* Here is the right place to enable sock refcounting debugging */
-//#define SOCK_REFCNT_DEBUG
-
 /* Networking protocol blocks we attach to sockets.
  * socket layer -> transport layer interface
  * transport -> network interface is defined by struct inet_proto
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cf8e664176ad..a943c79c88b0 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -19,10 +19,11 @@
 #define _TCP_H
 
 #define TCP_DEBUG 1
+#define INET_CSK_DEBUG 1
 #define FASTRETRANS_DEBUG 1
 
 /* Cancel timers, when they are not required. */
-#undef TCP_CLEAR_TIMERS
+#undef INET_CSK_CLEAR_TIMERS
 
 #include <linux/config.h>
 #include <linux/list.h>
@@ -205,10 +206,10 @@ extern void tcp_tw_deschedule(struct inet_timewait_sock *tw);
 #define TCPOLEN_SACK_BASE_ALIGNED	4
 #define TCPOLEN_SACK_PERBLOCK		8
 
-#define TCP_TIME_RETRANS	1	/* Retransmit timer */
-#define TCP_TIME_DACK		2	/* Delayed ack timer */
-#define TCP_TIME_PROBE0		3	/* Zero window probe timer */
-#define TCP_TIME_KEEPOPEN	4	/* Keepalive timer */
+#define ICSK_TIME_RETRANS	1	/* Retransmit timer */
+#define ICSK_TIME_DACK		2	/* Delayed ack timer */
+#define ICSK_TIME_PROBE0	3	/* Zero window probe timer */
+#define ICSK_TIME_KEEPOPEN	4	/* Keepalive timer */
 
 /* Flags in tp->nonagle */
 #define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
@@ -257,9 +258,9 @@ extern atomic_t tcp_sockets_allocated;
 extern int tcp_memory_pressure;
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-#define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
+#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
 #else
-#define TCP_INET_FAMILY(fam) 1
+#define AF_INET_FAMILY(fam) 1
 #endif
 
 /*
@@ -372,41 +373,42 @@ extern int tcp_rcv_established(struct sock *sk,
 
 extern void tcp_rcv_space_adjust(struct sock *sk);
 
-enum tcp_ack_state_t
-{
-	TCP_ACK_SCHED = 1,
-	TCP_ACK_TIMER = 2,
-	TCP_ACK_PUSHED= 4
+enum inet_csk_ack_state_t {
+	ICSK_ACK_SCHED	= 1,
+	ICSK_ACK_TIMER	= 2,
+	ICSK_ACK_PUSHED	= 4
 };
 
-static inline void tcp_schedule_ack(struct tcp_sock *tp)
+static inline void inet_csk_schedule_ack(struct sock *sk)
 {
-	tp->ack.pending |= TCP_ACK_SCHED;
+	inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED;
 }
 
-static inline int tcp_ack_scheduled(struct tcp_sock *tp)
+static inline int inet_csk_ack_scheduled(const struct sock *sk)
 {
-	return tp->ack.pending&TCP_ACK_SCHED;
+	return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED;
 }
 
-static __inline__ void tcp_dec_quickack_mode(struct tcp_sock *tp, unsigned int pkts)
+static inline void tcp_dec_quickack_mode(struct sock *sk,
+					 const unsigned int pkts)
 {
-	if (tp->ack.quick) {
-		if (pkts >= tp->ack.quick) {
-			tp->ack.quick = 0;
+	struct inet_connection_sock *icsk = inet_csk(sk);
 
+	if (icsk->icsk_ack.quick) {
+		if (pkts >= icsk->icsk_ack.quick) {
+			icsk->icsk_ack.quick = 0;
 			/* Leaving quickack mode we deflate ATO. */
-			tp->ack.ato = TCP_ATO_MIN;
+			icsk->icsk_ack.ato = TCP_ATO_MIN;
 		} else
-			tp->ack.quick -= pkts;
+			icsk->icsk_ack.quick -= pkts;
 	}
 }
 
-extern void tcp_enter_quickack_mode(struct tcp_sock *tp);
+extern void tcp_enter_quickack_mode(struct sock *sk);
 
-static __inline__ void tcp_delack_init(struct tcp_sock *tp)
+static inline void inet_csk_delack_init(struct sock *sk)
 {
-	memset(&tp->ack, 0, sizeof(tp->ack));
+	memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
 }
 
 static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
@@ -440,7 +442,7 @@ extern void tcp_update_metrics(struct sock *sk);
 
 extern void tcp_close(struct sock *sk,
		      long timeout);
-extern struct sock *tcp_accept(struct sock *sk, int flags, int *err);
+extern struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
 extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
 
 extern int tcp_getsockopt(struct sock *sk, int level,
@@ -534,15 +536,18 @@ extern void tcp_cwnd_application_limited(struct sock *sk);
 
 /* tcp_timer.c */
 extern void tcp_init_xmit_timers(struct sock *);
-extern void tcp_clear_xmit_timers(struct sock *);
+static inline void tcp_clear_xmit_timers(struct sock *sk)
+{
+	inet_csk_clear_xmit_timers(sk);
+}
 
-extern void tcp_delete_keepalive_timer(struct sock *);
-extern void tcp_reset_keepalive_timer(struct sock *, unsigned long);
+extern void inet_csk_delete_keepalive_timer(struct sock *sk);
+extern void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
 extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
 extern unsigned int tcp_current_mss(struct sock *sk, int large);
 
-#ifdef TCP_DEBUG
-extern const char tcp_timer_bug_msg[];
+#ifdef INET_CSK_DEBUG
+extern const char inet_csk_timer_bug_msg[];
 #endif
 
 /* tcp_diag.c */
@@ -554,70 +559,58 @@ typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
 extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);
 
-static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
+static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	switch (what) {
-	case TCP_TIME_RETRANS:
-	case TCP_TIME_PROBE0:
-		tp->pending = 0;
+	struct inet_connection_sock *icsk = inet_csk(sk);
 
-#ifdef TCP_CLEAR_TIMERS
-		sk_stop_timer(sk, &tp->retransmit_timer);
+	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
+		icsk->icsk_pending = 0;
+#ifdef INET_CSK_CLEAR_TIMERS
+		sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
 #endif
-		break;
-	case TCP_TIME_DACK:
-		tp->ack.blocked = 0;
-		tp->ack.pending = 0;
-
-#ifdef TCP_CLEAR_TIMERS
-		sk_stop_timer(sk, &tp->delack_timer);
+	} else if (what == ICSK_TIME_DACK) {
+		icsk->icsk_ack.blocked = icsk->icsk_ack.pending = 0;
+#ifdef INET_CSK_CLEAR_TIMERS
+		sk_stop_timer(sk, &icsk->icsk_delack_timer);
 #endif
-		break;
-	default:
-#ifdef TCP_DEBUG
-		printk(tcp_timer_bug_msg);
+	}
+#ifdef INET_CSK_DEBUG
+	else {
+		pr_debug(inet_csk_timer_bug_msg);
+	}
 #endif
-		return;
-	};
-
 }
 
 /*
  *	Reset the retransmission timer
  */
-static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
+static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
+					     unsigned long when)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct inet_connection_sock *icsk = inet_csk(sk);
 
 	if (when > TCP_RTO_MAX) {
-#ifdef TCP_DEBUG
-		printk(KERN_DEBUG "reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, current_text_addr());
+#ifdef INET_CSK_DEBUG
+		pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n",
			 sk, what, when, current_text_addr());
 #endif
 		when = TCP_RTO_MAX;
 	}
 
-	switch (what) {
-	case TCP_TIME_RETRANS:
-	case TCP_TIME_PROBE0:
-		tp->pending = what;
-		tp->timeout = jiffies+when;
-		sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout);
-		break;
-
-	case TCP_TIME_DACK:
-		tp->ack.pending |= TCP_ACK_TIMER;
-		tp->ack.timeout = jiffies+when;
-		sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout);
-		break;
-
-	default:
-#ifdef TCP_DEBUG
-		printk(tcp_timer_bug_msg);
+	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
+		icsk->icsk_pending = what;
+		icsk->icsk_timeout = jiffies + when;
+		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
+	} else if (what == ICSK_TIME_DACK) {
+		icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
+		icsk->icsk_ack.timeout = jiffies + when;
+		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
+	}
+#ifdef INET_CSK_DEBUG
+	else {
+		pr_debug(inet_csk_timer_bug_msg);
+	}
 #endif
-		return;
-	};
 }
@@ -637,7 +630,7 @@ static inline void tcp_initialize_rcv_mss(struct sock *sk)
 	hint = min(hint, TCP_MIN_RCVMSS);
 	hint = max(hint, TCP_MIN_MSS);
 
-	tp->ack.rcv_mss = hint;
+	inet_csk(sk)->icsk_ack.rcv_mss = hint;
 }
 
 static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
@@ -772,7 +765,7 @@ static inline void tcp_packets_out_inc(struct sock *sk,
 
 	tp->packets_out += tcp_skb_pcount(skb);
 	if (!orig)
-		tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
 }
 
 static inline void tcp_packets_out_dec(struct tcp_sock *tp,
@@ -939,8 +932,9 @@ static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss,
 
 static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
 {
-	if (!tp->packets_out && !tp->pending)
-		tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
+	if (!tp->packets_out && !icsk->icsk_pending)
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, icsk->icsk_rto);
 }
 
 static __inline__ void tcp_push_pending_frames(struct sock *sk,
@@ -1021,8 +1015,9 @@ static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 		tp->ucopy.memory = 0;
 	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
 		wake_up_interruptible(sk->sk_sleep);
-		if (!tcp_ack_scheduled(tp))
-			tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4);
+		if (!inet_csk_ack_scheduled(sk))
+			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * TCP_RTO_MIN) / 4);
 	}
 	return 1;
 }
@@ -1055,7 +1050,7 @@ static __inline__ void tcp_set_state(struct sock *sk, int state)
			TCP_INC_STATS(TCP_MIB_ESTABRESETS);
 
		sk->sk_prot->unhash(sk);
-		if (inet_sk(sk)->bind_hash &&
+		if (inet_csk(sk)->icsk_bind_hash &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(&tcp_hashinfo, sk);
		/* fall through */
@@ -1186,51 +1181,55 @@ static inline int tcp_full_space(const struct sock *sk)
 	return tcp_win_from_space(sk->sk_rcvbuf);
 }
 
-static inline void tcp_acceptq_queue(struct sock *sk, struct request_sock *req,
-					 struct sock *child)
+static inline void inet_csk_reqsk_queue_add(struct sock *sk,
+					    struct request_sock *req,
+					    struct sock *child)
 {
-	reqsk_queue_add(&tcp_sk(sk)->accept_queue, req, sk, child);
+	reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
 }
 
-static inline void
-tcp_synq_removed(struct sock *sk, struct request_sock *req)
+static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
+						struct request_sock *req)
 {
-	if (reqsk_queue_removed(&tcp_sk(sk)->accept_queue, req) == 0)
-		tcp_delete_keepalive_timer(sk);
+	if (reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req) == 0)
+		inet_csk_delete_keepalive_timer(sk);
 }
 
-static inline void tcp_synq_added(struct sock *sk)
+static inline void inet_csk_reqsk_queue_added(struct sock *sk,
					      const unsigned long timeout)
 {
-	if (reqsk_queue_added(&tcp_sk(sk)->accept_queue) == 0)
-		tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
+	if (reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue) == 0)
+		inet_csk_reset_keepalive_timer(sk, timeout);
 }
 
-static inline int tcp_synq_len(struct sock *sk)
+static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
 {
-	return reqsk_queue_len(&tcp_sk(sk)->accept_queue);
+	return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
 }
 
-static inline int tcp_synq_young(struct sock *sk)
+static inline int inet_csk_reqsk_queue_young(const struct sock *sk)
 {
-	return reqsk_queue_len_young(&tcp_sk(sk)->accept_queue);
+	return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue);
 }
 
-static inline int tcp_synq_is_full(struct sock *sk)
+static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
 {
-	return reqsk_queue_is_full(&tcp_sk(sk)->accept_queue);
+	return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
 }
 
-static inline void tcp_synq_unlink(struct tcp_sock *tp, struct request_sock *req,
-				   struct request_sock **prev)
+static inline void inet_csk_reqsk_queue_unlink(struct sock *sk,
					       struct request_sock *req,
					       struct request_sock **prev)
 {
-	reqsk_queue_unlink(&tp->accept_queue, req, prev);
+	reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req, prev);
 }
 
-static inline void tcp_synq_drop(struct sock *sk, struct request_sock *req,
-				 struct request_sock **prev)
+static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
					     struct request_sock *req,
					     struct request_sock **prev)
 {
-	tcp_synq_unlink(tcp_sk(sk), req, prev);
-	tcp_synq_removed(sk, req);
+	inet_csk_reqsk_queue_unlink(sk, req, prev);
+	inet_csk_reqsk_queue_removed(sk, req);
 	reqsk_free(req);
 }
 
@@ -1265,12 +1264,13 @@ static inline int keepalive_time_when(const struct tcp_sock *tp)
 	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
 }
 
-static inline int tcp_fin_time(const struct tcp_sock *tp)
+static inline int tcp_fin_time(const struct sock *sk)
 {
-	int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout;
+	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
+	const int rto = inet_csk(sk)->icsk_rto;
 
-	if (fin_timeout < (tp->rto<<2) - (tp->rto>>1))
-		fin_timeout = (tp->rto<<2) - (tp->rto>>1);
+	if (fin_timeout < (rto << 2) - (rto >> 1))
+		fin_timeout = (rto << 2) - (rto >> 1);
 
 	return fin_timeout;
 }
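Taken together, the two inlines above define the arm/clear protocol for the generalized timers: ICSK_TIME_RETRANS and ICSK_TIME_PROBE0 share icsk_retransmit_timer and record their identity in icsk_pending, while delayed ACK keeps its own timer and its own pending bits. A usage sketch under those assumptions (the function itself is illustrative, not from the patch):

static void example_timer_usage(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* First data in flight: arm the retransmit timer for one RTO.
	 * icsk_pending remembers which event owns the shared timer. */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto);

	/* ... everything in flight acknowledged: disarm it again ... */
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);

	/* Delayed ACK is independent: arm it only if none is scheduled. */
	if (!inet_csk_ack_scheduled(sk))
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_ATO_MIN);
}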
diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h
index 64980ee8c92a..c6b84397448d 100644
--- a/include/net/tcp_ecn.h
+++ b/include/net/tcp_ecn.h
@@ -88,7 +88,7 @@ static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)
		 * it is surely retransmit. It is not in ECN RFC,
		 * but Linux follows this rule. */
		else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags)))
-			tcp_enter_quickack_mode(tp);
+			tcp_enter_quickack_mode((struct sock *)tp);
	}
 }
 
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index d94e962958a4..e8d29fe736d2 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -19,6 +19,7 @@
 #include <linux/slab.h>
 #include <linux/wait.h>
 
+#include <net/inet_connection_sock.h>
 #include <net/inet_hashtables.h>
 
 /*
@@ -56,10 +57,9 @@ void inet_bind_bucket_destroy(kmem_cache_t *cachep, struct inet_bind_bucket *tb)
 void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum)
 {
-	struct inet_sock *inet = inet_sk(sk);
-	inet->num = snum;
+	inet_sk(sk)->num = snum;
 	sk_add_bind_node(sk, &tb->owners);
-	inet->bind_hash = tb;
+	inet_csk(sk)->icsk_bind_hash = tb;
 }
 
 EXPORT_SYMBOL(inet_bind_hash);
@@ -69,16 +69,15 @@ EXPORT_SYMBOL(inet_bind_hash);
 */
 static void __inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk)
 {
-	struct inet_sock *inet = inet_sk(sk);
-	const int bhash = inet_bhashfn(inet->num, hashinfo->bhash_size);
+	const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size);
	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
	struct inet_bind_bucket *tb;
 
	spin_lock(&head->lock);
-	tb = inet->bind_hash;
+	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
-	inet->bind_hash = NULL;
-	inet->num = 0;
+	inet_csk(sk)->icsk_bind_hash = NULL;
+	inet_sk(sk)->num = 0;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);
 }
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index ceb577c74237..5cba59b869fe 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -56,6 +56,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
			   struct inet_hashinfo *hashinfo)
 {
 	const struct inet_sock *inet = inet_sk(sk);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct inet_ehash_bucket *ehead = &hashinfo->ehash[sk->sk_hashent];
 	struct inet_bind_hashbucket *bhead;
 	/* Step 1: Put TW into bind hash. Original socket stays there too.
@@ -64,8 +65,8 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
	 */
 	bhead = &hashinfo->bhash[inet_bhashfn(inet->num, hashinfo->bhash_size)];
 	spin_lock(&bhead->lock);
-	tw->tw_tb = inet->bind_hash;
-	BUG_TRAP(inet->bind_hash);
+	tw->tw_tb = icsk->icsk_bind_hash;
+	BUG_TRAP(icsk->icsk_bind_hash);
 	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
 	spin_unlock(&bhead->lock);
 
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 72d014442185..8692cb9d4bdb 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -180,7 +180,7 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
 
	child = tp->af_specific->syn_recv_sock(sk, skb, req, dst);
	if (child)
-		tcp_acceptq_queue(sk, req, child);
+		inet_csk_reqsk_queue_add(sk, req, child);
	else
		reqsk_free(req);
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f1a708bf7a97..8177b86570db 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -313,7 +313,7 @@ EXPORT_SYMBOL(tcp_enter_memory_pressure); | |||
313 | static __inline__ unsigned int tcp_listen_poll(struct sock *sk, | 313 | static __inline__ unsigned int tcp_listen_poll(struct sock *sk, |
314 | poll_table *wait) | 314 | poll_table *wait) |
315 | { | 315 | { |
316 | return !reqsk_queue_empty(&tcp_sk(sk)->accept_queue) ? (POLLIN | POLLRDNORM) : 0; | 316 | return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ? (POLLIN | POLLRDNORM) : 0; |
317 | } | 317 | } |
318 | 318 | ||
319 | /* | 319 | /* |
@@ -458,15 +458,15 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) | |||
458 | int tcp_listen_start(struct sock *sk) | 458 | int tcp_listen_start(struct sock *sk) |
459 | { | 459 | { |
460 | struct inet_sock *inet = inet_sk(sk); | 460 | struct inet_sock *inet = inet_sk(sk); |
461 | struct tcp_sock *tp = tcp_sk(sk); | 461 | struct inet_connection_sock *icsk = inet_csk(sk); |
462 | int rc = reqsk_queue_alloc(&tp->accept_queue, TCP_SYNQ_HSIZE); | 462 | int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, TCP_SYNQ_HSIZE); |
463 | 463 | ||
464 | if (rc != 0) | 464 | if (rc != 0) |
465 | return rc; | 465 | return rc; |
466 | 466 | ||
467 | sk->sk_max_ack_backlog = 0; | 467 | sk->sk_max_ack_backlog = 0; |
468 | sk->sk_ack_backlog = 0; | 468 | sk->sk_ack_backlog = 0; |
469 | tcp_delack_init(tp); | 469 | inet_csk_delack_init(sk); |
470 | 470 | ||
471 | /* There is race window here: we announce ourselves listening, | 471 | /* There is race window here: we announce ourselves listening, |
472 | * but this transition is still not validated by get_port(). | 472 | * but this transition is still not validated by get_port(). |
@@ -484,7 +484,7 @@ int tcp_listen_start(struct sock *sk) | |||
484 | } | 484 | } |
485 | 485 | ||
486 | sk->sk_state = TCP_CLOSE; | 486 | sk->sk_state = TCP_CLOSE; |
487 | __reqsk_queue_destroy(&tp->accept_queue); | 487 | __reqsk_queue_destroy(&icsk->icsk_accept_queue); |
488 | return -EADDRINUSE; | 488 | return -EADDRINUSE; |
489 | } | 489 | } |
490 | 490 | ||
@@ -495,14 +495,14 @@ int tcp_listen_start(struct sock *sk) | |||
495 | 495 | ||
496 | static void tcp_listen_stop (struct sock *sk) | 496 | static void tcp_listen_stop (struct sock *sk) |
497 | { | 497 | { |
498 | struct tcp_sock *tp = tcp_sk(sk); | 498 | struct inet_connection_sock *icsk = inet_csk(sk); |
499 | struct request_sock *acc_req; | 499 | struct request_sock *acc_req; |
500 | struct request_sock *req; | 500 | struct request_sock *req; |
501 | 501 | ||
502 | tcp_delete_keepalive_timer(sk); | 502 | inet_csk_delete_keepalive_timer(sk); |
503 | 503 | ||
504 | /* make all the listen_opt local to us */ | 504 | /* make all the listen_opt local to us */ |
505 | acc_req = reqsk_queue_yank_acceptq(&tp->accept_queue); | 505 | acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue); |
506 | 506 | ||
507 | /* Following specs, it would be better either to send FIN | 507 | /* Following specs, it would be better either to send FIN |
508 | * (and enter FIN-WAIT-1, it is normal close) | 508 | * (and enter FIN-WAIT-1, it is normal close) |
@@ -512,7 +512,7 @@ static void tcp_listen_stop (struct sock *sk) | |||
512 | * To be honest, we are not able to make either | 512 | * To be honest, we are not able to make either |
513 | * of the variants now. --ANK | 513 | * of the variants now. --ANK |
514 | */ | 514 | */ |
515 | reqsk_queue_destroy(&tp->accept_queue); | 515 | reqsk_queue_destroy(&icsk->icsk_accept_queue); |
516 | 516 | ||
517 | while ((req = acc_req) != NULL) { | 517 | while ((req = acc_req) != NULL) { |
518 | struct sock *child = req->sk; | 518 | struct sock *child = req->sk; |
@@ -1039,20 +1039,21 @@ static void cleanup_rbuf(struct sock *sk, int copied) | |||
1039 | BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)); | 1039 | BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)); |
1040 | #endif | 1040 | #endif |
1041 | 1041 | ||
1042 | if (tcp_ack_scheduled(tp)) { | 1042 | if (inet_csk_ack_scheduled(sk)) { |
1043 | const struct inet_connection_sock *icsk = inet_csk(sk); | ||
1043 | /* Delayed ACKs frequently hit locked sockets during bulk | 1044 | /* Delayed ACKs frequently hit locked sockets during bulk |
1044 | * receive. */ | 1045 | * receive. */ |
1045 | if (tp->ack.blocked || | 1046 | if (icsk->icsk_ack.blocked || |
1046 | /* Once-per-two-segments ACK was not sent by tcp_input.c */ | 1047 | /* Once-per-two-segments ACK was not sent by tcp_input.c */ |
1047 | tp->rcv_nxt - tp->rcv_wup > tp->ack.rcv_mss || | 1048 | tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || |
1048 | /* | 1049 | /* |
1049 | * If this read emptied read buffer, we send ACK, if | 1050 | * If this read emptied read buffer, we send ACK, if |
1050 | * connection is not bidirectional, user drained | 1051 | * connection is not bidirectional, user drained |
1051 | * receive buffer and there was a small segment | 1052 | * receive buffer and there was a small segment |
1052 | * in queue. | 1053 | * in queue. |
1053 | */ | 1054 | */ |
1054 | (copied > 0 && (tp->ack.pending & TCP_ACK_PUSHED) && | 1055 | (copied > 0 && (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && |
1055 | !tp->ack.pingpong && !atomic_read(&sk->sk_rmem_alloc))) | 1056 | !icsk->icsk_ack.pingpong && !atomic_read(&sk->sk_rmem_alloc))) |
1056 | time_to_ack = 1; | 1057 | time_to_ack = 1; |
1057 | } | 1058 | } |
1058 | 1059 | ||
@@ -1569,7 +1570,7 @@ void tcp_destroy_sock(struct sock *sk) | |||
1569 | BUG_TRAP(sk_unhashed(sk)); | 1570 | BUG_TRAP(sk_unhashed(sk)); |
1570 | 1571 | ||
1571 | /* If it has not 0 inet_sk(sk)->num, it must be bound */ | 1572 | /* If it has not 0 inet_sk(sk)->num, it must be bound */ |
1572 | BUG_TRAP(!inet_sk(sk)->num || inet_sk(sk)->bind_hash); | 1573 | BUG_TRAP(!inet_sk(sk)->num || inet_csk(sk)->icsk_bind_hash); |
1573 | 1574 | ||
1574 | sk->sk_prot->destroy(sk); | 1575 | sk->sk_prot->destroy(sk); |
1575 | 1576 | ||
@@ -1698,10 +1699,10 @@ adjudge_to_death: | |||
1698 | tcp_send_active_reset(sk, GFP_ATOMIC); | 1699 | tcp_send_active_reset(sk, GFP_ATOMIC); |
1699 | NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER); | 1700 | NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER); |
1700 | } else { | 1701 | } else { |
1701 | int tmo = tcp_fin_time(tp); | 1702 | const int tmo = tcp_fin_time(sk); |
1702 | 1703 | ||
1703 | if (tmo > TCP_TIMEWAIT_LEN) { | 1704 | if (tmo > TCP_TIMEWAIT_LEN) { |
1704 | tcp_reset_keepalive_timer(sk, tcp_fin_time(tp)); | 1705 | inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk)); |
1705 | } else { | 1706 | } else { |
1706 | atomic_inc(&tcp_orphan_count); | 1707 | atomic_inc(&tcp_orphan_count); |
1707 | tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); | 1708 | tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); |
@@ -1746,6 +1747,7 @@ static inline int tcp_need_reset(int state) | |||
1746 | int tcp_disconnect(struct sock *sk, int flags) | 1747 | int tcp_disconnect(struct sock *sk, int flags) |
1747 | { | 1748 | { |
1748 | struct inet_sock *inet = inet_sk(sk); | 1749 | struct inet_sock *inet = inet_sk(sk); |
1750 | struct inet_connection_sock *icsk = inet_csk(sk); | ||
1749 | struct tcp_sock *tp = tcp_sk(sk); | 1751 | struct tcp_sock *tp = tcp_sk(sk); |
1750 | int err = 0; | 1752 | int err = 0; |
1751 | int old_state = sk->sk_state; | 1753 | int old_state = sk->sk_state; |
@@ -1782,7 +1784,7 @@ int tcp_disconnect(struct sock *sk, int flags) | |||
1782 | tp->srtt = 0; | 1784 | tp->srtt = 0; |
1783 | if ((tp->write_seq += tp->max_window + 2) == 0) | 1785 | if ((tp->write_seq += tp->max_window + 2) == 0) |
1784 | tp->write_seq = 1; | 1786 | tp->write_seq = 1; |
1785 | tp->backoff = 0; | 1787 | icsk->icsk_backoff = 0; |
1786 | tp->snd_cwnd = 2; | 1788 | tp->snd_cwnd = 2; |
1787 | tp->probes_out = 0; | 1789 | tp->probes_out = 0; |
1788 | tp->packets_out = 0; | 1790 | tp->packets_out = 0; |
@@ -1790,13 +1792,13 @@ int tcp_disconnect(struct sock *sk, int flags) | |||
1790 | tp->snd_cwnd_cnt = 0; | 1792 | tp->snd_cwnd_cnt = 0; |
1791 | tcp_set_ca_state(tp, TCP_CA_Open); | 1793 | tcp_set_ca_state(tp, TCP_CA_Open); |
1792 | tcp_clear_retrans(tp); | 1794 | tcp_clear_retrans(tp); |
1793 | tcp_delack_init(tp); | 1795 | inet_csk_delack_init(sk); |
1794 | sk->sk_send_head = NULL; | 1796 | sk->sk_send_head = NULL; |
1795 | tp->rx_opt.saw_tstamp = 0; | 1797 | tp->rx_opt.saw_tstamp = 0; |
1796 | tcp_sack_reset(&tp->rx_opt); | 1798 | tcp_sack_reset(&tp->rx_opt); |
1797 | __sk_dst_reset(sk); | 1799 | __sk_dst_reset(sk); |
1798 | 1800 | ||
1799 | BUG_TRAP(!inet->num || inet->bind_hash); | 1801 | BUG_TRAP(!inet->num || icsk->icsk_bind_hash); |
1800 | 1802 | ||
1801 | sk->sk_error_report(sk); | 1803 | sk->sk_error_report(sk); |
1802 | return err; | 1804 | return err; |
@@ -1808,7 +1810,7 @@ int tcp_disconnect(struct sock *sk, int flags) | |||
1808 | */ | 1810 | */ |
1809 | static int wait_for_connect(struct sock *sk, long timeo) | 1811 | static int wait_for_connect(struct sock *sk, long timeo) |
1810 | { | 1812 | { |
1811 | struct tcp_sock *tp = tcp_sk(sk); | 1813 | struct inet_connection_sock *icsk = inet_csk(sk); |
1812 | DEFINE_WAIT(wait); | 1814 | DEFINE_WAIT(wait); |
1813 | int err; | 1815 | int err; |
1814 | 1816 | ||
@@ -1830,11 +1832,11 @@ static int wait_for_connect(struct sock *sk, long timeo) | |||
1830 | prepare_to_wait_exclusive(sk->sk_sleep, &wait, | 1832 | prepare_to_wait_exclusive(sk->sk_sleep, &wait, |
1831 | TASK_INTERRUPTIBLE); | 1833 | TASK_INTERRUPTIBLE); |
1832 | release_sock(sk); | 1834 | release_sock(sk); |
1833 | if (reqsk_queue_empty(&tp->accept_queue)) | 1835 | if (reqsk_queue_empty(&icsk->icsk_accept_queue)) |
1834 | timeo = schedule_timeout(timeo); | 1836 | timeo = schedule_timeout(timeo); |
1835 | lock_sock(sk); | 1837 | lock_sock(sk); |
1836 | err = 0; | 1838 | err = 0; |
1837 | if (!reqsk_queue_empty(&tp->accept_queue)) | 1839 | if (!reqsk_queue_empty(&icsk->icsk_accept_queue)) |
1838 | break; | 1840 | break; |
1839 | err = -EINVAL; | 1841 | err = -EINVAL; |
1840 | if (sk->sk_state != TCP_LISTEN) | 1842 | if (sk->sk_state != TCP_LISTEN) |
@@ -1854,9 +1856,9 @@ static int wait_for_connect(struct sock *sk, long timeo) | |||
1854 | * This will accept the next outstanding connection. | 1856 | * This will accept the next outstanding connection. |
1855 | */ | 1857 | */ |
1856 | 1858 | ||
1857 | struct sock *tcp_accept(struct sock *sk, int flags, int *err) | 1859 | struct sock *inet_csk_accept(struct sock *sk, int flags, int *err) |
1858 | { | 1860 | { |
1859 | struct tcp_sock *tp = tcp_sk(sk); | 1861 | struct inet_connection_sock *icsk = inet_csk(sk); |
1860 | struct sock *newsk; | 1862 | struct sock *newsk; |
1861 | int error; | 1863 | int error; |
1862 | 1864 | ||
@@ -1870,7 +1872,7 @@ struct sock *tcp_accept(struct sock *sk, int flags, int *err) | |||
1870 | goto out_err; | 1872 | goto out_err; |
1871 | 1873 | ||
1872 | /* Find already established connection */ | 1874 | /* Find already established connection */ |
1873 | if (reqsk_queue_empty(&tp->accept_queue)) { | 1875 | if (reqsk_queue_empty(&icsk->icsk_accept_queue)) { |
1874 | long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); | 1876 | long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); |
1875 | 1877 | ||
1876 | /* If this is a non blocking socket don't sleep */ | 1878 | /* If this is a non blocking socket don't sleep */ |
@@ -1883,7 +1885,7 @@ struct sock *tcp_accept(struct sock *sk, int flags, int *err) | |||
1883 | goto out_err; | 1885 | goto out_err; |
1884 | } | 1886 | } |
1885 | 1887 | ||
1886 | newsk = reqsk_queue_get_child(&tp->accept_queue, sk); | 1888 | newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk); |
1887 | BUG_TRAP(newsk->sk_state != TCP_SYN_RECV); | 1889 | BUG_TRAP(newsk->sk_state != TCP_SYN_RECV); |
1888 | out: | 1890 | out: |
1889 | release_sock(sk); | 1891 | release_sock(sk); |
@@ -1901,6 +1903,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, | |||
1901 | int optlen) | 1903 | int optlen) |
1902 | { | 1904 | { |
1903 | struct tcp_sock *tp = tcp_sk(sk); | 1905 | struct tcp_sock *tp = tcp_sk(sk); |
1906 | struct inet_connection_sock *icsk = inet_csk(sk); | ||
1904 | int val; | 1907 | int val; |
1905 | int err = 0; | 1908 | int err = 0; |
1906 | 1909 | ||
@@ -1999,7 +2002,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, | |||
1999 | elapsed = tp->keepalive_time - elapsed; | 2002 | elapsed = tp->keepalive_time - elapsed; |
2000 | else | 2003 | else |
2001 | elapsed = 0; | 2004 | elapsed = 0; |
2002 | tcp_reset_keepalive_timer(sk, elapsed); | 2005 | inet_csk_reset_keepalive_timer(sk, elapsed); |
2003 | } | 2006 | } |
2004 | } | 2007 | } |
2005 | break; | 2008 | break; |
@@ -2019,7 +2022,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, | |||
2019 | if (val < 1 || val > MAX_TCP_SYNCNT) | 2022 | if (val < 1 || val > MAX_TCP_SYNCNT) |
2020 | err = -EINVAL; | 2023 | err = -EINVAL; |
2021 | else | 2024 | else |
2022 | tp->syn_retries = val; | 2025 | icsk->icsk_syn_retries = val; |
2023 | break; | 2026 | break; |
2024 | 2027 | ||
2025 | case TCP_LINGER2: | 2028 | case TCP_LINGER2: |
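The TCP_SYNCNT branch above now stores the per-socket SYN retry limit in icsk_syn_retries, rejecting values outside 1..MAX_TCP_SYNCNT. The option is driven from userspace like this (a minimal sketch; the value 3 is arbitrary, and error handling is trimmed):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int syn_retries = 3;	/* arbitrary; must be 1..MAX_TCP_SYNCNT */
	socklen_t len = sizeof(syn_retries);

	if (setsockopt(fd, IPPROTO_TCP, TCP_SYNCNT,
		       &syn_retries, sizeof(syn_retries)) < 0)
		perror("setsockopt(TCP_SYNCNT)");	/* EINVAL if out of range */

	/* Reads back icsk_syn_retries, or the sysctl default if unset. */
	getsockopt(fd, IPPROTO_TCP, TCP_SYNCNT, &syn_retries, &len);
	printf("SYN retries now %d\n", syn_retries);
	return 0;
}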
@@ -2058,16 +2061,16 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, | |||
2058 | 2061 | ||
2059 | case TCP_QUICKACK: | 2062 | case TCP_QUICKACK: |
2060 | if (!val) { | 2063 | if (!val) { |
2061 | tp->ack.pingpong = 1; | 2064 | icsk->icsk_ack.pingpong = 1; |
2062 | } else { | 2065 | } else { |
2063 | tp->ack.pingpong = 0; | 2066 | icsk->icsk_ack.pingpong = 0; |
2064 | if ((1 << sk->sk_state) & | 2067 | if ((1 << sk->sk_state) & |
2065 | (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && | 2068 | (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && |
2066 | tcp_ack_scheduled(tp)) { | 2069 | inet_csk_ack_scheduled(sk)) { |
2067 | tp->ack.pending |= TCP_ACK_PUSHED; | 2070 | icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; |
2068 | cleanup_rbuf(sk, 1); | 2071 | cleanup_rbuf(sk, 1); |
2069 | if (!(val & 1)) | 2072 | if (!(val & 1)) |
2070 | tp->ack.pingpong = 1; | 2073 | icsk->icsk_ack.pingpong = 1; |
2071 | } | 2074 | } |
2072 | } | 2075 | } |
2073 | break; | 2076 | break; |
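The TCP_QUICKACK branch just rewritten defines the option's semantics: a non-zero value clears pingpong (delayed-ACK) mode and, if an ACK is already scheduled, pushes it out through cleanup_rbuf(); bit 0 of the value then decides whether the socket drops back into pingpong mode. From an application this is simply:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Ask for immediate ACKs on the next exchanges. The option is not
 * sticky, so latency-sensitive code typically re-arms it per read. */
static void request_quick_acks(int fd)
{
	int one = 1;
	setsockopt(fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
}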
@@ -2084,15 +2087,16 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, | |||
2084 | void tcp_get_info(struct sock *sk, struct tcp_info *info) | 2087 | void tcp_get_info(struct sock *sk, struct tcp_info *info) |
2085 | { | 2088 | { |
2086 | struct tcp_sock *tp = tcp_sk(sk); | 2089 | struct tcp_sock *tp = tcp_sk(sk); |
2090 | const struct inet_connection_sock *icsk = inet_csk(sk); | ||
2087 | u32 now = tcp_time_stamp; | 2091 | u32 now = tcp_time_stamp; |
2088 | 2092 | ||
2089 | memset(info, 0, sizeof(*info)); | 2093 | memset(info, 0, sizeof(*info)); |
2090 | 2094 | ||
2091 | info->tcpi_state = sk->sk_state; | 2095 | info->tcpi_state = sk->sk_state; |
2092 | info->tcpi_ca_state = tp->ca_state; | 2096 | info->tcpi_ca_state = tp->ca_state; |
2093 | info->tcpi_retransmits = tp->retransmits; | 2097 | info->tcpi_retransmits = icsk->icsk_retransmits; |
2094 | info->tcpi_probes = tp->probes_out; | 2098 | info->tcpi_probes = tp->probes_out; |
2095 | info->tcpi_backoff = tp->backoff; | 2099 | info->tcpi_backoff = icsk->icsk_backoff; |
2096 | 2100 | ||
2097 | if (tp->rx_opt.tstamp_ok) | 2101 | if (tp->rx_opt.tstamp_ok) |
2098 | info->tcpi_options |= TCPI_OPT_TIMESTAMPS; | 2102 | info->tcpi_options |= TCPI_OPT_TIMESTAMPS; |
@@ -2107,10 +2111,10 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) | |||
2107 | if (tp->ecn_flags&TCP_ECN_OK) | 2111 | if (tp->ecn_flags&TCP_ECN_OK) |
2108 | info->tcpi_options |= TCPI_OPT_ECN; | 2112 | info->tcpi_options |= TCPI_OPT_ECN; |
2109 | 2113 | ||
2110 | info->tcpi_rto = jiffies_to_usecs(tp->rto); | 2114 | info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); |
2111 | info->tcpi_ato = jiffies_to_usecs(tp->ack.ato); | 2115 | info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); |
2112 | info->tcpi_snd_mss = tp->mss_cache; | 2116 | info->tcpi_snd_mss = tp->mss_cache; |
2113 | info->tcpi_rcv_mss = tp->ack.rcv_mss; | 2117 | info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; |
2114 | 2118 | ||
2115 | info->tcpi_unacked = tp->packets_out; | 2119 | info->tcpi_unacked = tp->packets_out; |
2116 | info->tcpi_sacked = tp->sacked_out; | 2120 | info->tcpi_sacked = tp->sacked_out; |
@@ -2119,7 +2123,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) | |||
2119 | info->tcpi_fackets = tp->fackets_out; | 2123 | info->tcpi_fackets = tp->fackets_out; |
2120 | 2124 | ||
2121 | info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); | 2125 | info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); |
2122 | info->tcpi_last_data_recv = jiffies_to_msecs(now - tp->ack.lrcvtime); | 2126 | info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); |
2123 | info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); | 2127 | info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); |
2124 | 2128 | ||
2125 | info->tcpi_pmtu = tp->pmtu_cookie; | 2129 | info->tcpi_pmtu = tp->pmtu_cookie; |
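All of the icsk-backed fields that tcp_get_info() fills are visible to applications through the TCP_INFO sockopt; the jiffies_to_usecs() calls above mean rto and ato are reported in microseconds. A short sketch, assuming fd is a connected TCP socket:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

static void print_tcp_timers(int fd)
{
	struct tcp_info info;
	socklen_t len = sizeof(info);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
		printf("rto=%uus ato=%uus snd_mss=%u rcv_mss=%u\n",
		       info.tcpi_rto, info.tcpi_ato,
		       info.tcpi_snd_mss, info.tcpi_rcv_mss);
}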
@@ -2179,7 +2183,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, | |||
2179 | val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes; | 2183 | val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes; |
2180 | break; | 2184 | break; |
2181 | case TCP_SYNCNT: | 2185 | case TCP_SYNCNT: |
2182 | val = tp->syn_retries ? : sysctl_tcp_syn_retries; | 2186 | val = inet_csk(sk)->icsk_syn_retries ? : sysctl_tcp_syn_retries; |
2183 | break; | 2187 | break; |
2184 | case TCP_LINGER2: | 2188 | case TCP_LINGER2: |
2185 | val = tp->linger2; | 2189 | val = tp->linger2; |
@@ -2209,7 +2213,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, | |||
2209 | return 0; | 2213 | return 0; |
2210 | } | 2214 | } |
2211 | case TCP_QUICKACK: | 2215 | case TCP_QUICKACK: |
2212 | val = !tp->ack.pingpong; | 2216 | val = !inet_csk(sk)->icsk_ack.pingpong; |
2213 | break; | 2217 | break; |
2214 | 2218 | ||
2215 | case TCP_CONGESTION: | 2219 | case TCP_CONGESTION: |
@@ -2340,7 +2344,7 @@ void __init tcp_init(void) | |||
2340 | tcp_register_congestion_control(&tcp_reno); | 2344 | tcp_register_congestion_control(&tcp_reno); |
2341 | } | 2345 | } |
2342 | 2346 | ||
2343 | EXPORT_SYMBOL(tcp_accept); | 2347 | EXPORT_SYMBOL(inet_csk_accept); |
2344 | EXPORT_SYMBOL(tcp_close); | 2348 | EXPORT_SYMBOL(tcp_close); |
2345 | EXPORT_SYMBOL(tcp_destroy_sock); | 2349 | EXPORT_SYMBOL(tcp_destroy_sock); |
2346 | EXPORT_SYMBOL(tcp_disconnect); | 2350 | EXPORT_SYMBOL(tcp_disconnect); |
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c index 60c6a797cc50..5f4c74f45e82 100644 --- a/net/ipv4/tcp_diag.c +++ b/net/ipv4/tcp_diag.c | |||
@@ -48,8 +48,9 @@ static struct sock *tcpnl; | |||
48 | static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk, | 48 | static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk, |
49 | int ext, u32 pid, u32 seq, u16 nlmsg_flags) | 49 | int ext, u32 pid, u32 seq, u16 nlmsg_flags) |
50 | { | 50 | { |
51 | struct inet_sock *inet = inet_sk(sk); | 51 | const struct inet_sock *inet = inet_sk(sk); |
52 | struct tcp_sock *tp = tcp_sk(sk); | 52 | struct tcp_sock *tp = tcp_sk(sk); |
53 | const struct inet_connection_sock *icsk = inet_csk(sk); | ||
53 | struct tcpdiagmsg *r; | 54 | struct tcpdiagmsg *r; |
54 | struct nlmsghdr *nlh; | 55 | struct nlmsghdr *nlh; |
55 | struct tcp_info *info = NULL; | 56 | struct tcp_info *info = NULL; |
@@ -129,14 +130,14 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk, | |||
129 | 130 | ||
130 | #define EXPIRES_IN_MS(tmo) ((tmo-jiffies)*1000+HZ-1)/HZ | 131 | #define EXPIRES_IN_MS(tmo) ((tmo-jiffies)*1000+HZ-1)/HZ |
131 | 132 | ||
132 | if (tp->pending == TCP_TIME_RETRANS) { | 133 | if (icsk->icsk_pending == ICSK_TIME_RETRANS) { |
133 | r->tcpdiag_timer = 1; | 134 | r->tcpdiag_timer = 1; |
134 | r->tcpdiag_retrans = tp->retransmits; | 135 | r->tcpdiag_retrans = icsk->icsk_retransmits; |
135 | r->tcpdiag_expires = EXPIRES_IN_MS(tp->timeout); | 136 | r->tcpdiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout); |
136 | } else if (tp->pending == TCP_TIME_PROBE0) { | 137 | } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { |
137 | r->tcpdiag_timer = 4; | 138 | r->tcpdiag_timer = 4; |
138 | r->tcpdiag_retrans = tp->probes_out; | 139 | r->tcpdiag_retrans = tp->probes_out; |
139 | r->tcpdiag_expires = EXPIRES_IN_MS(tp->timeout); | 140 | r->tcpdiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout); |
140 | } else if (timer_pending(&sk->sk_timer)) { | 141 | } else if (timer_pending(&sk->sk_timer)) { |
141 | r->tcpdiag_timer = 2; | 142 | r->tcpdiag_timer = 2; |
142 | r->tcpdiag_retrans = tp->probes_out; | 143 | r->tcpdiag_retrans = tp->probes_out; |
@@ -497,7 +498,7 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk, | |||
497 | { | 498 | { |
498 | struct tcpdiag_entry entry; | 499 | struct tcpdiag_entry entry; |
499 | struct tcpdiagreq *r = NLMSG_DATA(cb->nlh); | 500 | struct tcpdiagreq *r = NLMSG_DATA(cb->nlh); |
500 | struct tcp_sock *tp = tcp_sk(sk); | 501 | struct inet_connection_sock *icsk = inet_csk(sk); |
501 | struct listen_sock *lopt; | 502 | struct listen_sock *lopt; |
502 | struct rtattr *bc = NULL; | 503 | struct rtattr *bc = NULL; |
503 | struct inet_sock *inet = inet_sk(sk); | 504 | struct inet_sock *inet = inet_sk(sk); |
@@ -513,9 +514,9 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk, | |||
513 | 514 | ||
514 | entry.family = sk->sk_family; | 515 | entry.family = sk->sk_family; |
515 | 516 | ||
516 | read_lock_bh(&tp->accept_queue.syn_wait_lock); | 517 | read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); |
517 | 518 | ||
518 | lopt = tp->accept_queue.listen_opt; | 519 | lopt = icsk->icsk_accept_queue.listen_opt; |
519 | if (!lopt || !lopt->qlen) | 520 | if (!lopt || !lopt->qlen) |
520 | goto out; | 521 | goto out; |
521 | 522 | ||
@@ -572,7 +573,7 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk, | |||
572 | } | 573 | } |
573 | 574 | ||
574 | out: | 575 | out: |
575 | read_unlock_bh(&tp->accept_queue.syn_wait_lock); | 576 | read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); |
576 | 577 | ||
577 | return err; | 578 | return err; |
578 | } | 579 | } |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index ffa24025cd02..8a8c5c2d90cb 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -114,20 +114,21 @@ int sysctl_tcp_moderate_rcvbuf = 1; | |||
114 | /* Adapt the MSS value used to make the delayed ACK decision to the | 114 | /* Adapt the MSS value used to make the delayed ACK decision to the |
115 | * real world. | 115 | * real world. |
116 | */ | 116 | */ |
117 | static inline void tcp_measure_rcv_mss(struct tcp_sock *tp, | 117 | static inline void tcp_measure_rcv_mss(struct sock *sk, |
118 | struct sk_buff *skb) | 118 | const struct sk_buff *skb) |
119 | { | 119 | { |
120 | unsigned int len, lss; | 120 | struct inet_connection_sock *icsk = inet_csk(sk); |
121 | const unsigned int lss = icsk->icsk_ack.last_seg_size; | ||
122 | unsigned int len; | ||
121 | 123 | ||
122 | lss = tp->ack.last_seg_size; | 124 | icsk->icsk_ack.last_seg_size = 0; |
123 | tp->ack.last_seg_size = 0; | ||
124 | 125 | ||
125 | /* skb->len may jitter because of SACKs, even if peer | 126 | /* skb->len may jitter because of SACKs, even if peer |
126 | * sends good full-sized frames. | 127 | * sends good full-sized frames. |
127 | */ | 128 | */ |
128 | len = skb->len; | 129 | len = skb->len; |
129 | if (len >= tp->ack.rcv_mss) { | 130 | if (len >= icsk->icsk_ack.rcv_mss) { |
130 | tp->ack.rcv_mss = len; | 131 | icsk->icsk_ack.rcv_mss = len; |
131 | } else { | 132 | } else { |
132 | /* Otherwise, we make a more careful check taking into account | 133 | /* Otherwise, we make a more careful check taking into account |
133 | * that the SACK block is variable in size. | 134 | * that the SACK block is variable in size. |
@@ -147,41 +148,44 @@ static inline void tcp_measure_rcv_mss(struct tcp_sock *tp, | |||
147 | * tcp header plus fixed timestamp option length. | 148 | * tcp header plus fixed timestamp option length. |
148 | * Resulting "len" is MSS free of SACK jitter. | 149 | * Resulting "len" is MSS free of SACK jitter. |
149 | */ | 150 | */ |
150 | len -= tp->tcp_header_len; | 151 | len -= tcp_sk(sk)->tcp_header_len; |
151 | tp->ack.last_seg_size = len; | 152 | icsk->icsk_ack.last_seg_size = len; |
152 | if (len == lss) { | 153 | if (len == lss) { |
153 | tp->ack.rcv_mss = len; | 154 | icsk->icsk_ack.rcv_mss = len; |
154 | return; | 155 | return; |
155 | } | 156 | } |
156 | } | 157 | } |
157 | tp->ack.pending |= TCP_ACK_PUSHED; | 158 | icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; |
158 | } | 159 | } |
159 | } | 160 | } |
160 | 161 | ||
161 | static void tcp_incr_quickack(struct tcp_sock *tp) | 162 | static void tcp_incr_quickack(struct sock *sk) |
162 | { | 163 | { |
163 | unsigned quickacks = tp->rcv_wnd/(2*tp->ack.rcv_mss); | 164 | struct inet_connection_sock *icsk = inet_csk(sk); |
165 | unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); | ||
164 | 166 | ||
165 | if (quickacks==0) | 167 | if (quickacks==0) |
166 | quickacks=2; | 168 | quickacks=2; |
167 | if (quickacks > tp->ack.quick) | 169 | if (quickacks > icsk->icsk_ack.quick) |
168 | tp->ack.quick = min(quickacks, TCP_MAX_QUICKACKS); | 170 | icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS); |
169 | } | 171 | } |
170 | 172 | ||
171 | void tcp_enter_quickack_mode(struct tcp_sock *tp) | 173 | void tcp_enter_quickack_mode(struct sock *sk) |
172 | { | 174 | { |
173 | tcp_incr_quickack(tp); | 175 | struct inet_connection_sock *icsk = inet_csk(sk); |
174 | tp->ack.pingpong = 0; | 176 | tcp_incr_quickack(sk); |
175 | tp->ack.ato = TCP_ATO_MIN; | 177 | icsk->icsk_ack.pingpong = 0; |
178 | icsk->icsk_ack.ato = TCP_ATO_MIN; | ||
176 | } | 179 | } |
177 | 180 | ||
178 | /* Send ACKs quickly, if "quick" count is not exhausted | 181 | /* Send ACKs quickly, if "quick" count is not exhausted |
179 | * and the session is not interactive. | 182 | * and the session is not interactive. |
180 | */ | 183 | */ |
181 | 184 | ||
182 | static __inline__ int tcp_in_quickack_mode(struct tcp_sock *tp) | 185 | static inline int tcp_in_quickack_mode(const struct sock *sk) |
183 | { | 186 | { |
184 | return (tp->ack.quick && !tp->ack.pingpong); | 187 | const struct inet_connection_sock *icsk = inet_csk(sk); |
188 | return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong; | ||
185 | } | 189 | } |
186 | 190 | ||
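tcp_incr_quickack() sizes the quick-ACK budget from the receive window: roughly half a window's worth of rcv_mss-sized segments, never fewer than 2 and capped at TCP_MAX_QUICKACKS. The arithmetic in isolation (16 is the cap in this era's include/net/tcp.h):

#include <stdio.h>

#define TCP_MAX_QUICKACKS 16

static unsigned int quickack_budget(unsigned int rcv_wnd,
				    unsigned int rcv_mss)
{
	unsigned int quickacks = rcv_wnd / (2 * rcv_mss);

	if (quickacks == 0)
		quickacks = 2;
	if (quickacks > TCP_MAX_QUICKACKS)
		quickacks = TCP_MAX_QUICKACKS;
	return quickacks;
}

int main(void)
{
	printf("%u\n", quickack_budget(65535, 1460));	/* 22, capped to 16 */
	printf("%u\n", quickack_budget(8192, 1460));	/* 2 */
	return 0;
}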
187 | /* Buffer size and advertised window tuning. | 191 | /* Buffer size and advertised window tuning. |
@@ -224,8 +228,8 @@ static void tcp_fixup_sndbuf(struct sock *sk) | |||
224 | */ | 228 | */ |
225 | 229 | ||
226 | /* Slow part of check#2. */ | 230 | /* Slow part of check#2. */ |
227 | static int __tcp_grow_window(struct sock *sk, struct tcp_sock *tp, | 231 | static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp, |
228 | struct sk_buff *skb) | 232 | const struct sk_buff *skb) |
229 | { | 233 | { |
230 | /* Optimize this! */ | 234 | /* Optimize this! */ |
231 | int truesize = tcp_win_from_space(skb->truesize)/2; | 235 | int truesize = tcp_win_from_space(skb->truesize)/2; |
@@ -233,7 +237,7 @@ static int __tcp_grow_window(struct sock *sk, struct tcp_sock *tp, | |||
233 | 237 | ||
234 | while (tp->rcv_ssthresh <= window) { | 238 | while (tp->rcv_ssthresh <= window) { |
235 | if (truesize <= skb->len) | 239 | if (truesize <= skb->len) |
236 | return 2*tp->ack.rcv_mss; | 240 | return 2 * inet_csk(sk)->icsk_ack.rcv_mss; |
237 | 241 | ||
238 | truesize >>= 1; | 242 | truesize >>= 1; |
239 | window >>= 1; | 243 | window >>= 1; |
@@ -260,7 +264,7 @@ static inline void tcp_grow_window(struct sock *sk, struct tcp_sock *tp, | |||
260 | 264 | ||
261 | if (incr) { | 265 | if (incr) { |
262 | tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp); | 266 | tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp); |
263 | tp->ack.quick |= 1; | 267 | inet_csk(sk)->icsk_ack.quick |= 1; |
264 | } | 268 | } |
265 | } | 269 | } |
266 | } | 270 | } |
@@ -325,7 +329,7 @@ static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp) | |||
325 | unsigned int app_win = tp->rcv_nxt - tp->copied_seq; | 329 | unsigned int app_win = tp->rcv_nxt - tp->copied_seq; |
326 | int ofo_win = 0; | 330 | int ofo_win = 0; |
327 | 331 | ||
328 | tp->ack.quick = 0; | 332 | inet_csk(sk)->icsk_ack.quick = 0; |
329 | 333 | ||
330 | skb_queue_walk(&tp->out_of_order_queue, skb) { | 334 | skb_queue_walk(&tp->out_of_order_queue, skb) { |
331 | ofo_win += skb->len; | 335 | ofo_win += skb->len; |
@@ -346,8 +350,8 @@ static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp) | |||
346 | app_win += ofo_win; | 350 | app_win += ofo_win; |
347 | if (atomic_read(&sk->sk_rmem_alloc) >= 2 * sk->sk_rcvbuf) | 351 | if (atomic_read(&sk->sk_rmem_alloc) >= 2 * sk->sk_rcvbuf) |
348 | app_win >>= 1; | 352 | app_win >>= 1; |
349 | if (app_win > tp->ack.rcv_mss) | 353 | if (app_win > inet_csk(sk)->icsk_ack.rcv_mss) |
350 | app_win -= tp->ack.rcv_mss; | 354 | app_win -= inet_csk(sk)->icsk_ack.rcv_mss; |
351 | app_win = max(app_win, 2U*tp->advmss); | 355 | app_win = max(app_win, 2U*tp->advmss); |
352 | 356 | ||
353 | if (!ofo_win) | 357 | if (!ofo_win) |
@@ -415,11 +419,12 @@ new_measure: | |||
415 | tp->rcv_rtt_est.time = tcp_time_stamp; | 419 | tp->rcv_rtt_est.time = tcp_time_stamp; |
416 | } | 420 | } |
417 | 421 | ||
418 | static inline void tcp_rcv_rtt_measure_ts(struct tcp_sock *tp, struct sk_buff *skb) | 422 | static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, const struct sk_buff *skb) |
419 | { | 423 | { |
424 | struct tcp_sock *tp = tcp_sk(sk); | ||
420 | if (tp->rx_opt.rcv_tsecr && | 425 | if (tp->rx_opt.rcv_tsecr && |
421 | (TCP_SKB_CB(skb)->end_seq - | 426 | (TCP_SKB_CB(skb)->end_seq - |
422 | TCP_SKB_CB(skb)->seq >= tp->ack.rcv_mss)) | 427 | TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) |
423 | tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0); | 428 | tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0); |
424 | } | 429 | } |
425 | 430 | ||
@@ -492,41 +497,42 @@ new_measure: | |||
492 | */ | 497 | */ |
493 | static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb) | 498 | static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb) |
494 | { | 499 | { |
500 | struct inet_connection_sock *icsk = inet_csk(sk); | ||
495 | u32 now; | 501 | u32 now; |
496 | 502 | ||
497 | tcp_schedule_ack(tp); | 503 | inet_csk_schedule_ack(sk); |
498 | 504 | ||
499 | tcp_measure_rcv_mss(tp, skb); | 505 | tcp_measure_rcv_mss(sk, skb); |
500 | 506 | ||
501 | tcp_rcv_rtt_measure(tp); | 507 | tcp_rcv_rtt_measure(tp); |
502 | 508 | ||
503 | now = tcp_time_stamp; | 509 | now = tcp_time_stamp; |
504 | 510 | ||
505 | if (!tp->ack.ato) { | 511 | if (!icsk->icsk_ack.ato) { |
506 | /* The _first_ data packet received, initialize | 512 | /* The _first_ data packet received, initialize |
507 | * delayed ACK engine. | 513 | * delayed ACK engine. |
508 | */ | 514 | */ |
509 | tcp_incr_quickack(tp); | 515 | tcp_incr_quickack(sk); |
510 | tp->ack.ato = TCP_ATO_MIN; | 516 | icsk->icsk_ack.ato = TCP_ATO_MIN; |
511 | } else { | 517 | } else { |
512 | int m = now - tp->ack.lrcvtime; | 518 | int m = now - icsk->icsk_ack.lrcvtime; |
513 | 519 | ||
514 | if (m <= TCP_ATO_MIN/2) { | 520 | if (m <= TCP_ATO_MIN/2) { |
515 | /* The fastest case is the first. */ | 521 | /* The fastest case is the first. */ |
516 | tp->ack.ato = (tp->ack.ato>>1) + TCP_ATO_MIN/2; | 522 | icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2; |
517 | } else if (m < tp->ack.ato) { | 523 | } else if (m < icsk->icsk_ack.ato) { |
518 | tp->ack.ato = (tp->ack.ato>>1) + m; | 524 | icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m; |
519 | if (tp->ack.ato > tp->rto) | 525 | if (icsk->icsk_ack.ato > icsk->icsk_rto) |
520 | tp->ack.ato = tp->rto; | 526 | icsk->icsk_ack.ato = icsk->icsk_rto; |
521 | } else if (m > tp->rto) { | 527 | } else if (m > icsk->icsk_rto) { |
522 | /* Too long gap. Apparently sender failed to | 528 | /* Too long gap. Apparently sender failed to |
523 | * restart window, so that we send ACKs quickly. | 529 | * restart window, so that we send ACKs quickly. |
524 | */ | 530 | */ |
525 | tcp_incr_quickack(tp); | 531 | tcp_incr_quickack(sk); |
526 | sk_stream_mem_reclaim(sk); | 532 | sk_stream_mem_reclaim(sk); |
527 | } | 533 | } |
528 | } | 534 | } |
529 | tp->ack.lrcvtime = now; | 535 | icsk->icsk_ack.lrcvtime = now; |
530 | 536 | ||
531 | TCP_ECN_check_ce(tp, skb); | 537 | TCP_ECN_check_ce(tp, skb); |
532 | 538 | ||
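The middle of the hunk above is the delayed-ACK ato estimator: a back-to-back arrival (m <= TCP_ATO_MIN/2) pulls ato toward its floor, a moderate gap is blended in with an EWMA bounded by the RTO, and a gap longer than the RTO re-enters quickack mode instead. The same arithmetic as a standalone sketch (times in ms for readability; the kernel works in jiffies):

#include <stdio.h>

#define TCP_ATO_MIN 40	/* ~HZ/25 at HZ=1000, expressed in ms */

static unsigned int ato_update(unsigned int ato, unsigned int m,
			       unsigned int rto)
{
	if (m <= TCP_ATO_MIN / 2) {
		ato = (ato >> 1) + TCP_ATO_MIN / 2;
	} else if (m < ato) {
		ato = (ato >> 1) + m;
		if (ato > rto)
			ato = rto;
	}
	/* m > rto is the quickack-mode branch in the kernel code. */
	return ato;
}

int main(void)
{
	unsigned int ato = TCP_ATO_MIN;

	for (unsigned int m = 5; m <= 45; m += 20)
		printf("m=%2u -> ato=%u\n", m, ato = ato_update(ato, m, 200));
	return 0;
}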
@@ -611,8 +617,9 @@ static void tcp_rtt_estimator(struct tcp_sock *tp, __u32 mrtt, u32 *usrtt) | |||
611 | /* Calculate rto without backoff. This is the second half of Van Jacobson's | 617 | /* Calculate rto without backoff. This is the second half of Van Jacobson's |
612 | * routine referred to above. | 618 | * routine referred to above. |
613 | */ | 619 | */ |
614 | static inline void tcp_set_rto(struct tcp_sock *tp) | 620 | static inline void tcp_set_rto(struct sock *sk) |
615 | { | 621 | { |
622 | const struct tcp_sock *tp = tcp_sk(sk); | ||
616 | /* Old crap is replaced with new one. 8) | 623 | /* Old crap is replaced with new one. 8) |
617 | * | 624 | * |
618 | * More seriously: | 625 | * More seriously: |
@@ -623,7 +630,7 @@ static inline void tcp_set_rto(struct tcp_sock *tp) | |||
623 | * is invisible. Actually, Linux-2.4 also generates erratic | 630 | * is invisible. Actually, Linux-2.4 also generates erratic |
624 | * ACKs in some circumstances. | 631 | * ACKs in some circumstances. |
625 | */ | 632 | */ |
626 | tp->rto = (tp->srtt >> 3) + tp->rttvar; | 633 | inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar; |
627 | 634 | ||
628 | /* 2. Fixups made earlier cannot be right. | 635 | /* 2. Fixups made earlier cannot be right. |
629 | * If we do not estimate RTO correctly without them, | 636 | * If we do not estimate RTO correctly without them, |
@@ -635,10 +642,10 @@ static inline void tcp_set_rto(struct tcp_sock *tp) | |||
635 | /* NOTE: clamping at TCP_RTO_MIN is not required, current algo | 642 | /* NOTE: clamping at TCP_RTO_MIN is not required, current algo |
636 | * guarantees that rto is higher. | 643 | * guarantees that rto is higher. |
637 | */ | 644 | */ |
638 | static inline void tcp_bound_rto(struct tcp_sock *tp) | 645 | static inline void tcp_bound_rto(struct sock *sk) |
639 | { | 646 | { |
640 | if (tp->rto > TCP_RTO_MAX) | 647 | if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX) |
641 | tp->rto = TCP_RTO_MAX; | 648 | inet_csk(sk)->icsk_rto = TCP_RTO_MAX; |
642 | } | 649 | } |
643 | 650 | ||
644 | /* Save metrics learned by this TCP session. | 651 | /* Save metrics learned by this TCP session. |
@@ -658,7 +665,7 @@ void tcp_update_metrics(struct sock *sk) | |||
658 | if (dst && (dst->flags&DST_HOST)) { | 665 | if (dst && (dst->flags&DST_HOST)) { |
659 | int m; | 666 | int m; |
660 | 667 | ||
661 | if (tp->backoff || !tp->srtt) { | 668 | if (inet_csk(sk)->icsk_backoff || !tp->srtt) { |
662 | /* This session failed to estimate rtt. Why? | 669 | /* This session failed to estimate rtt. Why? |
663 | * Probably, no packets returned in time. | 670 | * Probably, no packets returned in time. |
664 | * Reset our results. | 671 | * Reset our results. |
@@ -801,9 +808,9 @@ static void tcp_init_metrics(struct sock *sk) | |||
801 | tp->mdev = dst_metric(dst, RTAX_RTTVAR); | 808 | tp->mdev = dst_metric(dst, RTAX_RTTVAR); |
802 | tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN); | 809 | tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN); |
803 | } | 810 | } |
804 | tcp_set_rto(tp); | 811 | tcp_set_rto(sk); |
805 | tcp_bound_rto(tp); | 812 | tcp_bound_rto(sk); |
806 | if (tp->rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp) | 813 | if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp) |
807 | goto reset; | 814 | goto reset; |
808 | tp->snd_cwnd = tcp_init_cwnd(tp, dst); | 815 | tp->snd_cwnd = tcp_init_cwnd(tp, dst); |
809 | tp->snd_cwnd_stamp = tcp_time_stamp; | 816 | tp->snd_cwnd_stamp = tcp_time_stamp; |
@@ -817,7 +824,7 @@ reset: | |||
817 | if (!tp->rx_opt.saw_tstamp && tp->srtt) { | 824 | if (!tp->rx_opt.saw_tstamp && tp->srtt) { |
818 | tp->srtt = 0; | 825 | tp->srtt = 0; |
819 | tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT; | 826 | tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT; |
820 | tp->rto = TCP_TIMEOUT_INIT; | 827 | inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; |
821 | } | 828 | } |
822 | } | 829 | } |
823 | 830 | ||
@@ -1118,7 +1125,7 @@ void tcp_enter_frto(struct sock *sk) | |||
1118 | 1125 | ||
1119 | if (tp->ca_state <= TCP_CA_Disorder || | 1126 | if (tp->ca_state <= TCP_CA_Disorder || |
1120 | tp->snd_una == tp->high_seq || | 1127 | tp->snd_una == tp->high_seq || |
1121 | (tp->ca_state == TCP_CA_Loss && !tp->retransmits)) { | 1128 | (tp->ca_state == TCP_CA_Loss && !inet_csk(sk)->icsk_retransmits)) { |
1122 | tp->prior_ssthresh = tcp_current_ssthresh(tp); | 1129 | tp->prior_ssthresh = tcp_current_ssthresh(tp); |
1123 | tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); | 1130 | tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); |
1124 | tcp_ca_event(tp, CA_EVENT_FRTO); | 1131 | tcp_ca_event(tp, CA_EVENT_FRTO); |
@@ -1214,7 +1221,7 @@ void tcp_enter_loss(struct sock *sk, int how) | |||
1214 | 1221 | ||
1215 | /* Reduce ssthresh if it has not yet been made inside this window. */ | 1222 | /* Reduce ssthresh if it has not yet been made inside this window. */ |
1216 | if (tp->ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq || | 1223 | if (tp->ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq || |
1217 | (tp->ca_state == TCP_CA_Loss && !tp->retransmits)) { | 1224 | (tp->ca_state == TCP_CA_Loss && !inet_csk(sk)->icsk_retransmits)) { |
1218 | tp->prior_ssthresh = tcp_current_ssthresh(tp); | 1225 | tp->prior_ssthresh = tcp_current_ssthresh(tp); |
1219 | tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); | 1226 | tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); |
1220 | tcp_ca_event(tp, CA_EVENT_LOSS); | 1227 | tcp_ca_event(tp, CA_EVENT_LOSS); |
@@ -1253,7 +1260,7 @@ void tcp_enter_loss(struct sock *sk, int how) | |||
1253 | TCP_ECN_queue_cwr(tp); | 1260 | TCP_ECN_queue_cwr(tp); |
1254 | } | 1261 | } |
1255 | 1262 | ||
1256 | static int tcp_check_sack_reneging(struct sock *sk, struct tcp_sock *tp) | 1263 | static int tcp_check_sack_reneging(struct sock *sk) |
1257 | { | 1264 | { |
1258 | struct sk_buff *skb; | 1265 | struct sk_buff *skb; |
1259 | 1266 | ||
@@ -1268,9 +1275,10 @@ static int tcp_check_sack_reneging(struct sock *sk, struct tcp_sock *tp) | |||
1268 | NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING); | 1275 | NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING); |
1269 | 1276 | ||
1270 | tcp_enter_loss(sk, 1); | 1277 | tcp_enter_loss(sk, 1); |
1271 | tp->retransmits++; | 1278 | inet_csk(sk)->icsk_retransmits++; |
1272 | tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue)); | 1279 | tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue)); |
1273 | tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); | 1280 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, |
1281 | inet_csk(sk)->icsk_rto); | ||
1274 | return 1; | 1282 | return 1; |
1275 | } | 1283 | } |
1276 | return 0; | 1284 | return 0; |
@@ -1281,15 +1289,15 @@ static inline int tcp_fackets_out(struct tcp_sock *tp) | |||
1281 | return IsReno(tp) ? tp->sacked_out+1 : tp->fackets_out; | 1289 | return IsReno(tp) ? tp->sacked_out+1 : tp->fackets_out; |
1282 | } | 1290 | } |
1283 | 1291 | ||
1284 | static inline int tcp_skb_timedout(struct tcp_sock *tp, struct sk_buff *skb) | 1292 | static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb) |
1285 | { | 1293 | { |
1286 | return (tcp_time_stamp - TCP_SKB_CB(skb)->when > tp->rto); | 1294 | return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto); |
1287 | } | 1295 | } |
1288 | 1296 | ||
1289 | static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp) | 1297 | static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp) |
1290 | { | 1298 | { |
1291 | return tp->packets_out && | 1299 | return tp->packets_out && |
1292 | tcp_skb_timedout(tp, skb_peek(&sk->sk_write_queue)); | 1300 | tcp_skb_timedout(sk, skb_peek(&sk->sk_write_queue)); |
1293 | } | 1301 | } |
1294 | 1302 | ||
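tcp_skb_timedout() is plain unsigned-wraparound age arithmetic on the 32-bit timestamp clock: a segment has timed out once now - when exceeds the RTO, and the subtraction stays correct across a wrap. In isolation:

#include <stdint.h>
#include <stdio.h>

static int skb_timedout(uint32_t now, uint32_t when, uint32_t rto)
{
	return now - when > rto;
}

int main(void)
{
	printf("%d\n", skb_timedout(1000, 500, 300));		/* 1: age 500 */
	printf("%d\n", skb_timedout(100, 0xffffff00u, 300));	/* 1: age 356 across the wrap */
	printf("%d\n", skb_timedout(100, 50, 300));		/* 0: age 50 */
	return 0;
}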
1295 | /* Linux NewReno/SACK/FACK/ECN state machine. | 1303 | /* Linux NewReno/SACK/FACK/ECN state machine. |
@@ -1509,7 +1517,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp) | |||
1509 | struct sk_buff *skb; | 1517 | struct sk_buff *skb; |
1510 | 1518 | ||
1511 | sk_stream_for_retrans_queue(skb, sk) { | 1519 | sk_stream_for_retrans_queue(skb, sk) { |
1512 | if (tcp_skb_timedout(tp, skb) && | 1520 | if (tcp_skb_timedout(sk, skb) && |
1513 | !(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) { | 1521 | !(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) { |
1514 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; | 1522 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; |
1515 | tp->lost_out += tcp_skb_pcount(skb); | 1523 | tp->lost_out += tcp_skb_pcount(skb); |
@@ -1676,7 +1684,7 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp) | |||
1676 | tp->left_out = tp->sacked_out; | 1684 | tp->left_out = tp->sacked_out; |
1677 | tcp_undo_cwr(tp, 1); | 1685 | tcp_undo_cwr(tp, 1); |
1678 | NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO); | 1686 | NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO); |
1679 | tp->retransmits = 0; | 1687 | inet_csk(sk)->icsk_retransmits = 0; |
1680 | tp->undo_marker = 0; | 1688 | tp->undo_marker = 0; |
1681 | if (!IsReno(tp)) | 1689 | if (!IsReno(tp)) |
1682 | tcp_set_ca_state(tp, TCP_CA_Open); | 1690 | tcp_set_ca_state(tp, TCP_CA_Open); |
@@ -1750,7 +1758,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, | |||
1750 | tp->prior_ssthresh = 0; | 1758 | tp->prior_ssthresh = 0; |
1751 | 1759 | ||
1752 | /* B. In all the states check for reneging SACKs. */ | 1760 | /* B. In all the states check for reneging SACKs. */ |
1753 | if (tp->sacked_out && tcp_check_sack_reneging(sk, tp)) | 1761 | if (tp->sacked_out && tcp_check_sack_reneging(sk)) |
1754 | return; | 1762 | return; |
1755 | 1763 | ||
1756 | /* C. Process data loss notification, provided it is valid. */ | 1764 | /* C. Process data loss notification, provided it is valid. */ |
@@ -1774,7 +1782,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, | |||
1774 | } else if (!before(tp->snd_una, tp->high_seq)) { | 1782 | } else if (!before(tp->snd_una, tp->high_seq)) { |
1775 | switch (tp->ca_state) { | 1783 | switch (tp->ca_state) { |
1776 | case TCP_CA_Loss: | 1784 | case TCP_CA_Loss: |
1777 | tp->retransmits = 0; | 1785 | inet_csk(sk)->icsk_retransmits = 0; |
1778 | if (tcp_try_undo_recovery(sk, tp)) | 1786 | if (tcp_try_undo_recovery(sk, tp)) |
1779 | return; | 1787 | return; |
1780 | break; | 1788 | break; |
@@ -1824,7 +1832,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, | |||
1824 | break; | 1832 | break; |
1825 | case TCP_CA_Loss: | 1833 | case TCP_CA_Loss: |
1826 | if (flag&FLAG_DATA_ACKED) | 1834 | if (flag&FLAG_DATA_ACKED) |
1827 | tp->retransmits = 0; | 1835 | inet_csk(sk)->icsk_retransmits = 0; |
1828 | if (!tcp_try_undo_loss(sk, tp)) { | 1836 | if (!tcp_try_undo_loss(sk, tp)) { |
1829 | tcp_moderate_cwnd(tp); | 1837 | tcp_moderate_cwnd(tp); |
1830 | tcp_xmit_retransmit_queue(sk); | 1838 | tcp_xmit_retransmit_queue(sk); |
@@ -1881,10 +1889,8 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, | |||
1881 | /* Read draft-ietf-tcplw-high-performance before mucking | 1889 | /* Read draft-ietf-tcplw-high-performance before mucking |
1882 | * with this code. (Supersedes RFC1323) | 1890 | * with this code. (Supersedes RFC1323) |
1883 | */ | 1891 | */ |
1884 | static void tcp_ack_saw_tstamp(struct tcp_sock *tp, u32 *usrtt, int flag) | 1892 | static void tcp_ack_saw_tstamp(struct sock *sk, u32 *usrtt, int flag) |
1885 | { | 1893 | { |
1886 | __u32 seq_rtt; | ||
1887 | |||
1888 | /* RTTM Rule: A TSecr value received in a segment is used to | 1894 | /* RTTM Rule: A TSecr value received in a segment is used to |
1889 | * update the averaged RTT measurement only if the segment | 1895 | * update the averaged RTT measurement only if the segment |
1890 | * acknowledges some new data, i.e., only if it advances the | 1896 | * acknowledges some new data, i.e., only if it advances the |
@@ -1900,14 +1906,15 @@ static void tcp_ack_saw_tstamp(struct tcp_sock *tp, u32 *usrtt, int flag) | |||
1900 | * answer arrives rto becomes 120 seconds! If at least one of segments | 1906 | * answer arrives rto becomes 120 seconds! If at least one of segments |
1901 | * in window is lost... Voila. --ANK (010210) | 1907 | * in window is lost... Voila. --ANK (010210) |
1902 | */ | 1908 | */ |
1903 | seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr; | 1909 | struct tcp_sock *tp = tcp_sk(sk); |
1910 | const __u32 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr; | ||
1904 | tcp_rtt_estimator(tp, seq_rtt, usrtt); | 1911 | tcp_rtt_estimator(tp, seq_rtt, usrtt); |
1905 | tcp_set_rto(tp); | 1912 | tcp_set_rto(sk); |
1906 | tp->backoff = 0; | 1913 | inet_csk(sk)->icsk_backoff = 0; |
1907 | tcp_bound_rto(tp); | 1914 | tcp_bound_rto(sk); |
1908 | } | 1915 | } |
1909 | 1916 | ||
1910 | static void tcp_ack_no_tstamp(struct tcp_sock *tp, u32 seq_rtt, u32 *usrtt, int flag) | 1917 | static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, u32 *usrtt, int flag) |
1911 | { | 1918 | { |
1912 | /* We don't have a timestamp. Can only use | 1919 | /* We don't have a timestamp. Can only use |
1913 | * packets that are not retransmitted to determine | 1920 | * packets that are not retransmitted to determine |
@@ -1921,20 +1928,21 @@ static void tcp_ack_no_tstamp(struct tcp_sock *tp, u32 seq_rtt, u32 *usrtt, int | |||
1921 | if (flag & FLAG_RETRANS_DATA_ACKED) | 1928 | if (flag & FLAG_RETRANS_DATA_ACKED) |
1922 | return; | 1929 | return; |
1923 | 1930 | ||
1924 | tcp_rtt_estimator(tp, seq_rtt, usrtt); | 1931 | tcp_rtt_estimator(tcp_sk(sk), seq_rtt, usrtt); |
1925 | tcp_set_rto(tp); | 1932 | tcp_set_rto(sk); |
1926 | tp->backoff = 0; | 1933 | inet_csk(sk)->icsk_backoff = 0; |
1927 | tcp_bound_rto(tp); | 1934 | tcp_bound_rto(sk); |
1928 | } | 1935 | } |
1929 | 1936 | ||
1930 | static inline void tcp_ack_update_rtt(struct tcp_sock *tp, | 1937 | static inline void tcp_ack_update_rtt(struct sock *sk, const int flag, |
1931 | int flag, s32 seq_rtt, u32 *usrtt) | 1938 | const s32 seq_rtt, u32 *usrtt) |
1932 | { | 1939 | { |
1940 | const struct tcp_sock *tp = tcp_sk(sk); | ||
1933 | /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */ | 1941 | /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */ |
1934 | if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) | 1942 | if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) |
1935 | tcp_ack_saw_tstamp(tp, usrtt, flag); | 1943 | tcp_ack_saw_tstamp(sk, usrtt, flag); |
1936 | else if (seq_rtt >= 0) | 1944 | else if (seq_rtt >= 0) |
1937 | tcp_ack_no_tstamp(tp, seq_rtt, usrtt, flag); | 1945 | tcp_ack_no_tstamp(sk, seq_rtt, usrtt, flag); |
1938 | } | 1946 | } |
1939 | 1947 | ||
1940 | static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, | 1948 | static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, |
@@ -1951,9 +1959,9 @@ static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, | |||
1951 | static inline void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp) | 1959 | static inline void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp) |
1952 | { | 1960 | { |
1953 | if (!tp->packets_out) { | 1961 | if (!tp->packets_out) { |
1954 | tcp_clear_xmit_timer(sk, TCP_TIME_RETRANS); | 1962 | inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); |
1955 | } else { | 1963 | } else { |
1956 | tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); | 1964 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto); |
1957 | } | 1965 | } |
1958 | } | 1966 | } |
1959 | 1967 | ||
@@ -2090,7 +2098,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt | |||
2090 | } | 2098 | } |
2091 | 2099 | ||
2092 | if (acked&FLAG_ACKED) { | 2100 | if (acked&FLAG_ACKED) { |
2093 | tcp_ack_update_rtt(tp, acked, seq_rtt, seq_usrtt); | 2101 | tcp_ack_update_rtt(sk, acked, seq_rtt, seq_usrtt); |
2094 | tcp_ack_packets_out(sk, tp); | 2102 | tcp_ack_packets_out(sk, tp); |
2095 | 2103 | ||
2096 | if (tp->ca_ops->pkts_acked) | 2104 | if (tp->ca_ops->pkts_acked) |
@@ -2125,20 +2133,21 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt | |||
2125 | 2133 | ||
2126 | static void tcp_ack_probe(struct sock *sk) | 2134 | static void tcp_ack_probe(struct sock *sk) |
2127 | { | 2135 | { |
2128 | struct tcp_sock *tp = tcp_sk(sk); | 2136 | const struct tcp_sock *tp = tcp_sk(sk); |
2137 | struct inet_connection_sock *icsk = inet_csk(sk); | ||
2129 | 2138 | ||
2130 | /* Was it a usable window open? */ | 2139 | /* Was it a usable window open? */ |
2131 | 2140 | ||
2132 | if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq, | 2141 | if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq, |
2133 | tp->snd_una + tp->snd_wnd)) { | 2142 | tp->snd_una + tp->snd_wnd)) { |
2134 | tp->backoff = 0; | 2143 | icsk->icsk_backoff = 0; |
2135 | tcp_clear_xmit_timer(sk, TCP_TIME_PROBE0); | 2144 | inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0); |
2136 | /* Socket must be woken up by subsequent tcp_data_snd_check(). | 2145 | /* Socket must be woken up by subsequent tcp_data_snd_check(). |
2137 | * This function is not for random use! | 2146 | * This function is not for random use! |
2138 | */ | 2147 | */ |
2139 | } else { | 2148 | } else { |
2140 | tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, | 2149 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, |
2141 | min(tp->rto << tp->backoff, TCP_RTO_MAX)); | 2150 | min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX)); |
2142 | } | 2151 | } |
2143 | } | 2152 | } |
2144 | 2153 | ||
@@ -2157,8 +2166,8 @@ static inline int tcp_may_raise_cwnd(struct tcp_sock *tp, int flag) | |||
2157 | /* Check that window update is acceptable. | 2166 | /* Check that window update is acceptable. |
2158 | * The function assumes that snd_una<=ack<=snd_next. | 2167 | * The function assumes that snd_una<=ack<=snd_next. |
2159 | */ | 2168 | */ |
2160 | static inline int tcp_may_update_window(struct tcp_sock *tp, u32 ack, | 2169 | static inline int tcp_may_update_window(const struct tcp_sock *tp, const u32 ack, |
2161 | u32 ack_seq, u32 nwin) | 2170 | const u32 ack_seq, const u32 nwin) |
2162 | { | 2171 | { |
2163 | return (after(ack, tp->snd_una) || | 2172 | return (after(ack, tp->snd_una) || |
2164 | after(ack_seq, tp->snd_wl1) || | 2173 | after(ack_seq, tp->snd_wl1) || |
@@ -2500,8 +2509,9 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) | |||
2500 | * up to bandwidth of 18Gigabit/sec. 8) ] | 2509 | * up to bandwidth of 18Gigabit/sec. 8) ] |
2501 | */ | 2510 | */ |
2502 | 2511 | ||
2503 | static int tcp_disordered_ack(struct tcp_sock *tp, struct sk_buff *skb) | 2512 | static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) |
2504 | { | 2513 | { |
2514 | struct tcp_sock *tp = tcp_sk(sk); | ||
2505 | struct tcphdr *th = skb->h.th; | 2515 | struct tcphdr *th = skb->h.th; |
2506 | u32 seq = TCP_SKB_CB(skb)->seq; | 2516 | u32 seq = TCP_SKB_CB(skb)->seq; |
2507 | u32 ack = TCP_SKB_CB(skb)->ack_seq; | 2517 | u32 ack = TCP_SKB_CB(skb)->ack_seq; |
@@ -2516,14 +2526,15 @@ static int tcp_disordered_ack(struct tcp_sock *tp, struct sk_buff *skb) | |||
2516 | !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && | 2526 | !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && |
2517 | 2527 | ||
2518 | /* 4. ... and sits in replay window. */ | 2528 | /* 4. ... and sits in replay window. */ |
2519 | (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (tp->rto*1024)/HZ); | 2529 | (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); |
2520 | } | 2530 | } |
2521 | 2531 | ||
2522 | static inline int tcp_paws_discard(struct tcp_sock *tp, struct sk_buff *skb) | 2532 | static inline int tcp_paws_discard(const struct sock *sk, const struct sk_buff *skb) |
2523 | { | 2533 | { |
2534 | const struct tcp_sock *tp = tcp_sk(sk); | ||
2524 | return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW && | 2535 | return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW && |
2525 | xtime.tv_sec < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS && | 2536 | xtime.tv_sec < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS && |
2526 | !tcp_disordered_ack(tp, skb)); | 2537 | !tcp_disordered_ack(sk, skb)); |
2527 | } | 2538 | } |
2528 | 2539 | ||
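tcp_paws_discard() compares timestamps with signed 32-bit serial arithmetic, so PAWS keeps working when tsval wraps; TCP_PAWS_WINDOW (1 tick) is the tolerated reordering. The core comparison in isolation:

#include <stdint.h>
#include <stdio.h>

#define TCP_PAWS_WINDOW 1

static int paws_reject(uint32_t ts_recent, uint32_t rcv_tsval)
{
	return (int32_t)(ts_recent - rcv_tsval) > TCP_PAWS_WINDOW;
}

int main(void)
{
	printf("%d\n", paws_reject(1000, 990));		/* 1: tsval echoes the past */
	printf("%d\n", paws_reject(1000, 1005));	/* 0: fresh timestamp */
	printf("%d\n", paws_reject(0xfffffff0u, 5));	/* 0: wrapped forward, still fresh */
	return 0;
}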
2529 | /* Check segment sequence number for validity. | 2540 | /* Check segment sequence number for validity. |
@@ -2586,7 +2597,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th) | |||
2586 | { | 2597 | { |
2587 | struct tcp_sock *tp = tcp_sk(sk); | 2598 | struct tcp_sock *tp = tcp_sk(sk); |
2588 | 2599 | ||
2589 | tcp_schedule_ack(tp); | 2600 | inet_csk_schedule_ack(sk); |
2590 | 2601 | ||
2591 | sk->sk_shutdown |= RCV_SHUTDOWN; | 2602 | sk->sk_shutdown |= RCV_SHUTDOWN; |
2592 | sock_set_flag(sk, SOCK_DONE); | 2603 | sock_set_flag(sk, SOCK_DONE); |
@@ -2596,7 +2607,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th) | |||
2596 | case TCP_ESTABLISHED: | 2607 | case TCP_ESTABLISHED: |
2597 | /* Move to CLOSE_WAIT */ | 2608 | /* Move to CLOSE_WAIT */ |
2598 | tcp_set_state(sk, TCP_CLOSE_WAIT); | 2609 | tcp_set_state(sk, TCP_CLOSE_WAIT); |
2599 | tp->ack.pingpong = 1; | 2610 | inet_csk(sk)->icsk_ack.pingpong = 1; |
2600 | break; | 2611 | break; |
2601 | 2612 | ||
2602 | case TCP_CLOSE_WAIT: | 2613 | case TCP_CLOSE_WAIT: |
@@ -2694,7 +2705,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb) | |||
2694 | if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && | 2705 | if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && |
2695 | before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { | 2706 | before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { |
2696 | NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST); | 2707 | NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST); |
2697 | tcp_enter_quickack_mode(tp); | 2708 | tcp_enter_quickack_mode(sk); |
2698 | 2709 | ||
2699 | if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) { | 2710 | if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) { |
2700 | u32 end_seq = TCP_SKB_CB(skb)->end_seq; | 2711 | u32 end_seq = TCP_SKB_CB(skb)->end_seq; |
@@ -2942,7 +2953,7 @@ queue_and_out: | |||
2942 | * gap in queue is filled. | 2953 | * gap in queue is filled. |
2943 | */ | 2954 | */ |
2944 | if (skb_queue_empty(&tp->out_of_order_queue)) | 2955 | if (skb_queue_empty(&tp->out_of_order_queue)) |
2945 | tp->ack.pingpong = 0; | 2956 | inet_csk(sk)->icsk_ack.pingpong = 0; |
2946 | } | 2957 | } |
2947 | 2958 | ||
2948 | if (tp->rx_opt.num_sacks) | 2959 | if (tp->rx_opt.num_sacks) |
@@ -2963,8 +2974,8 @@ queue_and_out: | |||
2963 | tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); | 2974 | tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); |
2964 | 2975 | ||
2965 | out_of_window: | 2976 | out_of_window: |
2966 | tcp_enter_quickack_mode(tp); | 2977 | tcp_enter_quickack_mode(sk); |
2967 | tcp_schedule_ack(tp); | 2978 | inet_csk_schedule_ack(sk); |
2968 | drop: | 2979 | drop: |
2969 | __kfree_skb(skb); | 2980 | __kfree_skb(skb); |
2970 | return; | 2981 | return; |
@@ -2974,7 +2985,7 @@ drop: | |||
2974 | if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) | 2985 | if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) |
2975 | goto out_of_window; | 2986 | goto out_of_window; |
2976 | 2987 | ||
2977 | tcp_enter_quickack_mode(tp); | 2988 | tcp_enter_quickack_mode(sk); |
2978 | 2989 | ||
2979 | if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { | 2990 | if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { |
2980 | /* Partial packet, seq < rcv_next < end_seq */ | 2991 | /* Partial packet, seq < rcv_next < end_seq */ |
@@ -3003,7 +3014,7 @@ drop: | |||
3003 | 3014 | ||
3004 | /* Disable header prediction. */ | 3015 | /* Disable header prediction. */ |
3005 | tp->pred_flags = 0; | 3016 | tp->pred_flags = 0; |
3006 | tcp_schedule_ack(tp); | 3017 | inet_csk_schedule_ack(sk); |
3007 | 3018 | ||
3008 | SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", | 3019 | SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", |
3009 | tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); | 3020 | tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); |
@@ -3373,13 +3384,13 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) | |||
3373 | struct tcp_sock *tp = tcp_sk(sk); | 3384 | struct tcp_sock *tp = tcp_sk(sk); |
3374 | 3385 | ||
3375 | /* More than one full frame received... */ | 3386 | /* More than one full frame received... */ |
3376 | if (((tp->rcv_nxt - tp->rcv_wup) > tp->ack.rcv_mss | 3387 | if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss |
3377 | /* ... and right edge of window advances far enough. | 3388 | /* ... and right edge of window advances far enough. |
3378 | * (tcp_recvmsg() will send ACK otherwise). Or... | 3389 | * (tcp_recvmsg() will send ACK otherwise). Or... |
3379 | */ | 3390 | */ |
3380 | && __tcp_select_window(sk) >= tp->rcv_wnd) || | 3391 | && __tcp_select_window(sk) >= tp->rcv_wnd) || |
3381 | /* We ACK each frame or... */ | 3392 | /* We ACK each frame or... */ |
3382 | tcp_in_quickack_mode(tp) || | 3393 | tcp_in_quickack_mode(sk) || |
3383 | /* We have out of order data. */ | 3394 | /* We have out of order data. */ |
3384 | (ofo_possible && | 3395 | (ofo_possible && |
3385 | skb_peek(&tp->out_of_order_queue))) { | 3396 | skb_peek(&tp->out_of_order_queue))) { |
@@ -3393,8 +3404,7 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) | |||
3393 | 3404 | ||
3394 | static __inline__ void tcp_ack_snd_check(struct sock *sk) | 3405 | static __inline__ void tcp_ack_snd_check(struct sock *sk) |
3395 | { | 3406 | { |
3396 | struct tcp_sock *tp = tcp_sk(sk); | 3407 | if (!inet_csk_ack_scheduled(sk)) { |
3397 | if (!tcp_ack_scheduled(tp)) { | ||
3398 | /* We sent a data segment already. */ | 3408 | /* We sent a data segment already. */ |
3399 | return; | 3409 | return; |
3400 | } | 3410 | } |
@@ -3648,7 +3658,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
3648 | tp->rcv_nxt == tp->rcv_wup) | 3658 | tp->rcv_nxt == tp->rcv_wup) |
3649 | tcp_store_ts_recent(tp); | 3659 | tcp_store_ts_recent(tp); |
3650 | 3660 | ||
3651 | tcp_rcv_rtt_measure_ts(tp, skb); | 3661 | tcp_rcv_rtt_measure_ts(sk, skb); |
3652 | 3662 | ||
3653 | /* We know that such packets are checksummed | 3663 | /* We know that such packets are checksummed |
3654 | * on entry. | 3664 | * on entry. |
@@ -3681,7 +3691,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
3681 | tp->rcv_nxt == tp->rcv_wup) | 3691 | tp->rcv_nxt == tp->rcv_wup) |
3682 | tcp_store_ts_recent(tp); | 3692 | tcp_store_ts_recent(tp); |
3683 | 3693 | ||
3684 | tcp_rcv_rtt_measure_ts(tp, skb); | 3694 | tcp_rcv_rtt_measure_ts(sk, skb); |
3685 | 3695 | ||
3686 | __skb_pull(skb, tcp_header_len); | 3696 | __skb_pull(skb, tcp_header_len); |
3687 | tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; | 3697 | tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; |
@@ -3702,7 +3712,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
3702 | tp->rcv_nxt == tp->rcv_wup) | 3712 | tp->rcv_nxt == tp->rcv_wup) |
3703 | tcp_store_ts_recent(tp); | 3713 | tcp_store_ts_recent(tp); |
3704 | 3714 | ||
3705 | tcp_rcv_rtt_measure_ts(tp, skb); | 3715 | tcp_rcv_rtt_measure_ts(sk, skb); |
3706 | 3716 | ||
3707 | if ((int)skb->truesize > sk->sk_forward_alloc) | 3717 | if ((int)skb->truesize > sk->sk_forward_alloc) |
3708 | goto step5; | 3718 | goto step5; |
@@ -3722,7 +3732,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
3722 | /* Well, only one small jumplet in fast path... */ | 3732 | /* Well, only one small jumplet in fast path... */ |
3723 | tcp_ack(sk, skb, FLAG_DATA); | 3733 | tcp_ack(sk, skb, FLAG_DATA); |
3724 | tcp_data_snd_check(sk, tp); | 3734 | tcp_data_snd_check(sk, tp); |
3725 | if (!tcp_ack_scheduled(tp)) | 3735 | if (!inet_csk_ack_scheduled(sk)) |
3726 | goto no_ack; | 3736 | goto no_ack; |
3727 | } | 3737 | } |
3728 | 3738 | ||
@@ -3744,7 +3754,7 @@ slow_path: | |||
3744 | * RFC1323: H1. Apply PAWS check first. | 3754 | * RFC1323: H1. Apply PAWS check first. |
3745 | */ | 3755 | */ |
3746 | if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && | 3756 | if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && |
3747 | tcp_paws_discard(tp, skb)) { | 3757 | tcp_paws_discard(sk, skb)) { |
3748 | if (!th->rst) { | 3758 | if (!th->rst) { |
3749 | NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); | 3759 | NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); |
3750 | tcp_send_dupack(sk, skb); | 3760 | tcp_send_dupack(sk, skb); |
@@ -3791,7 +3801,7 @@ step5: | |||
3791 | if(th->ack) | 3801 | if(th->ack) |
3792 | tcp_ack(sk, skb, FLAG_SLOWPATH); | 3802 | tcp_ack(sk, skb, FLAG_SLOWPATH); |
3793 | 3803 | ||
3794 | tcp_rcv_rtt_measure_ts(tp, skb); | 3804 | tcp_rcv_rtt_measure_ts(sk, skb); |
3795 | 3805 | ||
3796 | /* Process urgent data. */ | 3806 | /* Process urgent data. */ |
3797 | tcp_urg(sk, skb, th); | 3807 | tcp_urg(sk, skb, th); |
@@ -3933,7 +3943,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, | |||
3933 | tcp_init_buffer_space(sk); | 3943 | tcp_init_buffer_space(sk); |
3934 | 3944 | ||
3935 | if (sock_flag(sk, SOCK_KEEPOPEN)) | 3945 | if (sock_flag(sk, SOCK_KEEPOPEN)) |
3936 | tcp_reset_keepalive_timer(sk, keepalive_time_when(tp)); | 3946 | inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); |
3937 | 3947 | ||
3938 | if (!tp->rx_opt.snd_wscale) | 3948 | if (!tp->rx_opt.snd_wscale) |
3939 | __tcp_fast_path_on(tp, tp->snd_wnd); | 3949 | __tcp_fast_path_on(tp, tp->snd_wnd); |
@@ -3945,7 +3955,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, | |||
3945 | sk_wake_async(sk, 0, POLL_OUT); | 3955 | sk_wake_async(sk, 0, POLL_OUT); |
3946 | } | 3956 | } |
3947 | 3957 | ||
3948 | if (sk->sk_write_pending || tp->defer_accept || tp->ack.pingpong) { | 3958 | if (sk->sk_write_pending || tp->defer_accept || inet_csk(sk)->icsk_ack.pingpong) { |
3949 | /* Save one ACK. Data will be ready after | 3959 | /* Save one ACK. Data will be ready after |
3950 | * several ticks, if write_pending is set. | 3960 | * several ticks, if write_pending is set. |
3951 | * | 3961 | * |
@@ -3953,12 +3963,12 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, | |||
3953 | * look so _wonderfully_ clever that I was not able | 3963 | * look so _wonderfully_ clever that I was not able |
3954 | * to stand against the temptation 8) --ANK | 3964 | * to stand against the temptation 8) --ANK |
3955 | */ | 3965 | */ |
3956 | tcp_schedule_ack(tp); | 3966 | inet_csk_schedule_ack(sk); |
3957 | tp->ack.lrcvtime = tcp_time_stamp; | 3967 | inet_csk(sk)->icsk_ack.lrcvtime = tcp_time_stamp; |
3958 | tp->ack.ato = TCP_ATO_MIN; | 3968 | inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; |
3959 | tcp_incr_quickack(tp); | 3969 | tcp_incr_quickack(sk); |
3960 | tcp_enter_quickack_mode(tp); | 3970 | tcp_enter_quickack_mode(sk); |
3961 | tcp_reset_xmit_timer(sk, TCP_TIME_DACK, TCP_DELACK_MAX); | 3971 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX); |
3962 | 3972 | ||
3963 | discard: | 3973 | discard: |
3964 | __kfree_skb(skb); | 3974 | __kfree_skb(skb); |
@@ -4114,7 +4124,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
4114 | } | 4124 | } |
4115 | 4125 | ||
4116 | if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && | 4126 | if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && |
4117 | tcp_paws_discard(tp, skb)) { | 4127 | tcp_paws_discard(sk, skb)) { |
4118 | if (!th->rst) { | 4128 | if (!th->rst) { |
4119 | NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); | 4129 | NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); |
4120 | tcp_send_dupack(sk, skb); | 4130 | tcp_send_dupack(sk, skb); |
@@ -4183,7 +4193,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
4183 | */ | 4193 | */ |
4184 | if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && | 4194 | if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && |
4185 | !tp->srtt) | 4195 | !tp->srtt) |
4186 | tcp_ack_saw_tstamp(tp, 0, 0); | 4196 | tcp_ack_saw_tstamp(sk, 0, 0); |
4187 | 4197 | ||
4188 | if (tp->rx_opt.tstamp_ok) | 4198 | if (tp->rx_opt.tstamp_ok) |
4189 | tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; | 4199 | tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; |
@@ -4230,9 +4240,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
4230 | return 1; | 4240 | return 1; |
4231 | } | 4241 | } |
4232 | 4242 | ||
4233 | tmo = tcp_fin_time(tp); | 4243 | tmo = tcp_fin_time(sk); |
4234 | if (tmo > TCP_TIMEWAIT_LEN) { | 4244 | if (tmo > TCP_TIMEWAIT_LEN) { |
4235 | tcp_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); | 4245 | inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); |
4236 | } else if (th->fin || sock_owned_by_user(sk)) { | 4246 | } else if (th->fin || sock_owned_by_user(sk)) { |
4237 | /* Bad case. We could lose such FIN otherwise. | 4247 | /* Bad case. We could lose such FIN otherwise. |
4238 | * It is not a big problem, but it looks confusing | 4248 | * It is not a big problem, but it looks confusing |
@@ -4240,7 +4250,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
4240 | * if it spins in bh_lock_sock(), but it is a really | 4250 | * if it spins in bh_lock_sock(), but it is a really |
4241 | * marginal case. | 4251 | * marginal case. |
4242 | */ | 4252 | */ |
4243 | tcp_reset_keepalive_timer(sk, tmo); | 4253 | inet_csk_reset_keepalive_timer(sk, tmo); |
4244 | } else { | 4254 | } else { |
4245 | tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); | 4255 | tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); |
4246 | goto discard; | 4256 | goto discard; |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index e7e91e60ac74..2cd41265d17f 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -104,7 +104,7 @@ struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { | |||
104 | */ | 104 | */ |
105 | int sysctl_local_port_range[2] = { 1024, 4999 }; | 105 | int sysctl_local_port_range[2] = { 1024, 4999 }; |
106 | 106 | ||
107 | static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb) | 107 | static inline int inet_csk_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb) |
108 | { | 108 | { |
109 | const u32 sk_rcv_saddr = inet_rcv_saddr(sk); | 109 | const u32 sk_rcv_saddr = inet_rcv_saddr(sk); |
110 | struct sock *sk2; | 110 | struct sock *sk2; |
@@ -113,7 +113,7 @@ static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb | |||
113 | 113 | ||
114 | sk_for_each_bound(sk2, node, &tb->owners) { | 114 | sk_for_each_bound(sk2, node, &tb->owners) { |
115 | if (sk != sk2 && | 115 | if (sk != sk2 && |
116 | !tcp_v6_ipv6only(sk2) && | 116 | !inet_v6_ipv6only(sk2) && |
117 | (!sk->sk_bound_dev_if || | 117 | (!sk->sk_bound_dev_if || |
118 | !sk2->sk_bound_dev_if || | 118 | !sk2->sk_bound_dev_if || |
119 | sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { | 119 | sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { |
@@ -132,7 +132,8 @@ static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb | |||
132 | /* Obtain a reference to a local port for the given sock, | 132 | /* Obtain a reference to a local port for the given sock, |
133 | * if snum is zero it means select any available local port. | 133 | * if snum is zero it means select any available local port. |
134 | */ | 134 | */ |
135 | static int tcp_v4_get_port(struct sock *sk, unsigned short snum) | 135 | int inet_csk_get_port(struct inet_hashinfo *hashinfo, |
136 | struct sock *sk, unsigned short snum) | ||
136 | { | 137 | { |
137 | struct inet_bind_hashbucket *head; | 138 | struct inet_bind_hashbucket *head; |
138 | struct hlist_node *node; | 139 | struct hlist_node *node; |
@@ -146,16 +147,16 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum) | |||
146 | int remaining = (high - low) + 1; | 147 | int remaining = (high - low) + 1; |
147 | int rover; | 148 | int rover; |
148 | 149 | ||
149 | spin_lock(&tcp_hashinfo.portalloc_lock); | 150 | spin_lock(&hashinfo->portalloc_lock); |
150 | if (tcp_hashinfo.port_rover < low) | 151 | if (hashinfo->port_rover < low) |
151 | rover = low; | 152 | rover = low; |
152 | else | 153 | else |
153 | rover = tcp_hashinfo.port_rover; | 154 | rover = hashinfo->port_rover; |
154 | do { | 155 | do { |
155 | rover++; | 156 | rover++; |
156 | if (rover > high) | 157 | if (rover > high) |
157 | rover = low; | 158 | rover = low; |
158 | head = &tcp_hashinfo.bhash[inet_bhashfn(rover, tcp_hashinfo.bhash_size)]; | 159 | head = &hashinfo->bhash[inet_bhashfn(rover, hashinfo->bhash_size)]; |
159 | spin_lock(&head->lock); | 160 | spin_lock(&head->lock); |
160 | inet_bind_bucket_for_each(tb, node, &head->chain) | 161 | inet_bind_bucket_for_each(tb, node, &head->chain) |
161 | if (tb->port == rover) | 162 | if (tb->port == rover) |
@@ -164,8 +165,8 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum) | |||
164 | next: | 165 | next: |
165 | spin_unlock(&head->lock); | 166 | spin_unlock(&head->lock); |
166 | } while (--remaining > 0); | 167 | } while (--remaining > 0); |
167 | tcp_hashinfo.port_rover = rover; | 168 | hashinfo->port_rover = rover; |
168 | spin_unlock(&tcp_hashinfo.portalloc_lock); | 169 | spin_unlock(&hashinfo->portalloc_lock); |
169 | 170 | ||
170 | /* Exhausted local port range during search? It is not | 171 | /* Exhausted local port range during search? It is not |
171 | * possible for us to be holding one of the bind hash | 172 | * possible for us to be holding one of the bind hash |
@@ -182,7 +183,7 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum) | |||
182 | */ | 183 | */ |
183 | snum = rover; | 184 | snum = rover; |
184 | } else { | 185 | } else { |
185 | head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)]; | 186 | head = &hashinfo->bhash[inet_bhashfn(snum, hashinfo->bhash_size)]; |
186 | spin_lock(&head->lock); | 187 | spin_lock(&head->lock); |
187 | inet_bind_bucket_for_each(tb, node, &head->chain) | 188 | inet_bind_bucket_for_each(tb, node, &head->chain) |
188 | if (tb->port == snum) | 189 | if (tb->port == snum) |
@@ -199,13 +200,13 @@ tb_found: | |||
199 | goto success; | 200 | goto success; |
200 | } else { | 201 | } else { |
201 | ret = 1; | 202 | ret = 1; |
202 | if (tcp_bind_conflict(sk, tb)) | 203 | if (inet_csk_bind_conflict(sk, tb)) |
203 | goto fail_unlock; | 204 | goto fail_unlock; |
204 | } | 205 | } |
205 | } | 206 | } |
206 | tb_not_found: | 207 | tb_not_found: |
207 | ret = 1; | 208 | ret = 1; |
208 | if (!tb && (tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, snum)) == NULL) | 209 | if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep, head, snum)) == NULL) |
209 | goto fail_unlock; | 210 | goto fail_unlock; |
210 | if (hlist_empty(&tb->owners)) { | 211 | if (hlist_empty(&tb->owners)) { |
211 | if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) | 212 | if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) |
@@ -216,9 +217,9 @@ tb_not_found: | |||
216 | (!sk->sk_reuse || sk->sk_state == TCP_LISTEN)) | 217 | (!sk->sk_reuse || sk->sk_state == TCP_LISTEN)) |
217 | tb->fastreuse = 0; | 218 | tb->fastreuse = 0; |
218 | success: | 219 | success: |
219 | if (!inet_sk(sk)->bind_hash) | 220 | if (!inet_csk(sk)->icsk_bind_hash) |
220 | inet_bind_hash(sk, tb, snum); | 221 | inet_bind_hash(sk, tb, snum); |
221 | BUG_TRAP(inet_sk(sk)->bind_hash == tb); | 222 | BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb); |
222 | ret = 0; | 223 | ret = 0; |
223 | 224 | ||
224 | fail_unlock: | 225 | fail_unlock: |
@@ -228,6 +229,11 @@ fail: | |||
228 | return ret; | 229 | return ret; |
229 | } | 230 | } |
230 | 231 | ||
232 | static int tcp_v4_get_port(struct sock *sk, unsigned short snum) | ||
233 | { | ||
234 | return inet_csk_get_port(&tcp_hashinfo, sk, snum); | ||
235 | } | ||
236 | |||
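The wrapper above is the point of the refactor: the allocator body moved verbatim into inet_csk_get_port(), parameterized only by the hash tables it operates on. As a hedged sketch (not part of this patch; the dccp_* names are hypothetical), any other inet_connection_sock user could now reuse it the same way:

static struct inet_hashinfo dccp_hashinfo;	/* hypothetical per-protocol tables */

static int dccp_v4_get_port(struct sock *sk, unsigned short snum)
{
	/* Identical delegation pattern to tcp_v4_get_port() above. */
	return inet_csk_get_port(&dccp_hashinfo, sk, snum);
}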
231 | static void tcp_v4_hash(struct sock *sk) | 237 | static void tcp_v4_hash(struct sock *sk) |
232 | { | 238 | { |
233 | inet_hash(&tcp_hashinfo, sk); | 239 | inet_hash(&tcp_hashinfo, sk); |
@@ -426,7 +432,7 @@ ok: | |||
426 | } | 432 | } |
427 | 433 | ||
428 | head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)]; | 434 | head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)]; |
429 | tb = inet_sk(sk)->bind_hash; | 435 | tb = inet_csk(sk)->icsk_bind_hash; |
430 | spin_lock_bh(&head->lock); | 436 | spin_lock_bh(&head->lock); |
431 | if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { | 437 | if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { |
432 | __inet_hash(&tcp_hashinfo, sk, 0); | 438 | __inet_hash(&tcp_hashinfo, sk, 0); |
@@ -557,25 +563,28 @@ failure: | |||
557 | return err; | 563 | return err; |
558 | } | 564 | } |
559 | 565 | ||
560 | static __inline__ int tcp_v4_iif(struct sk_buff *skb) | 566 | static inline int inet_iif(const struct sk_buff *skb) |
561 | { | 567 | { |
562 | return ((struct rtable *)skb->dst)->rt_iif; | 568 | return ((struct rtable *)skb->dst)->rt_iif; |
563 | } | 569 | } |
564 | 570 | ||
565 | static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd) | 571 | static inline u32 inet_synq_hash(const u32 raddr, const u16 rport, |
572 | const u32 rnd, const u16 synq_hsize) | ||
566 | { | 573 | { |
567 | return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1)); | 574 | return jhash_2words(raddr, (u32)rport, rnd) & (synq_hsize - 1); |
568 | } | 575 | } |
569 | 576 | ||
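inet_synq_hash() now receives the table size instead of hard-coding TCP_SYNQ_HSIZE, so listeners with differently sized SYN tables can share it. The "& (synq_hsize - 1)" mask is a cheap modulo that is only correct for power-of-two sizes (TCP_SYNQ_HSIZE is 512 in this tree). A standalone illustration of the invariant:

#include <assert.h>

static unsigned int synq_bucket(unsigned int hash, unsigned int size)
{
	assert((size & (size - 1)) == 0);	/* size must be a power of two */
	return hash & (size - 1);		/* then equivalent to hash % size */
}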
570 | static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp, | 577 | struct request_sock *inet_csk_search_req(const struct sock *sk, |
571 | struct request_sock ***prevp, | 578 | struct request_sock ***prevp, |
572 | __u16 rport, | 579 | const __u16 rport, const __u32 raddr, |
573 | __u32 raddr, __u32 laddr) | 580 | const __u32 laddr) |
574 | { | 581 | { |
575 | struct listen_sock *lopt = tp->accept_queue.listen_opt; | 582 | const struct inet_connection_sock *icsk = inet_csk(sk); |
583 | struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt; | ||
576 | struct request_sock *req, **prev; | 584 | struct request_sock *req, **prev; |
577 | 585 | ||
578 | for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)]; | 586 | for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd, |
587 | lopt->nr_table_entries)]; | ||
579 | (req = *prev) != NULL; | 588 | (req = *prev) != NULL; |
580 | prev = &req->dl_next) { | 589 | prev = &req->dl_next) { |
581 | const struct inet_request_sock *ireq = inet_rsk(req); | 590 | const struct inet_request_sock *ireq = inet_rsk(req); |
@@ -583,7 +592,7 @@ static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp, | |||
583 | if (ireq->rmt_port == rport && | 592 | if (ireq->rmt_port == rport && |
584 | ireq->rmt_addr == raddr && | 593 | ireq->rmt_addr == raddr && |
585 | ireq->loc_addr == laddr && | 594 | ireq->loc_addr == laddr && |
586 | TCP_INET_FAMILY(req->rsk_ops->family)) { | 595 | AF_INET_FAMILY(req->rsk_ops->family)) { |
587 | BUG_TRAP(!req->sk); | 596 | BUG_TRAP(!req->sk); |
588 | *prevp = prev; | 597 | *prevp = prev; |
589 | break; | 598 | break; |
@@ -595,12 +604,13 @@ static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp, | |||
595 | 604 | ||
596 | static void tcp_v4_synq_add(struct sock *sk, struct request_sock *req) | 605 | static void tcp_v4_synq_add(struct sock *sk, struct request_sock *req) |
597 | { | 606 | { |
598 | struct tcp_sock *tp = tcp_sk(sk); | 607 | struct inet_connection_sock *icsk = inet_csk(sk); |
599 | struct listen_sock *lopt = tp->accept_queue.listen_opt; | 608 | struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt; |
600 | u32 h = tcp_v4_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd); | 609 | const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, |
610 | lopt->hash_rnd, lopt->nr_table_entries); | ||
601 | 611 | ||
602 | reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT); | 612 | reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, TCP_TIMEOUT_INIT); |
603 | tcp_synq_added(sk); | 613 | inet_csk_reqsk_queue_added(sk, TCP_TIMEOUT_INIT); |
604 | } | 614 | } |
605 | 615 | ||
606 | 616 | ||
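tcp_v4_synq_add() now ends in inet_csk_reqsk_queue_added(), which takes the timeout so the generic code can arm the listener's timer when the queue goes from empty to non-empty. A sketch of what that helper plausibly does, assuming a reqsk_queue_added() counterpart to the reqsk_queue_removed() used later in this patch:

static inline void inet_csk_reqsk_queue_added(struct sock *sk,
					      const unsigned long timeout)
{
	/* Assumed: reqsk_queue_added() bumps qlen/qlen_young and returns
	 * the previous queue length, so the timer is armed only for the
	 * first pending request.
	 */
	if (reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue) == 0)
		inet_csk_reset_keepalive_timer(sk, timeout);
}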
@@ -687,7 +697,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) | |||
687 | } | 697 | } |
688 | 698 | ||
689 | sk = inet_lookup(&tcp_hashinfo, iph->daddr, th->dest, iph->saddr, | 699 | sk = inet_lookup(&tcp_hashinfo, iph->daddr, th->dest, iph->saddr, |
690 | th->source, tcp_v4_iif(skb)); | 700 | th->source, inet_iif(skb)); |
691 | if (!sk) { | 701 | if (!sk) { |
692 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); | 702 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); |
693 | return; | 703 | return; |
@@ -747,8 +757,8 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) | |||
747 | if (sock_owned_by_user(sk)) | 757 | if (sock_owned_by_user(sk)) |
748 | goto out; | 758 | goto out; |
749 | 759 | ||
750 | req = tcp_v4_search_req(tp, &prev, th->dest, | 760 | req = inet_csk_search_req(sk, &prev, th->dest, |
751 | iph->daddr, iph->saddr); | 761 | iph->daddr, iph->saddr); |
752 | if (!req) | 762 | if (!req) |
753 | goto out; | 763 | goto out; |
754 | 764 | ||
@@ -768,7 +778,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) | |||
768 | * created socket, and POSIX does not want network | 778 | * created socket, and POSIX does not want network |
769 | * errors returned from accept(). | 779 | * errors returned from accept(). |
770 | */ | 780 | */ |
771 | tcp_synq_drop(sk, req, prev); | 781 | inet_csk_reqsk_queue_drop(sk, req, prev); |
772 | goto out; | 782 | goto out; |
773 | 783 | ||
774 | case TCP_SYN_SENT: | 784 | case TCP_SYN_SENT: |
@@ -953,8 +963,8 @@ static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) | |||
953 | req->ts_recent); | 963 | req->ts_recent); |
954 | } | 964 | } |
955 | 965 | ||
956 | static struct dst_entry* tcp_v4_route_req(struct sock *sk, | 966 | struct dst_entry* inet_csk_route_req(struct sock *sk, |
957 | struct request_sock *req) | 967 | const struct request_sock *req) |
958 | { | 968 | { |
959 | struct rtable *rt; | 969 | struct rtable *rt; |
960 | const struct inet_request_sock *ireq = inet_rsk(req); | 970 | const struct inet_request_sock *ireq = inet_rsk(req); |
@@ -966,7 +976,7 @@ static struct dst_entry* tcp_v4_route_req(struct sock *sk, | |||
966 | ireq->rmt_addr), | 976 | ireq->rmt_addr), |
967 | .saddr = ireq->loc_addr, | 977 | .saddr = ireq->loc_addr, |
968 | .tos = RT_CONN_FLAGS(sk) } }, | 978 | .tos = RT_CONN_FLAGS(sk) } }, |
969 | .proto = IPPROTO_TCP, | 979 | .proto = sk->sk_protocol, |
970 | .uli_u = { .ports = | 980 | .uli_u = { .ports = |
971 | { .sport = inet_sk(sk)->sport, | 981 | { .sport = inet_sk(sk)->sport, |
972 | .dport = ireq->rmt_port } } }; | 982 | .dport = ireq->rmt_port } } }; |
@@ -996,7 +1006,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req, | |||
996 | struct sk_buff * skb; | 1006 | struct sk_buff * skb; |
997 | 1007 | ||
998 | /* First, grab a route. */ | 1008 | /* First, grab a route. */ |
999 | if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL) | 1009 | if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL) |
1000 | goto out; | 1010 | goto out; |
1001 | 1011 | ||
1002 | skb = tcp_make_synack(sk, dst, req); | 1012 | skb = tcp_make_synack(sk, dst, req); |
@@ -1098,7 +1108,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1098 | * limitations, they conserve resources and the peer is | 1108 | * limitations, they conserve resources and the peer is |
1099 | * evidently a real one. | 1109 | * evidently a real one. |
1100 | */ | 1110 | */ |
1101 | if (tcp_synq_is_full(sk) && !isn) { | 1111 | if (inet_csk_reqsk_queue_is_full(sk) && !isn) { |
1102 | #ifdef CONFIG_SYN_COOKIES | 1112 | #ifdef CONFIG_SYN_COOKIES |
1103 | if (sysctl_tcp_syncookies) { | 1113 | if (sysctl_tcp_syncookies) { |
1104 | want_cookie = 1; | 1114 | want_cookie = 1; |
@@ -1112,7 +1122,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1112 | * clogging syn queue with openreqs with exponentially increasing | 1122 | * clogging syn queue with openreqs with exponentially increasing |
1113 | * timeout. | 1123 | * timeout. |
1114 | */ | 1124 | */ |
1115 | if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1) | 1125 | if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) |
1116 | goto drop; | 1126 | goto drop; |
1117 | 1127 | ||
1118 | req = reqsk_alloc(&tcp_request_sock_ops); | 1128 | req = reqsk_alloc(&tcp_request_sock_ops); |
@@ -1169,7 +1179,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1169 | */ | 1179 | */ |
1170 | if (tmp_opt.saw_tstamp && | 1180 | if (tmp_opt.saw_tstamp && |
1171 | sysctl_tcp_tw_recycle && | 1181 | sysctl_tcp_tw_recycle && |
1172 | (dst = tcp_v4_route_req(sk, req)) != NULL && | 1182 | (dst = inet_csk_route_req(sk, req)) != NULL && |
1173 | (peer = rt_get_peer((struct rtable *)dst)) != NULL && | 1183 | (peer = rt_get_peer((struct rtable *)dst)) != NULL && |
1174 | peer->v4daddr == saddr) { | 1184 | peer->v4daddr == saddr) { |
1175 | if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL && | 1185 | if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL && |
@@ -1182,7 +1192,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1182 | } | 1192 | } |
1183 | /* Kill the following clause, if you dislike this way. */ | 1193 | /* Kill the following clause, if you dislike this way. */ |
1184 | else if (!sysctl_tcp_syncookies && | 1194 | else if (!sysctl_tcp_syncookies && |
1185 | (sysctl_max_syn_backlog - tcp_synq_len(sk) < | 1195 | (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < |
1186 | (sysctl_max_syn_backlog >> 2)) && | 1196 | (sysctl_max_syn_backlog >> 2)) && |
1187 | (!peer || !peer->tcp_ts_stamp) && | 1197 | (!peer || !peer->tcp_ts_stamp) && |
1188 | (!dst || !dst_metric(dst, RTAX_RTT))) { | 1198 | (!dst || !dst_metric(dst, RTAX_RTT))) { |
@@ -1240,7 +1250,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1240 | if (sk_acceptq_is_full(sk)) | 1250 | if (sk_acceptq_is_full(sk)) |
1241 | goto exit_overflow; | 1251 | goto exit_overflow; |
1242 | 1252 | ||
1243 | if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL) | 1253 | if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL) |
1244 | goto exit; | 1254 | goto exit; |
1245 | 1255 | ||
1246 | newsk = tcp_create_openreq_child(sk, req, skb); | 1256 | newsk = tcp_create_openreq_child(sk, req, skb); |
@@ -1257,7 +1267,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1257 | newinet->saddr = ireq->loc_addr; | 1267 | newinet->saddr = ireq->loc_addr; |
1258 | newinet->opt = ireq->opt; | 1268 | newinet->opt = ireq->opt; |
1259 | ireq->opt = NULL; | 1269 | ireq->opt = NULL; |
1260 | newinet->mc_index = tcp_v4_iif(skb); | 1270 | newinet->mc_index = inet_iif(skb); |
1261 | newinet->mc_ttl = skb->nh.iph->ttl; | 1271 | newinet->mc_ttl = skb->nh.iph->ttl; |
1262 | newtp->ext_header_len = 0; | 1272 | newtp->ext_header_len = 0; |
1263 | if (newinet->opt) | 1273 | if (newinet->opt) |
@@ -1285,18 +1295,17 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) | |||
1285 | { | 1295 | { |
1286 | struct tcphdr *th = skb->h.th; | 1296 | struct tcphdr *th = skb->h.th; |
1287 | struct iphdr *iph = skb->nh.iph; | 1297 | struct iphdr *iph = skb->nh.iph; |
1288 | struct tcp_sock *tp = tcp_sk(sk); | ||
1289 | struct sock *nsk; | 1298 | struct sock *nsk; |
1290 | struct request_sock **prev; | 1299 | struct request_sock **prev; |
1291 | /* Find possible connection requests. */ | 1300 | /* Find possible connection requests. */ |
1292 | struct request_sock *req = tcp_v4_search_req(tp, &prev, th->source, | 1301 | struct request_sock *req = inet_csk_search_req(sk, &prev, th->source, |
1293 | iph->saddr, iph->daddr); | 1302 | iph->saddr, iph->daddr); |
1294 | if (req) | 1303 | if (req) |
1295 | return tcp_check_req(sk, skb, req, prev); | 1304 | return tcp_check_req(sk, skb, req, prev); |
1296 | 1305 | ||
1297 | nsk = __inet_lookup_established(&tcp_hashinfo, skb->nh.iph->saddr, | 1306 | nsk = __inet_lookup_established(&tcp_hashinfo, skb->nh.iph->saddr, |
1298 | th->source, skb->nh.iph->daddr, | 1307 | th->source, skb->nh.iph->daddr, |
1299 | ntohs(th->dest), tcp_v4_iif(skb)); | 1308 | ntohs(th->dest), inet_iif(skb)); |
1300 | 1309 | ||
1301 | if (nsk) { | 1310 | if (nsk) { |
1302 | if (nsk->sk_state != TCP_TIME_WAIT) { | 1311 | if (nsk->sk_state != TCP_TIME_WAIT) { |
@@ -1440,7 +1449,7 @@ int tcp_v4_rcv(struct sk_buff *skb) | |||
1440 | 1449 | ||
1441 | sk = __inet_lookup(&tcp_hashinfo, skb->nh.iph->saddr, th->source, | 1450 | sk = __inet_lookup(&tcp_hashinfo, skb->nh.iph->saddr, th->source, |
1442 | skb->nh.iph->daddr, ntohs(th->dest), | 1451 | skb->nh.iph->daddr, ntohs(th->dest), |
1443 | tcp_v4_iif(skb)); | 1452 | inet_iif(skb)); |
1444 | 1453 | ||
1445 | if (!sk) | 1454 | if (!sk) |
1446 | goto no_tcp_socket; | 1455 | goto no_tcp_socket; |
@@ -1507,7 +1516,7 @@ do_time_wait: | |||
1507 | struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo, | 1516 | struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo, |
1508 | skb->nh.iph->daddr, | 1517 | skb->nh.iph->daddr, |
1509 | ntohs(th->dest), | 1518 | ntohs(th->dest), |
1510 | tcp_v4_iif(skb)); | 1519 | inet_iif(skb)); |
1511 | if (sk2) { | 1520 | if (sk2) { |
1512 | tcp_tw_deschedule((struct inet_timewait_sock *)sk); | 1521 | tcp_tw_deschedule((struct inet_timewait_sock *)sk); |
1513 | inet_twsk_put((struct inet_timewait_sock *)sk); | 1522 | inet_twsk_put((struct inet_timewait_sock *)sk); |
@@ -1619,7 +1628,7 @@ static int tcp_v4_init_sock(struct sock *sk) | |||
1619 | tcp_init_xmit_timers(sk); | 1628 | tcp_init_xmit_timers(sk); |
1620 | tcp_prequeue_init(tp); | 1629 | tcp_prequeue_init(tp); |
1621 | 1630 | ||
1622 | tp->rto = TCP_TIMEOUT_INIT; | 1631 | inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; |
1623 | tp->mdev = TCP_TIMEOUT_INIT; | 1632 | tp->mdev = TCP_TIMEOUT_INIT; |
1624 | 1633 | ||
1625 | /* So many TCP implementations out there (incorrectly) count the | 1634 | /* So many TCP implementations out there (incorrectly) count the |
@@ -1672,7 +1681,7 @@ int tcp_v4_destroy_sock(struct sock *sk) | |||
1672 | __skb_queue_purge(&tp->ucopy.prequeue); | 1681 | __skb_queue_purge(&tp->ucopy.prequeue); |
1673 | 1682 | ||
1674 | /* Clean up a referenced TCP bind bucket. */ | 1683 | /* Clean up a referenced TCP bind bucket. */ |
1675 | if (inet_sk(sk)->bind_hash) | 1684 | if (inet_csk(sk)->icsk_bind_hash) |
1676 | inet_put_port(&tcp_hashinfo, sk); | 1685 | inet_put_port(&tcp_hashinfo, sk); |
1677 | 1686 | ||
1678 | /* | 1687 | /* |
@@ -1707,7 +1716,7 @@ static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw) | |||
1707 | 1716 | ||
1708 | static void *listening_get_next(struct seq_file *seq, void *cur) | 1717 | static void *listening_get_next(struct seq_file *seq, void *cur) |
1709 | { | 1718 | { |
1710 | struct tcp_sock *tp; | 1719 | struct inet_connection_sock *icsk; |
1711 | struct hlist_node *node; | 1720 | struct hlist_node *node; |
1712 | struct sock *sk = cur; | 1721 | struct sock *sk = cur; |
1713 | struct tcp_iter_state* st = seq->private; | 1722 | struct tcp_iter_state* st = seq->private; |
@@ -1723,7 +1732,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur) | |||
1723 | if (st->state == TCP_SEQ_STATE_OPENREQ) { | 1732 | if (st->state == TCP_SEQ_STATE_OPENREQ) { |
1724 | struct request_sock *req = cur; | 1733 | struct request_sock *req = cur; |
1725 | 1734 | ||
1726 | tp = tcp_sk(st->syn_wait_sk); | 1735 | icsk = inet_csk(st->syn_wait_sk); |
1727 | req = req->dl_next; | 1736 | req = req->dl_next; |
1728 | while (1) { | 1737 | while (1) { |
1729 | while (req) { | 1738 | while (req) { |
@@ -1736,17 +1745,17 @@ static void *listening_get_next(struct seq_file *seq, void *cur) | |||
1736 | if (++st->sbucket >= TCP_SYNQ_HSIZE) | 1745 | if (++st->sbucket >= TCP_SYNQ_HSIZE) |
1737 | break; | 1746 | break; |
1738 | get_req: | 1747 | get_req: |
1739 | req = tp->accept_queue.listen_opt->syn_table[st->sbucket]; | 1748 | req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket]; |
1740 | } | 1749 | } |
1741 | sk = sk_next(st->syn_wait_sk); | 1750 | sk = sk_next(st->syn_wait_sk); |
1742 | st->state = TCP_SEQ_STATE_LISTENING; | 1751 | st->state = TCP_SEQ_STATE_LISTENING; |
1743 | read_unlock_bh(&tp->accept_queue.syn_wait_lock); | 1752 | read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); |
1744 | } else { | 1753 | } else { |
1745 | tp = tcp_sk(sk); | 1754 | icsk = inet_csk(sk); |
1746 | read_lock_bh(&tp->accept_queue.syn_wait_lock); | 1755 | read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); |
1747 | if (reqsk_queue_len(&tp->accept_queue)) | 1756 | if (reqsk_queue_len(&icsk->icsk_accept_queue)) |
1748 | goto start_req; | 1757 | goto start_req; |
1749 | read_unlock_bh(&tp->accept_queue.syn_wait_lock); | 1758 | read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); |
1750 | sk = sk_next(sk); | 1759 | sk = sk_next(sk); |
1751 | } | 1760 | } |
1752 | get_sk: | 1761 | get_sk: |
@@ -1755,9 +1764,9 @@ get_sk: | |||
1755 | cur = sk; | 1764 | cur = sk; |
1756 | goto out; | 1765 | goto out; |
1757 | } | 1766 | } |
1758 | tp = tcp_sk(sk); | 1767 | icsk = inet_csk(sk); |
1759 | read_lock_bh(&tp->accept_queue.syn_wait_lock); | 1768 | read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); |
1760 | if (reqsk_queue_len(&tp->accept_queue)) { | 1769 | if (reqsk_queue_len(&icsk->icsk_accept_queue)) { |
1761 | start_req: | 1770 | start_req: |
1762 | st->uid = sock_i_uid(sk); | 1771 | st->uid = sock_i_uid(sk); |
1763 | st->syn_wait_sk = sk; | 1772 | st->syn_wait_sk = sk; |
@@ -1765,7 +1774,7 @@ start_req: | |||
1765 | st->sbucket = 0; | 1774 | st->sbucket = 0; |
1766 | goto get_req; | 1775 | goto get_req; |
1767 | } | 1776 | } |
1768 | read_unlock_bh(&tp->accept_queue.syn_wait_lock); | 1777 | read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); |
1769 | } | 1778 | } |
1770 | if (++st->bucket < INET_LHTABLE_SIZE) { | 1779 | if (++st->bucket < INET_LHTABLE_SIZE) { |
1771 | sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]); | 1780 | sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]); |
@@ -1951,8 +1960,8 @@ static void tcp_seq_stop(struct seq_file *seq, void *v) | |||
1951 | switch (st->state) { | 1960 | switch (st->state) { |
1952 | case TCP_SEQ_STATE_OPENREQ: | 1961 | case TCP_SEQ_STATE_OPENREQ: |
1953 | if (v) { | 1962 | if (v) { |
1954 | struct tcp_sock *tp = tcp_sk(st->syn_wait_sk); | 1963 | struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk); |
1955 | read_unlock_bh(&tp->accept_queue.syn_wait_lock); | 1964 | read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); |
1956 | } | 1965 | } |
1957 | case TCP_SEQ_STATE_LISTENING: | 1966 | case TCP_SEQ_STATE_LISTENING: |
1958 | if (v != SEQ_START_TOKEN) | 1967 | if (v != SEQ_START_TOKEN) |
@@ -2058,18 +2067,19 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i) | |||
2058 | int timer_active; | 2067 | int timer_active; |
2059 | unsigned long timer_expires; | 2068 | unsigned long timer_expires; |
2060 | struct tcp_sock *tp = tcp_sk(sp); | 2069 | struct tcp_sock *tp = tcp_sk(sp); |
2070 | const struct inet_connection_sock *icsk = inet_csk(sp); | ||
2061 | struct inet_sock *inet = inet_sk(sp); | 2071 | struct inet_sock *inet = inet_sk(sp); |
2062 | unsigned int dest = inet->daddr; | 2072 | unsigned int dest = inet->daddr; |
2063 | unsigned int src = inet->rcv_saddr; | 2073 | unsigned int src = inet->rcv_saddr; |
2064 | __u16 destp = ntohs(inet->dport); | 2074 | __u16 destp = ntohs(inet->dport); |
2065 | __u16 srcp = ntohs(inet->sport); | 2075 | __u16 srcp = ntohs(inet->sport); |
2066 | 2076 | ||
2067 | if (tp->pending == TCP_TIME_RETRANS) { | 2077 | if (icsk->icsk_pending == ICSK_TIME_RETRANS) { |
2068 | timer_active = 1; | 2078 | timer_active = 1; |
2069 | timer_expires = tp->timeout; | 2079 | timer_expires = icsk->icsk_timeout; |
2070 | } else if (tp->pending == TCP_TIME_PROBE0) { | 2080 | } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { |
2071 | timer_active = 4; | 2081 | timer_active = 4; |
2072 | timer_expires = tp->timeout; | 2082 | timer_expires = icsk->icsk_timeout; |
2073 | } else if (timer_pending(&sp->sk_timer)) { | 2083 | } else if (timer_pending(&sp->sk_timer)) { |
2074 | timer_active = 2; | 2084 | timer_active = 2; |
2075 | timer_expires = sp->sk_timer.expires; | 2085 | timer_expires = sp->sk_timer.expires; |
@@ -2084,12 +2094,14 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i) | |||
2084 | tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq, | 2094 | tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq, |
2085 | timer_active, | 2095 | timer_active, |
2086 | jiffies_to_clock_t(timer_expires - jiffies), | 2096 | jiffies_to_clock_t(timer_expires - jiffies), |
2087 | tp->retransmits, | 2097 | icsk->icsk_retransmits, |
2088 | sock_i_uid(sp), | 2098 | sock_i_uid(sp), |
2089 | tp->probes_out, | 2099 | tp->probes_out, |
2090 | sock_i_ino(sp), | 2100 | sock_i_ino(sp), |
2091 | atomic_read(&sp->sk_refcnt), sp, | 2101 | atomic_read(&sp->sk_refcnt), sp, |
2092 | tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong, | 2102 | icsk->icsk_rto, |
2103 | icsk->icsk_ack.ato, | ||
2104 | (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, | ||
2093 | tp->snd_cwnd, | 2105 | tp->snd_cwnd, |
2094 | tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh); | 2106 | tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh); |
2095 | } | 2107 | } |
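For readers of the /proc/net/tcp line emitted above, the timer_active field encodes which timer is pending; the values used by this function, collected into an illustrative enum (the names are invented here, only the values come from the code):

enum {
	TIMER_NONE      = 0,	/* nothing pending                     */
	TIMER_RETRANS   = 1,	/* icsk_pending == ICSK_TIME_RETRANS   */
	TIMER_KEEPALIVE = 2,	/* sk_timer pending (keepalive et al.) */
	TIMER_PROBE0    = 4,	/* icsk_pending == ICSK_TIME_PROBE0    */
};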
@@ -2174,7 +2186,7 @@ struct proto tcp_prot = { | |||
2174 | .close = tcp_close, | 2186 | .close = tcp_close, |
2175 | .connect = tcp_v4_connect, | 2187 | .connect = tcp_v4_connect, |
2176 | .disconnect = tcp_disconnect, | 2188 | .disconnect = tcp_disconnect, |
2177 | .accept = tcp_accept, | 2189 | .accept = inet_csk_accept, |
2178 | .ioctl = tcp_ioctl, | 2190 | .ioctl = tcp_ioctl, |
2179 | .init = tcp_v4_init_sock, | 2191 | .init = tcp_v4_init_sock, |
2180 | .destroy = tcp_v4_destroy_sock, | 2192 | .destroy = tcp_v4_destroy_sock, |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 8b6cd8d80662..56823704eb7d 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -271,7 +271,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) | |||
271 | 271 | ||
272 | if (tw != NULL) { | 272 | if (tw != NULL) { |
273 | struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); | 273 | struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); |
274 | const int rto = (tp->rto << 2) - (tp->rto >> 1); | 274 | const struct inet_connection_sock *icsk = inet_csk(sk); |
275 | const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1); | ||
275 | 276 | ||
276 | tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale; | 277 | tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale; |
277 | tcptw->tw_rcv_nxt = tp->rcv_nxt; | 278 | tcptw->tw_rcv_nxt = tp->rcv_nxt; |
@@ -605,10 +606,11 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, | |||
605 | struct inet_request_sock *ireq = inet_rsk(req); | 606 | struct inet_request_sock *ireq = inet_rsk(req); |
606 | struct tcp_request_sock *treq = tcp_rsk(req); | 607 | struct tcp_request_sock *treq = tcp_rsk(req); |
607 | struct inet_sock *newinet = inet_sk(newsk); | 608 | struct inet_sock *newinet = inet_sk(newsk); |
609 | struct inet_connection_sock *newicsk = inet_csk(newsk); | ||
608 | struct tcp_sock *newtp; | 610 | struct tcp_sock *newtp; |
609 | 611 | ||
610 | newsk->sk_state = TCP_SYN_RECV; | 612 | newsk->sk_state = TCP_SYN_RECV; |
611 | newinet->bind_hash = NULL; | 613 | newicsk->icsk_bind_hash = NULL; |
612 | 614 | ||
613 | /* Clone the TCP header template */ | 615 | /* Clone the TCP header template */ |
614 | newinet->dport = ireq->rmt_port; | 616 | newinet->dport = ireq->rmt_port; |
@@ -624,11 +626,11 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, | |||
624 | 626 | ||
625 | tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn); | 627 | tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn); |
626 | 628 | ||
627 | newtp->retransmits = 0; | 629 | newicsk->icsk_retransmits = 0; |
628 | newtp->backoff = 0; | 630 | newicsk->icsk_backoff = 0; |
629 | newtp->srtt = 0; | 631 | newtp->srtt = 0; |
630 | newtp->mdev = TCP_TIMEOUT_INIT; | 632 | newtp->mdev = TCP_TIMEOUT_INIT; |
631 | newtp->rto = TCP_TIMEOUT_INIT; | 633 | newicsk->icsk_rto = TCP_TIMEOUT_INIT; |
632 | 634 | ||
633 | newtp->packets_out = 0; | 635 | newtp->packets_out = 0; |
634 | newtp->left_out = 0; | 636 | newtp->left_out = 0; |
@@ -667,11 +669,11 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, | |||
667 | newtp->rx_opt.num_sacks = 0; | 669 | newtp->rx_opt.num_sacks = 0; |
668 | newtp->urg_data = 0; | 670 | newtp->urg_data = 0; |
669 | /* Deinitialize accept_queue to trap illegal accesses. */ | 671 | /* Deinitialize accept_queue to trap illegal accesses. */ |
670 | memset(&newtp->accept_queue, 0, sizeof(newtp->accept_queue)); | 672 | memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue)); |
671 | 673 | ||
672 | if (sock_flag(newsk, SOCK_KEEPOPEN)) | 674 | if (sock_flag(newsk, SOCK_KEEPOPEN)) |
673 | tcp_reset_keepalive_timer(newsk, | 675 | inet_csk_reset_keepalive_timer(newsk, |
674 | keepalive_time_when(newtp)); | 676 | keepalive_time_when(newtp)); |
675 | 677 | ||
676 | newtp->rx_opt.tstamp_ok = ireq->tstamp_ok; | 678 | newtp->rx_opt.tstamp_ok = ireq->tstamp_ok; |
677 | if((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) { | 679 | if((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) { |
@@ -701,7 +703,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, | |||
701 | newtp->tcp_header_len = sizeof(struct tcphdr); | 703 | newtp->tcp_header_len = sizeof(struct tcphdr); |
702 | } | 704 | } |
703 | if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len) | 705 | if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len) |
704 | newtp->ack.last_seg_size = skb->len-newtp->tcp_header_len; | 706 | newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; |
705 | newtp->rx_opt.mss_clamp = req->mss; | 707 | newtp->rx_opt.mss_clamp = req->mss; |
706 | TCP_ECN_openreq_child(newtp, req); | 708 | TCP_ECN_openreq_child(newtp, req); |
707 | if (newtp->ecn_flags&TCP_ECN_OK) | 709 | if (newtp->ecn_flags&TCP_ECN_OK) |
@@ -881,10 +883,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, | |||
881 | if (child == NULL) | 883 | if (child == NULL) |
882 | goto listen_overflow; | 884 | goto listen_overflow; |
883 | 885 | ||
884 | tcp_synq_unlink(tp, req, prev); | 886 | inet_csk_reqsk_queue_unlink(sk, req, prev); |
885 | tcp_synq_removed(sk, req); | 887 | inet_csk_reqsk_queue_removed(sk, req); |
886 | 888 | ||
887 | tcp_acceptq_queue(sk, req, child); | 889 | inet_csk_reqsk_queue_add(sk, req, child); |
888 | return child; | 890 | return child; |
889 | 891 | ||
890 | listen_overflow: | 892 | listen_overflow: |
@@ -898,7 +900,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, | |||
898 | if (!(flg & TCP_FLAG_RST)) | 900 | if (!(flg & TCP_FLAG_RST)) |
899 | req->rsk_ops->send_reset(skb); | 901 | req->rsk_ops->send_reset(skb); |
900 | 902 | ||
901 | tcp_synq_drop(sk, req, prev); | 903 | inet_csk_reqsk_queue_drop(sk, req, prev); |
902 | return NULL; | 904 | return NULL; |
903 | } | 905 | } |
904 | 906 | ||
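The handoff in tcp_check_req() is now spelled with three generic helpers, and the order matters: the request leaves the SYN hash first, the listener's counters are fixed up, and only then is the child published on the accept queue. The same sequence, condensed into one sketch:

static void reqsk_promote(struct sock *parent, struct request_sock *req,
			  struct request_sock **prev, struct sock *child)
{
	inet_csk_reqsk_queue_unlink(parent, req, prev);	/* off the SYN hash    */
	inet_csk_reqsk_queue_removed(parent, req);	/* fix qlen/qlen_young */
	inet_csk_reqsk_queue_add(parent, req, child);	/* onto accept queue   */
}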
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index a4d1eb9a0926..6f0a7e30ceac 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -105,8 +105,9 @@ static __u16 tcp_advertise_mss(struct sock *sk) | |||
105 | 105 | ||
106 | /* RFC2861. Reset CWND after an idle period longer than RTO to "restart window". | 106 | /* RFC2861. Reset CWND after an idle period longer than RTO to "restart window". |
107 | * This is the first part of the cwnd validation mechanism. */ | 107 | * This is the first part of the cwnd validation mechanism. */ |
108 | static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst) | 108 | static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst) |
109 | { | 109 | { |
110 | struct tcp_sock *tp = tcp_sk(sk); | ||
110 | s32 delta = tcp_time_stamp - tp->lsndtime; | 111 | s32 delta = tcp_time_stamp - tp->lsndtime; |
111 | u32 restart_cwnd = tcp_init_cwnd(tp, dst); | 112 | u32 restart_cwnd = tcp_init_cwnd(tp, dst); |
112 | u32 cwnd = tp->snd_cwnd; | 113 | u32 cwnd = tp->snd_cwnd; |
@@ -116,7 +117,7 @@ static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst) | |||
116 | tp->snd_ssthresh = tcp_current_ssthresh(tp); | 117 | tp->snd_ssthresh = tcp_current_ssthresh(tp); |
117 | restart_cwnd = min(restart_cwnd, cwnd); | 118 | restart_cwnd = min(restart_cwnd, cwnd); |
118 | 119 | ||
119 | while ((delta -= tp->rto) > 0 && cwnd > restart_cwnd) | 120 | while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) |
120 | cwnd >>= 1; | 121 | cwnd >>= 1; |
121 | tp->snd_cwnd = max(cwnd, restart_cwnd); | 122 | tp->snd_cwnd = max(cwnd, restart_cwnd); |
122 | tp->snd_cwnd_stamp = tcp_time_stamp; | 123 | tp->snd_cwnd_stamp = tcp_time_stamp; |
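The restart rule halves cwnd once for every full RTO the connection sat idle, flooring at the restart window. A standalone worked example of the same arithmetic: with cwnd = 32, restart_cwnd = 4, rto = 200 ms and delta = 750 ms of idle time, the loop halves three times, ending at 4.

static unsigned int cwnd_after_idle(unsigned int cwnd, unsigned int restart_cwnd,
				    int delta, int rto)
{
	/* One halving per full rto of idle time, never below restart_cwnd. */
	while ((delta -= rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	return cwnd > restart_cwnd ? cwnd : restart_cwnd;
}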
@@ -126,26 +127,25 @@ static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst) | |||
126 | static inline void tcp_event_data_sent(struct tcp_sock *tp, | 127 | static inline void tcp_event_data_sent(struct tcp_sock *tp, |
127 | struct sk_buff *skb, struct sock *sk) | 128 | struct sk_buff *skb, struct sock *sk) |
128 | { | 129 | { |
129 | u32 now = tcp_time_stamp; | 130 | struct inet_connection_sock *icsk = inet_csk(sk); |
131 | const u32 now = tcp_time_stamp; | ||
130 | 132 | ||
131 | if (!tp->packets_out && (s32)(now - tp->lsndtime) > tp->rto) | 133 | if (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto) |
132 | tcp_cwnd_restart(tp, __sk_dst_get(sk)); | 134 | tcp_cwnd_restart(sk, __sk_dst_get(sk)); |
133 | 135 | ||
134 | tp->lsndtime = now; | 136 | tp->lsndtime = now; |
135 | 137 | ||
136 | /* If it is a reply for ato after last received | 138 | /* If it is a reply for ato after last received |
137 | * packet, enter pingpong mode. | 139 | * packet, enter pingpong mode. |
138 | */ | 140 | */ |
139 | if ((u32)(now - tp->ack.lrcvtime) < tp->ack.ato) | 141 | if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato) |
140 | tp->ack.pingpong = 1; | 142 | icsk->icsk_ack.pingpong = 1; |
141 | } | 143 | } |
142 | 144 | ||
143 | static __inline__ void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) | 145 | static __inline__ void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) |
144 | { | 146 | { |
145 | struct tcp_sock *tp = tcp_sk(sk); | 147 | tcp_dec_quickack_mode(sk, pkts); |
146 | 148 | inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); | |
147 | tcp_dec_quickack_mode(tp, pkts); | ||
148 | tcp_clear_xmit_timer(sk, TCP_TIME_DACK); | ||
149 | } | 149 | } |
150 | 150 | ||
151 | /* Determine a window scaling and initial window to offer. | 151 | /* Determine a window scaling and initial window to offer. |
@@ -696,7 +696,7 @@ static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp) | |||
696 | if (tp->packets_out > tp->snd_cwnd_used) | 696 | if (tp->packets_out > tp->snd_cwnd_used) |
697 | tp->snd_cwnd_used = tp->packets_out; | 697 | tp->snd_cwnd_used = tp->packets_out; |
698 | 698 | ||
699 | if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto) | 699 | if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto) |
700 | tcp_cwnd_application_limited(sk); | 700 | tcp_cwnd_application_limited(sk); |
701 | } | 701 | } |
702 | } | 702 | } |
@@ -1147,6 +1147,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now) | |||
1147 | */ | 1147 | */ |
1148 | u32 __tcp_select_window(struct sock *sk) | 1148 | u32 __tcp_select_window(struct sock *sk) |
1149 | { | 1149 | { |
1150 | struct inet_connection_sock *icsk = inet_csk(sk); | ||
1150 | struct tcp_sock *tp = tcp_sk(sk); | 1151 | struct tcp_sock *tp = tcp_sk(sk); |
1151 | /* MSS for the peer's data. Previous versions used mss_clamp | 1152 | /* MSS for the peer's data. Previous versions used mss_clamp |
1152 | * here. I don't know if the value based on our guesses | 1153 | * here. I don't know if the value based on our guesses |
@@ -1154,7 +1155,7 @@ u32 __tcp_select_window(struct sock *sk) | |||
1154 | * but may be worse for the performance because of rcv_mss | 1155 | * but may be worse for the performance because of rcv_mss |
1155 | * fluctuations. --SAW 1998/11/1 | 1156 | * fluctuations. --SAW 1998/11/1 |
1156 | */ | 1157 | */ |
1157 | int mss = tp->ack.rcv_mss; | 1158 | int mss = icsk->icsk_ack.rcv_mss; |
1158 | int free_space = tcp_space(sk); | 1159 | int free_space = tcp_space(sk); |
1159 | int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk)); | 1160 | int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk)); |
1160 | int window; | 1161 | int window; |
@@ -1163,7 +1164,7 @@ u32 __tcp_select_window(struct sock *sk) | |||
1163 | mss = full_space; | 1164 | mss = full_space; |
1164 | 1165 | ||
1165 | if (free_space < full_space/2) { | 1166 | if (free_space < full_space/2) { |
1166 | tp->ack.quick = 0; | 1167 | icsk->icsk_ack.quick = 0; |
1167 | 1168 | ||
1168 | if (tcp_memory_pressure) | 1169 | if (tcp_memory_pressure) |
1169 | tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss); | 1170 | tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss); |
@@ -1491,7 +1492,8 @@ void tcp_xmit_retransmit_queue(struct sock *sk) | |||
1491 | 1492 | ||
1492 | if (skb == | 1493 | if (skb == |
1493 | skb_peek(&sk->sk_write_queue)) | 1494 | skb_peek(&sk->sk_write_queue)) |
1494 | tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); | 1495 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, |
1496 | inet_csk(sk)->icsk_rto); | ||
1495 | } | 1497 | } |
1496 | 1498 | ||
1497 | packet_cnt -= tcp_skb_pcount(skb); | 1499 | packet_cnt -= tcp_skb_pcount(skb); |
@@ -1544,7 +1546,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) | |||
1544 | break; | 1546 | break; |
1545 | 1547 | ||
1546 | if (skb == skb_peek(&sk->sk_write_queue)) | 1548 | if (skb == skb_peek(&sk->sk_write_queue)) |
1547 | tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); | 1549 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto); |
1548 | 1550 | ||
1549 | NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS); | 1551 | NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS); |
1550 | } | 1552 | } |
@@ -1780,8 +1782,8 @@ static inline void tcp_connect_init(struct sock *sk) | |||
1780 | tp->rcv_wup = 0; | 1782 | tp->rcv_wup = 0; |
1781 | tp->copied_seq = 0; | 1783 | tp->copied_seq = 0; |
1782 | 1784 | ||
1783 | tp->rto = TCP_TIMEOUT_INIT; | 1785 | inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; |
1784 | tp->retransmits = 0; | 1786 | inet_csk(sk)->icsk_retransmits = 0; |
1785 | tcp_clear_retrans(tp); | 1787 | tcp_clear_retrans(tp); |
1786 | } | 1788 | } |
1787 | 1789 | ||
@@ -1824,7 +1826,7 @@ int tcp_connect(struct sock *sk) | |||
1824 | TCP_INC_STATS(TCP_MIB_ACTIVEOPENS); | 1826 | TCP_INC_STATS(TCP_MIB_ACTIVEOPENS); |
1825 | 1827 | ||
1826 | /* Timer for repeating the SYN until an answer. */ | 1828 | /* Timer for repeating the SYN until an answer. */ |
1827 | tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); | 1829 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto); |
1828 | return 0; | 1830 | return 0; |
1829 | } | 1831 | } |
1830 | 1832 | ||
@@ -1834,20 +1836,21 @@ int tcp_connect(struct sock *sk) | |||
1834 | */ | 1836 | */ |
1835 | void tcp_send_delayed_ack(struct sock *sk) | 1837 | void tcp_send_delayed_ack(struct sock *sk) |
1836 | { | 1838 | { |
1837 | struct tcp_sock *tp = tcp_sk(sk); | 1839 | struct inet_connection_sock *icsk = inet_csk(sk); |
1838 | int ato = tp->ack.ato; | 1840 | int ato = icsk->icsk_ack.ato; |
1839 | unsigned long timeout; | 1841 | unsigned long timeout; |
1840 | 1842 | ||
1841 | if (ato > TCP_DELACK_MIN) { | 1843 | if (ato > TCP_DELACK_MIN) { |
1844 | const struct tcp_sock *tp = tcp_sk(sk); | ||
1842 | int max_ato = HZ/2; | 1845 | int max_ato = HZ/2; |
1843 | 1846 | ||
1844 | if (tp->ack.pingpong || (tp->ack.pending&TCP_ACK_PUSHED)) | 1847 | if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) |
1845 | max_ato = TCP_DELACK_MAX; | 1848 | max_ato = TCP_DELACK_MAX; |
1846 | 1849 | ||
1847 | /* Slow path, intersegment interval is "high". */ | 1850 | /* Slow path, intersegment interval is "high". */ |
1848 | 1851 | ||
1849 | /* If some rtt estimate is known, use it to bound delayed ack. | 1852 | /* If some rtt estimate is known, use it to bound delayed ack. |
1850 | * Do not use tp->rto here, use results of rtt measurements | 1853 | * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements |
1851 | * directly. | 1854 | * directly. |
1852 | */ | 1855 | */ |
1853 | if (tp->srtt) { | 1856 | if (tp->srtt) { |
@@ -1864,21 +1867,22 @@ void tcp_send_delayed_ack(struct sock *sk) | |||
1864 | timeout = jiffies + ato; | 1867 | timeout = jiffies + ato; |
1865 | 1868 | ||
1866 | /* Use new timeout only if there wasn't an older one earlier. */ | 1869 | /* Use new timeout only if there wasn't an older one earlier. */ |
1867 | if (tp->ack.pending&TCP_ACK_TIMER) { | 1870 | if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { |
1868 | /* If delack timer was blocked or is about to expire, | 1871 | /* If delack timer was blocked or is about to expire, |
1869 | * send ACK now. | 1872 | * send ACK now. |
1870 | */ | 1873 | */ |
1871 | if (tp->ack.blocked || time_before_eq(tp->ack.timeout, jiffies+(ato>>2))) { | 1874 | if (icsk->icsk_ack.blocked || |
1875 | time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { | ||
1872 | tcp_send_ack(sk); | 1876 | tcp_send_ack(sk); |
1873 | return; | 1877 | return; |
1874 | } | 1878 | } |
1875 | 1879 | ||
1876 | if (!time_before(timeout, tp->ack.timeout)) | 1880 | if (!time_before(timeout, icsk->icsk_ack.timeout)) |
1877 | timeout = tp->ack.timeout; | 1881 | timeout = icsk->icsk_ack.timeout; |
1878 | } | 1882 | } |
1879 | tp->ack.pending |= TCP_ACK_SCHED|TCP_ACK_TIMER; | 1883 | icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; |
1880 | tp->ack.timeout = timeout; | 1884 | icsk->icsk_ack.timeout = timeout; |
1881 | sk_reset_timer(sk, &tp->delack_timer, timeout); | 1885 | sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); |
1882 | } | 1886 | } |
1883 | 1887 | ||
1884 | /* This routine sends an ack and also updates the window. */ | 1888 | /* This routine sends an ack and also updates the window. */ |
@@ -1895,9 +1899,9 @@ void tcp_send_ack(struct sock *sk) | |||
1895 | */ | 1899 | */ |
1896 | buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); | 1900 | buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); |
1897 | if (buff == NULL) { | 1901 | if (buff == NULL) { |
1898 | tcp_schedule_ack(tp); | 1902 | inet_csk_schedule_ack(sk); |
1899 | tp->ack.ato = TCP_ATO_MIN; | 1903 | inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; |
1900 | tcp_reset_xmit_timer(sk, TCP_TIME_DACK, TCP_DELACK_MAX); | 1904 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX); |
1901 | return; | 1905 | return; |
1902 | } | 1906 | } |
1903 | 1907 | ||
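The allocation-failure path above degrades gracefully instead of losing the ACK: the socket is marked as owing one, ato is forced to its minimum so the retry comes quickly, and TCP_DELACK_MAX bounds how far out the timer can land. The same three steps as a standalone sketch:

static void tcp_ack_alloc_failed(struct sock *sk)
{
	inet_csk_schedule_ack(sk);			/* remember an ACK is owed */
	inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;	/* retry promptly          */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX);
}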
@@ -2011,6 +2015,7 @@ int tcp_write_wakeup(struct sock *sk) | |||
2011 | */ | 2015 | */ |
2012 | void tcp_send_probe0(struct sock *sk) | 2016 | void tcp_send_probe0(struct sock *sk) |
2013 | { | 2017 | { |
2018 | struct inet_connection_sock *icsk = inet_csk(sk); | ||
2014 | struct tcp_sock *tp = tcp_sk(sk); | 2019 | struct tcp_sock *tp = tcp_sk(sk); |
2015 | int err; | 2020 | int err; |
2016 | 2021 | ||
@@ -2019,16 +2024,16 @@ void tcp_send_probe0(struct sock *sk) | |||
2019 | if (tp->packets_out || !sk->sk_send_head) { | 2024 | if (tp->packets_out || !sk->sk_send_head) { |
2020 | /* Cancel probe timer, if it is not required. */ | 2025 | /* Cancel probe timer, if it is not required. */ |
2021 | tp->probes_out = 0; | 2026 | tp->probes_out = 0; |
2022 | tp->backoff = 0; | 2027 | icsk->icsk_backoff = 0; |
2023 | return; | 2028 | return; |
2024 | } | 2029 | } |
2025 | 2030 | ||
2026 | if (err <= 0) { | 2031 | if (err <= 0) { |
2027 | if (tp->backoff < sysctl_tcp_retries2) | 2032 | if (icsk->icsk_backoff < sysctl_tcp_retries2) |
2028 | tp->backoff++; | 2033 | icsk->icsk_backoff++; |
2029 | tp->probes_out++; | 2034 | tp->probes_out++; |
2030 | tcp_reset_xmit_timer (sk, TCP_TIME_PROBE0, | 2035 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, |
2031 | min(tp->rto << tp->backoff, TCP_RTO_MAX)); | 2036 | min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX)); |
2032 | } else { | 2037 | } else { |
2033 | /* If packet was not sent due to local congestion, | 2038 | /* If packet was not sent due to local congestion, |
2034 | * do not backoff and do not remember probes_out. | 2039 | * do not backoff and do not remember probes_out. |
@@ -2038,8 +2043,9 @@ void tcp_send_probe0(struct sock *sk) | |||
2038 | */ | 2043 | */ |
2039 | if (!tp->probes_out) | 2044 | if (!tp->probes_out) |
2040 | tp->probes_out=1; | 2045 | tp->probes_out=1; |
2041 | tcp_reset_xmit_timer (sk, TCP_TIME_PROBE0, | 2046 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, |
2042 | min(tp->rto << tp->backoff, TCP_RESOURCE_PROBE_INTERVAL)); | 2047 | min(icsk->icsk_rto << icsk->icsk_backoff, |
2048 | TCP_RESOURCE_PROBE_INTERVAL)); | ||
2043 | } | 2049 | } |
2044 | } | 2050 | } |
2045 | 2051 | ||
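Both probe branches arm the same timer with icsk_rto << icsk_backoff; only the clamp differs — TCP_RTO_MAX after a real failure, the much shorter TCP_RESOURCE_PROBE_INTERVAL when the probe merely failed on local congestion. The backoff arithmetic in isolation:

static unsigned long probe0_timeout(unsigned long rto, int backoff,
				    unsigned long max_when)
{
	unsigned long when = rto << backoff;	/* exponential back-off */

	return when < max_when ? when : max_when;
}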
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 0084227438c2..0b71380ee42f 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -36,9 +36,9 @@ static void tcp_write_timer(unsigned long); | |||
36 | static void tcp_delack_timer(unsigned long); | 36 | static void tcp_delack_timer(unsigned long); |
37 | static void tcp_keepalive_timer (unsigned long data); | 37 | static void tcp_keepalive_timer (unsigned long data); |
38 | 38 | ||
39 | #ifdef TCP_DEBUG | 39 | #ifdef INET_CSK_DEBUG |
40 | const char tcp_timer_bug_msg[] = KERN_DEBUG "tcpbug: unknown timer value\n"; | 40 | const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n"; |
41 | EXPORT_SYMBOL(tcp_timer_bug_msg); | 41 | EXPORT_SYMBOL(inet_csk_timer_bug_msg); |
42 | #endif | 42 | #endif |
43 | 43 | ||
44 | /* | 44 | /* |
@@ -46,40 +46,45 @@ EXPORT_SYMBOL(tcp_timer_bug_msg); | |||
46 | * We may wish to use just one timer maintaining a list of expire jiffies | 46 | * We may wish to use just one timer maintaining a list of expire jiffies |
47 | * to optimize. | 47 | * to optimize. |
48 | */ | 48 | */ |
49 | 49 | void inet_csk_init_xmit_timers(struct sock *sk, | |
50 | void tcp_init_xmit_timers(struct sock *sk) | 50 | void (*retransmit_handler)(unsigned long), |
51 | void (*delack_handler)(unsigned long), | ||
52 | void (*keepalive_handler)(unsigned long)) | ||
51 | { | 53 | { |
52 | struct tcp_sock *tp = tcp_sk(sk); | 54 | struct inet_connection_sock *icsk = inet_csk(sk); |
53 | 55 | ||
54 | init_timer(&tp->retransmit_timer); | 56 | init_timer(&icsk->icsk_retransmit_timer); |
55 | tp->retransmit_timer.function=&tcp_write_timer; | 57 | init_timer(&icsk->icsk_delack_timer); |
56 | tp->retransmit_timer.data = (unsigned long) sk; | 58 | init_timer(&sk->sk_timer); |
57 | tp->pending = 0; | ||
58 | 59 | ||
59 | init_timer(&tp->delack_timer); | 60 | icsk->icsk_retransmit_timer.function = retransmit_handler; |
60 | tp->delack_timer.function=&tcp_delack_timer; | 61 | icsk->icsk_delack_timer.function = delack_handler; |
61 | tp->delack_timer.data = (unsigned long) sk; | 62 | sk->sk_timer.function = keepalive_handler; |
62 | tp->ack.pending = 0; | ||
63 | 63 | ||
64 | init_timer(&sk->sk_timer); | 64 | icsk->icsk_retransmit_timer.data = |
65 | sk->sk_timer.function = &tcp_keepalive_timer; | 65 | icsk->icsk_delack_timer.data = |
66 | sk->sk_timer.data = (unsigned long)sk; | 66 | sk->sk_timer.data = (unsigned long)sk; |
67 | |||
68 | icsk->icsk_pending = icsk->icsk_ack.pending = 0; | ||
67 | } | 69 | } |
68 | 70 | ||
69 | void tcp_clear_xmit_timers(struct sock *sk) | 71 | void inet_csk_clear_xmit_timers(struct sock *sk) |
70 | { | 72 | { |
71 | struct tcp_sock *tp = tcp_sk(sk); | 73 | struct inet_connection_sock *icsk = inet_csk(sk); |
72 | 74 | ||
73 | tp->pending = 0; | 75 | icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0; |
74 | sk_stop_timer(sk, &tp->retransmit_timer); | ||
75 | |||
76 | tp->ack.pending = 0; | ||
77 | tp->ack.blocked = 0; | ||
78 | sk_stop_timer(sk, &tp->delack_timer); | ||
79 | 76 | ||
77 | sk_stop_timer(sk, &icsk->icsk_retransmit_timer); | ||
78 | sk_stop_timer(sk, &icsk->icsk_delack_timer); | ||
80 | sk_stop_timer(sk, &sk->sk_timer); | 79 | sk_stop_timer(sk, &sk->sk_timer); |
81 | } | 80 | } |
82 | 81 | ||
82 | void tcp_init_xmit_timers(struct sock *sk) | ||
83 | { | ||
84 | inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer, | ||
85 | &tcp_keepalive_timer); | ||
86 | } | ||
87 | |||
83 | static void tcp_write_err(struct sock *sk) | 88 | static void tcp_write_err(struct sock *sk) |
84 | { | 89 | { |
85 | sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; | 90 | sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; |
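With the three handlers injected as function pointers, tcp_init_xmit_timers() collapses to the wrapper above, and another inet_connection_sock user can install its own callbacks the same way. A hypothetical sketch (the dccp_* handlers are not part of this patch):

static void dccp_write_timer(unsigned long data);
static void dccp_delack_timer(unsigned long data);
static void dccp_keepalive_timer(unsigned long data);

static void dccp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
				  &dccp_keepalive_timer);
}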
@@ -155,15 +160,15 @@ static int tcp_orphan_retries(struct sock *sk, int alive) | |||
155 | /* A write timeout has occurred. Process the after effects. */ | 160 | /* A write timeout has occurred. Process the after effects. */ |
156 | static int tcp_write_timeout(struct sock *sk) | 161 | static int tcp_write_timeout(struct sock *sk) |
157 | { | 162 | { |
158 | struct tcp_sock *tp = tcp_sk(sk); | 163 | const struct inet_connection_sock *icsk = inet_csk(sk); |
159 | int retry_until; | 164 | int retry_until; |
160 | 165 | ||
161 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { | 166 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { |
162 | if (tp->retransmits) | 167 | if (icsk->icsk_retransmits) |
163 | dst_negative_advice(&sk->sk_dst_cache); | 168 | dst_negative_advice(&sk->sk_dst_cache); |
164 | retry_until = tp->syn_retries ? : sysctl_tcp_syn_retries; | 169 | retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; |
165 | } else { | 170 | } else { |
166 | if (tp->retransmits >= sysctl_tcp_retries1) { | 171 | if (icsk->icsk_retransmits >= sysctl_tcp_retries1) { |
167 | /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu black | 172 | /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu black |
168 | hole detection. :-( | 173 | hole detection. :-( |
169 | 174 | ||
@@ -189,16 +194,16 @@ static int tcp_write_timeout(struct sock *sk) | |||
189 | 194 | ||
190 | retry_until = sysctl_tcp_retries2; | 195 | retry_until = sysctl_tcp_retries2; |
191 | if (sock_flag(sk, SOCK_DEAD)) { | 196 | if (sock_flag(sk, SOCK_DEAD)) { |
192 | int alive = (tp->rto < TCP_RTO_MAX); | 197 | const int alive = (icsk->icsk_rto < TCP_RTO_MAX); |
193 | 198 | ||
194 | retry_until = tcp_orphan_retries(sk, alive); | 199 | retry_until = tcp_orphan_retries(sk, alive); |
195 | 200 | ||
196 | if (tcp_out_of_resources(sk, alive || tp->retransmits < retry_until)) | 201 | if (tcp_out_of_resources(sk, alive || icsk->icsk_retransmits < retry_until)) |
197 | return 1; | 202 | return 1; |
198 | } | 203 | } |
199 | } | 204 | } |
200 | 205 | ||
201 | if (tp->retransmits >= retry_until) { | 206 | if (icsk->icsk_retransmits >= retry_until) { |
202 | /* Has it gone just too far? */ | 207 | /* Has it gone just too far? */ |
203 | tcp_write_err(sk); | 208 | tcp_write_err(sk); |
204 | return 1; | 209 | return 1; |
@@ -210,26 +215,27 @@ static void tcp_delack_timer(unsigned long data) | |||
210 | { | 215 | { |
211 | struct sock *sk = (struct sock*)data; | 216 | struct sock *sk = (struct sock*)data; |
212 | struct tcp_sock *tp = tcp_sk(sk); | 217 | struct tcp_sock *tp = tcp_sk(sk); |
218 | struct inet_connection_sock *icsk = inet_csk(sk); | ||
213 | 219 | ||
214 | bh_lock_sock(sk); | 220 | bh_lock_sock(sk); |
215 | if (sock_owned_by_user(sk)) { | 221 | if (sock_owned_by_user(sk)) { |
216 | /* Try again later. */ | 222 | /* Try again later. */ |
217 | tp->ack.blocked = 1; | 223 | icsk->icsk_ack.blocked = 1; |
218 | NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED); | 224 | NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED); |
219 | sk_reset_timer(sk, &tp->delack_timer, jiffies + TCP_DELACK_MIN); | 225 | sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN); |
220 | goto out_unlock; | 226 | goto out_unlock; |
221 | } | 227 | } |
222 | 228 | ||
223 | sk_stream_mem_reclaim(sk); | 229 | sk_stream_mem_reclaim(sk); |
224 | 230 | ||
225 | if (sk->sk_state == TCP_CLOSE || !(tp->ack.pending & TCP_ACK_TIMER)) | 231 | if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) |
226 | goto out; | 232 | goto out; |
227 | 233 | ||
228 | if (time_after(tp->ack.timeout, jiffies)) { | 234 | if (time_after(icsk->icsk_ack.timeout, jiffies)) { |
229 | sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout); | 235 | sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); |
230 | goto out; | 236 | goto out; |
231 | } | 237 | } |
232 | tp->ack.pending &= ~TCP_ACK_TIMER; | 238 | icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER; |
233 | 239 | ||
234 | if (!skb_queue_empty(&tp->ucopy.prequeue)) { | 240 | if (!skb_queue_empty(&tp->ucopy.prequeue)) { |
235 | struct sk_buff *skb; | 241 | struct sk_buff *skb; |
@@ -242,16 +248,16 @@ static void tcp_delack_timer(unsigned long data) | |||
242 | tp->ucopy.memory = 0; | 248 | tp->ucopy.memory = 0; |
243 | } | 249 | } |
244 | 250 | ||
245 | if (tcp_ack_scheduled(tp)) { | 251 | if (inet_csk_ack_scheduled(sk)) { |
246 | if (!tp->ack.pingpong) { | 252 | if (!icsk->icsk_ack.pingpong) { |
247 | /* Delayed ACK missed: inflate ATO. */ | 253 | /* Delayed ACK missed: inflate ATO. */ |
248 | tp->ack.ato = min(tp->ack.ato << 1, tp->rto); | 254 | icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto); |
249 | } else { | 255 | } else { |
250 | /* Delayed ACK missed: leave pingpong mode and | 256 | /* Delayed ACK missed: leave pingpong mode and |
251 | * deflate ATO. | 257 | * deflate ATO. |
252 | */ | 258 | */ |
253 | tp->ack.pingpong = 0; | 259 | icsk->icsk_ack.pingpong = 0; |
254 | tp->ack.ato = TCP_ATO_MIN; | 260 | icsk->icsk_ack.ato = TCP_ATO_MIN; |
255 | } | 261 | } |
256 | tcp_send_ack(sk); | 262 | tcp_send_ack(sk); |
257 | NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS); | 263 | NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS); |
@@ -294,7 +300,8 @@ static void tcp_probe_timer(struct sock *sk) | |||
294 | max_probes = sysctl_tcp_retries2; | 300 | max_probes = sysctl_tcp_retries2; |
295 | 301 | ||
296 | if (sock_flag(sk, SOCK_DEAD)) { | 302 | if (sock_flag(sk, SOCK_DEAD)) { |
297 | int alive = ((tp->rto<<tp->backoff) < TCP_RTO_MAX); | 303 | const struct inet_connection_sock *icsk = inet_csk(sk); |
304 | const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX); | ||
298 | 305 | ||
299 | max_probes = tcp_orphan_retries(sk, alive); | 306 | max_probes = tcp_orphan_retries(sk, alive); |
300 | 307 | ||
@@ -317,6 +324,7 @@ static void tcp_probe_timer(struct sock *sk) | |||
317 | static void tcp_retransmit_timer(struct sock *sk) | 324 | static void tcp_retransmit_timer(struct sock *sk) |
318 | { | 325 | { |
319 | struct tcp_sock *tp = tcp_sk(sk); | 326 | struct tcp_sock *tp = tcp_sk(sk); |
327 | struct inet_connection_sock *icsk = inet_csk(sk); | ||
320 | 328 | ||
321 | if (!tp->packets_out) | 329 | if (!tp->packets_out) |
322 | goto out; | 330 | goto out; |
@@ -351,7 +359,7 @@ static void tcp_retransmit_timer(struct sock *sk) | |||
351 | if (tcp_write_timeout(sk)) | 359 | if (tcp_write_timeout(sk)) |
352 | goto out; | 360 | goto out; |
353 | 361 | ||
354 | if (tp->retransmits == 0) { | 362 | if (icsk->icsk_retransmits == 0) { |
355 | if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) { | 363 | if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) { |
356 | if (tp->rx_opt.sack_ok) { | 364 | if (tp->rx_opt.sack_ok) { |
357 | if (tp->ca_state == TCP_CA_Recovery) | 365 | if (tp->ca_state == TCP_CA_Recovery) |
@@ -381,10 +389,10 @@ static void tcp_retransmit_timer(struct sock *sk) | |||
381 | /* Retransmission failed because of local congestion, | 389 | /* Retransmission failed because of local congestion, |
382 | * do not backoff. | 390 | * do not backoff. |
383 | */ | 391 | */ |
384 | if (!tp->retransmits) | 392 | if (!icsk->icsk_retransmits) |
385 | tp->retransmits=1; | 393 | icsk->icsk_retransmits = 1; |
386 | tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, | 394 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, |
387 | min(tp->rto, TCP_RESOURCE_PROBE_INTERVAL)); | 395 | min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL)); |
388 | goto out; | 396 | goto out; |
389 | } | 397 | } |
390 | 398 | ||
@@ -403,13 +411,13 @@ static void tcp_retransmit_timer(struct sock *sk) | |||
403 | * implemented ftp to mars will work nicely. We will have to fix | 411 | * implemented ftp to mars will work nicely. We will have to fix |
404 | * the 120 second clamps though! | 412 | * the 120 second clamps though! |
405 | */ | 413 | */ |
406 | tp->backoff++; | 414 | icsk->icsk_backoff++; |
407 | tp->retransmits++; | 415 | icsk->icsk_retransmits++; |
408 | 416 | ||
409 | out_reset_timer: | 417 | out_reset_timer: |
410 | tp->rto = min(tp->rto << 1, TCP_RTO_MAX); | 418 | icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); |
411 | tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); | 419 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto); |
412 | if (tp->retransmits > sysctl_tcp_retries1) | 420 | if (icsk->icsk_retransmits > sysctl_tcp_retries1) |
413 | __sk_dst_reset(sk); | 421 | __sk_dst_reset(sk); |
414 | 422 | ||
415 | out:; | 423 | out:; |
@@ -418,32 +426,32 @@ out:; | |||
418 | static void tcp_write_timer(unsigned long data) | 426 | static void tcp_write_timer(unsigned long data) |
419 | { | 427 | { |
420 | struct sock *sk = (struct sock*)data; | 428 | struct sock *sk = (struct sock*)data; |
421 | struct tcp_sock *tp = tcp_sk(sk); | 429 | struct inet_connection_sock *icsk = inet_csk(sk); |
422 | int event; | 430 | int event; |
423 | 431 | ||
424 | bh_lock_sock(sk); | 432 | bh_lock_sock(sk); |
425 | if (sock_owned_by_user(sk)) { | 433 | if (sock_owned_by_user(sk)) { |
426 | /* Try again later */ | 434 | /* Try again later */ |
427 | sk_reset_timer(sk, &tp->retransmit_timer, jiffies + (HZ / 20)); | 435 | sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20)); |
428 | goto out_unlock; | 436 | goto out_unlock; |
429 | } | 437 | } |
430 | 438 | ||
431 | if (sk->sk_state == TCP_CLOSE || !tp->pending) | 439 | if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending) |
432 | goto out; | 440 | goto out; |
433 | 441 | ||
434 | if (time_after(tp->timeout, jiffies)) { | 442 | if (time_after(icsk->icsk_timeout, jiffies)) { |
435 | sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout); | 443 | sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); |
436 | goto out; | 444 | goto out; |
437 | } | 445 | } |
438 | 446 | ||
439 | event = tp->pending; | 447 | event = icsk->icsk_pending; |
440 | tp->pending = 0; | 448 | icsk->icsk_pending = 0; |
441 | 449 | ||
442 | switch (event) { | 450 | switch (event) { |
443 | case TCP_TIME_RETRANS: | 451 | case ICSK_TIME_RETRANS: |
444 | tcp_retransmit_timer(sk); | 452 | tcp_retransmit_timer(sk); |
445 | break; | 453 | break; |
446 | case TCP_TIME_PROBE0: | 454 | case ICSK_TIME_PROBE0: |
447 | tcp_probe_timer(sk); | 455 | tcp_probe_timer(sk); |
448 | break; | 456 | break; |
449 | } | 457 | } |
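
The rewritten tcp_write_timer() keeps the pre-existing deferral pattern and only changes which structure it reads: retry shortly if the socket is locked by a user context, re-arm if the deadline has not yet passed, otherwise consume icsk_pending and dispatch on the event type. A compilable userspace sketch of that pattern follows; all names here are illustrative stand-ins, not the kernel's.

/* Illustrative model of the tcp_write_timer() deferral/dispatch logic. */
#include <stdbool.h>

enum pending_event { EV_NONE, EV_RETRANS, EV_PROBE0 };

struct timer_model {
	bool owned_by_user;
	unsigned long timeout;
	unsigned long now;
	enum pending_event pending;
};

static void rearm(struct timer_model *t, unsigned long when)
{
	t->timeout = when;
}

static void do_retransmit(void) { }
static void do_probe(void) { }

static void write_timer_fire(struct timer_model *t)
{
	if (t->owned_by_user) {         /* socket locked: poll again soon */
		rearm(t, t->now + 50);  /* stands in for jiffies + HZ/20  */
		return;
	}
	if (t->timeout > t->now) {      /* fired early: re-arm for later  */
		rearm(t, t->timeout);
		return;
	}
	enum pending_event ev = t->pending;
	t->pending = EV_NONE;           /* consume the event, then dispatch */
	switch (ev) {
	case EV_RETRANS: do_retransmit(); break;
	case EV_PROBE0:  do_probe();      break;
	default:         break;
	}
}

int main(void)
{
	struct timer_model t = { false, 0, 100, EV_RETRANS };
	write_timer_fire(&t);   /* takes the retransmit branch */
	return t.pending == EV_NONE ? 0 : 1;
}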
@@ -463,8 +471,9 @@ out_unlock: | |||
463 | static void tcp_synack_timer(struct sock *sk) | 471 | static void tcp_synack_timer(struct sock *sk) |
464 | { | 472 | { |
465 | struct tcp_sock *tp = tcp_sk(sk); | 473 | struct tcp_sock *tp = tcp_sk(sk); |
466 | struct listen_sock *lopt = tp->accept_queue.listen_opt; | 474 | struct inet_connection_sock *icsk = inet_csk(sk); |
467 | int max_retries = tp->syn_retries ? : sysctl_tcp_synack_retries; | 475 | struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt; |
476 | int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries; | ||
468 | int thresh = max_retries; | 477 | int thresh = max_retries; |
469 | unsigned long now = jiffies; | 478 | unsigned long now = jiffies; |
470 | struct request_sock **reqp, *req; | 479 | struct request_sock **reqp, *req; |
@@ -526,8 +535,8 @@ static void tcp_synack_timer(struct sock *sk) | |||
526 | } | 535 | } |
527 | 536 | ||
528 | /* Drop this request */ | 537 | /* Drop this request */ |
529 | tcp_synq_unlink(tp, req, reqp); | 538 | inet_csk_reqsk_queue_unlink(sk, req, reqp); |
530 | reqsk_queue_removed(&tp->accept_queue, req); | 539 | reqsk_queue_removed(&icsk->icsk_accept_queue, req); |
531 | reqsk_free(req); | 540 | reqsk_free(req); |
532 | continue; | 541 | continue; |
533 | } | 542 | } |
@@ -541,15 +550,15 @@ static void tcp_synack_timer(struct sock *sk) | |||
541 | lopt->clock_hand = i; | 550 | lopt->clock_hand = i; |
542 | 551 | ||
543 | if (lopt->qlen) | 552 | if (lopt->qlen) |
544 | tcp_reset_keepalive_timer(sk, TCP_SYNQ_INTERVAL); | 553 | inet_csk_reset_keepalive_timer(sk, TCP_SYNQ_INTERVAL); |
545 | } | 554 | } |
546 | 555 | ||
547 | void tcp_delete_keepalive_timer (struct sock *sk) | 556 | void inet_csk_delete_keepalive_timer(struct sock *sk) |
548 | { | 557 | { |
549 | sk_stop_timer(sk, &sk->sk_timer); | 558 | sk_stop_timer(sk, &sk->sk_timer); |
550 | } | 559 | } |
551 | 560 | ||
552 | void tcp_reset_keepalive_timer (struct sock *sk, unsigned long len) | 561 | void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len) |
553 | { | 562 | { |
554 | sk_reset_timer(sk, &sk->sk_timer, jiffies + len); | 563 | sk_reset_timer(sk, &sk->sk_timer, jiffies + len); |
555 | } | 564 | } |
@@ -560,9 +569,9 @@ void tcp_set_keepalive(struct sock *sk, int val) | |||
560 | return; | 569 | return; |
561 | 570 | ||
562 | if (val && !sock_flag(sk, SOCK_KEEPOPEN)) | 571 | if (val && !sock_flag(sk, SOCK_KEEPOPEN)) |
563 | tcp_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk))); | 572 | inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk))); |
564 | else if (!val) | 573 | else if (!val) |
565 | tcp_delete_keepalive_timer(sk); | 574 | inet_csk_delete_keepalive_timer(sk); |
566 | } | 575 | } |
567 | 576 | ||
568 | 577 | ||
@@ -576,7 +585,7 @@ static void tcp_keepalive_timer (unsigned long data) | |||
576 | bh_lock_sock(sk); | 585 | bh_lock_sock(sk); |
577 | if (sock_owned_by_user(sk)) { | 586 | if (sock_owned_by_user(sk)) { |
578 | /* Try again later. */ | 587 | /* Try again later. */ |
579 | tcp_reset_keepalive_timer (sk, HZ/20); | 588 | inet_csk_reset_keepalive_timer (sk, HZ/20); |
580 | goto out; | 589 | goto out; |
581 | } | 590 | } |
582 | 591 | ||
@@ -587,7 +596,7 @@ static void tcp_keepalive_timer (unsigned long data) | |||
587 | 596 | ||
588 | if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) { | 597 | if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) { |
589 | if (tp->linger2 >= 0) { | 598 | if (tp->linger2 >= 0) { |
590 | int tmo = tcp_fin_time(tp) - TCP_TIMEWAIT_LEN; | 599 | const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN; |
591 | 600 | ||
592 | if (tmo > 0) { | 601 | if (tmo > 0) { |
593 | tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); | 602 | tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); |
@@ -634,7 +643,7 @@ static void tcp_keepalive_timer (unsigned long data) | |||
634 | sk_stream_mem_reclaim(sk); | 643 | sk_stream_mem_reclaim(sk); |
635 | 644 | ||
636 | resched: | 645 | resched: |
637 | tcp_reset_keepalive_timer (sk, elapsed); | 646 | inet_csk_reset_keepalive_timer (sk, elapsed); |
638 | goto out; | 647 | goto out; |
639 | 648 | ||
640 | death: | 649 | death: |
@@ -645,7 +654,7 @@ out: | |||
645 | sock_put(sk); | 654 | sock_put(sk); |
646 | } | 655 | } |
647 | 656 | ||
648 | EXPORT_SYMBOL(tcp_clear_xmit_timers); | 657 | EXPORT_SYMBOL(inet_csk_clear_xmit_timers); |
649 | EXPORT_SYMBOL(tcp_delete_keepalive_timer); | 658 | EXPORT_SYMBOL(inet_csk_delete_keepalive_timer); |
650 | EXPORT_SYMBOL(tcp_init_xmit_timers); | 659 | EXPORT_SYMBOL(tcp_init_xmit_timers); |
651 | EXPORT_SYMBOL(tcp_reset_keepalive_timer); | 660 | EXPORT_SYMBOL(inet_csk_reset_keepalive_timer); |
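
The out_reset_timer path above doubles icsk_rto on every expiry and clamps it at TCP_RTO_MAX, exactly as the old tp->rto code did. A minimal userspace model of that backoff schedule, assuming HZ = 1000 and the 2.6-era defaults of a 3 s initial RTO and a 120 s maximum (an illustration, not kernel code):

/* Prints the exponential retransmission backoff sequence:
 * 3, 6, 12, 24, 48, 96, then clamped at 120 seconds.
 */
#include <stdio.h>

#define HZ               1000
#define TCP_TIMEOUT_INIT (3 * HZ)    /* initial RTO: 3 seconds   */
#define TCP_RTO_MAX      (120 * HZ)  /* RTO ceiling: 120 seconds */

int main(void)
{
	unsigned long rto = TCP_TIMEOUT_INIT;
	int retransmits;

	for (retransmits = 0; retransmits < 10; retransmits++) {
		printf("retransmit %2d: rto = %3lu s\n",
		       retransmits, rto / HZ);
		/* out_reset_timer: double the RTO, clamp at TCP_RTO_MAX */
		rto = rto << 1;
		if (rto > TCP_RTO_MAX)
			rto = TCP_RTO_MAX;
	}
	return 0;
}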
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 4582d9cf4bbe..b9c3da349492 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -1043,7 +1043,7 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) | |||
1043 | u32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr; | 1043 | u32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr; |
1044 | u32 sk2_rcv_saddr = inet_rcv_saddr(sk2); | 1044 | u32 sk2_rcv_saddr = inet_rcv_saddr(sk2); |
1045 | int sk_ipv6only = ipv6_only_sock(sk); | 1045 | int sk_ipv6only = ipv6_only_sock(sk); |
1046 | int sk2_ipv6only = tcp_v6_ipv6only(sk2); | 1046 | int sk2_ipv6only = inet_v6_ipv6only(sk2); |
1047 | int addr_type = ipv6_addr_type(sk_rcv_saddr6); | 1047 | int addr_type = ipv6_addr_type(sk_rcv_saddr6); |
1048 | int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED; | 1048 | int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED; |
1049 | 1049 | ||
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index af8ad5bb273b..b9c7003b7f8b 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -207,9 +207,9 @@ tb_not_found: | |||
207 | tb->fastreuse = 0; | 207 | tb->fastreuse = 0; |
208 | 208 | ||
209 | success: | 209 | success: |
210 | if (!inet_sk(sk)->bind_hash) | 210 | if (!inet_csk(sk)->icsk_bind_hash) |
211 | inet_bind_hash(sk, tb, snum); | 211 | inet_bind_hash(sk, tb, snum); |
212 | BUG_TRAP(inet_sk(sk)->bind_hash == tb); | 212 | BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb); |
213 | ret = 0; | 213 | ret = 0; |
214 | 214 | ||
215 | fail_unlock: | 215 | fail_unlock: |
@@ -381,7 +381,7 @@ EXPORT_SYMBOL_GPL(tcp_v6_lookup); | |||
381 | * Open request hash tables. | 381 | * Open request hash tables. |
382 | */ | 382 | */ |
383 | 383 | ||
384 | static u32 tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport, u32 rnd) | 384 | static u32 tcp_v6_synq_hash(const struct in6_addr *raddr, const u16 rport, const u32 rnd) |
385 | { | 385 | { |
386 | u32 a, b, c; | 386 | u32 a, b, c; |
387 | 387 | ||
@@ -401,14 +401,15 @@ static u32 tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport, u32 rnd) | |||
401 | return c & (TCP_SYNQ_HSIZE - 1); | 401 | return c & (TCP_SYNQ_HSIZE - 1); |
402 | } | 402 | } |
403 | 403 | ||
404 | static struct request_sock *tcp_v6_search_req(struct tcp_sock *tp, | 404 | static struct request_sock *tcp_v6_search_req(const struct sock *sk, |
405 | struct request_sock ***prevp, | 405 | struct request_sock ***prevp, |
406 | __u16 rport, | 406 | __u16 rport, |
407 | struct in6_addr *raddr, | 407 | struct in6_addr *raddr, |
408 | struct in6_addr *laddr, | 408 | struct in6_addr *laddr, |
409 | int iif) | 409 | int iif) |
410 | { | 410 | { |
411 | struct listen_sock *lopt = tp->accept_queue.listen_opt; | 411 | const struct inet_connection_sock *icsk = inet_csk(sk); |
412 | struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt; | ||
412 | struct request_sock *req, **prev; | 413 | struct request_sock *req, **prev; |
413 | 414 | ||
414 | for (prev = &lopt->syn_table[tcp_v6_synq_hash(raddr, rport, lopt->hash_rnd)]; | 415 | for (prev = &lopt->syn_table[tcp_v6_synq_hash(raddr, rport, lopt->hash_rnd)]; |
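
tcp_v6_search_req() now takes the sock and derives listen_opt through inet_csk(), but the chain walk itself is unchanged: hash the remote address and port into syn_table, scan that bucket comparing remote port, both addresses, and the bound interface, and hand back the predecessor pointer so the caller can unlink the entry. A simplified userspace model of that walk, with a toy hash and fixed-size stand-in types rather than the kernel's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SYNQ_HSIZE 512   /* assumption: a power-of-two table size */

struct req {
	struct req *dl_next;
	uint16_t    rmt_port;
	uint8_t     raddr[16];
	uint8_t     laddr[16];
	int         iif;
};

static struct req *syn_table[SYNQ_HSIZE];

/* Toy hash; the kernel mixes the address words with a random seed. */
static unsigned int synq_hash(const uint8_t *raddr, uint16_t rport)
{
	unsigned int h = rport;
	int i;

	for (i = 0; i < 16; i++)
		h = h * 31 + raddr[i];
	return h & (SYNQ_HSIZE - 1);
}

static struct req *search_req(uint16_t rport, const uint8_t *raddr,
			      const uint8_t *laddr, int iif,
			      struct req ***prevp)
{
	struct req **prev, *req;

	for (prev = &syn_table[synq_hash(raddr, rport)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		if (req->rmt_port == rport &&
		    memcmp(req->raddr, raddr, 16) == 0 &&
		    memcmp(req->laddr, laddr, 16) == 0 &&
		    (req->iif == 0 || req->iif == iif)) {
			*prevp = prev;  /* lets the caller unlink the entry */
			return req;
		}
	}
	return NULL;
}

int main(void)
{
	static struct req r = { .rmt_port = 80 };
	struct req **prev;

	syn_table[synq_hash(r.raddr, r.rmt_port)] = &r;
	printf("found: %s\n",
	       search_req(80, r.raddr, r.laddr, 0, &prev) == &r ? "yes" : "no");
	return 0;
}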
@@ -619,7 +620,7 @@ ok: | |||
619 | } | 620 | } |
620 | 621 | ||
621 | head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)]; | 622 | head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)]; |
622 | tb = inet_sk(sk)->bind_hash; | 623 | tb = inet_csk(sk)->icsk_bind_hash; |
623 | spin_lock_bh(&head->lock); | 624 | spin_lock_bh(&head->lock); |
624 | 625 | ||
625 | if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { | 626 | if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { |
@@ -925,7 +926,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
925 | if (sock_owned_by_user(sk)) | 926 | if (sock_owned_by_user(sk)) |
926 | goto out; | 927 | goto out; |
927 | 928 | ||
928 | req = tcp_v6_search_req(tp, &prev, th->dest, &hdr->daddr, | 929 | req = tcp_v6_search_req(sk, &prev, th->dest, &hdr->daddr, |
929 | &hdr->saddr, tcp_v6_iif(skb)); | 930 | &hdr->saddr, tcp_v6_iif(skb)); |
930 | if (!req) | 931 | if (!req) |
931 | goto out; | 932 | goto out; |
@@ -940,7 +941,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
940 | goto out; | 941 | goto out; |
941 | } | 942 | } |
942 | 943 | ||
943 | tcp_synq_drop(sk, req, prev); | 944 | inet_csk_reqsk_queue_drop(sk, req, prev); |
944 | goto out; | 945 | goto out; |
945 | 946 | ||
946 | case TCP_SYN_SENT: | 947 | case TCP_SYN_SENT: |
@@ -1245,11 +1246,10 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) | |||
1245 | { | 1246 | { |
1246 | struct request_sock *req, **prev; | 1247 | struct request_sock *req, **prev; |
1247 | struct tcphdr *th = skb->h.th; | 1248 | struct tcphdr *th = skb->h.th; |
1248 | struct tcp_sock *tp = tcp_sk(sk); | ||
1249 | struct sock *nsk; | 1249 | struct sock *nsk; |
1250 | 1250 | ||
1251 | /* Find possible connection requests. */ | 1251 | /* Find possible connection requests. */ |
1252 | req = tcp_v6_search_req(tp, &prev, th->source, &skb->nh.ipv6h->saddr, | 1252 | req = tcp_v6_search_req(sk, &prev, th->source, &skb->nh.ipv6h->saddr, |
1253 | &skb->nh.ipv6h->daddr, tcp_v6_iif(skb)); | 1253 | &skb->nh.ipv6h->daddr, tcp_v6_iif(skb)); |
1254 | if (req) | 1254 | if (req) |
1255 | return tcp_check_req(sk, skb, req, prev); | 1255 | return tcp_check_req(sk, skb, req, prev); |
@@ -1278,12 +1278,12 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) | |||
1278 | 1278 | ||
1279 | static void tcp_v6_synq_add(struct sock *sk, struct request_sock *req) | 1279 | static void tcp_v6_synq_add(struct sock *sk, struct request_sock *req) |
1280 | { | 1280 | { |
1281 | struct tcp_sock *tp = tcp_sk(sk); | 1281 | struct inet_connection_sock *icsk = inet_csk(sk); |
1282 | struct listen_sock *lopt = tp->accept_queue.listen_opt; | 1282 | struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt; |
1283 | u32 h = tcp_v6_synq_hash(&tcp6_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd); | 1283 | const u32 h = tcp_v6_synq_hash(&tcp6_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd); |
1284 | 1284 | ||
1285 | reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT); | 1285 | reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, TCP_TIMEOUT_INIT); |
1286 | tcp_synq_added(sk); | 1286 | inet_csk_reqsk_queue_added(sk, TCP_TIMEOUT_INIT); |
1287 | } | 1287 | } |
1288 | 1288 | ||
1289 | 1289 | ||
@@ -1308,13 +1308,13 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1308 | /* | 1308 | /* |
1309 | * There are no SYN attacks on IPv6, yet... | 1309 | * There are no SYN attacks on IPv6, yet... |
1310 | */ | 1310 | */ |
1311 | if (tcp_synq_is_full(sk) && !isn) { | 1311 | if (inet_csk_reqsk_queue_is_full(sk) && !isn) { |
1312 | if (net_ratelimit()) | 1312 | if (net_ratelimit()) |
1313 | printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n"); | 1313 | printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n"); |
1314 | goto drop; | 1314 | goto drop; |
1315 | } | 1315 | } |
1316 | 1316 | ||
1317 | if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1) | 1317 | if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) |
1318 | goto drop; | 1318 | goto drop; |
1319 | 1319 | ||
1320 | req = reqsk_alloc(&tcp6_request_sock_ops); | 1320 | req = reqsk_alloc(&tcp6_request_sock_ops); |
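
The two drop checks in tcp_v6_conn_request() map directly onto the renamed helpers: inet_csk_reqsk_queue_is_full() guards the SYN (request) queue, and the accept-queue test additionally requires more than one young request, i.e. one that has never been retransmitted, before shedding load. A hedged sketch of that admission logic with made-up field names:

#include <stdbool.h>

struct listener {
	int synq_len, synq_max;        /* pending SYNs (request queue)     */
	int synq_young;                /* requests never yet retransmitted */
	int acceptq_len, acceptq_max;  /* established, not yet accept()ed  */
};

static bool admit_syn(const struct listener *l, bool have_isn)
{
	if (l->synq_len >= l->synq_max && !have_isn)
		return false;   /* request queue full: possible synflood */
	if (l->acceptq_len >= l->acceptq_max && l->synq_young > 1)
		return false;   /* accept queue backlogged: shed load */
	return true;
}

int main(void)
{
	struct listener l = { 512, 512, 2, 0, 128 };
	return admit_syn(&l, false) ? 1 : 0;  /* exits 0: SYN dropped */
}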
@@ -2015,7 +2015,7 @@ static int tcp_v6_init_sock(struct sock *sk) | |||
2015 | tcp_init_xmit_timers(sk); | 2015 | tcp_init_xmit_timers(sk); |
2016 | tcp_prequeue_init(tp); | 2016 | tcp_prequeue_init(tp); |
2017 | 2017 | ||
2018 | tp->rto = TCP_TIMEOUT_INIT; | 2018 | inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; |
2019 | tp->mdev = TCP_TIMEOUT_INIT; | 2019 | tp->mdev = TCP_TIMEOUT_INIT; |
2020 | 2020 | ||
2021 | /* So many TCP implementations out there (incorrectly) count the | 2021 | /* So many TCP implementations out there (incorrectly) count the |
@@ -2098,18 +2098,20 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) | |||
2098 | unsigned long timer_expires; | 2098 | unsigned long timer_expires; |
2099 | struct inet_sock *inet = inet_sk(sp); | 2099 | struct inet_sock *inet = inet_sk(sp); |
2100 | struct tcp_sock *tp = tcp_sk(sp); | 2100 | struct tcp_sock *tp = tcp_sk(sp); |
2101 | const struct inet_connection_sock *icsk = inet_csk(sp); | ||
2101 | struct ipv6_pinfo *np = inet6_sk(sp); | 2102 | struct ipv6_pinfo *np = inet6_sk(sp); |
2102 | 2103 | ||
2103 | dest = &np->daddr; | 2104 | dest = &np->daddr; |
2104 | src = &np->rcv_saddr; | 2105 | src = &np->rcv_saddr; |
2105 | destp = ntohs(inet->dport); | 2106 | destp = ntohs(inet->dport); |
2106 | srcp = ntohs(inet->sport); | 2107 | srcp = ntohs(inet->sport); |
2107 | if (tp->pending == TCP_TIME_RETRANS) { | 2108 | |
2109 | if (icsk->icsk_pending == ICSK_TIME_RETRANS) { | ||
2108 | timer_active = 1; | 2110 | timer_active = 1; |
2109 | timer_expires = tp->timeout; | 2111 | timer_expires = icsk->icsk_timeout; |
2110 | } else if (tp->pending == TCP_TIME_PROBE0) { | 2112 | } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { |
2111 | timer_active = 4; | 2113 | timer_active = 4; |
2112 | timer_expires = tp->timeout; | 2114 | timer_expires = icsk->icsk_timeout; |
2113 | } else if (timer_pending(&sp->sk_timer)) { | 2115 | } else if (timer_pending(&sp->sk_timer)) { |
2114 | timer_active = 2; | 2116 | timer_active = 2; |
2115 | timer_expires = sp->sk_timer.expires; | 2117 | timer_expires = sp->sk_timer.expires; |
@@ -2130,12 +2132,14 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) | |||
2130 | tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq, | 2132 | tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq, |
2131 | timer_active, | 2133 | timer_active, |
2132 | jiffies_to_clock_t(timer_expires - jiffies), | 2134 | jiffies_to_clock_t(timer_expires - jiffies), |
2133 | tp->retransmits, | 2135 | icsk->icsk_retransmits, |
2134 | sock_i_uid(sp), | 2136 | sock_i_uid(sp), |
2135 | tp->probes_out, | 2137 | tp->probes_out, |
2136 | sock_i_ino(sp), | 2138 | sock_i_ino(sp), |
2137 | atomic_read(&sp->sk_refcnt), sp, | 2139 | atomic_read(&sp->sk_refcnt), sp, |
2138 | tp->rto, tp->ack.ato, (tp->ack.quick<<1)|tp->ack.pingpong, | 2140 | icsk->icsk_rto, |
2141 | icsk->icsk_ack.ato, | ||
2142 | (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong, | ||
2139 | tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh | 2143 | tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh |
2140 | ); | 2144 | ); |
2141 | } | 2145 | } |
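
get_tcp6_sock() encodes which timer is pending into the timer_active column of /proc/net/tcp6: 1 for a pending retransmit, 4 for a zero-window probe, 2 for the keepalive sk_timer. A small decoder covering only the assignments visible in this hunk (other codes are set elsewhere in the kernel and default to "none" here):

#include <stdio.h>

static const char *tcp6_timer_name(int timer_active)
{
	switch (timer_active) {
	case 1:  return "retransmit";        /* ICSK_TIME_RETRANS pending */
	case 2:  return "keepalive";         /* sk_timer pending          */
	case 4:  return "zero-window probe"; /* ICSK_TIME_PROBE0 pending  */
	default: return "none";              /* other codes set elsewhere */
	}
}

int main(void)
{
	int v;

	for (v = 0; v <= 4; v++)
		printf("timer_active %d: %s\n", v, tcp6_timer_name(v));
	return 0;
}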
@@ -2227,7 +2231,7 @@ struct proto tcpv6_prot = { | |||
2227 | .close = tcp_close, | 2231 | .close = tcp_close, |
2228 | .connect = tcp_v6_connect, | 2232 | .connect = tcp_v6_connect, |
2229 | .disconnect = tcp_disconnect, | 2233 | .disconnect = tcp_disconnect, |
2230 | .accept = tcp_accept, | 2234 | .accept = inet_csk_accept, |
2231 | .ioctl = tcp_ioctl, | 2235 | .ioctl = tcp_ioctl, |
2232 | .init = tcp_v6_init_sock, | 2236 | .init = tcp_v6_init_sock, |
2233 | .destroy = tcp_v6_destroy_sock, | 2237 | .destroy = tcp_v6_destroy_sock, |