author    Linus Torvalds <torvalds@linux-foundation.org>  2009-12-08 10:55:01 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-12-08 10:55:01 -0500
commit    d7fc02c7bae7b1cf69269992cf880a43a350cdaa
tree      a43d56fa72913a1cc98a0bbebe054d08581b3a7c
parent    ee1262dbc65ce0b6234a915d8432171e8d77f518
parent    28b4d5cc17c20786848cdc07b7ea237a309776bb
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1815 commits)
  mac80211: fix reorder buffer release
  iwmc3200wifi: Enable wimax core through module parameter
  iwmc3200wifi: Add wifi-wimax coexistence mode as a module parameter
  iwmc3200wifi: Coex table command does not expect a response
  iwmc3200wifi: Update wiwi priority table
  iwlwifi: driver version track kernel version
  iwlwifi: indicate uCode type when fail dump error/event log
  iwl3945: remove duplicated event logging code
  b43: fix two warnings
  ipw2100: fix rebooting hang with driver loaded
  cfg80211: indent regulatory messages with spaces
  iwmc3200wifi: fix NULL pointer dereference in pmkid update
  mac80211: Fix TX status reporting for injected data frames
  ath9k: enable 2GHz band only if the device supports it
  airo: Fix integer overflow warning
  rt2x00: Fix padding bug on L2PAD devices.
  WE: Fix set events not propagated
  b43legacy: avoid PPC fault during resume
  b43: avoid PPC fault during resume
  tcp: fix a timewait refcnt race
  ...

Fix up conflicts due to sysctl cleanups (dead sysctl_check code and
CTL_UNNUMBERED removed) in kernel/sysctl_check.c, net/ipv4/sysctl_net_ipv4.c,
net/ipv6/addrconf.c and net/sctp/sysctl.c
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')

 -rw-r--r--  net/ipv4/tcp_ipv4.c | 223
 1 file changed, 151 insertions(+), 72 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 7cda24b53f61..29002ab26e0d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -165,10 +165,10 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 		nexthop = inet->opt->faddr;
 	}
 
-	tmp = ip_route_connect(&rt, nexthop, inet->saddr,
+	tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr,
 			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
 			       IPPROTO_TCP,
-			       inet->sport, usin->sin_port, sk, 1);
+			       inet->inet_sport, usin->sin_port, sk, 1);
 	if (tmp < 0) {
 		if (tmp == -ENETUNREACH)
 			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
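Most of the churn in this and the following hunks is mechanical: the struct
inet_sock demultiplex fields gained an inet_ prefix in net-next (saddr becomes
inet_saddr, dport becomes inet_dport, and so on). An abridged sketch of the
renamed fields, assuming the include/net/inet_sock.h layout of this era:

	struct inet_sock {
		struct sock	sk;
		/* Socket demultiplex comparisons on incoming packets. */
		__be32		inet_daddr;	/* was daddr     */
		__be32		inet_rcv_saddr;	/* was rcv_saddr */
		__be16		inet_dport;	/* was dport     */
		__be32		inet_saddr;	/* was saddr     */
		__be16		inet_sport;	/* was sport     */
		__u16		inet_id;	/* was id        */
		/* ... */
	};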
@@ -183,11 +183,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	if (!inet->opt || !inet->opt->srr)
 		daddr = rt->rt_dst;
 
-	if (!inet->saddr)
-		inet->saddr = rt->rt_src;
-	inet->rcv_saddr = inet->saddr;
+	if (!inet->inet_saddr)
+		inet->inet_saddr = rt->rt_src;
+	inet->inet_rcv_saddr = inet->inet_saddr;
 
-	if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
+	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
 		/* Reset inherited state */
 		tp->rx_opt.ts_recent = 0;
 		tp->rx_opt.ts_recent_stamp = 0;
@@ -204,20 +204,20 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 		 * when trying new connection.
 		 */
 		if (peer != NULL &&
-		    peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()) {
+		    (u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
 			tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
 			tp->rx_opt.ts_recent = peer->tcp_ts;
 		}
 	}
 
-	inet->dport = usin->sin_port;
-	inet->daddr = daddr;
+	inet->inet_dport = usin->sin_port;
+	inet->inet_daddr = daddr;
 
 	inet_csk(sk)->icsk_ext_hdr_len = 0;
 	if (inet->opt)
 		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;
 
-	tp->rx_opt.mss_clamp = 536;
+	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
 
 	/* Socket identity is still unknown (sport may be zero).
 	 * However we set state to SYN-SENT and not releasing socket
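The timestamp test above is rewritten from an addition to a subtraction. The
point, in isolation (a minimal stand-alone C sketch, not kernel code): on a
32-bit seconds counter, "stamp + TCP_PAWS_MSL >= now" misfires once the sum
wraps past 2^32, whereas the unsigned difference is wrap-safe.

	#include <stdbool.h>
	#include <stdint.h>

	/* Wrap-safe window test: unsigned subtraction wraps modulo 2^32,
	 * so the distance is correct even after 'now' wraps past 'stamp'. */
	static bool within_window(uint32_t now, uint32_t stamp, uint32_t window)
	{
		return now - stamp <= window;
	}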
@@ -230,7 +230,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 		goto failure;
 
 	err = ip_route_newports(&rt, IPPROTO_TCP,
-				inet->sport, inet->dport, sk);
+				inet->inet_sport, inet->inet_dport, sk);
 	if (err)
 		goto failure;
 
@@ -239,12 +239,12 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	sk_setup_caps(sk, &rt->u.dst);
 
 	if (!tp->write_seq)
-		tp->write_seq = secure_tcp_sequence_number(inet->saddr,
-							   inet->daddr,
-							   inet->sport,
+		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
+							   inet->inet_daddr,
+							   inet->inet_sport,
 							   usin->sin_port);
 
-	inet->id = tp->write_seq ^ jiffies;
+	inet->inet_id = tp->write_seq ^ jiffies;
 
 	err = tcp_connect(sk);
 	rt = NULL;
@@ -261,7 +261,7 @@ failure:
 	tcp_set_state(sk, TCP_CLOSE);
 	ip_rt_put(rt);
 	sk->sk_route_caps = 0;
-	inet->dport = 0;
+	inet->inet_dport = 0;
 	return err;
 }
 
@@ -520,12 +520,13 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
 	struct tcphdr *th = tcp_hdr(skb);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		th->check = ~tcp_v4_check(len, inet->saddr,
-					  inet->daddr, 0);
+		th->check = ~tcp_v4_check(len, inet->inet_saddr,
+					  inet->inet_daddr, 0);
 		skb->csum_start = skb_transport_header(skb) - skb->head;
 		skb->csum_offset = offsetof(struct tcphdr, check);
 	} else {
-		th->check = tcp_v4_check(len, inet->saddr, inet->daddr,
+		th->check = tcp_v4_check(len, inet->inet_saddr,
+					 inet->inet_daddr,
 					 csum_partial(th,
 						      th->doff << 2,
 						      skb->csum));
@@ -741,8 +742,9 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  * This still operates on a request_sock only, not on a big
  * socket.
  */
-static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
-				struct dst_entry *dst)
+static int __tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
+				struct request_sock *req,
+				struct request_values *rvp)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	int err = -1;
@@ -752,7 +754,7 @@ static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
 	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
 		return -1;
 
-	skb = tcp_make_synack(sk, dst, req);
+	skb = tcp_make_synack(sk, dst, req, rvp);
 
 	if (skb) {
 		struct tcphdr *th = tcp_hdr(skb);
@@ -773,9 +775,10 @@ static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
 	return err;
 }
 
-static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req)
+static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
+			      struct request_values *rvp)
 {
-	return __tcp_v4_send_synack(sk, req, NULL);
+	return __tcp_v4_send_synack(sk, NULL, req, rvp);
 }
 
 /*
@@ -848,7 +851,7 @@ static struct tcp_md5sig_key *
 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
 					 struct sock *addr_sk)
 {
-	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->daddr);
+	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr);
 }
 
 EXPORT_SYMBOL(tcp_v4_md5_lookup);
@@ -923,7 +926,7 @@ EXPORT_SYMBOL(tcp_v4_md5_do_add);
 static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
 			       u8 *newkey, u8 newkeylen)
 {
-	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->daddr,
+	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr,
 				 newkey, newkeylen);
 }
 
@@ -1089,8 +1092,8 @@ int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
 	__be32 saddr, daddr;
 
 	if (sk) {
-		saddr = inet_sk(sk)->saddr;
-		daddr = inet_sk(sk)->daddr;
+		saddr = inet_sk(sk)->inet_saddr;
+		daddr = inet_sk(sk)->inet_daddr;
 	} else if (req) {
 		saddr = inet_rsk(req)->loc_addr;
 		daddr = inet_rsk(req)->rmt_addr;
@@ -1210,13 +1213,16 @@ static struct timewait_sock_ops tcp_timewait_sock_ops = {
 
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 {
-	struct inet_request_sock *ireq;
+	struct tcp_extend_values tmp_ext;
 	struct tcp_options_received tmp_opt;
+	u8 *hash_location;
 	struct request_sock *req;
+	struct inet_request_sock *ireq;
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct dst_entry *dst = NULL;
 	__be32 saddr = ip_hdr(skb)->saddr;
 	__be32 daddr = ip_hdr(skb)->daddr;
 	__u32 isn = TCP_SKB_CB(skb)->when;
-	struct dst_entry *dst = NULL;
 #ifdef CONFIG_SYN_COOKIES
 	int want_cookie = 0;
 #else
@@ -1256,27 +1262,65 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
 #endif
 
+	ireq = inet_rsk(req);
+	ireq->loc_addr = daddr;
+	ireq->rmt_addr = saddr;
+	ireq->no_srccheck = inet_sk(sk)->transparent;
+	ireq->opt = tcp_v4_save_options(sk, skb);
+
+	dst = inet_csk_route_req(sk, req);
+	if(!dst)
+		goto drop_and_free;
+
 	tcp_clear_options(&tmp_opt);
-	tmp_opt.mss_clamp = 536;
-	tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss;
+	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
+	tmp_opt.user_mss = tp->rx_opt.user_mss;
+	tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst);
+
+	if (tmp_opt.cookie_plus > 0 &&
+	    tmp_opt.saw_tstamp &&
+	    !tp->rx_opt.cookie_out_never &&
+	    (sysctl_tcp_cookie_size > 0 ||
+	     (tp->cookie_values != NULL &&
+	      tp->cookie_values->cookie_desired > 0))) {
+		u8 *c;
+		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
+		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
+
+		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
+			goto drop_and_release;
+
+		/* Secret recipe starts with IP addresses */
+		*mess++ ^= daddr;
+		*mess++ ^= saddr;
 
-	tcp_parse_options(skb, &tmp_opt, 0);
+		/* plus variable length Initiator Cookie */
+		c = (u8 *)mess;
+		while (l-- > 0)
+			*c++ ^= *hash_location++;
+
+#ifdef CONFIG_SYN_COOKIES
+		want_cookie = 0;	/* not our kind of cookie */
+#endif
+		tmp_ext.cookie_out_never = 0; /* false */
+		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
+	} else if (!tp->rx_opt.cookie_in_always) {
+		/* redundant indications, but ensure initialization. */
+		tmp_ext.cookie_out_never = 1; /* true */
+		tmp_ext.cookie_plus = 0;
+	} else {
+		goto drop_and_release;
+	}
+	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
 
 	if (want_cookie && !tmp_opt.saw_tstamp)
 		tcp_clear_options(&tmp_opt);
 
 	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
-
 	tcp_openreq_init(req, &tmp_opt, skb);
 
-	ireq = inet_rsk(req);
-	ireq->loc_addr = daddr;
-	ireq->rmt_addr = saddr;
-	ireq->no_srccheck = inet_sk(sk)->transparent;
-	ireq->opt = tcp_v4_save_options(sk, skb);
-
 	if (security_inet_conn_request(sk, skb, req))
-		goto drop_and_free;
+		goto drop_and_release;
 
 	if (!want_cookie)
 		TCP_ECN_create_request(req, tcp_hdr(skb));
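What the cookie "recipe" above does, reduced to a stand-alone sketch
(hypothetical helper, stand-in stdint types for the kernel's u8/u32): fixed-
width identifiers are XORed in as words, then the variable-length initiator
cookie is folded in byte by byte through a byte view of the same buffer.

	#include <stddef.h>
	#include <stdint.h>

	static void fold_cookie(uint32_t *mess, uint32_t daddr, uint32_t saddr,
				const uint8_t *cookie, size_t len)
	{
		uint8_t *c;

		*mess++ ^= daddr;	/* fixed-width identifiers first */
		*mess++ ^= saddr;
		c = (uint8_t *)mess;	/* then byte-wise variable data */
		while (len-- > 0)
			*c++ ^= *cookie++;
	}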
@@ -1301,10 +1345,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 		 */
 		if (tmp_opt.saw_tstamp &&
 		    tcp_death_row.sysctl_tw_recycle &&
-		    (dst = inet_csk_route_req(sk, req)) != NULL &&
 		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
 		    peer->v4daddr == saddr) {
-			if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
+			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
 			    (s32)(peer->tcp_ts - req->ts_recent) >
 							TCP_PAWS_WINDOW) {
 				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
@@ -1333,7 +1376,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	}
 	tcp_rsk(req)->snt_isn = isn;
 
-	if (__tcp_v4_send_synack(sk, req, dst) || want_cookie)
+	if (__tcp_v4_send_synack(sk, dst, req,
+				 (struct request_values *)&tmp_ext) ||
+	    want_cookie)
 		goto drop_and_free;
 
 	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
@@ -1380,9 +1425,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	newtp = tcp_sk(newsk);
 	newinet = inet_sk(newsk);
 	ireq = inet_rsk(req);
-	newinet->daddr = ireq->rmt_addr;
-	newinet->rcv_saddr = ireq->loc_addr;
-	newinet->saddr = ireq->loc_addr;
+	newinet->inet_daddr = ireq->rmt_addr;
+	newinet->inet_rcv_saddr = ireq->loc_addr;
+	newinet->inet_saddr = ireq->loc_addr;
 	newinet->opt = ireq->opt;
 	ireq->opt = NULL;
 	newinet->mc_index = inet_iif(skb);
@@ -1390,7 +1435,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
 	if (newinet->opt)
 		inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
-	newinet->id = newtp->write_seq ^ jiffies;
+	newinet->inet_id = newtp->write_seq ^ jiffies;
 
 	tcp_mtup_init(newsk);
 	tcp_sync_mss(newsk, dst_mtu(dst));
@@ -1403,7 +1448,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
 #ifdef CONFIG_TCP_MD5SIG
 	/* Copy over the MD5 key from the original socket */
-	if ((key = tcp_v4_md5_do_lookup(sk, newinet->daddr)) != NULL) {
+	key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr);
+	if (key != NULL) {
 		/*
 		 * We're using one, so create a matching key
 		 * on the newsk structure. If we fail to get
@@ -1412,7 +1458,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 		 */
 		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
 		if (newkey != NULL)
-			tcp_v4_md5_do_add(newsk, newinet->daddr,
+			tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
 					  newkey, key->keylen);
 		newsk->sk_route_caps &= ~NETIF_F_GSO_MASK;
 	}
@@ -1711,8 +1757,8 @@ int tcp_v4_remember_stamp(struct sock *sk)
 	struct inet_peer *peer = NULL;
 	int release_it = 0;
 
-	if (!rt || rt->rt_dst != inet->daddr) {
-		peer = inet_getpeer(inet->daddr, 1);
+	if (!rt || rt->rt_dst != inet->inet_daddr) {
+		peer = inet_getpeer(inet->inet_daddr, 1);
 		release_it = 1;
 	} else {
 		if (!rt->peer)
@@ -1722,9 +1768,9 @@ int tcp_v4_remember_stamp(struct sock *sk)
 
 	if (peer) {
 		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
-		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
-		     peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
-			peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
+		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
+		     peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
+			peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
 			peer->tcp_ts = tp->rx_opt.ts_recent;
 		}
 		if (release_it)
@@ -1743,9 +1789,9 @@ int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
 		const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
 
 		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
-		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
-		     peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
-			peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
+		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
+		     peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
+			peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
 			peer->tcp_ts = tcptw->tw_ts_recent;
 		}
 		inet_putpeer(peer);
@@ -1810,7 +1856,7 @@ static int tcp_v4_init_sock(struct sock *sk)
 	 */
 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 	tp->snd_cwnd_clamp = ~0;
-	tp->mss_cache = 536;
+	tp->mss_cache = TCP_MSS_DEFAULT;
 
 	tp->reordering = sysctl_tcp_reordering;
 	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
@@ -1826,6 +1872,19 @@ static int tcp_v4_init_sock(struct sock *sk)
 	tp->af_specific = &tcp_sock_ipv4_specific;
 #endif
 
+	/* TCP Cookie Transactions */
+	if (sysctl_tcp_cookie_size > 0) {
+		/* Default, cookies without s_data_payload. */
+		tp->cookie_values =
+			kzalloc(sizeof(*tp->cookie_values),
+				sk->sk_allocation);
+		if (tp->cookie_values != NULL)
+			kref_init(&tp->cookie_values->kref);
+	}
+	/* Presumed zeroed, in order of appearance:
+	 *	cookie_in_always, cookie_out_never,
+	 *	s_data_constant, s_data_in, s_data_out
+	 */
 	sk->sk_sndbuf = sysctl_tcp_wmem[1];
 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
@@ -1879,6 +1938,13 @@ void tcp_v4_destroy_sock(struct sock *sk)
 		sk->sk_sndmsg_page = NULL;
 	}
 
+	/* TCP Cookie Transactions */
+	if (tp->cookie_values != NULL) {
+		kref_put(&tp->cookie_values->kref,
+			 tcp_cookie_values_release);
+		tp->cookie_values = NULL;
+	}
+
 	percpu_counter_dec(&tcp_sockets_allocated);
 }
 
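The kzalloc + kref_init in tcp_v4_init_sock() and the kref_put here follow the
usual kref pairing. A plain-C stand-in for that lifetime pattern (illustrative
names, not the kernel API):

	#include <stdlib.h>

	struct cookie_values_demo {
		int refcount;
		/* cookie payload would live here */
	};

	static struct cookie_values_demo *cookie_values_alloc(void)
	{
		struct cookie_values_demo *cv = calloc(1, sizeof(*cv));

		if (cv)
			cv->refcount = 1;	/* kref_init() starts at one */
		return cv;
	}

	static void cookie_values_put(struct cookie_values_demo *cv)
	{
		if (cv && --cv->refcount == 0)	/* last put runs release */
			free(cv);
	}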
@@ -2000,7 +2066,7 @@ static void *established_get_first(struct seq_file *seq)
 	struct net *net = seq_file_net(seq);
 	void *rc = NULL;
 
-	for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
+	for (st->bucket = 0; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
 		struct sock *sk;
 		struct hlist_nulls_node *node;
 		struct inet_timewait_sock *tw;
@@ -2061,10 +2127,10 @@ get_tw:
 		st->state = TCP_SEQ_STATE_ESTABLISHED;
 
 		/* Look for next non empty bucket */
-		while (++st->bucket < tcp_hashinfo.ehash_size &&
+		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
 				empty_bucket(st))
 			;
-		if (st->bucket >= tcp_hashinfo.ehash_size)
+		if (st->bucket > tcp_hashinfo.ehash_mask)
 			return NULL;
 
 		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
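These two hunks track net-next replacing ehash_size with ehash_mask (size - 1,
with size a power of two), so the last valid bucket index is the mask itself:
iterate with <=, terminate with >. The idiom as a minimal sketch, hypothetical
names:

	#include <stddef.h>

	/* Walk every bucket of a power-of-two table addressed by mask. */
	static void walk_buckets(size_t ehash_mask, void (*visit)(size_t))
	{
		size_t b;

		for (b = 0; b <= ehash_mask; b++)
			visit(b);
	}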
@@ -2225,7 +2291,7 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
 		i,
 		ireq->loc_addr,
-		ntohs(inet_sk(sk)->sport),
+		ntohs(inet_sk(sk)->inet_sport),
 		ireq->rmt_addr,
 		ntohs(ireq->rmt_port),
 		TCP_SYN_RECV,
@@ -2248,10 +2314,11 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
 	struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct inet_sock *inet = inet_sk(sk);
-	__be32 dest = inet->daddr;
-	__be32 src = inet->rcv_saddr;
-	__u16 destp = ntohs(inet->dport);
-	__u16 srcp = ntohs(inet->sport);
+	__be32 dest = inet->inet_daddr;
+	__be32 src = inet->inet_rcv_saddr;
+	__u16 destp = ntohs(inet->inet_dport);
+	__u16 srcp = ntohs(inet->inet_sport);
+	int rx_queue;
 
 	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
 		timer_active = 1;
@@ -2267,12 +2334,19 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
 		timer_expires = jiffies;
 	}
 
+	if (sk->sk_state == TCP_LISTEN)
+		rx_queue = sk->sk_ack_backlog;
+	else
+		/*
+		 * because we dont lock socket, we might find a transient negative value
+		 */
+		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
+
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
 			"%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
 		i, src, srcp, dest, destp, sk->sk_state,
 		tp->write_seq - tp->snd_una,
-		sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog :
-					     (tp->rcv_nxt - tp->copied_seq),
+		rx_queue,
 		timer_active,
 		jiffies_to_clock_t(timer_expires - jiffies),
 		icsk->icsk_retransmits,
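The seq_file path reads the socket without taking its lock, so rcv_nxt and
copied_seq can be observed mid-update and their difference can be transiently
negative; the max_t clamp keeps that out of /proc/net/tcp. The same guard as a
tiny stand-alone C sketch:

	#include <stdint.h>

	static int rx_queue_estimate(uint32_t rcv_nxt, uint32_t copied_seq)
	{
		int diff = (int)(rcv_nxt - copied_seq);

		return diff > 0 ? diff : 0;	/* max_t(int, diff, 0) */
	}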
@@ -2463,12 +2537,17 @@ static int __net_init tcp_sk_init(struct net *net)
 static void __net_exit tcp_sk_exit(struct net *net)
 {
 	inet_ctl_sock_destroy(net->ipv4.tcp_sock);
-	inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET);
+}
+
+static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
+{
+	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
 }
 
 static struct pernet_operations __net_initdata tcp_sk_ops = {
 	.init = tcp_sk_init,
 	.exit = tcp_sk_exit,
+	.exit_batch = tcp_sk_exit_batch,
 };
 
 void __init tcp_v4_init(void)