about summary refs log tree commit diff stats
path: root/net/ipv4/tcp_ipv4.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--  net/ipv4/tcp_ipv4.c  124
1 file changed, 34 insertions, 90 deletions
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index b14266bb91eb..300ab2c93f29 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -835,11 +835,11 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
835 skb = tcp_make_synack(sk, dst, req, NULL); 835 skb = tcp_make_synack(sk, dst, req, NULL);
836 836
837 if (skb) { 837 if (skb) {
838 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr); 838 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
839 839
840 skb_set_queue_mapping(skb, queue_mapping); 840 skb_set_queue_mapping(skb, queue_mapping);
841 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, 841 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
842 ireq->rmt_addr, 842 ireq->ir_rmt_addr,
843 ireq->opt); 843 ireq->opt);
844 err = net_xmit_eval(err); 844 err = net_xmit_eval(err);
845 if (!tcp_rsk(req)->snt_synack && !err) 845 if (!tcp_rsk(req)->snt_synack && !err)
@@ -972,7 +972,7 @@ static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
972{ 972{
973 union tcp_md5_addr *addr; 973 union tcp_md5_addr *addr;
974 974
975 addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr; 975 addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
976 return tcp_md5_do_lookup(sk, addr, AF_INET); 976 return tcp_md5_do_lookup(sk, addr, AF_INET);
977} 977}
978 978
@@ -1149,8 +1149,8 @@ int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1149 saddr = inet_sk(sk)->inet_saddr; 1149 saddr = inet_sk(sk)->inet_saddr;
1150 daddr = inet_sk(sk)->inet_daddr; 1150 daddr = inet_sk(sk)->inet_daddr;
1151 } else if (req) { 1151 } else if (req) {
1152 saddr = inet_rsk(req)->loc_addr; 1152 saddr = inet_rsk(req)->ir_loc_addr;
1153 daddr = inet_rsk(req)->rmt_addr; 1153 daddr = inet_rsk(req)->ir_rmt_addr;
1154 } else { 1154 } else {
1155 const struct iphdr *iph = ip_hdr(skb); 1155 const struct iphdr *iph = ip_hdr(skb);
1156 saddr = iph->saddr; 1156 saddr = iph->saddr;
@@ -1366,8 +1366,8 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
1366 kfree_skb(skb_synack); 1366 kfree_skb(skb_synack);
1367 return -1; 1367 return -1;
1368 } 1368 }
1369 err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr, 1369 err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
1370 ireq->rmt_addr, ireq->opt); 1370 ireq->ir_rmt_addr, ireq->opt);
1371 err = net_xmit_eval(err); 1371 err = net_xmit_eval(err);
1372 if (!err) 1372 if (!err)
1373 tcp_rsk(req)->snt_synack = tcp_time_stamp; 1373 tcp_rsk(req)->snt_synack = tcp_time_stamp;
@@ -1410,8 +1410,8 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
1410 inet_csk(child)->icsk_af_ops->rebuild_header(child); 1410 inet_csk(child)->icsk_af_ops->rebuild_header(child);
1411 tcp_init_congestion_control(child); 1411 tcp_init_congestion_control(child);
1412 tcp_mtup_init(child); 1412 tcp_mtup_init(child);
1413 tcp_init_buffer_space(child);
1414 tcp_init_metrics(child); 1413 tcp_init_metrics(child);
1414 tcp_init_buffer_space(child);
1415 1415
1416 /* Queue the data carried in the SYN packet. We need to first 1416 /* Queue the data carried in the SYN packet. We need to first
1417 * bump skb's refcnt because the caller will attempt to free it. 1417 * bump skb's refcnt because the caller will attempt to free it.
@@ -1502,8 +1502,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1502 tcp_openreq_init(req, &tmp_opt, skb); 1502 tcp_openreq_init(req, &tmp_opt, skb);
1503 1503
1504 ireq = inet_rsk(req); 1504 ireq = inet_rsk(req);
1505 ireq->loc_addr = daddr; 1505 ireq->ir_loc_addr = daddr;
1506 ireq->rmt_addr = saddr; 1506 ireq->ir_rmt_addr = saddr;
1507 ireq->no_srccheck = inet_sk(sk)->transparent; 1507 ireq->no_srccheck = inet_sk(sk)->transparent;
1508 ireq->opt = tcp_v4_save_options(skb); 1508 ireq->opt = tcp_v4_save_options(skb);
1509 1509
@@ -1578,15 +1578,15 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1578 fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL); 1578 fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
1579 1579
1580 if (skb_synack) { 1580 if (skb_synack) {
1581 __tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr); 1581 __tcp_v4_send_check(skb_synack, ireq->ir_loc_addr, ireq->ir_rmt_addr);
1582 skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb)); 1582 skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
1583 } else 1583 } else
1584 goto drop_and_free; 1584 goto drop_and_free;
1585 1585
1586 if (likely(!do_fastopen)) { 1586 if (likely(!do_fastopen)) {
1587 int err; 1587 int err;
1588 err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr, 1588 err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
1589 ireq->rmt_addr, ireq->opt); 1589 ireq->ir_rmt_addr, ireq->opt);
1590 err = net_xmit_eval(err); 1590 err = net_xmit_eval(err);
1591 if (err || want_cookie) 1591 if (err || want_cookie)
1592 goto drop_and_free; 1592 goto drop_and_free;
@@ -1644,9 +1644,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1644 newtp = tcp_sk(newsk); 1644 newtp = tcp_sk(newsk);
1645 newinet = inet_sk(newsk); 1645 newinet = inet_sk(newsk);
1646 ireq = inet_rsk(req); 1646 ireq = inet_rsk(req);
1647 newinet->inet_daddr = ireq->rmt_addr; 1647 newinet->inet_daddr = ireq->ir_rmt_addr;
1648 newinet->inet_rcv_saddr = ireq->loc_addr; 1648 newinet->inet_rcv_saddr = ireq->ir_loc_addr;
1649 newinet->inet_saddr = ireq->loc_addr; 1649 newinet->inet_saddr = ireq->ir_loc_addr;
1650 inet_opt = ireq->opt; 1650 inet_opt = ireq->opt;
1651 rcu_assign_pointer(newinet->inet_opt, inet_opt); 1651 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1652 ireq->opt = NULL; 1652 ireq->opt = NULL;
@@ -2194,18 +2194,6 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock);
2194#ifdef CONFIG_PROC_FS 2194#ifdef CONFIG_PROC_FS
2195/* Proc filesystem TCP sock list dumping. */ 2195/* Proc filesystem TCP sock list dumping. */
2196 2196
2197static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
2198{
2199 return hlist_nulls_empty(head) ? NULL :
2200 list_entry(head->first, struct inet_timewait_sock, tw_node);
2201}
2202
2203static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
2204{
2205 return !is_a_nulls(tw->tw_node.next) ?
2206 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
2207}
2208
2209/* 2197/*
2210 * Get next listener socket follow cur. If cur is NULL, get first socket 2198 * Get next listener socket follow cur. If cur is NULL, get first socket
2211 * starting from bucket given in st->bucket; when st->bucket is zero the 2199 * starting from bucket given in st->bucket; when st->bucket is zero the
@@ -2309,10 +2297,9 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2309 return rc; 2297 return rc;
2310} 2298}
2311 2299
2312static inline bool empty_bucket(struct tcp_iter_state *st) 2300static inline bool empty_bucket(const struct tcp_iter_state *st)
2313{ 2301{
2314 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) && 2302 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
2315 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
2316} 2303}
2317 2304
2318/* 2305/*
@@ -2329,7 +2316,6 @@ static void *established_get_first(struct seq_file *seq)
2329 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) { 2316 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2330 struct sock *sk; 2317 struct sock *sk;
2331 struct hlist_nulls_node *node; 2318 struct hlist_nulls_node *node;
2332 struct inet_timewait_sock *tw;
2333 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket); 2319 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2334 2320
2335 /* Lockless fast path for the common case of empty buckets */ 2321 /* Lockless fast path for the common case of empty buckets */
@@ -2345,18 +2331,7 @@ static void *established_get_first(struct seq_file *seq)
2345 rc = sk; 2331 rc = sk;
2346 goto out; 2332 goto out;
2347 } 2333 }
2348 st->state = TCP_SEQ_STATE_TIME_WAIT;
2349 inet_twsk_for_each(tw, node,
2350 &tcp_hashinfo.ehash[st->bucket].twchain) {
2351 if (tw->tw_family != st->family ||
2352 !net_eq(twsk_net(tw), net)) {
2353 continue;
2354 }
2355 rc = tw;
2356 goto out;
2357 }
2358 spin_unlock_bh(lock); 2334 spin_unlock_bh(lock);
2359 st->state = TCP_SEQ_STATE_ESTABLISHED;
2360 } 2335 }
2361out: 2336out:
2362 return rc; 2337 return rc;
@@ -2365,7 +2340,6 @@ out:
2365static void *established_get_next(struct seq_file *seq, void *cur) 2340static void *established_get_next(struct seq_file *seq, void *cur)
2366{ 2341{
2367 struct sock *sk = cur; 2342 struct sock *sk = cur;
2368 struct inet_timewait_sock *tw;
2369 struct hlist_nulls_node *node; 2343 struct hlist_nulls_node *node;
2370 struct tcp_iter_state *st = seq->private; 2344 struct tcp_iter_state *st = seq->private;
2371 struct net *net = seq_file_net(seq); 2345 struct net *net = seq_file_net(seq);
@@ -2373,45 +2347,16 @@ static void *established_get_next(struct seq_file *seq, void *cur)
2373 ++st->num; 2347 ++st->num;
2374 ++st->offset; 2348 ++st->offset;
2375 2349
2376 if (st->state == TCP_SEQ_STATE_TIME_WAIT) { 2350 sk = sk_nulls_next(sk);
2377 tw = cur;
2378 tw = tw_next(tw);
2379get_tw:
2380 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
2381 tw = tw_next(tw);
2382 }
2383 if (tw) {
2384 cur = tw;
2385 goto out;
2386 }
2387 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2388 st->state = TCP_SEQ_STATE_ESTABLISHED;
2389
2390 /* Look for next non empty bucket */
2391 st->offset = 0;
2392 while (++st->bucket <= tcp_hashinfo.ehash_mask &&
2393 empty_bucket(st))
2394 ;
2395 if (st->bucket > tcp_hashinfo.ehash_mask)
2396 return NULL;
2397
2398 spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2399 sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
2400 } else
2401 sk = sk_nulls_next(sk);
2402 2351
2403 sk_nulls_for_each_from(sk, node) { 2352 sk_nulls_for_each_from(sk, node) {
2404 if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) 2353 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2405 goto found; 2354 return sk;
2406 } 2355 }
2407 2356
2408 st->state = TCP_SEQ_STATE_TIME_WAIT; 2357 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2409 tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain); 2358 ++st->bucket;
2410 goto get_tw; 2359 return established_get_first(seq);
2411found:
2412 cur = sk;
2413out:
2414 return cur;
2415} 2360}
2416 2361
2417static void *established_get_idx(struct seq_file *seq, loff_t pos) 2362static void *established_get_idx(struct seq_file *seq, loff_t pos)
@@ -2464,10 +2409,9 @@ static void *tcp_seek_last_pos(struct seq_file *seq)
2464 if (rc) 2409 if (rc)
2465 break; 2410 break;
2466 st->bucket = 0; 2411 st->bucket = 0;
2412 st->state = TCP_SEQ_STATE_ESTABLISHED;
2467 /* Fallthrough */ 2413 /* Fallthrough */
2468 case TCP_SEQ_STATE_ESTABLISHED: 2414 case TCP_SEQ_STATE_ESTABLISHED:
2469 case TCP_SEQ_STATE_TIME_WAIT:
2470 st->state = TCP_SEQ_STATE_ESTABLISHED;
2471 if (st->bucket > tcp_hashinfo.ehash_mask) 2415 if (st->bucket > tcp_hashinfo.ehash_mask)
2472 break; 2416 break;
2473 rc = established_get_first(seq); 2417 rc = established_get_first(seq);
@@ -2524,7 +2468,6 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2524 } 2468 }
2525 break; 2469 break;
2526 case TCP_SEQ_STATE_ESTABLISHED: 2470 case TCP_SEQ_STATE_ESTABLISHED:
2527 case TCP_SEQ_STATE_TIME_WAIT:
2528 rc = established_get_next(seq, v); 2471 rc = established_get_next(seq, v);
2529 break; 2472 break;
2530 } 2473 }
@@ -2548,7 +2491,6 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
2548 if (v != SEQ_START_TOKEN) 2491 if (v != SEQ_START_TOKEN)
2549 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock); 2492 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2550 break; 2493 break;
2551 case TCP_SEQ_STATE_TIME_WAIT:
2552 case TCP_SEQ_STATE_ESTABLISHED: 2494 case TCP_SEQ_STATE_ESTABLISHED:
2553 if (v) 2495 if (v)
2554 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); 2496 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
@@ -2606,10 +2548,10 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2606 seq_printf(f, "%4d: %08X:%04X %08X:%04X" 2548 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2607 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK%n", 2549 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK%n",
2608 i, 2550 i,
2609 ireq->loc_addr, 2551 ireq->ir_loc_addr,
2610 ntohs(inet_sk(sk)->inet_sport), 2552 ntohs(inet_sk(sk)->inet_sport),
2611 ireq->rmt_addr, 2553 ireq->ir_rmt_addr,
2612 ntohs(ireq->rmt_port), 2554 ntohs(ireq->ir_rmt_port),
2613 TCP_SYN_RECV, 2555 TCP_SYN_RECV,
2614 0, 0, /* could print option size, but that is af dependent. */ 2556 0, 0, /* could print option size, but that is af dependent. */
2615 1, /* timers active (only the expire timer) */ 2557 1, /* timers active (only the expire timer) */
@@ -2707,6 +2649,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2707static int tcp4_seq_show(struct seq_file *seq, void *v) 2649static int tcp4_seq_show(struct seq_file *seq, void *v)
2708{ 2650{
2709 struct tcp_iter_state *st; 2651 struct tcp_iter_state *st;
2652 struct sock *sk = v;
2710 int len; 2653 int len;
2711 2654
2712 if (v == SEQ_START_TOKEN) { 2655 if (v == SEQ_START_TOKEN) {
@@ -2721,14 +2664,14 @@ static int tcp4_seq_show(struct seq_file *seq, void *v)
2721 switch (st->state) { 2664 switch (st->state) {
2722 case TCP_SEQ_STATE_LISTENING: 2665 case TCP_SEQ_STATE_LISTENING:
2723 case TCP_SEQ_STATE_ESTABLISHED: 2666 case TCP_SEQ_STATE_ESTABLISHED:
2724 get_tcp4_sock(v, seq, st->num, &len); 2667 if (sk->sk_state == TCP_TIME_WAIT)
2668 get_timewait4_sock(v, seq, st->num, &len);
2669 else
2670 get_tcp4_sock(v, seq, st->num, &len);
2725 break; 2671 break;
2726 case TCP_SEQ_STATE_OPENREQ: 2672 case TCP_SEQ_STATE_OPENREQ:
2727 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len); 2673 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
2728 break; 2674 break;
2729 case TCP_SEQ_STATE_TIME_WAIT:
2730 get_timewait4_sock(v, seq, st->num, &len);
2731 break;
2732 } 2675 }
2733 seq_printf(seq, "%*s\n", TMPSZ - 1 - len, ""); 2676 seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
2734out: 2677out:
@@ -2806,6 +2749,7 @@ struct proto tcp_prot = {
2806 .orphan_count = &tcp_orphan_count, 2749 .orphan_count = &tcp_orphan_count,
2807 .memory_allocated = &tcp_memory_allocated, 2750 .memory_allocated = &tcp_memory_allocated,
2808 .memory_pressure = &tcp_memory_pressure, 2751 .memory_pressure = &tcp_memory_pressure,
2752 .sysctl_mem = sysctl_tcp_mem,
2809 .sysctl_wmem = sysctl_tcp_wmem, 2753 .sysctl_wmem = sysctl_tcp_wmem,
2810 .sysctl_rmem = sysctl_tcp_rmem, 2754 .sysctl_rmem = sysctl_tcp_rmem,
2811 .max_header = MAX_TCP_HEADER, 2755 .max_header = MAX_TCP_HEADER,