Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--	net/ipv4/tcp_ipv4.c	156
1 file changed, 49 insertions(+), 107 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index b14266bb91eb..067213924751 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -177,7 +177,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	if (IS_ERR(rt)) {
 		err = PTR_ERR(rt);
 		if (err == -ENETUNREACH)
-			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
+			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
 		return err;
 	}
 
@@ -288,6 +288,7 @@ static void tcp_v4_mtu_reduced(struct sock *sk)
 	mtu = dst_mtu(dst);
 
 	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
+	    ip_sk_accept_pmtu(sk) &&
 	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
 		tcp_sync_mss(sk, mtu);
 
@@ -835,11 +836,11 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 	skb = tcp_make_synack(sk, dst, req, NULL);
 
 	if (skb) {
-		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
+		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 
 		skb_set_queue_mapping(skb, queue_mapping);
-		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
-					    ireq->rmt_addr,
+		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
+					    ireq->ir_rmt_addr,
 					    ireq->opt);
 		err = net_xmit_eval(err);
 		if (!tcp_rsk(req)->snt_synack && !err)
@@ -972,7 +973,7 @@ static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
 {
 	union tcp_md5_addr *addr;
 
-	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
+	addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
 	return tcp_md5_do_lookup(sk, addr, AF_INET);
 }
 
@@ -1149,8 +1150,8 @@ int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
 		saddr = inet_sk(sk)->inet_saddr;
 		daddr = inet_sk(sk)->inet_daddr;
 	} else if (req) {
-		saddr = inet_rsk(req)->loc_addr;
-		daddr = inet_rsk(req)->rmt_addr;
+		saddr = inet_rsk(req)->ir_loc_addr;
+		daddr = inet_rsk(req)->ir_rmt_addr;
 	} else {
 		const struct iphdr *iph = ip_hdr(skb);
 		saddr = iph->saddr;
@@ -1366,8 +1367,8 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
 		kfree_skb(skb_synack);
 		return -1;
 	}
-	err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
-				    ireq->rmt_addr, ireq->opt);
+	err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
+				    ireq->ir_rmt_addr, ireq->opt);
 	err = net_xmit_eval(err);
 	if (!err)
 		tcp_rsk(req)->snt_synack = tcp_time_stamp;
@@ -1410,8 +1411,8 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
 	inet_csk(child)->icsk_af_ops->rebuild_header(child);
 	tcp_init_congestion_control(child);
 	tcp_mtup_init(child);
-	tcp_init_buffer_space(child);
 	tcp_init_metrics(child);
+	tcp_init_buffer_space(child);
 
 	/* Queue the data carried in the SYN packet. We need to first
 	 * bump skb's refcnt because the caller will attempt to free it.
@@ -1502,8 +1503,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	tcp_openreq_init(req, &tmp_opt, skb);
 
 	ireq = inet_rsk(req);
-	ireq->loc_addr = daddr;
-	ireq->rmt_addr = saddr;
+	ireq->ir_loc_addr = daddr;
+	ireq->ir_rmt_addr = saddr;
 	ireq->no_srccheck = inet_sk(sk)->transparent;
 	ireq->opt = tcp_v4_save_options(skb);
 
@@ -1578,15 +1579,15 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 		fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
 
 	if (skb_synack) {
-		__tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
+		__tcp_v4_send_check(skb_synack, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 		skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
 	} else
 		goto drop_and_free;
 
 	if (likely(!do_fastopen)) {
 		int err;
-		err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
-			ireq->rmt_addr, ireq->opt);
+		err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
+			ireq->ir_rmt_addr, ireq->opt);
 		err = net_xmit_eval(err);
 		if (err || want_cookie)
 			goto drop_and_free;
@@ -1644,9 +1645,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	newtp = tcp_sk(newsk);
 	newinet = inet_sk(newsk);
 	ireq = inet_rsk(req);
-	newinet->inet_daddr = ireq->rmt_addr;
-	newinet->inet_rcv_saddr = ireq->loc_addr;
-	newinet->inet_saddr = ireq->loc_addr;
+	newinet->inet_daddr = ireq->ir_rmt_addr;
+	newinet->inet_rcv_saddr = ireq->ir_loc_addr;
+	newinet->inet_saddr = ireq->ir_loc_addr;
 	inet_opt = ireq->opt;
 	rcu_assign_pointer(newinet->inet_opt, inet_opt);
 	ireq->opt = NULL;
@@ -2194,18 +2195,6 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock);
 #ifdef CONFIG_PROC_FS
 /* Proc filesystem TCP sock list dumping. */
 
-static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
-{
-	return hlist_nulls_empty(head) ? NULL :
-		list_entry(head->first, struct inet_timewait_sock, tw_node);
-}
-
-static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
-{
-	return !is_a_nulls(tw->tw_node.next) ?
-		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
-}
-
 /*
  * Get next listener socket follow cur.  If cur is NULL, get first socket
  * starting from bucket given in st->bucket; when st->bucket is zero the
@@ -2309,10 +2298,9 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
 	return rc;
 }
 
-static inline bool empty_bucket(struct tcp_iter_state *st)
+static inline bool empty_bucket(const struct tcp_iter_state *st)
 {
-	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
-		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
+	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
 }
 
 /*
@@ -2329,7 +2317,6 @@ static void *established_get_first(struct seq_file *seq)
 	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
 		struct sock *sk;
 		struct hlist_nulls_node *node;
-		struct inet_timewait_sock *tw;
 		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
 
 		/* Lockless fast path for the common case of empty buckets */
@@ -2345,18 +2332,7 @@ static void *established_get_first(struct seq_file *seq)
 			rc = sk;
 			goto out;
 		}
-		st->state = TCP_SEQ_STATE_TIME_WAIT;
-		inet_twsk_for_each(tw, node,
-				   &tcp_hashinfo.ehash[st->bucket].twchain) {
-			if (tw->tw_family != st->family ||
-			    !net_eq(twsk_net(tw), net)) {
-				continue;
-			}
-			rc = tw;
-			goto out;
-		}
 		spin_unlock_bh(lock);
-		st->state = TCP_SEQ_STATE_ESTABLISHED;
 	}
 out:
 	return rc;
@@ -2365,7 +2341,6 @@ out:
 static void *established_get_next(struct seq_file *seq, void *cur)
 {
 	struct sock *sk = cur;
-	struct inet_timewait_sock *tw;
 	struct hlist_nulls_node *node;
 	struct tcp_iter_state *st = seq->private;
 	struct net *net = seq_file_net(seq);
@@ -2373,45 +2348,16 @@ static void *established_get_next(struct seq_file *seq, void *cur)
 	++st->num;
 	++st->offset;
 
-	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
-		tw = cur;
-		tw = tw_next(tw);
-get_tw:
-		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
-			tw = tw_next(tw);
-		}
-		if (tw) {
-			cur = tw;
-			goto out;
-		}
-		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
-		st->state = TCP_SEQ_STATE_ESTABLISHED;
-
-		/* Look for next non empty bucket */
-		st->offset = 0;
-		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
-		       empty_bucket(st))
-			;
-		if (st->bucket > tcp_hashinfo.ehash_mask)
-			return NULL;
-
-		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
-		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
-	} else
-		sk = sk_nulls_next(sk);
+	sk = sk_nulls_next(sk);
 
 	sk_nulls_for_each_from(sk, node) {
 		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
-			goto found;
+			return sk;
 	}
 
-	st->state = TCP_SEQ_STATE_TIME_WAIT;
-	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
-	goto get_tw;
-found:
-	cur = sk;
-out:
-	return cur;
+	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
+	++st->bucket;
+	return established_get_first(seq);
 }
 
 static void *established_get_idx(struct seq_file *seq, loff_t pos)
@@ -2464,10 +2410,9 @@ static void *tcp_seek_last_pos(struct seq_file *seq)
 		if (rc)
 			break;
 		st->bucket = 0;
+		st->state = TCP_SEQ_STATE_ESTABLISHED;
 		/* Fallthrough */
 	case TCP_SEQ_STATE_ESTABLISHED:
-	case TCP_SEQ_STATE_TIME_WAIT:
-		st->state = TCP_SEQ_STATE_ESTABLISHED;
 		if (st->bucket > tcp_hashinfo.ehash_mask)
 			break;
 		rc = established_get_first(seq);
@@ -2524,7 +2469,6 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 		}
 		break;
 	case TCP_SEQ_STATE_ESTABLISHED:
-	case TCP_SEQ_STATE_TIME_WAIT:
 		rc = established_get_next(seq, v);
 		break;
 	}
@@ -2548,7 +2492,6 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
 		if (v != SEQ_START_TOKEN)
 			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
 		break;
-	case TCP_SEQ_STATE_TIME_WAIT:
 	case TCP_SEQ_STATE_ESTABLISHED:
 		if (v)
 			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
@@ -2598,18 +2541,18 @@ void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
 EXPORT_SYMBOL(tcp_proc_unregister);
 
 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
-			 struct seq_file *f, int i, kuid_t uid, int *len)
+			 struct seq_file *f, int i, kuid_t uid)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	long delta = req->expires - jiffies;
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
-		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK%n",
+		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
 		i,
-		ireq->loc_addr,
+		ireq->ir_loc_addr,
 		ntohs(inet_sk(sk)->inet_sport),
-		ireq->rmt_addr,
-		ntohs(ireq->rmt_port),
+		ireq->ir_rmt_addr,
+		ntohs(ireq->ir_rmt_port),
 		TCP_SYN_RECV,
 		0, 0, /* could print option size, but that is af dependent. */
 		1, /* timers active (only the expire timer) */
@@ -2619,11 +2562,10 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
 		0, /* non standard timer */
 		0, /* open_requests have no inode */
 		atomic_read(&sk->sk_refcnt),
-		req,
-		len);
+		req);
 }
 
-static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
+static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
 {
 	int timer_active;
 	unsigned long timer_expires;
@@ -2662,7 +2604,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
-			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d%n",
+			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
 		i, src, srcp, dest, destp, sk->sk_state,
 		tp->write_seq - tp->snd_una,
 		rx_queue,
@@ -2679,12 +2621,11 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
 		tp->snd_cwnd,
 		sk->sk_state == TCP_LISTEN ?
 		    (fastopenq ? fastopenq->max_qlen : 0) :
-		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
-		len);
+		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
 }
 
 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
-			       struct seq_file *f, int i, int *len)
+			       struct seq_file *f, int i)
 {
 	__be32 dest, src;
 	__u16 destp, srcp;
@@ -2696,10 +2637,10 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
 	srcp = ntohs(tw->tw_sport);
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
-		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
+		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
 		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
 		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
-		atomic_read(&tw->tw_refcnt), tw, len);
+		atomic_read(&tw->tw_refcnt), tw);
 }
 
 #define TMPSZ 150
@@ -2707,11 +2648,11 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
 static int tcp4_seq_show(struct seq_file *seq, void *v)
 {
 	struct tcp_iter_state *st;
-	int len;
+	struct sock *sk = v;
 
+	seq_setwidth(seq, TMPSZ - 1);
 	if (v == SEQ_START_TOKEN) {
-		seq_printf(seq, "%-*s\n", TMPSZ - 1,
-			   " sl local_address rem_address st tx_queue "
+		seq_puts(seq, " sl local_address rem_address st tx_queue "
 			   "rx_queue tr tm->when retrnsmt uid timeout "
 			   "inode");
 		goto out;
@@ -2721,17 +2662,17 @@ static int tcp4_seq_show(struct seq_file *seq, void *v)
 	switch (st->state) {
 	case TCP_SEQ_STATE_LISTENING:
 	case TCP_SEQ_STATE_ESTABLISHED:
-		get_tcp4_sock(v, seq, st->num, &len);
+		if (sk->sk_state == TCP_TIME_WAIT)
+			get_timewait4_sock(v, seq, st->num);
+		else
+			get_tcp4_sock(v, seq, st->num);
 		break;
 	case TCP_SEQ_STATE_OPENREQ:
-		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
-		break;
-	case TCP_SEQ_STATE_TIME_WAIT:
-		get_timewait4_sock(v, seq, st->num, &len);
+		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid);
 		break;
 	}
-	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
 out:
+	seq_pad(seq, '\n');
 	return 0;
 }
 
@@ -2806,6 +2747,7 @@ struct proto tcp_prot = {
 	.orphan_count = &tcp_orphan_count,
 	.memory_allocated = &tcp_memory_allocated,
 	.memory_pressure = &tcp_memory_pressure,
+	.sysctl_mem = sysctl_tcp_mem,
 	.sysctl_wmem = sysctl_tcp_wmem,
 	.sysctl_rmem = sysctl_tcp_rmem,
 	.max_header = MAX_TCP_HEADER,