Diffstat (limited to 'net/ipv4')
 net/ipv4/tcp.c        | 23
 net/ipv4/tcp_input.c  | 26
 net/ipv4/tcp_output.c | 26
 net/ipv4/tcp_timer.c  |  8
 4 files changed, 45 insertions(+), 38 deletions(-)
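The hunks below are part of the switch from the TCP-specific stream accounting helpers to the generic per-socket memory accounting interface: sk_stream_wmem_schedule()/sk_stream_rmem_schedule() become sk_wmem_schedule()/sk_rmem_schedule(), sk_stream_mem_reclaim() becomes sk_mem_reclaim(), sk_stream_free_skb() becomes sk_wmem_free_skb(), sk_stream_set_owner_r() becomes skb_set_owner_r(), SK_STREAM_MEM_QUANTUM becomes SK_MEM_QUANTUM, and the old sk_charge_skb()/open-coded sk_forward_alloc updates become explicit sk_wmem_queued adjustments paired with sk_mem_charge()/sk_mem_uncharge(). The following is a minimal userspace sketch of that charge/uncharge pairing; struct sock_model and the *_model() helpers are simplified stand-ins for struct sock and the real helpers, not the kernel implementation.

	#include <stdio.h>

	struct sock_model {
		int sk_wmem_queued;	/* bytes queued for transmit */
		int sk_forward_alloc;	/* quota already scheduled from the pool */
	};

	/* Models sk_mem_charge(): consume previously scheduled quota. */
	static void sk_mem_charge_model(struct sock_model *sk, int size)
	{
		sk->sk_forward_alloc -= size;
	}

	/* Models sk_mem_uncharge(): return quota, as tcp_trim_head() now does. */
	static void sk_mem_uncharge_model(struct sock_model *sk, int size)
	{
		sk->sk_forward_alloc += size;
	}

	int main(void)
	{
		struct sock_model sk = { .sk_wmem_queued = 0, .sk_forward_alloc = 4096 };
		int truesize = 1500, trim = 200;

		/* skb_entail()/tcp_queue_skb() after the patch: */
		sk.sk_wmem_queued += truesize;
		sk_mem_charge_model(&sk, truesize);

		/* tcp_trim_head() trimming `trim` bytes off the head skb: */
		sk.sk_wmem_queued -= trim;
		sk_mem_uncharge_model(&sk, trim);

		printf("queued=%d forward_alloc=%d\n",
		       sk.sk_wmem_queued, sk.sk_forward_alloc);
		return 0;
	}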
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index fdaf965a6794..2cbfa6df7976 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -308,7 +308,7 @@ struct tcp_splice_state {
 /*
  * Pressure flag: try to collapse.
  * Technical note: it is used by multiple contexts non atomically.
- * All the sk_stream_mem_schedule() is of this nature: accounting
+ * All the __sk_mem_schedule() is of this nature: accounting
  * is strict, actions are advisory and have some latency.
  */
 int tcp_memory_pressure __read_mostly;
@@ -485,7 +485,8 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
 	tcb->sacked = 0;
 	skb_header_release(skb);
 	tcp_add_write_queue_tail(sk, skb);
-	sk_charge_skb(sk, skb);
+	sk->sk_wmem_queued += skb->truesize;
+	sk_mem_charge(sk, skb->truesize);
 	if (tp->nonagle & TCP_NAGLE_PUSH)
 		tp->nonagle &= ~TCP_NAGLE_PUSH;
 }
@@ -638,7 +639,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
 
 	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
 	if (skb) {
-		if (sk_stream_wmem_schedule(sk, skb->truesize)) {
+		if (sk_wmem_schedule(sk, skb->truesize)) {
 			/*
 			 * Make sure that we have exactly size bytes
 			 * available to the caller, no more, no less.
@@ -707,7 +708,7 @@ new_segment:
 			tcp_mark_push(tp, skb);
 			goto new_segment;
 		}
-		if (!sk_stream_wmem_schedule(sk, copy))
+		if (!sk_wmem_schedule(sk, copy))
 			goto wait_for_memory;
 
 		if (can_coalesce) {
@@ -721,7 +722,7 @@ new_segment:
 		skb->data_len += copy;
 		skb->truesize += copy;
 		sk->sk_wmem_queued += copy;
-		sk->sk_forward_alloc -= copy;
+		sk_mem_charge(sk, copy);
 		skb->ip_summed = CHECKSUM_PARTIAL;
 		tp->write_seq += copy;
 		TCP_SKB_CB(skb)->end_seq += copy;
@@ -928,7 +929,7 @@ new_segment:
 				if (copy > PAGE_SIZE - off)
 					copy = PAGE_SIZE - off;
 
-				if (!sk_stream_wmem_schedule(sk, copy))
+				if (!sk_wmem_schedule(sk, copy))
 					goto wait_for_memory;
 
 				if (!page) {
@@ -1019,7 +1020,7 @@ do_fault:
 		 * reset, where we can be unlinking the send_head.
 		 */
 		tcp_check_send_head(sk, skb);
-		sk_stream_free_skb(sk, skb);
+		sk_wmem_free_skb(sk, skb);
 	}
 
 do_error:
@@ -1738,7 +1739,7 @@ void tcp_close(struct sock *sk, long timeout)
 		__kfree_skb(skb);
 	}
 
-	sk_stream_mem_reclaim(sk);
+	sk_mem_reclaim(sk);
 
 	/* As outlined in RFC 2525, section 2.17, we send a RST here because
 	 * data was lost. To witness the awful effects of the old behavior of
@@ -1841,7 +1842,7 @@ adjudge_to_death:
 		}
 	}
 	if (sk->sk_state != TCP_CLOSE) {
-		sk_stream_mem_reclaim(sk);
+		sk_mem_reclaim(sk);
 		if (tcp_too_many_orphans(sk,
 				atomic_read(sk->sk_prot->orphan_count))) {
 			if (net_ratelimit())
@@ -2658,11 +2659,11 @@ void __init tcp_init(void)
 	limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
 	max_share = min(4UL*1024*1024, limit);
 
-	sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
+	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
 	sysctl_tcp_wmem[1] = 16*1024;
 	sysctl_tcp_wmem[2] = max(64*1024, max_share);
 
-	sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
+	sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
 	sysctl_tcp_rmem[1] = 87380;
 	sysctl_tcp_rmem[2] = max(87380, max_share);
 
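On the send side above, the per-stream wrappers are replaced by sk_wmem_schedule() with the same byte-count argument, and tcp_init() now seeds the wmem/rmem minimums from SK_MEM_QUANTUM. Below is a rough, self-contained model of the quantum-based scheduling such calls are built on; SK_MEM_QUANTUM_MODEL, pool_pages, and wmem_schedule_model() are illustrative assumptions, not the kernel's __sk_mem_schedule(), and limit enforcement is deliberately omitted.

	#include <stdbool.h>
	#include <stdio.h>

	#define SK_MEM_QUANTUM_MODEL 4096	/* assumed page-sized accounting unit */

	static long pool_pages;			/* stands in for the protocol-wide counter */

	struct sock_model {
		int sk_forward_alloc;
	};

	/* Model: use existing per-socket quota, else reserve whole quantums from the pool. */
	static bool wmem_schedule_model(struct sock_model *sk, int size)
	{
		if (sk->sk_forward_alloc >= size)
			return true;

		int pages = (size + SK_MEM_QUANTUM_MODEL - 1) / SK_MEM_QUANTUM_MODEL;

		pool_pages += pages;		/* accounting is strict ... */
		sk->sk_forward_alloc += pages * SK_MEM_QUANTUM_MODEL;
		return true;			/* ... pressure reaction omitted here */
	}

	int main(void)
	{
		struct sock_model sk = { .sk_forward_alloc = 0 };

		if (wmem_schedule_model(&sk, 1500))	/* as in sk_stream_alloc_skb()/sendmsg */
			sk.sk_forward_alloc -= 1500;	/* then sk_mem_charge() consumes it */

		printf("forward_alloc=%d pool_pages=%ld\n", sk.sk_forward_alloc, pool_pages);
		return 0;
	}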
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index efea9873208e..722c9cbb91e3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -591,7 +591,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 			 * restart window, so that we send ACKs quickly.
 			 */
 			tcp_incr_quickack(sk);
-			sk_stream_mem_reclaim(sk);
+			sk_mem_reclaim(sk);
 		}
 	}
 	icsk->icsk_ack.lrcvtime = now;
@@ -2851,7 +2851,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets)
 			break;
 
 		tcp_unlink_write_queue(skb, sk);
-		sk_stream_free_skb(sk, skb);
+		sk_wmem_free_skb(sk, skb);
 		tcp_clear_all_retrans_hints(tp);
 	}
 
@@ -3567,7 +3567,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
 	__skb_queue_purge(&tp->out_of_order_queue);
 	if (tcp_is_sack(tp))
 		tcp_sack_reset(&tp->rx_opt);
-	sk_stream_mem_reclaim(sk);
+	sk_mem_reclaim(sk);
 
 	if (!sock_flag(sk, SOCK_DEAD)) {
 		sk->sk_state_change(sk);
@@ -3850,12 +3850,12 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 queue_and_out:
 			if (eaten < 0 &&
 			    (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-			     !sk_stream_rmem_schedule(sk, skb))) {
+			     !sk_rmem_schedule(sk, skb->truesize))) {
 				if (tcp_prune_queue(sk) < 0 ||
-				    !sk_stream_rmem_schedule(sk, skb))
+				    !sk_rmem_schedule(sk, skb->truesize))
 					goto drop;
 			}
-			sk_stream_set_owner_r(skb, sk);
+			skb_set_owner_r(skb, sk);
 			__skb_queue_tail(&sk->sk_receive_queue, skb);
 		}
 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
@@ -3924,9 +3924,9 @@ drop:
 	TCP_ECN_check_ce(tp, skb);
 
 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-	    !sk_stream_rmem_schedule(sk, skb)) {
+	    !sk_rmem_schedule(sk, skb->truesize)) {
 		if (tcp_prune_queue(sk) < 0 ||
-		    !sk_stream_rmem_schedule(sk, skb))
+		    !sk_rmem_schedule(sk, skb->truesize))
 			goto drop;
 	}
 
@@ -3937,7 +3937,7 @@ drop:
 	SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
 		   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
-	sk_stream_set_owner_r(skb, sk);
+	skb_set_owner_r(skb, sk);
 
 	if (!skb_peek(&tp->out_of_order_queue)) {
 		/* Initial out of order segment, build 1 SACK. */
@@ -4079,7 +4079,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
 		TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
 		__skb_insert(nskb, skb->prev, skb, list);
-		sk_stream_set_owner_r(nskb, sk);
+		skb_set_owner_r(nskb, sk);
 
 		/* Copy data, releasing collapsed skbs. */
 		while (copy > 0) {
@@ -4177,7 +4177,7 @@ static int tcp_prune_queue(struct sock *sk)
 		     sk->sk_receive_queue.next,
 		     (struct sk_buff*)&sk->sk_receive_queue,
 		     tp->copied_seq, tp->rcv_nxt);
-	sk_stream_mem_reclaim(sk);
+	sk_mem_reclaim(sk);
 
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
 		return 0;
@@ -4197,7 +4197,7 @@ static int tcp_prune_queue(struct sock *sk)
 		 */
 		if (tcp_is_sack(tp))
 			tcp_sack_reset(&tp->rx_opt);
-		sk_stream_mem_reclaim(sk);
+		sk_mem_reclaim(sk);
 	}
 
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
@@ -4699,7 +4699,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			/* Bulk data transfer: receiver */
 			__skb_pull(skb,tcp_header_len);
 			__skb_queue_tail(&sk->sk_receive_queue, skb);
-			sk_stream_set_owner_r(skb, sk);
+			skb_set_owner_r(skb, sk);
 			tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 		}
 
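On the receive side, sk_stream_rmem_schedule(sk, skb) gives way to sk_rmem_schedule(sk, skb->truesize), so the byte count is now passed explicitly, and sk_stream_set_owner_r() is dropped in favour of the generic skb_set_owner_r(). The queue-or-drop decision in tcp_data_queue() keeps its shape: try to account for the skb, prune once and retry on failure, otherwise drop. A self-contained sketch of that flow follows; the *_model() state and helpers are toy stand-ins for sk_rmem_alloc/sk_rcvbuf, sk_rmem_schedule() and tcp_prune_queue(), not their real logic.

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy state standing in for sk->sk_rmem_alloc, sk->sk_rcvbuf and the quota. */
	static int rmem_alloc = 90000, rcvbuf = 87380, forward_alloc = 0;

	/* Stand-in for sk_rmem_schedule(sk, size): succeed only if quota remains. */
	static bool rmem_schedule_model(int size)
	{
		return forward_alloc >= size;
	}

	/* Stand-in for tcp_prune_queue(): pretend collapsing freed some memory. */
	static int prune_queue_model(void)
	{
		rmem_alloc = 60000;
		forward_alloc = 4096;
		return 0;
	}

	/* Shape of the queue_and_out / out-of-order checks after the patch. */
	static bool may_queue(int truesize)
	{
		if (rmem_alloc > rcvbuf || !rmem_schedule_model(truesize)) {
			if (prune_queue_model() < 0 || !rmem_schedule_model(truesize))
				return false;		/* goto drop; */
		}
		return true;				/* skb_set_owner_r() + queue tail */
	}

	int main(void)
	{
		printf("queue 1500-byte skb: %s\n", may_queue(1500) ? "yes" : "drop");
		return 0;
	}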
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 9058e0a25107..7a4834a2ae84 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -637,7 +637,8 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
 	skb_header_release(skb);
 	tcp_add_write_queue_tail(sk, skb);
-	sk_charge_skb(sk, skb);
+	sk->sk_wmem_queued += skb->truesize;
+	sk_mem_charge(sk, skb->truesize);
 }
 
 static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
@@ -701,7 +702,8 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
 	if (buff == NULL)
 		return -ENOMEM; /* We'll just try again later. */
 
-	sk_charge_skb(sk, buff);
+	sk->sk_wmem_queued += buff->truesize;
+	sk_mem_charge(sk, buff->truesize);
 	nlen = skb->len - len - nsize;
 	buff->truesize += nlen;
 	skb->truesize -= nlen;
@@ -825,7 +827,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 
 	skb->truesize -= len;
 	sk->sk_wmem_queued -= len;
-	sk->sk_forward_alloc += len;
+	sk_mem_uncharge(sk, len);
 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
 
 	/* Any change of skb->len requires recalculation of tso
@@ -1197,7 +1199,8 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 	if (unlikely(buff == NULL))
 		return -ENOMEM;
 
-	sk_charge_skb(sk, buff);
+	sk->sk_wmem_queued += buff->truesize;
+	sk_mem_charge(sk, buff->truesize);
 	buff->truesize += nlen;
 	skb->truesize -= nlen;
 
@@ -1350,7 +1353,8 @@ static int tcp_mtu_probe(struct sock *sk)
 	/* We're allowed to probe. Build it now. */
 	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
 		return -1;
-	sk_charge_skb(sk, nskb);
+	sk->sk_wmem_queued += nskb->truesize;
+	sk_mem_charge(sk, nskb->truesize);
 
 	skb = tcp_send_head(sk);
 
@@ -1377,7 +1381,7 @@ static int tcp_mtu_probe(struct sock *sk)
 			 * Throw it away. */
 			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
 			tcp_unlink_write_queue(skb, sk);
-			sk_stream_free_skb(sk, skb);
+			sk_wmem_free_skb(sk, skb);
 		} else {
 			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
 						   ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
@@ -1744,7 +1748,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
 		/* changed transmit queue under us so clear hints */
 		tcp_clear_retrans_hints_partial(tp);
 
-		sk_stream_free_skb(sk, next_skb);
+		sk_wmem_free_skb(sk, next_skb);
 	}
 }
 
@@ -2139,8 +2143,9 @@ int tcp_send_synack(struct sock *sk)
 			tcp_unlink_write_queue(skb, sk);
 			skb_header_release(nskb);
 			__tcp_add_write_queue_head(sk, nskb);
-			sk_stream_free_skb(sk, skb);
-			sk_charge_skb(sk, nskb);
+			sk_wmem_free_skb(sk, skb);
+			sk->sk_wmem_queued += nskb->truesize;
+			sk_mem_charge(sk, nskb->truesize);
 			skb = nskb;
 		}
 
@@ -2343,7 +2348,8 @@ int tcp_connect(struct sock *sk)
 	tp->retrans_stamp = TCP_SKB_CB(buff)->when;
 	skb_header_release(buff);
 	__tcp_add_write_queue_tail(sk, buff);
-	sk_charge_skb(sk, buff);
+	sk->sk_wmem_queued += buff->truesize;
+	sk_mem_charge(sk, buff->truesize);
 	tp->packets_out += tcp_skb_pcount(buff);
 	tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
 
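The sk_wmem_free_skb() calls that replace sk_stream_free_skb() throughout tcp_output.c (and tcp_input.c above) are the mirror image of the queue/charge pairs: the skb's truesize leaves sk_wmem_queued and its quota is returned before the skb is freed. A hedged sketch of that pairing follows; free_queued_model() is an illustrative stand-in, not the helper's actual definition, and the skb free itself is omitted.

	#include <stdio.h>

	struct sock_model {
		int sk_wmem_queued;
		int sk_forward_alloc;
	};

	/* Illustrative stand-in for the accounting side of sk_wmem_free_skb(). */
	static void free_queued_model(struct sock_model *sk, int truesize)
	{
		sk->sk_wmem_queued -= truesize;		/* skb leaves the write queue */
		sk->sk_forward_alloc += truesize;	/* quota handed back (sk_mem_uncharge) */
	}

	int main(void)
	{
		struct sock_model sk = { .sk_wmem_queued = 1500, .sk_forward_alloc = 2596 };

		/* e.g. tcp_clean_rtx_queue() dropping a fully acked skb */
		free_queued_model(&sk, 1500);
		printf("queued=%d forward_alloc=%d\n",
		       sk.sk_wmem_queued, sk.sk_forward_alloc);
		return 0;
	}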
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index ea85bc00c61f..17931be6d584 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -186,7 +186,7 @@ static void tcp_delack_timer(unsigned long data)
 		goto out_unlock;
 	}
 
-	sk_stream_mem_reclaim(sk);
+	sk_mem_reclaim(sk);
 
 	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
 		goto out;
@@ -226,7 +226,7 @@ static void tcp_delack_timer(unsigned long data)
 
 out:
 	if (tcp_memory_pressure)
-		sk_stream_mem_reclaim(sk);
+		sk_mem_reclaim(sk);
 out_unlock:
 	bh_unlock_sock(sk);
 	sock_put(sk);
@@ -420,7 +420,7 @@ static void tcp_write_timer(unsigned long data)
 	TCP_CHECK_TIMER(sk);
 
 out:
-	sk_stream_mem_reclaim(sk);
+	sk_mem_reclaim(sk);
 out_unlock:
 	bh_unlock_sock(sk);
 	sock_put(sk);
@@ -514,7 +514,7 @@ static void tcp_keepalive_timer (unsigned long data)
 	}
 
 	TCP_CHECK_TIMER(sk);
-	sk_stream_mem_reclaim(sk);
+	sk_mem_reclaim(sk);
 
 resched:
 	inet_csk_reset_keepalive_timer (sk, elapsed);
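In tcp_timer.c the delack, write and keepalive timers simply switch to sk_mem_reclaim(), which hands unused per-socket quota back to the shared pool (the delack path does so again explicitly under tcp_memory_pressure). The sketch below models that reclaim step; the quantum size, pool_pages counter and mem_reclaim_model() are assumptions mirroring the sketch after the diffstat, not the kernel's sk_mem_reclaim().

	#include <stdio.h>

	#define SK_MEM_QUANTUM_MODEL 4096	/* assumed accounting unit, as above */

	static long pool_pages = 8;		/* stands in for the protocol-wide counter */

	struct sock_model {
		int sk_forward_alloc;
	};

	/* Rough model of sk_mem_reclaim(): return whole unused quantums to the pool. */
	static void mem_reclaim_model(struct sock_model *sk)
	{
		int pages = sk->sk_forward_alloc / SK_MEM_QUANTUM_MODEL;

		if (pages > 0) {
			pool_pages -= pages;
			sk->sk_forward_alloc -= pages * SK_MEM_QUANTUM_MODEL;
		}
	}

	int main(void)
	{
		struct sock_model sk = { .sk_forward_alloc = 2 * SK_MEM_QUANTUM_MODEL + 100 };

		mem_reclaim_model(&sk);		/* as the delack/write/keepalive timers do */
		printf("forward_alloc=%d pool_pages=%ld\n", sk.sk_forward_alloc, pool_pages);
		return 0;
	}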