author		Hideo Aoki <haoki@redhat.com>	2007-12-31 03:11:19 -0500
committer	David S. Miller <davem@davemloft.net>	2008-01-28 18:00:18 -0500
commit		3ab224be6d69de912ee21302745ea45a99274dbc
tree		335dcef1cfacfefe3f36c21d5f144e011bc3bfba /net/ipv4/tcp_input.c
parent		a06b494b61de44617dd58612164bdde56fca7bfb
[NET] CORE: Introducing new memory accounting interface.
This patch introduces new memory accounting functions for each network
protocol. Most of them are renamed from the memory accounting functions
for stream protocols. At the same time, some stream memory accounting
functions are removed, since other functions do the same thing.

Renaming:
	sk_stream_free_skb()		->	sk_wmem_free_skb()
	__sk_stream_mem_reclaim()	->	__sk_mem_reclaim()
	sk_stream_mem_reclaim()		->	sk_mem_reclaim()
	sk_stream_mem_schedule()	->	__sk_mem_schedule()
	sk_stream_pages()		->	sk_mem_pages()
	sk_stream_rmem_schedule()	->	sk_rmem_schedule()
	sk_stream_wmem_schedule()	->	sk_wmem_schedule()
	sk_charge_skb()			->	sk_mem_charge()

Removing:
	sk_stream_rfree():	 consolidated into sock_rfree()
	sk_stream_set_owner_r(): consolidated into skb_set_owner_r()
	sk_stream_mem_schedule()

The following functions are added:
	sk_has_account():  check whether the protocol supports accounting
	sk_mem_uncharge(): do the opposite of sk_mem_charge()

In addition, to achieve consolidation, updating sk_wmem_queued is
removed from sk_mem_charge().

Next, to consolidate the memory accounting functions, this patch adds
memory accounting calls to the network core functions, and the existing
memory accounting calls are renamed to the new interface. Finally, the
existing memory accounting calls in TCP and SCTP are replaced with the
new interface.

Signed-off-by: Takahiro Yasui <tyasui@redhat.com>
Signed-off-by: Hideo Aoki <haoki@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
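For reference, the helpers this series introduces live in include/net/sock.h
and are not part of the tcp_input.c diff shown below. The following is a
simplified sketch of their shape, not a verbatim copy of the header; it
assumes a protocol opts in to accounting by providing a memory_allocated
counter in its struct proto, and that per-socket quota is tracked in
sk->sk_forward_alloc:

	/*
	 * Sketch of the new accounting primitives (simplified from the
	 * include/net/sock.h changes in this series).
	 */
	static inline int sk_has_account(struct sock *sk)
	{
		/* A protocol supports accounting iff it exposes a
		 * memory_allocated counter in its struct proto.
		 */
		return !!sk->sk_prot->memory_allocated;
	}

	static inline void sk_mem_charge(struct sock *sk, int size)
	{
		if (!sk_has_account(sk))
			return;
		/* Consume forward-allocated quota. Unlike the old
		 * sk_charge_skb(), this no longer updates sk_wmem_queued;
		 * callers adjust that field themselves.
		 */
		sk->sk_forward_alloc -= size;
	}

	static inline void sk_mem_uncharge(struct sock *sk, int size)
	{
		if (!sk_has_account(sk))
			return;
		/* Return quota to the socket's forward allocation. */
		sk->sk_forward_alloc += size;
	}

	static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
	{
		/* Replacement for sk_stream_free_skb(): undo the write-queue
		 * accounting for skb, then free it.
		 */
		sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
		sk->sk_wmem_queued -= skb->truesize;
		sk_mem_uncharge(sk, skb->truesize);
		__kfree_skb(skb);
	}

Note also that sk_rmem_schedule() takes a size in bytes rather than an
skb, which is why the receive-path call sites below now pass
skb->truesize.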
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index efea9873208e..722c9cbb91e3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -591,7 +591,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 			 * restart window, so that we send ACKs quickly.
 			 */
 			tcp_incr_quickack(sk);
-			sk_stream_mem_reclaim(sk);
+			sk_mem_reclaim(sk);
 		}
 	}
 	icsk->icsk_ack.lrcvtime = now;
@@ -2851,7 +2851,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets)
2851 break; 2851 break;
2852 2852
2853 tcp_unlink_write_queue(skb, sk); 2853 tcp_unlink_write_queue(skb, sk);
2854 sk_stream_free_skb(sk, skb); 2854 sk_wmem_free_skb(sk, skb);
2855 tcp_clear_all_retrans_hints(tp); 2855 tcp_clear_all_retrans_hints(tp);
2856 } 2856 }
2857 2857
@@ -3567,7 +3567,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
 	__skb_queue_purge(&tp->out_of_order_queue);
 	if (tcp_is_sack(tp))
 		tcp_sack_reset(&tp->rx_opt);
-	sk_stream_mem_reclaim(sk);
+	sk_mem_reclaim(sk);
 
 	if (!sock_flag(sk, SOCK_DEAD)) {
 		sk->sk_state_change(sk);
@@ -3850,12 +3850,12 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 queue_and_out:
 			if (eaten < 0 &&
 			    (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-			     !sk_stream_rmem_schedule(sk, skb))) {
+			     !sk_rmem_schedule(sk, skb->truesize))) {
 				if (tcp_prune_queue(sk) < 0 ||
-				    !sk_stream_rmem_schedule(sk, skb))
+				    !sk_rmem_schedule(sk, skb->truesize))
 					goto drop;
 			}
-			sk_stream_set_owner_r(skb, sk);
+			skb_set_owner_r(skb, sk);
 			__skb_queue_tail(&sk->sk_receive_queue, skb);
 		}
 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
@@ -3924,9 +3924,9 @@ drop:
 	TCP_ECN_check_ce(tp, skb);
 
 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-	    !sk_stream_rmem_schedule(sk, skb)) {
+	    !sk_rmem_schedule(sk, skb->truesize)) {
 		if (tcp_prune_queue(sk) < 0 ||
-		    !sk_stream_rmem_schedule(sk, skb))
+		    !sk_rmem_schedule(sk, skb->truesize))
 			goto drop;
 	}
 
@@ -3937,7 +3937,7 @@ drop:
 	SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
 		   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
-	sk_stream_set_owner_r(skb, sk);
+	skb_set_owner_r(skb, sk);
 
 	if (!skb_peek(&tp->out_of_order_queue)) {
 		/* Initial out of order segment, build 1 SACK. */
@@ -4079,7 +4079,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
 		TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
 		__skb_insert(nskb, skb->prev, skb, list);
-		sk_stream_set_owner_r(nskb, sk);
+		skb_set_owner_r(nskb, sk);
 
 		/* Copy data, releasing collapsed skbs. */
 		while (copy > 0) {
@@ -4177,7 +4177,7 @@ static int tcp_prune_queue(struct sock *sk)
 		     sk->sk_receive_queue.next,
 		     (struct sk_buff*)&sk->sk_receive_queue,
 		     tp->copied_seq, tp->rcv_nxt);
-	sk_stream_mem_reclaim(sk);
+	sk_mem_reclaim(sk);
 
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
 		return 0;
@@ -4197,7 +4197,7 @@ static int tcp_prune_queue(struct sock *sk)
 		 */
 		if (tcp_is_sack(tp))
 			tcp_sack_reset(&tp->rx_opt);
-		sk_stream_mem_reclaim(sk);
+		sk_mem_reclaim(sk);
 	}
 
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
@@ -4699,7 +4699,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 				/* Bulk data transfer: receiver */
 				__skb_pull(skb,tcp_header_len);
 				__skb_queue_tail(&sk->sk_receive_queue, skb);
-				sk_stream_set_owner_r(skb, sk);
+				skb_set_owner_r(skb, sk);
 				tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 			}
 