author		Eric Dumazet <edumazet@google.com>	2012-04-23 13:34:36 -0400
committer	David S. Miller <davem@davemloft.net>	2012-04-23 23:36:58 -0400
commit		783c175f902b1ae011f12de45770e7912638ea1a (patch)
tree		7da5c4b2280c15b4d54e13ed95c1351a14800018
parent		d7ccf7c0a0585a126109a4b7c2a309184bfa4cba (diff)
tcp: tcp_try_coalesce returns a boolean
This clarifies code intention, as suggested by David.

Suggested-by: David Miller <davem@davemloft.net>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
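For illustration only, a minimal userspace sketch of the convention this patch adopts: a coalesce-style helper that returns bool, where true means the caller should free the source buffer instead of queueing it. Every name below (struct buf, try_coalesce) is a hypothetical stand-in, not kernel API.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct buf {                    /* hypothetical stand-in for sk_buff */
        size_t len;
        char data[64];
};

/* Returns true if @from was merged into @to, so the caller
 * should free @from instead of queueing it. */
static bool try_coalesce(struct buf *to, const struct buf *from)
{
        if (from->len > sizeof(to->data) - to->len)
                return false;   /* no tailroom: caller queues @from */
        memcpy(to->data + to->len, from->data, from->len);
        to->len += from->len;
        return true;
}

int main(void)
{
        struct buf tail = { .len = 5, .data = "hello" };
        struct buf skb  = { .len = 6, .data = " world" };

        if (try_coalesce(&tail, &skb))
                printf("coalesced: %.*s\n", (int)tail.len, tail.data);
        else
                printf("queued separately\n");
        return 0;
}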
-rw-r--r--	net/ipv4/tcp_input.c	18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c1c611b385a7..c93b0cbb7fc1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4460,23 +4460,23 @@ static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
  * to reduce overall memory use and queue lengths, if cost is small.
  * Packets in ofo or receive queues can stay a long time.
  * Better try to coalesce them right now to avoid future collapses.
- * Returns > 0 value if caller should free @from instead of queueing it
+ * Returns true if caller should free @from instead of queueing it
  */
-static int tcp_try_coalesce(struct sock *sk,
-                            struct sk_buff *to,
-                            struct sk_buff *from)
+static bool tcp_try_coalesce(struct sock *sk,
+                             struct sk_buff *to,
+                             struct sk_buff *from)
 {
         int len = from->len;
 
         if (tcp_hdr(from)->fin)
-                return 0;
+                return false;
         if (len <= skb_tailroom(to)) {
                 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
 merge:
                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
                 TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
                 TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
-                return 1;
+                return true;
         }
         if (skb_headlen(from) == 0 &&
             !skb_has_frag_list(to) &&
@@ -4499,7 +4499,7 @@ merge:
                 to->data_len += len;
                 goto merge;
         }
-        return 0;
+        return false;
 }
 
 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
@@ -4540,7 +4540,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
                 end_seq = TCP_SKB_CB(skb)->end_seq;
 
                 if (seq == TCP_SKB_CB(skb1)->end_seq) {
-                        if (tcp_try_coalesce(sk, skb1, skb) <= 0) {
+                        if (!tcp_try_coalesce(sk, skb1, skb)) {
                                 __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
                         } else {
                                 __kfree_skb(skb);
@@ -4672,7 +4672,7 @@ queue_and_out:
                         goto drop;
 
                 tail = skb_peek_tail(&sk->sk_receive_queue);
-                eaten = tail ? tcp_try_coalesce(sk, tail, skb) : -1;
+                eaten = (tail && tcp_try_coalesce(sk, tail, skb)) ? 1 : 0;
                 if (eaten <= 0) {
                         skb_set_owner_r(skb, sk);
                         __skb_queue_tail(&sk->sk_receive_queue, skb);
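A note on the last hunk: eaten stays an int, since the surrounding code in tcp_data_queue() still compares it with <= 0, so the boolean coalesce result is mapped to 1/0 explicitly, and the && short-circuit folds the NULL check on tail and the coalesce attempt into one expression. A standalone sketch of that idiom; the names (consume, sink) are hypothetical:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical bool-returning helper, standing in for tcp_try_coalesce(). */
static bool consume(int *sink, int value)
{
        if (value < 0)
                return false;
        *sink += value;
        return true;
}

int main(void)
{
        int sink = 0;
        int *tail = &sink;      /* may be NULL when the queue is empty */
        int eaten;

        /* Mirrors eaten = (tail && tcp_try_coalesce(...)) ? 1 : 0;
         * tail is checked and the bool result becomes an int in one step. */
        eaten = (tail && consume(tail, 42)) ? 1 : 0;

        if (eaten <= 0)
                printf("not consumed: queue it\n");
        else
                printf("consumed: free it (sink=%d)\n", sink);
        return 0;
}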