author	David S. Miller <davem@davemloft.net>	2005-07-08 17:57:23 -0400
committer	David S. Miller <davem@davemloft.net>	2005-07-08 17:57:23 -0400
commit	b03efcfb2180289718991bb984044ce6c5b7d1b0 (patch)
tree	f3b0c6c4eaf0991c28b7116a20994b48398eea57 /net/ipv4
parent	a92b7b80579fe68fe229892815c750f6652eb6a9 (diff)
[NET]: Transform skb_queue_len() binary tests into skb_queue_empty()
This is part of the grand scheme to eliminate the qlen member of skb_queue_head, and subsequently remove the 'list' member of sk_buff.

Most users of skb_queue_len() want to know if the queue is empty or not, and that's trivially done with skb_queue_empty(), which doesn't use the skb_queue_head->qlen member and instead uses the queue list's emptiness as the test.

Signed-off-by: David S. Miller <davem@davemloft.net>
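For reference, the difference between the two helpers comes down to which field of struct sk_buff_head they read. A minimal sketch of their definitions, modeled on the include/linux/skbuff.h of this era (simplified here for illustration, not the verbatim kernel source):

	/* Empty iff the list head points back at itself; qlen is never read. */
	static inline int skb_queue_empty(const struct sk_buff_head *list)
	{
		return list->next == (struct sk_buff *)list;
	}

	/* Reads the qlen member that this series aims to eliminate. */
	static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
	{
		return list_->qlen;
	}

Hence a binary test such as if (skb_queue_len(q)) can be rewritten as if (!skb_queue_empty(q)) with identical behavior, which is exactly the transformation the hunks below apply.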
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/tcp.c	8
-rw-r--r--	net/ipv4/tcp_input.c	11
-rw-r--r--	net/ipv4/tcp_timer.c	5
3 files changed, 11 insertions(+), 13 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 29894c749163..ddb6ce4ecff2 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1105,7 +1105,7 @@ static void tcp_prequeue_process(struct sock *sk)
 	struct sk_buff *skb;
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	NET_ADD_STATS_USER(LINUX_MIB_TCPPREQUEUED, skb_queue_len(&tp->ucopy.prequeue));
+	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
 
 	/* RX process wants to run with disabled BHs, though it is not
 	 * necessary */
@@ -1369,7 +1369,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	 * is not empty. It is more elegant, but eats cycles,
 	 * unfortunately.
 	 */
-	if (skb_queue_len(&tp->ucopy.prequeue))
+	if (!skb_queue_empty(&tp->ucopy.prequeue))
 		goto do_prequeue;
 
 	/* __ Set realtime policy in scheduler __ */
@@ -1394,7 +1394,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	}
 
 	if (tp->rcv_nxt == tp->copied_seq &&
-	    skb_queue_len(&tp->ucopy.prequeue)) {
+	    !skb_queue_empty(&tp->ucopy.prequeue)) {
 do_prequeue:
 		tcp_prequeue_process(sk);
 
@@ -1476,7 +1476,7 @@ skip_copy:
 	} while (len > 0);
 
 	if (user_recv) {
-		if (skb_queue_len(&tp->ucopy.prequeue)) {
+		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
 			int chunk;
 
 			tp->ucopy.len = copied > 0 ? len : 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8de2f1071c2b..53a8a5399f1e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2802,7 +2802,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
 	int this_sack;
 
 	/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
-	if (skb_queue_len(&tp->out_of_order_queue) == 0) {
+	if (skb_queue_empty(&tp->out_of_order_queue)) {
 		tp->rx_opt.num_sacks = 0;
 		tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
 		return;
@@ -2935,13 +2935,13 @@ queue_and_out:
 		if(th->fin)
 			tcp_fin(skb, sk, th);
 
-		if (skb_queue_len(&tp->out_of_order_queue)) {
+		if (!skb_queue_empty(&tp->out_of_order_queue)) {
 			tcp_ofo_queue(sk);
 
 			/* RFC2581. 4.2. SHOULD send immediate ACK, when
 			 * gap in queue is filled.
 			 */
-			if (!skb_queue_len(&tp->out_of_order_queue))
+			if (skb_queue_empty(&tp->out_of_order_queue))
 				tp->ack.pingpong = 0;
 		}
 
@@ -3249,9 +3249,8 @@ static int tcp_prune_queue(struct sock *sk)
 	 * This must not ever occur. */
 
 	/* First, purge the out_of_order queue. */
-	if (skb_queue_len(&tp->out_of_order_queue)) {
-		NET_ADD_STATS_BH(LINUX_MIB_OFOPRUNED,
-				 skb_queue_len(&tp->out_of_order_queue));
+	if (!skb_queue_empty(&tp->out_of_order_queue)) {
+		NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
 		__skb_queue_purge(&tp->out_of_order_queue);
 
 		/* Reset SACK state. A conforming SACK implementation will
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index b127b4498565..0084227438c2 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -231,11 +231,10 @@ static void tcp_delack_timer(unsigned long data)
 	}
 	tp->ack.pending &= ~TCP_ACK_TIMER;
 
-	if (skb_queue_len(&tp->ucopy.prequeue)) {
+	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
 		struct sk_buff *skb;
 
-		NET_ADD_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED,
-				 skb_queue_len(&tp->ucopy.prequeue));
+		NET_INC_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED);
 
 		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
 			sk->sk_backlog_rcv(sk, skb);