author      Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>   2009-11-13 16:56:33 -0500
committer   David S. Miller <davem@davemloft.net>       2009-11-13 16:56:33 -0500
commit      d792c1006fe92448217b71513d3955868358271d (patch)
tree        918d3fea89ab7f053a0a3f15c2b2feff24f81ef5 /net/ipv4
parent      d01032e4fd33110f9f3a085a36cb819c1dfc5827 (diff)
tcp: provide more information on the tcp receive_queue bugs
The addition of rcv_nxt to the messages makes it possible to discern whether the skb was out of place or whether tp->copied_seq got messed up. Also catch unusual combinations of flags where relevant (sadly we might miss the flags of the actual offender, since that call may already have returned).

We would perhaps also want to forward copied_seq somewhere; otherwise we might end up in a loop that keeps hitting the WARN. Where to do that safely is not clear at this stage until more is known, but this patch does not make the situation significantly worse.

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
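The conversion below relies on WARN() both printing its message and evaluating to the tested condition, which lets the tcp_recvmsg() hunk warn and break out of the loop in a single if (). Here is a minimal user-space sketch of that contract; the WARN macro below is an illustrative stand-in, not the kernel's asm-generic/bug.h definition, and the sequence values are made up.

/* User-space sketch only: mimics WARN()'s "log and return the condition"
 * behaviour so it can drive an if () directly, as the recvmsg hunk does. */
#include <stdio.h>

#define WARN(cond, ...) ({                          \
        int __ret = !!(cond);                       \
        if (__ret)                                  \
                fprintf(stderr, __VA_ARGS__);       \
        __ret;                                      \
})

int main(void)
{
        unsigned int copied_seq = 0x10, skb_seq = 0x20, rcv_nxt = 0x30, flags = 0;

        /* Warn and bail out in one step, mirroring the tcp_recvmsg() change. */
        if (WARN(copied_seq < skb_seq,
                 "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
                 copied_seq, skb_seq, rcv_nxt, flags))
                return 1;

        return 0;
}

WARN_ON() returns the same condition but takes no format string, which is why the tcp_cleanup_rbuf() hunk switches to WARN() to get copied_seq, end_seq and rcv_nxt into the log.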
Diffstat (limited to 'net/ipv4')
-rw-r--r--   net/ipv4/tcp.c   19
1 file changed, 12 insertions, 7 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 98440ad82558..f1813bc71088 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1183,7 +1183,9 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 #if TCP_DEBUG
         struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
 
-        WARN_ON(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
+        WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
+             KERN_INFO "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
+             tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
 #endif
 
         if (inet_csk_ack_scheduled(sk)) {
@@ -1430,11 +1432,13 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 /* Now that we have two receive queues this
                  * shouldn't happen.
                  */
-                if (before(*seq, TCP_SKB_CB(skb)->seq)) {
-                        printk(KERN_INFO "recvmsg bug: copied %X "
-                               "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
+                if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
+                         KERN_INFO "recvmsg bug: copied %X "
+                         "seq %X rcvnxt %X fl %X\n", *seq,
+                         TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
+                         flags))
                         break;
-                }
+
                 offset = *seq - TCP_SKB_CB(skb)->seq;
                 if (tcp_hdr(skb)->syn)
                         offset--;
@@ -1443,8 +1447,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 if (tcp_hdr(skb)->fin)
                         goto found_fin_ok;
                 WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
-                                "copied %X seq %X\n", *seq,
-                                TCP_SKB_CB(skb)->seq);
+                                "copied %X seq %X rcvnxt %X fl %X\n",
+                                *seq, TCP_SKB_CB(skb)->seq,
+                                tp->rcv_nxt, flags);
         }
 
         /* Well, if we have backlog, try to process it now yet. */
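The warnings in the first two hunks gate on before(), TCP's wrap-safe 32-bit sequence-number comparison from include/net/tcp.h. The standalone sketch below illustrates how it works; the exact kernel signature may differ slightly from this reconstruction.

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "seq1 is earlier than seq2" test: the signed view of the
 * 32-bit difference is negative exactly when seq1 precedes seq2, even
 * across a sequence-number wrap. */
static inline int before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
        /* 0xFFFFFFF0 precedes 0x00000010 once the sequence space wraps. */
        printf("%d\n", before(0xFFFFFFF0u, 0x10u));   /* prints 1 */
        printf("%d\n", before(0x10u, 0xFFFFFFF0u));   /* prints 0 */
        return 0;
}

This is why the cleanup_rbuf check can assert copied_seq has not run past the head skb's end_seq without worrying about sequence-space wraparound.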