author		Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>	2008-07-26 00:43:18 -0400
committer	David S. Miller <davem@davemloft.net>		2008-07-26 00:43:18 -0400
commit		547b792cac0a038b9dbf958d3c120df3740b5572 (patch)
tree		08554d083b0ca7d65739dc1ce12f9b12a9b8e1f8 /net/ipv4
parent		53e5e96ec18da6f65e89f05674711e1c93d8df67 (diff)
net: convert BUG_TRAP to generic WARN_ON
Removes a legacy reinvent-the-wheel type thing. The generic machinery integrates much better with automated debugging aids such as kerneloops.org (and others), and is unambiguous due to better naming. Non-intuitively, BUG_TRAP() is actually equivalent to WARN_ON() rather than BUG_ON(), though some call sites might eventually be promoted to BUG_ON(); I left that for the future. I could make at least one BUILD_BUG_ON conversion.

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
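For illustration, here is a small user-space sketch of the conversion rule applied throughout this patch; the macro bodies below only approximate the semantics of the kernel's BUG_TRAP() and WARN_ON() and are not the kernel definitions. BUG_TRAP(cond) complained when cond was false, while WARN_ON(cond) warns when cond is true, so each call becomes WARN_ON with the condition inverted rather than merely wrapped in !(...): BUG_TRAP(!p) becomes WARN_ON(p), BUG_TRAP(a == b) becomes WARN_ON(a != b), and compound conditions such as BUG_TRAP(!a || b) become WARN_ON(a && !b) by De Morgan.

/* warn_demo.c - user-space approximation only; build with: cc -o warn_demo warn_demo.c */
#include <stdio.h>

/* Old-style assertion: complain when the condition is FALSE. */
#define BUG_TRAP(x) do { \
	if (!(x)) \
		fprintf(stderr, "assertion (%s) failed at %s (%d)\n", \
			#x, __FILE__, __LINE__); \
} while (0)

/* Generic helper: complain when the condition is TRUE. */
#define WARN_ON(x) do { \
	if (x) \
		fprintf(stderr, "WARNING at %s:%d: %s\n", \
			__FILE__, __LINE__, #x); \
} while (0)

int main(void)
{
	int wmem_queued = 1;	/* stand-in for sk->sk_wmem_queued */

	/* Old form asserts the write queue is empty ... */
	BUG_TRAP(!wmem_queued);
	/* ... new form warns if it is not: the negation is folded into
	 * the condition, which is why the patch rewrites e.g.
	 * BUG_TRAP(!sk->sk_wmem_queued) as WARN_ON(sk->sk_wmem_queued). */
	WARN_ON(wmem_queued);

	return 0;
}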
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/af_inet.c               14
-rw-r--r--  net/ipv4/devinet.c                6
-rw-r--r--  net/ipv4/inet_connection_sock.c  18
-rw-r--r--  net/ipv4/inet_fragment.c          4
-rw-r--r--  net/ipv4/inet_hashtables.c        8
-rw-r--r--  net/ipv4/inet_timewait_sock.c     2
-rw-r--r--  net/ipv4/ip_fragment.c            4
-rw-r--r--  net/ipv4/ip_output.c              2
-rw-r--r--  net/ipv4/tcp.c                   12
-rw-r--r--  net/ipv4/tcp_input.c             20
-rw-r--r--  net/ipv4/tcp_ipv4.c               2
-rw-r--r--  net/ipv4/tcp_timer.c              2
12 files changed, 47 insertions, 47 deletions
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index dd919d84285f..a107f49eea41 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -148,10 +148,10 @@ void inet_sock_destruct(struct sock *sk)
 		return;
 	}
 
-	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
-	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
-	BUG_TRAP(!sk->sk_wmem_queued);
-	BUG_TRAP(!sk->sk_forward_alloc);
+	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
+	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+	WARN_ON(sk->sk_wmem_queued);
+	WARN_ON(sk->sk_forward_alloc);
 
 	kfree(inet->opt);
 	dst_release(sk->sk_dst_cache);
@@ -341,7 +341,7 @@ lookup_protocol:
 	answer_flags = answer->flags;
 	rcu_read_unlock();
 
-	BUG_TRAP(answer_prot->slab != NULL);
+	WARN_ON(answer_prot->slab == NULL);
 
 	err = -ENOBUFS;
 	sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot);
@@ -661,8 +661,8 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
 
 	lock_sock(sk2);
 
-	BUG_TRAP((1 << sk2->sk_state) &
-		 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE));
+	WARN_ON(!((1 << sk2->sk_state) &
+		  (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
 
 	sock_graft(sk2, newsock);
 
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 2e667e2f90df..91d3d96805d0 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -138,8 +138,8 @@ void in_dev_finish_destroy(struct in_device *idev)
 {
 	struct net_device *dev = idev->dev;
 
-	BUG_TRAP(!idev->ifa_list);
-	BUG_TRAP(!idev->mc_list);
+	WARN_ON(idev->ifa_list);
+	WARN_ON(idev->mc_list);
 #ifdef NET_REFCNT_DEBUG
 	printk(KERN_DEBUG "in_dev_finish_destroy: %p=%s\n",
 	       idev, dev ? dev->name : "NIL");
@@ -399,7 +399,7 @@ static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
 	}
 	ipv4_devconf_setall(in_dev);
 	if (ifa->ifa_dev != in_dev) {
-		BUG_TRAP(!ifa->ifa_dev);
+		WARN_ON(ifa->ifa_dev);
 		in_dev_hold(in_dev);
 		ifa->ifa_dev = in_dev;
 	}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index bb81c958b744..0c1ae68ee84b 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -167,7 +167,7 @@ tb_not_found:
 success:
 	if (!inet_csk(sk)->icsk_bind_hash)
 		inet_bind_hash(sk, tb, snum);
-	BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
+	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
 	ret = 0;
 
 fail_unlock:
@@ -260,7 +260,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
 	}
 
 	newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
-	BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
+	WARN_ON(newsk->sk_state == TCP_SYN_RECV);
 out:
 	release_sock(sk);
 	return newsk;
@@ -386,7 +386,7 @@ struct request_sock *inet_csk_search_req(const struct sock *sk,
 		    ireq->rmt_addr == raddr &&
 		    ireq->loc_addr == laddr &&
 		    AF_INET_FAMILY(req->rsk_ops->family)) {
-			BUG_TRAP(!req->sk);
+			WARN_ON(req->sk);
 			*prevp = prev;
 			break;
 		}
@@ -539,14 +539,14 @@ EXPORT_SYMBOL_GPL(inet_csk_clone);
  */
 void inet_csk_destroy_sock(struct sock *sk)
 {
-	BUG_TRAP(sk->sk_state == TCP_CLOSE);
-	BUG_TRAP(sock_flag(sk, SOCK_DEAD));
+	WARN_ON(sk->sk_state != TCP_CLOSE);
+	WARN_ON(!sock_flag(sk, SOCK_DEAD));
 
 	/* It cannot be in hash table! */
-	BUG_TRAP(sk_unhashed(sk));
+	WARN_ON(!sk_unhashed(sk));
 
 	/* If it has not 0 inet_sk(sk)->num, it must be bound */
-	BUG_TRAP(!inet_sk(sk)->num || inet_csk(sk)->icsk_bind_hash);
+	WARN_ON(inet_sk(sk)->num && !inet_csk(sk)->icsk_bind_hash);
 
 	sk->sk_prot->destroy(sk);
 
@@ -629,7 +629,7 @@ void inet_csk_listen_stop(struct sock *sk)
 
 		local_bh_disable();
 		bh_lock_sock(child);
-		BUG_TRAP(!sock_owned_by_user(child));
+		WARN_ON(sock_owned_by_user(child));
 		sock_hold(child);
 
 		sk->sk_prot->disconnect(child, O_NONBLOCK);
@@ -647,7 +647,7 @@ void inet_csk_listen_stop(struct sock *sk)
 		sk_acceptq_removed(sk);
 		__reqsk_free(req);
 	}
-	BUG_TRAP(!sk->sk_ack_backlog);
+	WARN_ON(sk->sk_ack_backlog);
 }
 
 EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 0546a0bc97ea..6c52e08f786e 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -134,8 +134,8 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
 	struct sk_buff *fp;
 	struct netns_frags *nf;
 
-	BUG_TRAP(q->last_in & INET_FRAG_COMPLETE);
-	BUG_TRAP(del_timer(&q->timer) == 0);
+	WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
+	WARN_ON(del_timer(&q->timer) != 0);
 
 	/* Release all fragment data. */
 	fp = q->fragments;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 115f53722d20..44981906fb91 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -305,7 +305,7 @@ unique:
 	inet->num = lport;
 	inet->sport = htons(lport);
 	sk->sk_hash = hash;
-	BUG_TRAP(sk_unhashed(sk));
+	WARN_ON(!sk_unhashed(sk));
 	__sk_add_node(sk, &head->chain);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 	write_unlock(lock);
@@ -342,7 +342,7 @@ void __inet_hash_nolisten(struct sock *sk)
 	rwlock_t *lock;
 	struct inet_ehash_bucket *head;
 
-	BUG_TRAP(sk_unhashed(sk));
+	WARN_ON(!sk_unhashed(sk));
 
 	sk->sk_hash = inet_sk_ehashfn(sk);
 	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
@@ -367,7 +367,7 @@ static void __inet_hash(struct sock *sk)
 		return;
 	}
 
-	BUG_TRAP(sk_unhashed(sk));
+	WARN_ON(!sk_unhashed(sk));
 	list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
 	lock = &hashinfo->lhash_lock;
 
@@ -450,7 +450,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 		 */
 		inet_bind_bucket_for_each(tb, node, &head->chain) {
 			if (tb->ib_net == net && tb->port == port) {
-				BUG_TRAP(!hlist_empty(&tb->owners));
+				WARN_ON(hlist_empty(&tb->owners));
 				if (tb->fastreuse >= 0)
 					goto next_port;
 				if (!check_established(death_row, sk,
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 75c2def8f9a0..d985bd613d25 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -86,7 +86,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 			hashinfo->bhash_size)];
 	spin_lock(&bhead->lock);
 	tw->tw_tb = icsk->icsk_bind_hash;
-	BUG_TRAP(icsk->icsk_bind_hash);
+	WARN_ON(!icsk->icsk_bind_hash);
 	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
 	spin_unlock(&bhead->lock);
 
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 38d38f058018..2152d222b954 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -488,8 +488,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 		qp->q.fragments = head;
 	}
 
-	BUG_TRAP(head != NULL);
-	BUG_TRAP(FRAG_CB(head)->offset == 0);
+	WARN_ON(head == NULL);
+	WARN_ON(FRAG_CB(head)->offset != 0);
 
 	/* Allocate a new buffer for the datagram. */
 	ihlen = ip_hdrlen(head);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 465544f6281a..d533a89e08de 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -118,7 +118,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb)
 	__skb_pull(newskb, skb_network_offset(newskb));
 	newskb->pkt_type = PACKET_LOOPBACK;
 	newskb->ip_summed = CHECKSUM_UNNECESSARY;
-	BUG_TRAP(newskb->dst);
+	WARN_ON(!newskb->dst);
 	netif_rx(newskb);
 	return 0;
 }
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0b491bf03db4..1ab341e5d3e0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1096,7 +1096,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 #if TCP_DEBUG
 	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
 
-	BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
+	WARN_ON(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
 #endif
 
 	if (inet_csk_ack_scheduled(sk)) {
@@ -1358,7 +1358,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 				goto found_ok_skb;
 			if (tcp_hdr(skb)->fin)
 				goto found_fin_ok;
-			BUG_TRAP(flags & MSG_PEEK);
+			WARN_ON(!(flags & MSG_PEEK));
 			skb = skb->next;
 		} while (skb != (struct sk_buff *)&sk->sk_receive_queue);
 
@@ -1421,8 +1421,8 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
 		tp->ucopy.len = len;
 
-		BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
-			 (flags & (MSG_PEEK | MSG_TRUNC)));
+		WARN_ON(tp->copied_seq != tp->rcv_nxt &&
+			!(flags & (MSG_PEEK | MSG_TRUNC)));
 
 		/* Ugly... If prequeue is not empty, we have to
 		 * process it before releasing socket, otherwise
@@ -1844,7 +1844,7 @@ adjudge_to_death:
 	 */
 	local_bh_disable();
 	bh_lock_sock(sk);
-	BUG_TRAP(!sock_owned_by_user(sk));
+	WARN_ON(sock_owned_by_user(sk));
 
 	/* Have we already been destroyed by a softirq or backlog? */
 	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
@@ -1973,7 +1973,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
 	__sk_dst_reset(sk);
 
-	BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
+	WARN_ON(inet->num && !icsk->icsk_bind_hash);
 
 	sk->sk_error_report(sk);
 	return err;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 75efd244f2af..67ccce2a96bd 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1629,10 +1629,10 @@ advance_sp:
 out:
 
 #if FASTRETRANS_DEBUG > 0
-	BUG_TRAP((int)tp->sacked_out >= 0);
-	BUG_TRAP((int)tp->lost_out >= 0);
-	BUG_TRAP((int)tp->retrans_out >= 0);
-	BUG_TRAP((int)tcp_packets_in_flight(tp) >= 0);
+	WARN_ON((int)tp->sacked_out < 0);
+	WARN_ON((int)tp->lost_out < 0);
+	WARN_ON((int)tp->retrans_out < 0);
+	WARN_ON((int)tcp_packets_in_flight(tp) < 0);
 #endif
 	return flag;
 }
@@ -2181,7 +2181,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
 	int err;
 	unsigned int mss;
 
-	BUG_TRAP(packets <= tp->packets_out);
+	WARN_ON(packets > tp->packets_out);
 	if (tp->lost_skb_hint) {
 		skb = tp->lost_skb_hint;
 		cnt = tp->lost_cnt_hint;
@@ -2610,7 +2610,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 	/* E. Check state exit conditions. State can be terminated
 	 * when high_seq is ACKed. */
 	if (icsk->icsk_ca_state == TCP_CA_Open) {
-		BUG_TRAP(tp->retrans_out == 0);
+		WARN_ON(tp->retrans_out != 0);
 		tp->retrans_stamp = 0;
 	} else if (!before(tp->snd_una, tp->high_seq)) {
 		switch (icsk->icsk_ca_state) {
@@ -2972,9 +2972,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets)
 	}
 
 #if FASTRETRANS_DEBUG > 0
-	BUG_TRAP((int)tp->sacked_out >= 0);
-	BUG_TRAP((int)tp->lost_out >= 0);
-	BUG_TRAP((int)tp->retrans_out >= 0);
+	WARN_ON((int)tp->sacked_out < 0);
+	WARN_ON((int)tp->lost_out < 0);
+	WARN_ON((int)tp->retrans_out < 0);
 	if (!tp->packets_out && tcp_is_sack(tp)) {
 		icsk = inet_csk(sk);
 		if (tp->lost_out) {
@@ -3877,7 +3877,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
 			int i;
 
 			/* RCV.NXT must cover all the block! */
-			BUG_TRAP(!before(tp->rcv_nxt, sp->end_seq));
+			WARN_ON(before(tp->rcv_nxt, sp->end_seq));
 
 			/* Zap this SACK, by moving forward any other SACKS. */
 			for (i=this_sack+1; i < num_sacks; i++)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a82df6307567..a2b06d0cc26b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -418,7 +418,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 		/* ICMPs are not backlogged, hence we cannot get
 		   an established socket here.
 		 */
-		BUG_TRAP(!req->sk);
+		WARN_ON(req->sk);
 
 		if (seq != tcp_rsk(req)->snt_isn) {
 			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 328e0cf42b3c..5ab6ba19c3ce 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -287,7 +287,7 @@ static void tcp_retransmit_timer(struct sock *sk)
 	if (!tp->packets_out)
 		goto out;
 
-	BUG_TRAP(!tcp_write_queue_empty(sk));
+	WARN_ON(tcp_write_queue_empty(sk));
 
 	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
 	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {