author     Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>    2008-07-26 00:43:18 -0400
committer  David S. Miller <davem@davemloft.net>        2008-07-26 00:43:18 -0400
commit     547b792cac0a038b9dbf958d3c120df3740b5572 (patch)
tree       08554d083b0ca7d65739dc1ce12f9b12a9b8e1f8
parent     53e5e96ec18da6f65e89f05674711e1c93d8df67 (diff)
net: convert BUG_TRAP to generic WARN_ON
Removes a legacy reinvent-the-wheel type thing. The generic machinery integrates much better with automated debugging aids such as kerneloops.org (and others), and is unambiguous due to better naming. Non-intuitively, BUG_TRAP() is actually equivalent to WARN_ON() rather than BUG_ON(), though some call sites might deserve promotion to BUG_ON(); I left that for the future.

I was also able to make at least one BUILD_BUG_ON conversion.

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
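For context, the mapping assumed throughout this patch is that BUG_TRAP(cond) only warned (it never panicked) when cond was false, so it corresponds to WARN_ON(!cond) and every converted assertion has its condition logically inverted. Below is a minimal userspace sketch of that equivalence, not the kernel's actual macro definitions; the WARN_ON here merely prints to stderr, whereas the kernel version also dumps a stack trace and returns the truth value of the condition.

    /* Minimal userspace sketch of the BUG_TRAP -> WARN_ON mapping. */
    #include <stdio.h>

    /* Warns when cond is true; evaluates to 1 (warned) or 0 (quiet). */
    #define WARN_ON(cond) \
        ((cond) ? (fprintf(stderr, "WARNING: %s:%d: %s\n", \
                           __FILE__, __LINE__, #cond), 1) : 0)

    /* Legacy spelling, expressed in terms of the generic macro. */
    #define BUG_TRAP(x)    WARN_ON(!(x))

    int main(void)
    {
        void *req = NULL;

        BUG_TRAP(req != NULL);     /* old style: warns, because req is NULL */
        WARN_ON(req == NULL);      /* converted style: the same warning */
        return 0;
    }

The single compile-time conversion mentioned above shows up in the net/ipv6/mip6.c hunk further down, where a runtime check on the home address option length is replaced by BUILD_BUG_ON(sizeof(*hao) != 18), which breaks the build instead of warning at runtime.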
-rw-r--r--include/net/request_sock.h5
-rw-r--r--net/appletalk/ddp.c4
-rw-r--r--net/core/datagram.c8
-rw-r--r--net/core/dev.c10
-rw-r--r--net/core/request_sock.c2
-rw-r--r--net/core/skbuff.c20
-rw-r--r--net/core/stream.c6
-rw-r--r--net/core/user_dma.c5
-rw-r--r--net/dccp/dccp.h2
-rw-r--r--net/dccp/input.c2
-rw-r--r--net/dccp/ipv4.c2
-rw-r--r--net/dccp/ipv6.c2
-rw-r--r--net/dccp/proto.c4
-rw-r--r--net/dccp/timer.c2
-rw-r--r--net/ipv4/af_inet.c14
-rw-r--r--net/ipv4/devinet.c6
-rw-r--r--net/ipv4/inet_connection_sock.c18
-rw-r--r--net/ipv4/inet_fragment.c4
-rw-r--r--net/ipv4/inet_hashtables.c8
-rw-r--r--net/ipv4/inet_timewait_sock.c2
-rw-r--r--net/ipv4/ip_fragment.c4
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/tcp.c12
-rw-r--r--net/ipv4/tcp_input.c20
-rw-r--r--net/ipv4/tcp_ipv4.c2
-rw-r--r--net/ipv4/tcp_timer.c2
-rw-r--r--net/ipv6/addrconf.c11
-rw-r--r--net/ipv6/af_inet6.c2
-rw-r--r--net/ipv6/inet6_connection_sock.c2
-rw-r--r--net/ipv6/inet6_hashtables.c4
-rw-r--r--net/ipv6/ip6_fib.c31
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/mip6.c8
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c4
-rw-r--r--net/ipv6/reassembly.c4
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/key/af_key.c4
-rw-r--r--net/netlink/af_netlink.c7
-rw-r--r--net/packet/af_packet.c4
-rw-r--r--net/rxrpc/af_rxrpc.c6
-rw-r--r--net/sched/act_api.c2
-rw-r--r--net/sched/act_police.c2
-rw-r--r--net/sched/cls_u32.c10
-rw-r--r--net/sched/sch_cbq.c4
-rw-r--r--net/sched/sch_generic.c2
-rw-r--r--net/sched/sch_htb.c16
-rw-r--r--net/sctp/associola.c2
-rw-r--r--net/unix/af_unix.c8
-rw-r--r--net/xfrm/xfrm_algo.c4
-rw-r--r--net/xfrm/xfrm_ipcomp.c3
-rw-r--r--net/xfrm/xfrm_state.c2
51 files changed, 159 insertions, 155 deletions
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 0c96e7bed5db..8d6e991ef4df 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -18,6 +18,7 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/spinlock.h> 19#include <linux/spinlock.h>
20#include <linux/types.h> 20#include <linux/types.h>
21#include <linux/bug.h>
21 22
22#include <net/sock.h> 23#include <net/sock.h>
23 24
@@ -170,7 +171,7 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
170{ 171{
171 struct request_sock *req = queue->rskq_accept_head; 172 struct request_sock *req = queue->rskq_accept_head;
172 173
173 BUG_TRAP(req != NULL); 174 WARN_ON(req == NULL);
174 175
175 queue->rskq_accept_head = req->dl_next; 176 queue->rskq_accept_head = req->dl_next;
176 if (queue->rskq_accept_head == NULL) 177 if (queue->rskq_accept_head == NULL)
@@ -185,7 +186,7 @@ static inline struct sock *reqsk_queue_get_child(struct request_sock_queue *queu
185 struct request_sock *req = reqsk_queue_remove(queue); 186 struct request_sock *req = reqsk_queue_remove(queue);
186 struct sock *child = req->sk; 187 struct sock *child = req->sk;
187 188
188 BUG_TRAP(child != NULL); 189 WARN_ON(child == NULL);
189 190
190 sk_acceptq_removed(parent); 191 sk_acceptq_removed(parent);
191 __reqsk_free(req); 192 __reqsk_free(req);
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 07b5b82c5eab..0c850427a85b 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -959,7 +959,7 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
959 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 959 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
960 int end; 960 int end;
961 961
962 BUG_TRAP(start <= offset + len); 962 WARN_ON(start > offset + len);
963 963
964 end = start + skb_shinfo(skb)->frags[i].size; 964 end = start + skb_shinfo(skb)->frags[i].size;
965 if ((copy = end - offset) > 0) { 965 if ((copy = end - offset) > 0) {
@@ -986,7 +986,7 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
986 for (; list; list = list->next) { 986 for (; list; list = list->next) {
987 int end; 987 int end;
988 988
989 BUG_TRAP(start <= offset + len); 989 WARN_ON(start > offset + len);
990 990
991 end = start + list->len; 991 end = start + list->len;
992 if ((copy = end - offset) > 0) { 992 if ((copy = end - offset) > 0) {
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 8a28fc93b724..dd61dcad6019 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -285,7 +285,7 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
285 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 285 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
286 int end; 286 int end;
287 287
288 BUG_TRAP(start <= offset + len); 288 WARN_ON(start > offset + len);
289 289
290 end = start + skb_shinfo(skb)->frags[i].size; 290 end = start + skb_shinfo(skb)->frags[i].size;
291 if ((copy = end - offset) > 0) { 291 if ((copy = end - offset) > 0) {
@@ -315,7 +315,7 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
315 for (; list; list = list->next) { 315 for (; list; list = list->next) {
316 int end; 316 int end;
317 317
318 BUG_TRAP(start <= offset + len); 318 WARN_ON(start > offset + len);
319 319
320 end = start + list->len; 320 end = start + list->len;
321 if ((copy = end - offset) > 0) { 321 if ((copy = end - offset) > 0) {
@@ -366,7 +366,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
366 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 366 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
367 int end; 367 int end;
368 368
369 BUG_TRAP(start <= offset + len); 369 WARN_ON(start > offset + len);
370 370
371 end = start + skb_shinfo(skb)->frags[i].size; 371 end = start + skb_shinfo(skb)->frags[i].size;
372 if ((copy = end - offset) > 0) { 372 if ((copy = end - offset) > 0) {
@@ -402,7 +402,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
402 for (; list; list=list->next) { 402 for (; list; list=list->next) {
403 int end; 403 int end;
404 404
405 BUG_TRAP(start <= offset + len); 405 WARN_ON(start > offset + len);
406 406
407 end = start + list->len; 407 end = start + list->len;
408 if ((copy = end - offset) > 0) { 408 if ((copy = end - offset) > 0) {
diff --git a/net/core/dev.c b/net/core/dev.c
index ccf97f9f37eb..c6f9c83745e6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1973,7 +1973,7 @@ static void net_tx_action(struct softirq_action *h)
1973 struct sk_buff *skb = clist; 1973 struct sk_buff *skb = clist;
1974 clist = clist->next; 1974 clist = clist->next;
1975 1975
1976 BUG_TRAP(!atomic_read(&skb->users)); 1976 WARN_ON(atomic_read(&skb->users));
1977 __kfree_skb(skb); 1977 __kfree_skb(skb);
1978 } 1978 }
1979 } 1979 }
@@ -3847,7 +3847,7 @@ static void rollback_registered(struct net_device *dev)
3847 dev->uninit(dev); 3847 dev->uninit(dev);
3848 3848
3849 /* Notifier chain MUST detach us from master device. */ 3849 /* Notifier chain MUST detach us from master device. */
3850 BUG_TRAP(!dev->master); 3850 WARN_ON(dev->master);
3851 3851
3852 /* Remove entries from kobject tree */ 3852 /* Remove entries from kobject tree */
3853 netdev_unregister_kobject(dev); 3853 netdev_unregister_kobject(dev);
@@ -4169,9 +4169,9 @@ void netdev_run_todo(void)
4169 4169
4170 /* paranoia */ 4170 /* paranoia */
4171 BUG_ON(atomic_read(&dev->refcnt)); 4171 BUG_ON(atomic_read(&dev->refcnt));
4172 BUG_TRAP(!dev->ip_ptr); 4172 WARN_ON(dev->ip_ptr);
4173 BUG_TRAP(!dev->ip6_ptr); 4173 WARN_ON(dev->ip6_ptr);
4174 BUG_TRAP(!dev->dn_ptr); 4174 WARN_ON(dev->dn_ptr);
4175 4175
4176 if (dev->destructor) 4176 if (dev->destructor)
4177 dev->destructor(dev); 4177 dev->destructor(dev);
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 2d3035d3abd7..7552495aff7a 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -123,7 +123,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
123 } 123 }
124 } 124 }
125 125
126 BUG_TRAP(lopt->qlen == 0); 126 WARN_ON(lopt->qlen != 0);
127 if (lopt_size > PAGE_SIZE) 127 if (lopt_size > PAGE_SIZE)
128 vfree(lopt); 128 vfree(lopt);
129 else 129 else
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e4115672b6cf..4e0c92274189 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1200,7 +1200,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1200 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1200 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1201 int end; 1201 int end;
1202 1202
1203 BUG_TRAP(start <= offset + len); 1203 WARN_ON(start > offset + len);
1204 1204
1205 end = start + skb_shinfo(skb)->frags[i].size; 1205 end = start + skb_shinfo(skb)->frags[i].size;
1206 if ((copy = end - offset) > 0) { 1206 if ((copy = end - offset) > 0) {
@@ -1229,7 +1229,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1229 for (; list; list = list->next) { 1229 for (; list; list = list->next) {
1230 int end; 1230 int end;
1231 1231
1232 BUG_TRAP(start <= offset + len); 1232 WARN_ON(start > offset + len);
1233 1233
1234 end = start + list->len; 1234 end = start + list->len;
1235 if ((copy = end - offset) > 0) { 1235 if ((copy = end - offset) > 0) {
@@ -1475,7 +1475,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1475 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1475 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1476 int end; 1476 int end;
1477 1477
1478 BUG_TRAP(start <= offset + len); 1478 WARN_ON(start > offset + len);
1479 1479
1480 end = start + frag->size; 1480 end = start + frag->size;
1481 if ((copy = end - offset) > 0) { 1481 if ((copy = end - offset) > 0) {
@@ -1503,7 +1503,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1503 for (; list; list = list->next) { 1503 for (; list; list = list->next) {
1504 int end; 1504 int end;
1505 1505
1506 BUG_TRAP(start <= offset + len); 1506 WARN_ON(start > offset + len);
1507 1507
1508 end = start + list->len; 1508 end = start + list->len;
1509 if ((copy = end - offset) > 0) { 1509 if ((copy = end - offset) > 0) {
@@ -1552,7 +1552,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
1552 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1552 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1553 int end; 1553 int end;
1554 1554
1555 BUG_TRAP(start <= offset + len); 1555 WARN_ON(start > offset + len);
1556 1556
1557 end = start + skb_shinfo(skb)->frags[i].size; 1557 end = start + skb_shinfo(skb)->frags[i].size;
1558 if ((copy = end - offset) > 0) { 1558 if ((copy = end - offset) > 0) {
@@ -1581,7 +1581,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
1581 for (; list; list = list->next) { 1581 for (; list; list = list->next) {
1582 int end; 1582 int end;
1583 1583
1584 BUG_TRAP(start <= offset + len); 1584 WARN_ON(start > offset + len);
1585 1585
1586 end = start + list->len; 1586 end = start + list->len;
1587 if ((copy = end - offset) > 0) { 1587 if ((copy = end - offset) > 0) {
@@ -1629,7 +1629,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1629 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1629 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1630 int end; 1630 int end;
1631 1631
1632 BUG_TRAP(start <= offset + len); 1632 WARN_ON(start > offset + len);
1633 1633
1634 end = start + skb_shinfo(skb)->frags[i].size; 1634 end = start + skb_shinfo(skb)->frags[i].size;
1635 if ((copy = end - offset) > 0) { 1635 if ((copy = end - offset) > 0) {
@@ -1662,7 +1662,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1662 __wsum csum2; 1662 __wsum csum2;
1663 int end; 1663 int end;
1664 1664
1665 BUG_TRAP(start <= offset + len); 1665 WARN_ON(start > offset + len);
1666 1666
1667 end = start + list->len; 1667 end = start + list->len;
1668 if ((copy = end - offset) > 0) { 1668 if ((copy = end - offset) > 0) {
@@ -2373,7 +2373,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2373 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2373 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2374 int end; 2374 int end;
2375 2375
2376 BUG_TRAP(start <= offset + len); 2376 WARN_ON(start > offset + len);
2377 2377
2378 end = start + skb_shinfo(skb)->frags[i].size; 2378 end = start + skb_shinfo(skb)->frags[i].size;
2379 if ((copy = end - offset) > 0) { 2379 if ((copy = end - offset) > 0) {
@@ -2397,7 +2397,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2397 for (; list; list = list->next) { 2397 for (; list; list = list->next) {
2398 int end; 2398 int end;
2399 2399
2400 BUG_TRAP(start <= offset + len); 2400 WARN_ON(start > offset + len);
2401 2401
2402 end = start + list->len; 2402 end = start + list->len;
2403 if ((copy = end - offset) > 0) { 2403 if ((copy = end - offset) > 0) {
diff --git a/net/core/stream.c b/net/core/stream.c
index 4a0ad152c9c4..a6b3437ff082 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -192,13 +192,13 @@ void sk_stream_kill_queues(struct sock *sk)
192 __skb_queue_purge(&sk->sk_error_queue); 192 __skb_queue_purge(&sk->sk_error_queue);
193 193
194 /* Next, the write queue. */ 194 /* Next, the write queue. */
195 BUG_TRAP(skb_queue_empty(&sk->sk_write_queue)); 195 WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
196 196
197 /* Account for returned memory. */ 197 /* Account for returned memory. */
198 sk_mem_reclaim(sk); 198 sk_mem_reclaim(sk);
199 199
200 BUG_TRAP(!sk->sk_wmem_queued); 200 WARN_ON(sk->sk_wmem_queued);
201 BUG_TRAP(!sk->sk_forward_alloc); 201 WARN_ON(sk->sk_forward_alloc);
202 202
203 /* It is _impossible_ for the backlog to contain anything 203 /* It is _impossible_ for the backlog to contain anything
204 * when we get here. All user references to this socket 204 * when we get here. All user references to this socket
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index c77aff9c6eb3..53c6b67b2877 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -27,7 +27,6 @@
27 27
28#include <linux/dmaengine.h> 28#include <linux/dmaengine.h>
29#include <linux/socket.h> 29#include <linux/socket.h>
30#include <linux/rtnetlink.h> /* for BUG_TRAP */
31#include <net/tcp.h> 30#include <net/tcp.h>
32#include <net/netdma.h> 31#include <net/netdma.h>
33 32
@@ -71,7 +70,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
71 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 70 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
72 int end; 71 int end;
73 72
74 BUG_TRAP(start <= offset + len); 73 WARN_ON(start > offset + len);
75 74
76 end = start + skb_shinfo(skb)->frags[i].size; 75 end = start + skb_shinfo(skb)->frags[i].size;
77 copy = end - offset; 76 copy = end - offset;
@@ -100,7 +99,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
100 for (; list; list = list->next) { 99 for (; list; list = list->next) {
101 int end; 100 int end;
102 101
103 BUG_TRAP(start <= offset + len); 102 WARN_ON(start > offset + len);
104 103
105 end = start + list->len; 104 end = start + list->len;
106 copy = end - offset; 105 copy = end - offset;
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 32617e0576cb..743d85fcd651 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -164,7 +164,7 @@ static inline bool dccp_loss_free(const u64 s1, const u64 s2, const u64 ndp)
164{ 164{
165 s64 delta = dccp_delta_seqno(s1, s2); 165 s64 delta = dccp_delta_seqno(s1, s2);
166 166
167 BUG_TRAP(delta >= 0); 167 WARN_ON(delta < 0);
168 return (u64)delta <= ndp + 1; 168 return (u64)delta <= ndp + 1;
169} 169}
170 170
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 08392ed86c25..df2f110df94a 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -413,7 +413,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
413 413
414 /* Stop the REQUEST timer */ 414 /* Stop the REQUEST timer */
415 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); 415 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
416 BUG_TRAP(sk->sk_send_head != NULL); 416 WARN_ON(sk->sk_send_head == NULL);
417 __kfree_skb(sk->sk_send_head); 417 __kfree_skb(sk->sk_send_head);
418 sk->sk_send_head = NULL; 418 sk->sk_send_head = NULL;
419 419
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 2622ace17c46..a835b88237cb 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -283,7 +283,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
283 * ICMPs are not backlogged, hence we cannot get an established 283 * ICMPs are not backlogged, hence we cannot get an established
284 * socket here. 284 * socket here.
285 */ 285 */
286 BUG_TRAP(!req->sk); 286 WARN_ON(req->sk);
287 287
288 if (seq != dccp_rsk(req)->dreq_iss) { 288 if (seq != dccp_rsk(req)->dreq_iss) {
289 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); 289 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index b74e8b2cbe55..da509127e00c 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -186,7 +186,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
186 * ICMPs are not backlogged, hence we cannot get an established 186 * ICMPs are not backlogged, hence we cannot get an established
187 * socket here. 187 * socket here.
188 */ 188 */
189 BUG_TRAP(req->sk == NULL); 189 WARN_ON(req->sk != NULL);
190 190
191 if (seq != dccp_rsk(req)->dreq_iss) { 191 if (seq != dccp_rsk(req)->dreq_iss) {
192 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); 192 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index a0b56009611f..b622d9744856 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -327,7 +327,7 @@ int dccp_disconnect(struct sock *sk, int flags)
327 inet_csk_delack_init(sk); 327 inet_csk_delack_init(sk);
328 __sk_dst_reset(sk); 328 __sk_dst_reset(sk);
329 329
330 BUG_TRAP(!inet->num || icsk->icsk_bind_hash); 330 WARN_ON(inet->num && !icsk->icsk_bind_hash);
331 331
332 sk->sk_error_report(sk); 332 sk->sk_error_report(sk);
333 return err; 333 return err;
@@ -981,7 +981,7 @@ adjudge_to_death:
981 */ 981 */
982 local_bh_disable(); 982 local_bh_disable();
983 bh_lock_sock(sk); 983 bh_lock_sock(sk);
984 BUG_TRAP(!sock_owned_by_user(sk)); 984 WARN_ON(sock_owned_by_user(sk));
985 985
986 /* Have we already been destroyed by a softirq or backlog? */ 986 /* Have we already been destroyed by a softirq or backlog? */
987 if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED) 987 if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 3608d5342ca2..6a5b961b6f5c 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -106,7 +106,7 @@ static void dccp_retransmit_timer(struct sock *sk)
106 * -- Acks in client-PARTOPEN state (sec. 8.1.5) 106 * -- Acks in client-PARTOPEN state (sec. 8.1.5)
107 * -- CloseReq in server-CLOSEREQ state (sec. 8.3) 107 * -- CloseReq in server-CLOSEREQ state (sec. 8.3)
108 * -- Close in node-CLOSING state (sec. 8.3) */ 108 * -- Close in node-CLOSING state (sec. 8.3) */
109 BUG_TRAP(sk->sk_send_head != NULL); 109 WARN_ON(sk->sk_send_head == NULL);
110 110
111 /* 111 /*
112 * More than than 4MSL (8 minutes) has passed, a RESET(aborted) was 112 * More than than 4MSL (8 minutes) has passed, a RESET(aborted) was
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index dd919d84285f..a107f49eea41 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -148,10 +148,10 @@ void inet_sock_destruct(struct sock *sk)
148 return; 148 return;
149 } 149 }
150 150
151 BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc)); 151 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
152 BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc)); 152 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
153 BUG_TRAP(!sk->sk_wmem_queued); 153 WARN_ON(sk->sk_wmem_queued);
154 BUG_TRAP(!sk->sk_forward_alloc); 154 WARN_ON(sk->sk_forward_alloc);
155 155
156 kfree(inet->opt); 156 kfree(inet->opt);
157 dst_release(sk->sk_dst_cache); 157 dst_release(sk->sk_dst_cache);
@@ -341,7 +341,7 @@ lookup_protocol:
341 answer_flags = answer->flags; 341 answer_flags = answer->flags;
342 rcu_read_unlock(); 342 rcu_read_unlock();
343 343
344 BUG_TRAP(answer_prot->slab != NULL); 344 WARN_ON(answer_prot->slab == NULL);
345 345
346 err = -ENOBUFS; 346 err = -ENOBUFS;
347 sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot); 347 sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot);
@@ -661,8 +661,8 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
661 661
662 lock_sock(sk2); 662 lock_sock(sk2);
663 663
664 BUG_TRAP((1 << sk2->sk_state) & 664 WARN_ON(!((1 << sk2->sk_state) &
665 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)); 665 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
666 666
667 sock_graft(sk2, newsock); 667 sock_graft(sk2, newsock);
668 668
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 2e667e2f90df..91d3d96805d0 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -138,8 +138,8 @@ void in_dev_finish_destroy(struct in_device *idev)
138{ 138{
139 struct net_device *dev = idev->dev; 139 struct net_device *dev = idev->dev;
140 140
141 BUG_TRAP(!idev->ifa_list); 141 WARN_ON(idev->ifa_list);
142 BUG_TRAP(!idev->mc_list); 142 WARN_ON(idev->mc_list);
143#ifdef NET_REFCNT_DEBUG 143#ifdef NET_REFCNT_DEBUG
144 printk(KERN_DEBUG "in_dev_finish_destroy: %p=%s\n", 144 printk(KERN_DEBUG "in_dev_finish_destroy: %p=%s\n",
145 idev, dev ? dev->name : "NIL"); 145 idev, dev ? dev->name : "NIL");
@@ -399,7 +399,7 @@ static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
399 } 399 }
400 ipv4_devconf_setall(in_dev); 400 ipv4_devconf_setall(in_dev);
401 if (ifa->ifa_dev != in_dev) { 401 if (ifa->ifa_dev != in_dev) {
402 BUG_TRAP(!ifa->ifa_dev); 402 WARN_ON(ifa->ifa_dev);
403 in_dev_hold(in_dev); 403 in_dev_hold(in_dev);
404 ifa->ifa_dev = in_dev; 404 ifa->ifa_dev = in_dev;
405 } 405 }
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index bb81c958b744..0c1ae68ee84b 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -167,7 +167,7 @@ tb_not_found:
167success: 167success:
168 if (!inet_csk(sk)->icsk_bind_hash) 168 if (!inet_csk(sk)->icsk_bind_hash)
169 inet_bind_hash(sk, tb, snum); 169 inet_bind_hash(sk, tb, snum);
170 BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb); 170 WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
171 ret = 0; 171 ret = 0;
172 172
173fail_unlock: 173fail_unlock:
@@ -260,7 +260,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
260 } 260 }
261 261
262 newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk); 262 newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
263 BUG_TRAP(newsk->sk_state != TCP_SYN_RECV); 263 WARN_ON(newsk->sk_state == TCP_SYN_RECV);
264out: 264out:
265 release_sock(sk); 265 release_sock(sk);
266 return newsk; 266 return newsk;
@@ -386,7 +386,7 @@ struct request_sock *inet_csk_search_req(const struct sock *sk,
386 ireq->rmt_addr == raddr && 386 ireq->rmt_addr == raddr &&
387 ireq->loc_addr == laddr && 387 ireq->loc_addr == laddr &&
388 AF_INET_FAMILY(req->rsk_ops->family)) { 388 AF_INET_FAMILY(req->rsk_ops->family)) {
389 BUG_TRAP(!req->sk); 389 WARN_ON(req->sk);
390 *prevp = prev; 390 *prevp = prev;
391 break; 391 break;
392 } 392 }
@@ -539,14 +539,14 @@ EXPORT_SYMBOL_GPL(inet_csk_clone);
539 */ 539 */
540void inet_csk_destroy_sock(struct sock *sk) 540void inet_csk_destroy_sock(struct sock *sk)
541{ 541{
542 BUG_TRAP(sk->sk_state == TCP_CLOSE); 542 WARN_ON(sk->sk_state != TCP_CLOSE);
543 BUG_TRAP(sock_flag(sk, SOCK_DEAD)); 543 WARN_ON(!sock_flag(sk, SOCK_DEAD));
544 544
545 /* It cannot be in hash table! */ 545 /* It cannot be in hash table! */
546 BUG_TRAP(sk_unhashed(sk)); 546 WARN_ON(!sk_unhashed(sk));
547 547
548 /* If it has not 0 inet_sk(sk)->num, it must be bound */ 548 /* If it has not 0 inet_sk(sk)->num, it must be bound */
549 BUG_TRAP(!inet_sk(sk)->num || inet_csk(sk)->icsk_bind_hash); 549 WARN_ON(inet_sk(sk)->num && !inet_csk(sk)->icsk_bind_hash);
550 550
551 sk->sk_prot->destroy(sk); 551 sk->sk_prot->destroy(sk);
552 552
@@ -629,7 +629,7 @@ void inet_csk_listen_stop(struct sock *sk)
629 629
630 local_bh_disable(); 630 local_bh_disable();
631 bh_lock_sock(child); 631 bh_lock_sock(child);
632 BUG_TRAP(!sock_owned_by_user(child)); 632 WARN_ON(sock_owned_by_user(child));
633 sock_hold(child); 633 sock_hold(child);
634 634
635 sk->sk_prot->disconnect(child, O_NONBLOCK); 635 sk->sk_prot->disconnect(child, O_NONBLOCK);
@@ -647,7 +647,7 @@ void inet_csk_listen_stop(struct sock *sk)
647 sk_acceptq_removed(sk); 647 sk_acceptq_removed(sk);
648 __reqsk_free(req); 648 __reqsk_free(req);
649 } 649 }
650 BUG_TRAP(!sk->sk_ack_backlog); 650 WARN_ON(sk->sk_ack_backlog);
651} 651}
652 652
653EXPORT_SYMBOL_GPL(inet_csk_listen_stop); 653EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 0546a0bc97ea..6c52e08f786e 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -134,8 +134,8 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
134 struct sk_buff *fp; 134 struct sk_buff *fp;
135 struct netns_frags *nf; 135 struct netns_frags *nf;
136 136
137 BUG_TRAP(q->last_in & INET_FRAG_COMPLETE); 137 WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
138 BUG_TRAP(del_timer(&q->timer) == 0); 138 WARN_ON(del_timer(&q->timer) != 0);
139 139
140 /* Release all fragment data. */ 140 /* Release all fragment data. */
141 fp = q->fragments; 141 fp = q->fragments;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 115f53722d20..44981906fb91 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -305,7 +305,7 @@ unique:
305 inet->num = lport; 305 inet->num = lport;
306 inet->sport = htons(lport); 306 inet->sport = htons(lport);
307 sk->sk_hash = hash; 307 sk->sk_hash = hash;
308 BUG_TRAP(sk_unhashed(sk)); 308 WARN_ON(!sk_unhashed(sk));
309 __sk_add_node(sk, &head->chain); 309 __sk_add_node(sk, &head->chain);
310 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 310 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
311 write_unlock(lock); 311 write_unlock(lock);
@@ -342,7 +342,7 @@ void __inet_hash_nolisten(struct sock *sk)
342 rwlock_t *lock; 342 rwlock_t *lock;
343 struct inet_ehash_bucket *head; 343 struct inet_ehash_bucket *head;
344 344
345 BUG_TRAP(sk_unhashed(sk)); 345 WARN_ON(!sk_unhashed(sk));
346 346
347 sk->sk_hash = inet_sk_ehashfn(sk); 347 sk->sk_hash = inet_sk_ehashfn(sk);
348 head = inet_ehash_bucket(hashinfo, sk->sk_hash); 348 head = inet_ehash_bucket(hashinfo, sk->sk_hash);
@@ -367,7 +367,7 @@ static void __inet_hash(struct sock *sk)
367 return; 367 return;
368 } 368 }
369 369
370 BUG_TRAP(sk_unhashed(sk)); 370 WARN_ON(!sk_unhashed(sk));
371 list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)]; 371 list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
372 lock = &hashinfo->lhash_lock; 372 lock = &hashinfo->lhash_lock;
373 373
@@ -450,7 +450,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
450 */ 450 */
451 inet_bind_bucket_for_each(tb, node, &head->chain) { 451 inet_bind_bucket_for_each(tb, node, &head->chain) {
452 if (tb->ib_net == net && tb->port == port) { 452 if (tb->ib_net == net && tb->port == port) {
453 BUG_TRAP(!hlist_empty(&tb->owners)); 453 WARN_ON(hlist_empty(&tb->owners));
454 if (tb->fastreuse >= 0) 454 if (tb->fastreuse >= 0)
455 goto next_port; 455 goto next_port;
456 if (!check_established(death_row, sk, 456 if (!check_established(death_row, sk,
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 75c2def8f9a0..d985bd613d25 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -86,7 +86,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
86 hashinfo->bhash_size)]; 86 hashinfo->bhash_size)];
87 spin_lock(&bhead->lock); 87 spin_lock(&bhead->lock);
88 tw->tw_tb = icsk->icsk_bind_hash; 88 tw->tw_tb = icsk->icsk_bind_hash;
89 BUG_TRAP(icsk->icsk_bind_hash); 89 WARN_ON(!icsk->icsk_bind_hash);
90 inet_twsk_add_bind_node(tw, &tw->tw_tb->owners); 90 inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
91 spin_unlock(&bhead->lock); 91 spin_unlock(&bhead->lock);
92 92
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 38d38f058018..2152d222b954 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -488,8 +488,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
488 qp->q.fragments = head; 488 qp->q.fragments = head;
489 } 489 }
490 490
491 BUG_TRAP(head != NULL); 491 WARN_ON(head == NULL);
492 BUG_TRAP(FRAG_CB(head)->offset == 0); 492 WARN_ON(FRAG_CB(head)->offset != 0);
493 493
494 /* Allocate a new buffer for the datagram. */ 494 /* Allocate a new buffer for the datagram. */
495 ihlen = ip_hdrlen(head); 495 ihlen = ip_hdrlen(head);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 465544f6281a..d533a89e08de 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -118,7 +118,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb)
118 __skb_pull(newskb, skb_network_offset(newskb)); 118 __skb_pull(newskb, skb_network_offset(newskb));
119 newskb->pkt_type = PACKET_LOOPBACK; 119 newskb->pkt_type = PACKET_LOOPBACK;
120 newskb->ip_summed = CHECKSUM_UNNECESSARY; 120 newskb->ip_summed = CHECKSUM_UNNECESSARY;
121 BUG_TRAP(newskb->dst); 121 WARN_ON(!newskb->dst);
122 netif_rx(newskb); 122 netif_rx(newskb);
123 return 0; 123 return 0;
124} 124}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0b491bf03db4..1ab341e5d3e0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1096,7 +1096,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
1096#if TCP_DEBUG 1096#if TCP_DEBUG
1097 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 1097 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1098 1098
1099 BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)); 1099 WARN_ON(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
1100#endif 1100#endif
1101 1101
1102 if (inet_csk_ack_scheduled(sk)) { 1102 if (inet_csk_ack_scheduled(sk)) {
@@ -1358,7 +1358,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1358 goto found_ok_skb; 1358 goto found_ok_skb;
1359 if (tcp_hdr(skb)->fin) 1359 if (tcp_hdr(skb)->fin)
1360 goto found_fin_ok; 1360 goto found_fin_ok;
1361 BUG_TRAP(flags & MSG_PEEK); 1361 WARN_ON(!(flags & MSG_PEEK));
1362 skb = skb->next; 1362 skb = skb->next;
1363 } while (skb != (struct sk_buff *)&sk->sk_receive_queue); 1363 } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
1364 1364
@@ -1421,8 +1421,8 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1421 1421
1422 tp->ucopy.len = len; 1422 tp->ucopy.len = len;
1423 1423
1424 BUG_TRAP(tp->copied_seq == tp->rcv_nxt || 1424 WARN_ON(tp->copied_seq != tp->rcv_nxt &&
1425 (flags & (MSG_PEEK | MSG_TRUNC))); 1425 !(flags & (MSG_PEEK | MSG_TRUNC)));
1426 1426
1427 /* Ugly... If prequeue is not empty, we have to 1427 /* Ugly... If prequeue is not empty, we have to
1428 * process it before releasing socket, otherwise 1428 * process it before releasing socket, otherwise
@@ -1844,7 +1844,7 @@ adjudge_to_death:
1844 */ 1844 */
1845 local_bh_disable(); 1845 local_bh_disable();
1846 bh_lock_sock(sk); 1846 bh_lock_sock(sk);
1847 BUG_TRAP(!sock_owned_by_user(sk)); 1847 WARN_ON(sock_owned_by_user(sk));
1848 1848
1849 /* Have we already been destroyed by a softirq or backlog? */ 1849 /* Have we already been destroyed by a softirq or backlog? */
1850 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) 1850 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
@@ -1973,7 +1973,7 @@ int tcp_disconnect(struct sock *sk, int flags)
1973 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 1973 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
1974 __sk_dst_reset(sk); 1974 __sk_dst_reset(sk);
1975 1975
1976 BUG_TRAP(!inet->num || icsk->icsk_bind_hash); 1976 WARN_ON(inet->num && !icsk->icsk_bind_hash);
1977 1977
1978 sk->sk_error_report(sk); 1978 sk->sk_error_report(sk);
1979 return err; 1979 return err;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 75efd244f2af..67ccce2a96bd 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1629,10 +1629,10 @@ advance_sp:
1629out: 1629out:
1630 1630
1631#if FASTRETRANS_DEBUG > 0 1631#if FASTRETRANS_DEBUG > 0
1632 BUG_TRAP((int)tp->sacked_out >= 0); 1632 WARN_ON((int)tp->sacked_out < 0);
1633 BUG_TRAP((int)tp->lost_out >= 0); 1633 WARN_ON((int)tp->lost_out < 0);
1634 BUG_TRAP((int)tp->retrans_out >= 0); 1634 WARN_ON((int)tp->retrans_out < 0);
1635 BUG_TRAP((int)tcp_packets_in_flight(tp) >= 0); 1635 WARN_ON((int)tcp_packets_in_flight(tp) < 0);
1636#endif 1636#endif
1637 return flag; 1637 return flag;
1638} 1638}
@@ -2181,7 +2181,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
2181 int err; 2181 int err;
2182 unsigned int mss; 2182 unsigned int mss;
2183 2183
2184 BUG_TRAP(packets <= tp->packets_out); 2184 WARN_ON(packets > tp->packets_out);
2185 if (tp->lost_skb_hint) { 2185 if (tp->lost_skb_hint) {
2186 skb = tp->lost_skb_hint; 2186 skb = tp->lost_skb_hint;
2187 cnt = tp->lost_cnt_hint; 2187 cnt = tp->lost_cnt_hint;
@@ -2610,7 +2610,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
2610 /* E. Check state exit conditions. State can be terminated 2610 /* E. Check state exit conditions. State can be terminated
2611 * when high_seq is ACKed. */ 2611 * when high_seq is ACKed. */
2612 if (icsk->icsk_ca_state == TCP_CA_Open) { 2612 if (icsk->icsk_ca_state == TCP_CA_Open) {
2613 BUG_TRAP(tp->retrans_out == 0); 2613 WARN_ON(tp->retrans_out != 0);
2614 tp->retrans_stamp = 0; 2614 tp->retrans_stamp = 0;
2615 } else if (!before(tp->snd_una, tp->high_seq)) { 2615 } else if (!before(tp->snd_una, tp->high_seq)) {
2616 switch (icsk->icsk_ca_state) { 2616 switch (icsk->icsk_ca_state) {
@@ -2972,9 +2972,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets)
2972 } 2972 }
2973 2973
2974#if FASTRETRANS_DEBUG > 0 2974#if FASTRETRANS_DEBUG > 0
2975 BUG_TRAP((int)tp->sacked_out >= 0); 2975 WARN_ON((int)tp->sacked_out < 0);
2976 BUG_TRAP((int)tp->lost_out >= 0); 2976 WARN_ON((int)tp->lost_out < 0);
2977 BUG_TRAP((int)tp->retrans_out >= 0); 2977 WARN_ON((int)tp->retrans_out < 0);
2978 if (!tp->packets_out && tcp_is_sack(tp)) { 2978 if (!tp->packets_out && tcp_is_sack(tp)) {
2979 icsk = inet_csk(sk); 2979 icsk = inet_csk(sk);
2980 if (tp->lost_out) { 2980 if (tp->lost_out) {
@@ -3877,7 +3877,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
3877 int i; 3877 int i;
3878 3878
3879 /* RCV.NXT must cover all the block! */ 3879 /* RCV.NXT must cover all the block! */
3880 BUG_TRAP(!before(tp->rcv_nxt, sp->end_seq)); 3880 WARN_ON(before(tp->rcv_nxt, sp->end_seq));
3881 3881
3882 /* Zap this SACK, by moving forward any other SACKS. */ 3882 /* Zap this SACK, by moving forward any other SACKS. */
3883 for (i=this_sack+1; i < num_sacks; i++) 3883 for (i=this_sack+1; i < num_sacks; i++)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a82df6307567..a2b06d0cc26b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -418,7 +418,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
418 /* ICMPs are not backlogged, hence we cannot get 418 /* ICMPs are not backlogged, hence we cannot get
419 an established socket here. 419 an established socket here.
420 */ 420 */
421 BUG_TRAP(!req->sk); 421 WARN_ON(req->sk);
422 422
423 if (seq != tcp_rsk(req)->snt_isn) { 423 if (seq != tcp_rsk(req)->snt_isn) {
424 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); 424 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 328e0cf42b3c..5ab6ba19c3ce 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -287,7 +287,7 @@ static void tcp_retransmit_timer(struct sock *sk)
287 if (!tp->packets_out) 287 if (!tp->packets_out)
288 goto out; 288 goto out;
289 289
290 BUG_TRAP(!tcp_write_queue_empty(sk)); 290 WARN_ON(tcp_write_queue_empty(sk));
291 291
292 if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) && 292 if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
293 !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) { 293 !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 74d543d504a1..a7842c54f58a 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -313,8 +313,10 @@ static void in6_dev_finish_destroy_rcu(struct rcu_head *head)
313void in6_dev_finish_destroy(struct inet6_dev *idev) 313void in6_dev_finish_destroy(struct inet6_dev *idev)
314{ 314{
315 struct net_device *dev = idev->dev; 315 struct net_device *dev = idev->dev;
316 BUG_TRAP(idev->addr_list==NULL); 316
317 BUG_TRAP(idev->mc_list==NULL); 317 WARN_ON(idev->addr_list != NULL);
318 WARN_ON(idev->mc_list != NULL);
319
318#ifdef NET_REFCNT_DEBUG 320#ifdef NET_REFCNT_DEBUG
319 printk(KERN_DEBUG "in6_dev_finish_destroy: %s\n", dev ? dev->name : "NIL"); 321 printk(KERN_DEBUG "in6_dev_finish_destroy: %s\n", dev ? dev->name : "NIL");
320#endif 322#endif
@@ -517,8 +519,9 @@ static void addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
517 519
518void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) 520void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
519{ 521{
520 BUG_TRAP(ifp->if_next==NULL); 522 WARN_ON(ifp->if_next != NULL);
521 BUG_TRAP(ifp->lst_next==NULL); 523 WARN_ON(ifp->lst_next != NULL);
524
522#ifdef NET_REFCNT_DEBUG 525#ifdef NET_REFCNT_DEBUG
523 printk(KERN_DEBUG "inet6_ifa_finish_destroy\n"); 526 printk(KERN_DEBUG "inet6_ifa_finish_destroy\n");
524#endif 527#endif
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 3d828bc4b1cf..0843c4d6218c 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -153,7 +153,7 @@ lookup_protocol:
153 answer_flags = answer->flags; 153 answer_flags = answer->flags;
154 rcu_read_unlock(); 154 rcu_read_unlock();
155 155
156 BUG_TRAP(answer_prot->slab != NULL); 156 WARN_ON(answer_prot->slab == NULL);
157 157
158 err = -ENOBUFS; 158 err = -ENOBUFS;
159 sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot); 159 sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot);
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 87801cc1b2f8..16d43f20b32f 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -98,7 +98,7 @@ struct request_sock *inet6_csk_search_req(const struct sock *sk,
98 ipv6_addr_equal(&treq->rmt_addr, raddr) && 98 ipv6_addr_equal(&treq->rmt_addr, raddr) &&
99 ipv6_addr_equal(&treq->loc_addr, laddr) && 99 ipv6_addr_equal(&treq->loc_addr, laddr) &&
100 (!treq->iif || treq->iif == iif)) { 100 (!treq->iif || treq->iif == iif)) {
101 BUG_TRAP(req->sk == NULL); 101 WARN_ON(req->sk != NULL);
102 *prevp = prev; 102 *prevp = prev;
103 return req; 103 return req;
104 } 104 }
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 00a8a5f9380c..1646a5658255 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -28,7 +28,7 @@ void __inet6_hash(struct sock *sk)
28 struct hlist_head *list; 28 struct hlist_head *list;
29 rwlock_t *lock; 29 rwlock_t *lock;
30 30
31 BUG_TRAP(sk_unhashed(sk)); 31 WARN_ON(!sk_unhashed(sk));
32 32
33 if (sk->sk_state == TCP_LISTEN) { 33 if (sk->sk_state == TCP_LISTEN) {
34 list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)]; 34 list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
@@ -202,7 +202,7 @@ unique:
202 * in hash table socket with a funny identity. */ 202 * in hash table socket with a funny identity. */
203 inet->num = lport; 203 inet->num = lport;
204 inet->sport = htons(lport); 204 inet->sport = htons(lport);
205 BUG_TRAP(sk_unhashed(sk)); 205 WARN_ON(!sk_unhashed(sk));
206 __sk_add_node(sk, &head->chain); 206 __sk_add_node(sk, &head->chain);
207 sk->sk_hash = hash; 207 sk->sk_hash = hash;
208 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 208 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 08ea2de28d63..52dddc25d3e6 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -287,7 +287,7 @@ static int fib6_dump_node(struct fib6_walker_t *w)
287 w->leaf = rt; 287 w->leaf = rt;
288 return 1; 288 return 1;
289 } 289 }
290 BUG_TRAP(res!=0); 290 WARN_ON(res == 0);
291 } 291 }
292 w->leaf = NULL; 292 w->leaf = NULL;
293 return 0; 293 return 0;
@@ -778,7 +778,7 @@ out:
778 pn->leaf = fib6_find_prefix(info->nl_net, pn); 778 pn->leaf = fib6_find_prefix(info->nl_net, pn);
779#if RT6_DEBUG >= 2 779#if RT6_DEBUG >= 2
780 if (!pn->leaf) { 780 if (!pn->leaf) {
781 BUG_TRAP(pn->leaf != NULL); 781 WARN_ON(pn->leaf == NULL);
782 pn->leaf = info->nl_net->ipv6.ip6_null_entry; 782 pn->leaf = info->nl_net->ipv6.ip6_null_entry;
783 } 783 }
784#endif 784#endif
@@ -942,7 +942,7 @@ struct fib6_node * fib6_locate(struct fib6_node *root,
942 942
943#ifdef CONFIG_IPV6_SUBTREES 943#ifdef CONFIG_IPV6_SUBTREES
944 if (src_len) { 944 if (src_len) {
945 BUG_TRAP(saddr!=NULL); 945 WARN_ON(saddr == NULL);
946 if (fn && fn->subtree) 946 if (fn && fn->subtree)
947 fn = fib6_locate_1(fn->subtree, saddr, src_len, 947 fn = fib6_locate_1(fn->subtree, saddr, src_len,
948 offsetof(struct rt6_info, rt6i_src)); 948 offsetof(struct rt6_info, rt6i_src));
@@ -996,9 +996,9 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
996 RT6_TRACE("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter); 996 RT6_TRACE("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter);
997 iter++; 997 iter++;
998 998
999 BUG_TRAP(!(fn->fn_flags&RTN_RTINFO)); 999 WARN_ON(fn->fn_flags & RTN_RTINFO);
1000 BUG_TRAP(!(fn->fn_flags&RTN_TL_ROOT)); 1000 WARN_ON(fn->fn_flags & RTN_TL_ROOT);
1001 BUG_TRAP(fn->leaf==NULL); 1001 WARN_ON(fn->leaf != NULL);
1002 1002
1003 children = 0; 1003 children = 0;
1004 child = NULL; 1004 child = NULL;
@@ -1014,7 +1014,7 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
1014 fn->leaf = fib6_find_prefix(net, fn); 1014 fn->leaf = fib6_find_prefix(net, fn);
1015#if RT6_DEBUG >= 2 1015#if RT6_DEBUG >= 2
1016 if (fn->leaf==NULL) { 1016 if (fn->leaf==NULL) {
1017 BUG_TRAP(fn->leaf); 1017 WARN_ON(!fn->leaf);
1018 fn->leaf = net->ipv6.ip6_null_entry; 1018 fn->leaf = net->ipv6.ip6_null_entry;
1019 } 1019 }
1020#endif 1020#endif
@@ -1025,16 +1025,17 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
1025 pn = fn->parent; 1025 pn = fn->parent;
1026#ifdef CONFIG_IPV6_SUBTREES 1026#ifdef CONFIG_IPV6_SUBTREES
1027 if (FIB6_SUBTREE(pn) == fn) { 1027 if (FIB6_SUBTREE(pn) == fn) {
1028 BUG_TRAP(fn->fn_flags&RTN_ROOT); 1028 WARN_ON(!(fn->fn_flags & RTN_ROOT));
1029 FIB6_SUBTREE(pn) = NULL; 1029 FIB6_SUBTREE(pn) = NULL;
1030 nstate = FWS_L; 1030 nstate = FWS_L;
1031 } else { 1031 } else {
1032 BUG_TRAP(!(fn->fn_flags&RTN_ROOT)); 1032 WARN_ON(fn->fn_flags & RTN_ROOT);
1033#endif 1033#endif
1034 if (pn->right == fn) pn->right = child; 1034 if (pn->right == fn) pn->right = child;
1035 else if (pn->left == fn) pn->left = child; 1035 else if (pn->left == fn) pn->left = child;
1036#if RT6_DEBUG >= 2 1036#if RT6_DEBUG >= 2
1037 else BUG_TRAP(0); 1037 else
1038 WARN_ON(1);
1038#endif 1039#endif
1039 if (child) 1040 if (child)
1040 child->parent = pn; 1041 child->parent = pn;
@@ -1154,14 +1155,14 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
1154 1155
1155#if RT6_DEBUG >= 2 1156#if RT6_DEBUG >= 2
1156 if (rt->u.dst.obsolete>0) { 1157 if (rt->u.dst.obsolete>0) {
1157 BUG_TRAP(fn==NULL); 1158 WARN_ON(fn != NULL);
1158 return -ENOENT; 1159 return -ENOENT;
1159 } 1160 }
1160#endif 1161#endif
1161 if (fn == NULL || rt == net->ipv6.ip6_null_entry) 1162 if (fn == NULL || rt == net->ipv6.ip6_null_entry)
1162 return -ENOENT; 1163 return -ENOENT;
1163 1164
1164 BUG_TRAP(fn->fn_flags&RTN_RTINFO); 1165 WARN_ON(!(fn->fn_flags & RTN_RTINFO));
1165 1166
1166 if (!(rt->rt6i_flags&RTF_CACHE)) { 1167 if (!(rt->rt6i_flags&RTF_CACHE)) {
1167 struct fib6_node *pn = fn; 1168 struct fib6_node *pn = fn;
@@ -1266,7 +1267,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
1266 w->node = pn; 1267 w->node = pn;
1267#ifdef CONFIG_IPV6_SUBTREES 1268#ifdef CONFIG_IPV6_SUBTREES
1268 if (FIB6_SUBTREE(pn) == fn) { 1269 if (FIB6_SUBTREE(pn) == fn) {
1269 BUG_TRAP(fn->fn_flags&RTN_ROOT); 1270 WARN_ON(!(fn->fn_flags & RTN_ROOT));
1270 w->state = FWS_L; 1271 w->state = FWS_L;
1271 continue; 1272 continue;
1272 } 1273 }
@@ -1281,7 +1282,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
1281 continue; 1282 continue;
1282 } 1283 }
1283#if RT6_DEBUG >= 2 1284#if RT6_DEBUG >= 2
1284 BUG_TRAP(0); 1285 WARN_ON(1);
1285#endif 1286#endif
1286 } 1287 }
1287 } 1288 }
@@ -1323,7 +1324,7 @@ static int fib6_clean_node(struct fib6_walker_t *w)
1323 } 1324 }
1324 return 0; 1325 return 0;
1325 } 1326 }
1326 BUG_TRAP(res==0); 1327 WARN_ON(res != 0);
1327 } 1328 }
1328 w->leaf = rt; 1329 w->leaf = rt;
1329 return 0; 1330 return 0;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 6407c64ea4a5..6811901e6b1e 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -116,7 +116,7 @@ static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
116 __skb_pull(newskb, skb_network_offset(newskb)); 116 __skb_pull(newskb, skb_network_offset(newskb));
117 newskb->pkt_type = PACKET_LOOPBACK; 117 newskb->pkt_type = PACKET_LOOPBACK;
118 newskb->ip_summed = CHECKSUM_UNNECESSARY; 118 newskb->ip_summed = CHECKSUM_UNNECESSARY;
119 BUG_TRAP(newskb->dst); 119 WARN_ON(!newskb->dst);
120 120
121 netif_rx(newskb); 121 netif_rx(newskb);
122 return 0; 122 return 0;
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index ad1cc5bbf977..31295c8f6196 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -164,8 +164,8 @@ static int mip6_destopt_output(struct xfrm_state *x, struct sk_buff *skb)
164 calc_padlen(sizeof(*dstopt), 6)); 164 calc_padlen(sizeof(*dstopt), 6));
165 165
166 hao->type = IPV6_TLV_HAO; 166 hao->type = IPV6_TLV_HAO;
167 BUILD_BUG_ON(sizeof(*hao) != 18);
167 hao->length = sizeof(*hao) - 2; 168 hao->length = sizeof(*hao) - 2;
168 BUG_TRAP(hao->length == 16);
169 169
170 len = ((char *)hao - (char *)dstopt) + sizeof(*hao); 170 len = ((char *)hao - (char *)dstopt) + sizeof(*hao);
171 171
@@ -174,7 +174,7 @@ static int mip6_destopt_output(struct xfrm_state *x, struct sk_buff *skb)
174 memcpy(&iph->saddr, x->coaddr, sizeof(iph->saddr)); 174 memcpy(&iph->saddr, x->coaddr, sizeof(iph->saddr));
175 spin_unlock_bh(&x->lock); 175 spin_unlock_bh(&x->lock);
176 176
177 BUG_TRAP(len == x->props.header_len); 177 WARN_ON(len != x->props.header_len);
178 dstopt->hdrlen = (x->props.header_len >> 3) - 1; 178 dstopt->hdrlen = (x->props.header_len >> 3) - 1;
179 179
180 return 0; 180 return 0;
@@ -317,7 +317,7 @@ static int mip6_destopt_init_state(struct xfrm_state *x)
317 x->props.header_len = sizeof(struct ipv6_destopt_hdr) + 317 x->props.header_len = sizeof(struct ipv6_destopt_hdr) +
318 calc_padlen(sizeof(struct ipv6_destopt_hdr), 6) + 318 calc_padlen(sizeof(struct ipv6_destopt_hdr), 6) +
319 sizeof(struct ipv6_destopt_hao); 319 sizeof(struct ipv6_destopt_hao);
320 BUG_TRAP(x->props.header_len == 24); 320 WARN_ON(x->props.header_len != 24);
321 321
322 return 0; 322 return 0;
323} 323}
@@ -380,7 +380,7 @@ static int mip6_rthdr_output(struct xfrm_state *x, struct sk_buff *skb)
380 rt2->rt_hdr.segments_left = 1; 380 rt2->rt_hdr.segments_left = 1;
381 memset(&rt2->reserved, 0, sizeof(rt2->reserved)); 381 memset(&rt2->reserved, 0, sizeof(rt2->reserved));
382 382
383 BUG_TRAP(rt2->rt_hdr.hdrlen == 2); 383 WARN_ON(rt2->rt_hdr.hdrlen != 2);
384 384
385 memcpy(&rt2->addr, &iph->daddr, sizeof(rt2->addr)); 385 memcpy(&rt2->addr, &iph->daddr, sizeof(rt2->addr));
386 spin_lock_bh(&x->lock); 386 spin_lock_bh(&x->lock);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index cf20bc4fd60d..52d06dd4b817 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -416,8 +416,8 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
416 416
417 fq_kill(fq); 417 fq_kill(fq);
418 418
419 BUG_TRAP(head != NULL); 419 WARN_ON(head == NULL);
420 BUG_TRAP(NFCT_FRAG6_CB(head)->offset == 0); 420 WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);
421 421
422 /* Unfragmented part is taken from the first segment. */ 422 /* Unfragmented part is taken from the first segment. */
423 payload_len = ((head->data - skb_network_header(head)) - 423 payload_len = ((head->data - skb_network_header(head)) -
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 6ab957ec2dd6..89184b576e23 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -473,8 +473,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
473 fq->q.fragments = head; 473 fq->q.fragments = head;
474 } 474 }
475 475
476 BUG_TRAP(head != NULL); 476 WARN_ON(head == NULL);
477 BUG_TRAP(FRAG6_CB(head)->offset == 0); 477 WARN_ON(FRAG6_CB(head)->offset != 0);
478 478
479 /* Unfragmented part is taken from the first segment. */ 479 /* Unfragmented part is taken from the first segment. */
480 payload_len = ((head->data - skb_network_header(head)) - 480 payload_len = ((head->data - skb_network_header(head)) -
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index ae45f9835014..cff778b23a7f 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -421,7 +421,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
421 /* ICMPs are not backlogged, hence we cannot get 421 /* ICMPs are not backlogged, hence we cannot get
422 * an established socket here. 422 * an established socket here.
423 */ 423 */
424 BUG_TRAP(req->sk == NULL); 424 WARN_ON(req->sk != NULL);
425 425
426 if (seq != tcp_rsk(req)->snt_isn) { 426 if (seq != tcp_rsk(req)->snt_isn) {
427 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); 427 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
diff --git a/net/key/af_key.c b/net/key/af_key.c
index f0fc46c8038d..d628df97e02e 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -96,8 +96,8 @@ static void pfkey_sock_destruct(struct sock *sk)
96 return; 96 return;
97 } 97 }
98 98
99 BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc)); 99 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
100 BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc)); 100 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
101 101
102 atomic_dec(&pfkey_socks_nr); 102 atomic_dec(&pfkey_socks_nr);
103} 103}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 98bfe277eab2..b0eacc0007cc 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -158,9 +158,10 @@ static void netlink_sock_destruct(struct sock *sk)
158 printk(KERN_ERR "Freeing alive netlink socket %p\n", sk); 158 printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
159 return; 159 return;
160 } 160 }
161 BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc)); 161
162 BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc)); 162 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
163 BUG_TRAP(!nlk_sk(sk)->groups); 163 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
164 WARN_ON(nlk_sk(sk)->groups);
164} 165}
165 166
166/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on 167/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index d56cae112dc8..c718e7e3f7de 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -260,8 +260,8 @@ static inline struct packet_sock *pkt_sk(struct sock *sk)
260 260
261static void packet_sock_destruct(struct sock *sk) 261static void packet_sock_destruct(struct sock *sk)
262{ 262{
263 BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc)); 263 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
264 BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc)); 264 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
265 265
266 if (!sock_flag(sk, SOCK_DEAD)) { 266 if (!sock_flag(sk, SOCK_DEAD)) {
267 printk("Attempt to release alive packet socket: %p\n", sk); 267 printk("Attempt to release alive packet socket: %p\n", sk);
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 4b2682feeedc..32e489118beb 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -660,9 +660,9 @@ static void rxrpc_sock_destructor(struct sock *sk)
660 660
661 rxrpc_purge_queue(&sk->sk_receive_queue); 661 rxrpc_purge_queue(&sk->sk_receive_queue);
662 662
663 BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc)); 663 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
664 BUG_TRAP(sk_unhashed(sk)); 664 WARN_ON(!sk_unhashed(sk));
665 BUG_TRAP(!sk->sk_socket); 665 WARN_ON(sk->sk_socket);
666 666
667 if (!sock_flag(sk, SOCK_DEAD)) { 667 if (!sock_flag(sk, SOCK_DEAD)) {
668 printk("Attempt to release alive rxrpc socket: %p\n", sk); 668 printk("Attempt to release alive rxrpc socket: %p\n", sk);
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 74e662cbb2c5..d308c19aa3f9 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -41,7 +41,7 @@ void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
41 return; 41 return;
42 } 42 }
43 } 43 }
44 BUG_TRAP(0); 44 WARN_ON(1);
45} 45}
46EXPORT_SYMBOL(tcf_hash_destroy); 46EXPORT_SYMBOL(tcf_hash_destroy);
47 47
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 32c3f9d9fb7a..38015b493947 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -116,7 +116,7 @@ static void tcf_police_destroy(struct tcf_police *p)
116 return; 116 return;
117 } 117 }
118 } 118 }
119 BUG_TRAP(0); 119 WARN_ON(1);
120} 120}
121 121
122static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = { 122static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 527db2559dd2..246f9065ce34 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -345,7 +345,7 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
345 } 345 }
346 } 346 }
347 } 347 }
348 BUG_TRAP(0); 348 WARN_ON(1);
349 return 0; 349 return 0;
350} 350}
351 351
@@ -368,7 +368,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
368 struct tc_u_common *tp_c = tp->data; 368 struct tc_u_common *tp_c = tp->data;
369 struct tc_u_hnode **hn; 369 struct tc_u_hnode **hn;
370 370
371 BUG_TRAP(!ht->refcnt); 371 WARN_ON(ht->refcnt);
372 372
373 u32_clear_hnode(tp, ht); 373 u32_clear_hnode(tp, ht);
374 374
@@ -380,7 +380,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
380 } 380 }
381 } 381 }
382 382
383 BUG_TRAP(0); 383 WARN_ON(1);
384 return -ENOENT; 384 return -ENOENT;
385} 385}
386 386
@@ -389,7 +389,7 @@ static void u32_destroy(struct tcf_proto *tp)
389 struct tc_u_common *tp_c = tp->data; 389 struct tc_u_common *tp_c = tp->data;
390 struct tc_u_hnode *root_ht = xchg(&tp->root, NULL); 390 struct tc_u_hnode *root_ht = xchg(&tp->root, NULL);
391 391
392 BUG_TRAP(root_ht != NULL); 392 WARN_ON(root_ht == NULL);
393 393
394 if (root_ht && --root_ht->refcnt == 0) 394 if (root_ht && --root_ht->refcnt == 0)
395 u32_destroy_hnode(tp, root_ht); 395 u32_destroy_hnode(tp, root_ht);
@@ -407,7 +407,7 @@ static void u32_destroy(struct tcf_proto *tp)
407 while ((ht = tp_c->hlist) != NULL) { 407 while ((ht = tp_c->hlist) != NULL) {
408 tp_c->hlist = ht->next; 408 tp_c->hlist = ht->next;
409 409
410 BUG_TRAP(ht->refcnt == 0); 410 WARN_ON(ht->refcnt != 0);
411 411
412 kfree(ht); 412 kfree(ht);
413 } 413 }
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index f1d2f8ec8b4c..14954bf4a683 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1175,7 +1175,7 @@ static void cbq_unlink_class(struct cbq_class *this)
1175 this->tparent->children = NULL; 1175 this->tparent->children = NULL;
1176 } 1176 }
1177 } else { 1177 } else {
1178 BUG_TRAP(this->sibling == this); 1178 WARN_ON(this->sibling != this);
1179 } 1179 }
1180} 1180}
1181 1181
@@ -1699,7 +1699,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
1699{ 1699{
1700 struct cbq_sched_data *q = qdisc_priv(sch); 1700 struct cbq_sched_data *q = qdisc_priv(sch);
1701 1701
1702 BUG_TRAP(!cl->filters); 1702 WARN_ON(cl->filters);
1703 1703
1704 tcf_destroy_chain(&cl->filter_list); 1704 tcf_destroy_chain(&cl->filter_list);
1705 qdisc_destroy(cl->q); 1705 qdisc_destroy(cl->q);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 43abd4d27ea6..fd2a6cadb115 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -746,5 +746,5 @@ void dev_shutdown(struct net_device *dev)
746{ 746{
747 netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); 747 netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
748 shutdown_scheduler_queue(dev, &dev->rx_queue, NULL); 748 shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
749 BUG_TRAP(!timer_pending(&dev->watchdog_timer)); 749 WARN_ON(timer_pending(&dev->watchdog_timer));
750} 750}
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 30c999c61b01..75a40951c4f2 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -524,7 +524,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
524 */ 524 */
525static inline void htb_activate(struct htb_sched *q, struct htb_class *cl) 525static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
526{ 526{
527 BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen); 527 WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);
528 528
529 if (!cl->prio_activity) { 529 if (!cl->prio_activity) {
530 cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio); 530 cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
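For the compound assertions in htb (here and in htb_parent_to_leaf() further down), the inversion is De Morgan's law: negating (a && b && c) gives (!a || !b || !c). The following is a hypothetical standalone check of that step, using a stand-in struct rather than the real struct htb_class:

/* Hypothetical stand-in for struct htb_class; only the fields relevant
 * to the assertion are modelled. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct fake_class {
	int	level;		/* non-zero for inner classes     */
	void	*leaf_q;	/* cl->un.leaf.q stand-in         */
	int	qlen;		/* cl->un.leaf.q->q.qlen stand-in */
};

int main(void)
{
	struct fake_class samples[] = {
		{ 0, (void *)1, 1 },	/* healthy active leaf         */
		{ 1, (void *)1, 1 },	/* inner class: old trap fires */
		{ 0, NULL,      0 },	/* leaf with no queue          */
	};

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		struct fake_class *cl = &samples[i];
		/* Old BUG_TRAP argument ... */
		bool old_cond = !cl->level && cl->leaf_q && cl->qlen;
		/* ... and the new WARN_ON argument from the hunk above. */
		bool new_cond = cl->level || !cl->leaf_q || !cl->qlen;

		/* The rewrite is correct iff new is exactly the negation of old. */
		assert(new_cond == !old_cond);
	}
	return 0;
}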
@@ -542,7 +542,7 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
542 */ 542 */
543static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl) 543static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
544{ 544{
545 BUG_TRAP(cl->prio_activity); 545 WARN_ON(!cl->prio_activity);
546 546
547 htb_deactivate_prios(q, cl); 547 htb_deactivate_prios(q, cl);
548 cl->prio_activity = 0; 548 cl->prio_activity = 0;
@@ -757,7 +757,7 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
757 u32 *pid; 757 u32 *pid;
758 } stk[TC_HTB_MAXDEPTH], *sp = stk; 758 } stk[TC_HTB_MAXDEPTH], *sp = stk;
759 759
760 BUG_TRAP(tree->rb_node); 760 WARN_ON(!tree->rb_node);
761 sp->root = tree->rb_node; 761 sp->root = tree->rb_node;
762 sp->pptr = pptr; 762 sp->pptr = pptr;
763 sp->pid = pid; 763 sp->pid = pid;
@@ -777,7 +777,7 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
777 *sp->pptr = (*sp->pptr)->rb_left; 777 *sp->pptr = (*sp->pptr)->rb_left;
778 if (sp > stk) { 778 if (sp > stk) {
779 sp--; 779 sp--;
780 BUG_TRAP(*sp->pptr); 780 WARN_ON(!*sp->pptr);
781 if (!*sp->pptr) 781 if (!*sp->pptr)
782 return NULL; 782 return NULL;
783 htb_next_rb_node(sp->pptr); 783 htb_next_rb_node(sp->pptr);
@@ -792,7 +792,7 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
792 sp->pid = cl->un.inner.last_ptr_id + prio; 792 sp->pid = cl->un.inner.last_ptr_id + prio;
793 } 793 }
794 } 794 }
795 BUG_TRAP(0); 795 WARN_ON(1);
796 return NULL; 796 return NULL;
797} 797}
798 798
@@ -810,7 +810,7 @@ static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
810 810
811 do { 811 do {
812next: 812next:
813 BUG_TRAP(cl); 813 WARN_ON(!cl);
814 if (!cl) 814 if (!cl)
815 return NULL; 815 return NULL;
816 816
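Sites such as htb_lookup_leaf() and htb_dequeue_tree() above keep the two-step shape "WARN_ON(!x); if (!x) return NULL;". Because the generic WARN_ON(), unlike BUG_TRAP(), evaluates to its condition, those two lines could in principle be folded into one; that is not done in this patch, and the sketch below is only a userspace illustration of the idiom, reusing the same stand-in WARN_ON model as earlier, not a proposed kernel change.

#include <stdio.h>
#include <stddef.h>

#define WARN_ON(cond) ({						\
	int __ret = !!(cond);						\
	if (__ret)							\
		fprintf(stderr, "WARNING at %s:%d\n", __FILE__, __LINE__); \
	__ret;								\
})

struct item { int value; };

static int dequeue_value(const struct item *it)
{
	/* Unfolded shape, as in the hunks above:
	 *	WARN_ON(!it);
	 *	if (!it)
	 *		return -1;
	 * Folded shape: one test, same warning, same early return. */
	if (WARN_ON(!it))
		return -1;
	return it->value;
}

int main(void)
{
	struct item it = { .value = 42 };

	printf("%d\n", dequeue_value(&it));	/* 42, no warning        */
	printf("%d\n", dequeue_value(NULL));	/* -1, warning to stderr */
	return 0;
}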
@@ -1185,7 +1185,7 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
1185{ 1185{
1186 struct htb_class *parent = cl->parent; 1186 struct htb_class *parent = cl->parent;
1187 1187
1188 BUG_TRAP(!cl->level && cl->un.leaf.q && !cl->prio_activity); 1188 WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
1189 1189
1190 if (parent->cmode != HTB_CAN_SEND) 1190 if (parent->cmode != HTB_CAN_SEND)
1191 htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level); 1191 htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level);
@@ -1205,7 +1205,7 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
1205static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) 1205static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1206{ 1206{
1207 if (!cl->level) { 1207 if (!cl->level) {
1208 BUG_TRAP(cl->un.leaf.q); 1208 WARN_ON(!cl->un.leaf.q);
1209 qdisc_destroy(cl->un.leaf.q); 1209 qdisc_destroy(cl->un.leaf.q);
1210 } 1210 }
1211 gen_kill_estimator(&cl->bstats, &cl->rate_est); 1211 gen_kill_estimator(&cl->bstats, &cl->rate_est);
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index ec2a0a33fd78..8472b8b349c4 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -464,7 +464,7 @@ static void sctp_association_destroy(struct sctp_association *asoc)
464 spin_unlock_bh(&sctp_assocs_id_lock); 464 spin_unlock_bh(&sctp_assocs_id_lock);
465 } 465 }
466 466
467 BUG_TRAP(!atomic_read(&asoc->rmem_alloc)); 467 WARN_ON(atomic_read(&asoc->rmem_alloc));
468 468
469 if (asoc->base.malloced) { 469 if (asoc->base.malloced) {
470 kfree(asoc); 470 kfree(asoc);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 70ceb1604ad8..24eb214581d5 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -227,7 +227,7 @@ static void __unix_remove_socket(struct sock *sk)
227 227
228static void __unix_insert_socket(struct hlist_head *list, struct sock *sk) 228static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
229{ 229{
230 BUG_TRAP(sk_unhashed(sk)); 230 WARN_ON(!sk_unhashed(sk));
231 sk_add_node(sk, list); 231 sk_add_node(sk, list);
232} 232}
233 233
@@ -350,9 +350,9 @@ static void unix_sock_destructor(struct sock *sk)
350 350
351 skb_queue_purge(&sk->sk_receive_queue); 351 skb_queue_purge(&sk->sk_receive_queue);
352 352
353 BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc)); 353 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
354 BUG_TRAP(sk_unhashed(sk)); 354 WARN_ON(!sk_unhashed(sk));
355 BUG_TRAP(!sk->sk_socket); 355 WARN_ON(sk->sk_socket);
356 if (!sock_flag(sk, SOCK_DEAD)) { 356 if (!sock_flag(sk, SOCK_DEAD)) {
357 printk("Attempt to release alive unix socket: %p\n", sk); 357 printk("Attempt to release alive unix socket: %p\n", sk);
358 return; 358 return;
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 23a2cc04b8cd..96036cf2216d 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -718,7 +718,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
718 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 718 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
719 int end; 719 int end;
720 720
721 BUG_TRAP(start <= offset + len); 721 WARN_ON(start > offset + len);
722 722
723 end = start + skb_shinfo(skb)->frags[i].size; 723 end = start + skb_shinfo(skb)->frags[i].size;
724 if ((copy = end - offset) > 0) { 724 if ((copy = end - offset) > 0) {
@@ -748,7 +748,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
748 for (; list; list = list->next) { 748 for (; list; list = list->next) {
749 int end; 749 int end;
750 750
751 BUG_TRAP(start <= offset + len); 751 WARN_ON(start > offset + len);
752 752
753 end = start + list->len; 753 end = start + list->len;
754 if ((copy = end - offset) > 0) { 754 if ((copy = end - offset) > 0) {
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index 800f669083fb..c609a4b98e15 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -22,7 +22,6 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/percpu.h> 24#include <linux/percpu.h>
25#include <linux/rtnetlink.h>
26#include <linux/smp.h> 25#include <linux/smp.h>
27#include <linux/vmalloc.h> 26#include <linux/vmalloc.h>
28#include <net/ip.h> 27#include <net/ip.h>
@@ -251,7 +250,7 @@ static void ipcomp_free_tfms(struct crypto_comp **tfms)
251 break; 250 break;
252 } 251 }
253 252
254 BUG_TRAP(pos); 253 WARN_ON(!pos);
255 254
256 if (--pos->users) 255 if (--pos->users)
257 return; 256 return;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 72fddafd891a..4c6914ef7d92 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -538,7 +538,7 @@ EXPORT_SYMBOL(xfrm_state_alloc);
538 538
539void __xfrm_state_destroy(struct xfrm_state *x) 539void __xfrm_state_destroy(struct xfrm_state *x)
540{ 540{
541 BUG_TRAP(x->km.state == XFRM_STATE_DEAD); 541 WARN_ON(x->km.state != XFRM_STATE_DEAD);
542 542
543 spin_lock_bh(&xfrm_state_lock); 543 spin_lock_bh(&xfrm_state_lock);
544 list_del(&x->all); 544 list_del(&x->all);