author    Dave Kleikamp <shaggy@austin.ibm.com>  2005-08-10 12:15:13 -0400
committer Dave Kleikamp <shaggy@austin.ibm.com>  2005-08-10 12:15:13 -0400
commit    2d610b80e954045ccfc27558f84e482709e5e5b7
tree      840b3bb52adba07b6f1e4ddf2beb5ad5df480486
parent    8a9cd6d676728792aaee31f30015d284acd154a3
parent    86b3786078d63242d3194ffc58ae8dae1d1bbef3
Merge with /home/shaggy/git/linus-clean/
Signed-off-by: Dave Kleikamp <shaggy@austin.ibm.com>
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/fib_semantics.c                 9
-rw-r--r--  net/ipv4/icmp.c                          3
-rw-r--r--  net/ipv4/ip_fragment.c                   8
-rw-r--r--  net/ipv4/ip_sockglue.c                   3
-rw-r--r--  net/ipv4/netfilter/ip_nat_standalone.c   4
-rw-r--r--  net/ipv4/tcp_ipv4.c                     14
-rw-r--r--  net/ipv4/tcp_output.c                   86
-rw-r--r--  net/ipv4/udp.c                          34
8 files changed, 76 insertions(+), 85 deletions(-)
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index c886b28ba9f5..e278cb9d0075 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -593,10 +593,13 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
 		   struct hlist_head *new_laddrhash,
 		   unsigned int new_size)
 {
+	struct hlist_head *old_info_hash, *old_laddrhash;
 	unsigned int old_size = fib_hash_size;
-	unsigned int i;
+	unsigned int i, bytes;
 
 	write_lock(&fib_info_lock);
+	old_info_hash = fib_info_hash;
+	old_laddrhash = fib_info_laddrhash;
 	fib_hash_size = new_size;
 
 	for (i = 0; i < old_size; i++) {
@@ -636,6 +639,10 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
 	fib_info_laddrhash = new_laddrhash;
 
 	write_unlock(&fib_info_lock);
+
+	bytes = old_size * sizeof(struct hlist_head *);
+	fib_hash_free(old_info_hash, bytes);
+	fib_hash_free(old_laddrhash, bytes);
 }
 
 struct fib_info *
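
The two fib_semantics.c hunks plug a leak in fib_hash_move(): the old
fib_info_hash and fib_info_laddrhash tables were previously abandoned on
resize. The patch snapshots the old pointers while fib_info_lock is held,
publishes the new tables, and frees the old storage only after the write
lock is dropped. A minimal userspace sketch of the same
snapshot/publish/free pattern (struct, lock, and function names here are
illustrative, not the kernel's):

    #include <pthread.h>
    #include <stdlib.h>

    /* Illustrative resizable pointer table guarded by a rwlock. */
    struct ptr_table {
            pthread_rwlock_t lock;
            void **buckets;
            unsigned int size;
    };

    static void table_resize(struct ptr_table *t, void **new_buckets,
                             unsigned int new_size)
    {
            void **old_buckets;

            pthread_rwlock_wrlock(&t->lock);
            old_buckets = t->buckets;       /* snapshot before overwriting */
            /* ... rehash entries from old_buckets into new_buckets ... */
            t->buckets = new_buckets;       /* publish the new table */
            t->size = new_size;
            pthread_rwlock_unlock(&t->lock);

            free(old_buckets);              /* now unreachable; freeing outside
                                               the lock keeps the critical
                                               section short */
    }

Freeing after the unlock mirrors the kernel change, where fib_hash_free()
runs without fib_info_lock held.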
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 279f57abfecb..3d78464f64ea 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -936,8 +936,7 @@ int icmp_rcv(struct sk_buff *skb)
 	case CHECKSUM_HW:
 		if (!(u16)csum_fold(skb->csum))
 			break;
-		NETDEBUG(if (net_ratelimit())
-			 printk(KERN_DEBUG "icmp v4 hw csum failure\n"));
+		LIMIT_NETDEBUG(printk(KERN_DEBUG "icmp v4 hw csum failure\n"));
 	case CHECKSUM_NONE:
 		if ((u16)csum_fold(skb_checksum(skb, 0, skb->len, 0)))
 			goto error;
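
This hunk is the first of many in this merge that collapse the open-coded
NETDEBUG(if (net_ratelimit()) printk(...)) idiom into LIMIT_NETDEBUG(),
which folds the rate-limit test into the macro itself. The wrapper's shape
in kernels of this vintage was approximately the following sketch; consult
include/net/sock.h in your tree for the authoritative definition, and note
that the config guard shown is an assumption:

    /* Approximate reconstruction -- not copied from the tree. */
    #ifdef CONFIG_SYSCTL                    /* guard name is an assumption */
    #define NETDEBUG(x)       do { x; } while (0)
    #define LIMIT_NETDEBUG(x) do { if (net_ratelimit()) { x; } } while (0)
    #else
    #define NETDEBUG(x)       do { } while (0)
    #define LIMIT_NETDEBUG(x) do { } while (0)
    #endif

Either way, each call site shrinks from a multi-line conditional to a
single statement, which accounts for most of the churn in ip_fragment.c,
tcp_ipv4.c, and udp.c below.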
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 7f68e27eb4ea..eb377ae15305 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -377,7 +377,7 @@ static struct ipq *ip_frag_create(unsigned hash, struct iphdr *iph, u32 user)
 	return ip_frag_intern(hash, qp);
 
 out_nomem:
-	NETDEBUG(if (net_ratelimit()) printk(KERN_ERR "ip_frag_create: no memory left !\n"));
+	LIMIT_NETDEBUG(printk(KERN_ERR "ip_frag_create: no memory left !\n"));
 	return NULL;
 }
 
@@ -625,10 +625,8 @@ static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev)
 	return head;
 
 out_nomem:
-	NETDEBUG(if (net_ratelimit())
-		 printk(KERN_ERR
-			"IP: queue_glue: no memory for gluing queue %p\n",
-			qp));
+	LIMIT_NETDEBUG(printk(KERN_ERR "IP: queue_glue: no memory for gluing "
+			      "queue %p\n", qp));
 	goto out_fail;
 out_oversize:
 	if (net_ratelimit())
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index fc7c481d0d79..ff4bd067b397 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -848,6 +848,9 @@ mc_msf_out:
 
 	case IP_IPSEC_POLICY:
 	case IP_XFRM_POLICY:
+		err = -EPERM;
+		if (!capable(CAP_NET_ADMIN))
+			break;
 		err = xfrm_user_policy(sk, optname, optval, optlen);
 		break;
 
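
The ip_sockglue.c hunk makes the IPsec/XFRM policy options privileged and
fails closed: err is preset to -EPERM, so an unprivileged caller breaks
out of the switch before xfrm_user_policy() ever sees the user buffer.
A self-contained sketch of that fail-closed pattern (the function and its
parameters are hypothetical; only the preset-error-then-check ordering is
the point):

    #include <errno.h>
    #include <stddef.h>

    /* Preset the error, bail early, and only clear it once every
     * check has passed. set_policy() is illustrative, not kernel API. */
    static int set_policy(int caller_is_admin, const void *optval, int optlen)
    {
            int err = -EPERM;               /* default: denied */

            if (!caller_is_admin)
                    return err;             /* never touch optval unprivileged */
            if (optval == NULL || optlen <= 0)
                    return -EINVAL;
            err = 0;                        /* stand-in for xfrm_user_policy() */
            return err;
    }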
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
index bc59d0d6e89e..91d5ea1dbbc9 100644
--- a/net/ipv4/netfilter/ip_nat_standalone.c
+++ b/net/ipv4/netfilter/ip_nat_standalone.c
@@ -102,6 +102,10 @@ ip_nat_fn(unsigned int hooknum,
 		return NF_ACCEPT;
 	}
 
+	/* Don't try to NAT if this packet is not conntracked */
+	if (ct == &ip_conntrack_untracked)
+		return NF_ACCEPT;
+
 	switch (ctinfo) {
 	case IP_CT_RELATED:
 	case IP_CT_RELATED+IP_CT_IS_REPLY:
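
The NAT hunk short-circuits ip_nat_fn() for packets whose conntrack entry
is the shared ip_conntrack_untracked sentinel (for example, packets hit by
a NOTRACK rule), accepting them without translation. Since every untracked
packet points at the one static object, a plain pointer comparison is
enough; a sketch of the sentinel idiom (only ip_conntrack_untracked comes
from the diff, the rest is illustrative):

    /* All "untracked" packets reference this single static object. */
    struct tracked_conn { int refcnt; };

    static struct tracked_conn untracked;   /* stands in for ip_conntrack_untracked */

    static int wants_nat(const struct tracked_conn *ct)
    {
            if (ct == &untracked)           /* identity test, no flag lookups */
                    return 0;               /* leave the packet alone */
            return 1;
    }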
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 62f62bb05c2a..5d91213d34c0 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1494,12 +1494,11 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 			 * to destinations, already remembered
 			 * to the moment of synflood.
 			 */
-			NETDEBUG(if (net_ratelimit()) \
-					printk(KERN_DEBUG "TCP: drop open "
-							  "request from %u.%u."
-							  "%u.%u/%u\n", \
-					       NIPQUAD(saddr),
-					       ntohs(skb->h.th->source)));
+			LIMIT_NETDEBUG(printk(KERN_DEBUG "TCP: drop open "
+					      "request from %u.%u."
+					      "%u.%u/%u\n",
+					      NIPQUAD(saddr),
+					      ntohs(skb->h.th->source)));
 			dst_release(dst);
 			goto drop_and_free;
 		}
@@ -1627,8 +1626,7 @@ static int tcp_v4_checksum_init(struct sk_buff *skb)
 				  skb->nh.iph->daddr, skb->csum))
 			return 0;
 
-		NETDEBUG(if (net_ratelimit())
-			 printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
+		LIMIT_NETDEBUG(printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
 		skb->ip_summed = CHECKSUM_NONE;
 	}
 	if (skb->len <= 76) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e3f8ea1bfa9c..7d076f0db100 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -403,11 +403,9 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 	sk->sk_send_head = skb;
 }
 
-static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb)
+static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	if (skb->len <= tp->mss_cache ||
+	if (skb->len <= mss_now ||
 	    !(sk->sk_route_caps & NETIF_F_TSO)) {
 		/* Avoid the costly divide in the normal
 		 * non-TSO case.
@@ -417,10 +415,10 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb)
 	} else {
 		unsigned int factor;
 
-		factor = skb->len + (tp->mss_cache - 1);
-		factor /= tp->mss_cache;
+		factor = skb->len + (mss_now - 1);
+		factor /= mss_now;
 		skb_shinfo(skb)->tso_segs = factor;
-		skb_shinfo(skb)->tso_size = tp->mss_cache;
+		skb_shinfo(skb)->tso_size = mss_now;
 	}
 }
 
@@ -429,7 +427,7 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb)
  * packet to the list. This won't be called frequently, I hope.
  * Remember, these are still headerless SKBs at this point.
  */
-static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len)
+static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *buff;
@@ -492,8 +490,8 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len)
 	}
 
 	/* Fix up tso_factor for both original and new SKB. */
-	tcp_set_skb_tso_segs(sk, skb);
-	tcp_set_skb_tso_segs(sk, buff);
+	tcp_set_skb_tso_segs(sk, skb, mss_now);
+	tcp_set_skb_tso_segs(sk, buff, mss_now);
 
 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
 		tp->lost_out += tcp_skb_pcount(skb);
@@ -569,7 +567,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 	 * factor and mss.
 	 */
 	if (tcp_skb_pcount(skb) > 1)
-		tcp_set_skb_tso_segs(sk, skb);
+		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));
 
 	return 0;
 }
@@ -734,12 +732,14 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *sk
 /* This must be invoked the first time we consider transmitting
  * SKB onto the wire.
  */
-static inline int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb)
+static inline int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
 {
 	int tso_segs = tcp_skb_pcount(skb);
 
-	if (!tso_segs) {
-		tcp_set_skb_tso_segs(sk, skb);
+	if (!tso_segs ||
+	    (tso_segs > 1 &&
+	     skb_shinfo(skb)->tso_size != mss_now)) {
+		tcp_set_skb_tso_segs(sk, skb, mss_now);
 		tso_segs = tcp_skb_pcount(skb);
 	}
 	return tso_segs;
@@ -817,7 +817,7 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned int cwnd_quota;
 
-	tcp_init_tso_segs(sk, skb);
+	tcp_init_tso_segs(sk, skb, cur_mss);
 
 	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
 		return 0;
@@ -854,7 +854,7 @@ int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
  * know that all the data is in scatter-gather pages, and that the
  * packet has never been sent out before (and thus is not cloned).
  */
-static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len)
+static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now)
 {
 	struct sk_buff *buff;
 	int nlen = skb->len - len;
@@ -887,8 +887,8 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len)
 	skb_split(skb, buff, len);
 
 	/* Fix up tso_factor for both original and new SKB. */
-	tcp_set_skb_tso_segs(sk, skb);
-	tcp_set_skb_tso_segs(sk, buff);
+	tcp_set_skb_tso_segs(sk, skb, mss_now);
+	tcp_set_skb_tso_segs(sk, buff, mss_now);
 
 	/* Link BUFF into the send queue. */
 	skb_header_release(buff);
@@ -972,19 +972,18 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 	if (unlikely(sk->sk_state == TCP_CLOSE))
 		return 0;
 
-	skb = sk->sk_send_head;
-	if (unlikely(!skb))
-		return 0;
-
-	tso_segs = tcp_init_tso_segs(sk, skb);
-	cwnd_quota = tcp_cwnd_test(tp, skb);
-	if (unlikely(!cwnd_quota))
-		goto out;
-
 	sent_pkts = 0;
-	while (likely(tcp_snd_wnd_test(tp, skb, mss_now))) {
+	while ((skb = sk->sk_send_head)) {
+		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
 		BUG_ON(!tso_segs);
 
+		cwnd_quota = tcp_cwnd_test(tp, skb);
+		if (!cwnd_quota)
+			break;
+
+		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
+			break;
+
 		if (tso_segs == 1) {
 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
 						     (tcp_skb_is_last(sk, skb) ?
@@ -1006,11 +1005,11 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 				limit = skb->len - trim;
 			}
 			if (skb->len > limit) {
-				if (tso_fragment(sk, skb, limit))
+				if (tso_fragment(sk, skb, limit, mss_now))
 					break;
 			}
 		} else if (unlikely(skb->len > mss_now)) {
-			if (unlikely(tcp_fragment(sk, skb, mss_now)))
+			if (unlikely(tcp_fragment(sk, skb, mss_now, mss_now)))
 				break;
 		}
 
@@ -1026,27 +1025,12 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 
 		tcp_minshall_update(tp, mss_now, skb);
 		sent_pkts++;
-
-		/* Do not optimize this to use tso_segs. If we chopped up
-		 * the packet above, tso_segs will no longer be valid.
-		 */
-		cwnd_quota -= tcp_skb_pcount(skb);
-
-		BUG_ON(cwnd_quota < 0);
-		if (!cwnd_quota)
-			break;
-
-		skb = sk->sk_send_head;
-		if (!skb)
-			break;
-		tso_segs = tcp_init_tso_segs(sk, skb);
 	}
 
 	if (likely(sent_pkts)) {
 		tcp_cwnd_validate(sk, tp);
 		return 0;
 	}
-out:
 	return !tp->packets_out && sk->sk_send_head;
 }
 
@@ -1076,7 +1060,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
 
 	BUG_ON(!skb || skb->len < mss_now);
 
-	tso_segs = tcp_init_tso_segs(sk, skb);
+	tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
 	cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);
 
 	if (likely(cwnd_quota)) {
@@ -1093,11 +1077,11 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
 				limit = skb->len - trim;
 			}
 			if (skb->len > limit) {
-				if (unlikely(tso_fragment(sk, skb, limit)))
+				if (unlikely(tso_fragment(sk, skb, limit, mss_now)))
 					return;
 			}
 		} else if (unlikely(skb->len > mss_now)) {
-			if (unlikely(tcp_fragment(sk, skb, mss_now)))
+			if (unlikely(tcp_fragment(sk, skb, mss_now, mss_now)))
 				return;
 		}
 
@@ -1388,7 +1372,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 		int old_factor = tcp_skb_pcount(skb);
 		int new_factor;
 
-		if (tcp_fragment(sk, skb, cur_mss))
+		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
 			return -ENOMEM; /* We'll try again later. */
 
 		/* New SKB created, account for it. */
@@ -1991,7 +1975,7 @@ int tcp_write_wakeup(struct sock *sk)
 		    skb->len > mss) {
 			seg_size = min(seg_size, mss);
 			TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
-			if (tcp_fragment(sk, skb, seg_size))
+			if (tcp_fragment(sk, skb, seg_size, mss))
 				return -1;
 			/* SWS override triggered forced fragmentation.
 			 * Disable TSO, the connection is too sick. */
@@ -2000,7 +1984,7 @@ int tcp_write_wakeup(struct sock *sk)
 			sk->sk_route_caps &= ~NETIF_F_TSO;
 		}
 	} else if (!tcp_skb_pcount(skb))
-		tcp_set_skb_tso_segs(sk, skb);
+		tcp_set_skb_tso_segs(sk, skb, mss);
 
 	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
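
The tcp_output.c changes share one theme: the TSO helpers now take the
caller's current MSS (mss_now or cur_mss) as a parameter instead of
reading tp->mss_cache, and tcp_init_tso_segs() re-segments a queued skb
whenever its cached tso_size disagrees with the MSS now in effect.
tcp_write_xmit() is also restructured so the congestion-window and
send-window tests run at the top of every loop iteration rather than
being split between loop entry and loop tail. The segment count itself is
a ceiling division; here is a standalone rendering of the arithmetic in
tcp_set_skb_tso_segs(), with plain integers standing in for the skb
fields:

    #include <assert.h>

    /* factor = ceil(len / mss), written the way the kernel writes it. */
    static unsigned int tso_factor(unsigned int len, unsigned int mss_now)
    {
            if (len <= mss_now)
                    return 1;       /* short skb: skip the divide entirely */
            return (len + mss_now - 1) / mss_now;
    }

    int main(void)
    {
            assert(tso_factor(1000, 1460) == 1);    /* one segment */
            assert(tso_factor(4000, 1460) == 3);    /* 2.74 rounds up to 3 */
            return 0;
    }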
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 7c24e64b443f..dc4d07357e3a 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -628,7 +628,7 @@ back_from_confirm:
 		/* ... which is an evident application bug. --ANK */
 		release_sock(sk);
 
-		NETDEBUG(if (net_ratelimit()) printk(KERN_DEBUG "udp cork app bug 2\n"));
+		LIMIT_NETDEBUG(printk(KERN_DEBUG "udp cork app bug 2\n"));
 		err = -EINVAL;
 		goto out;
 	}
@@ -693,7 +693,7 @@ static int udp_sendpage(struct sock *sk, struct page *page, int offset,
 	if (unlikely(!up->pending)) {
 		release_sock(sk);
 
-		NETDEBUG(if (net_ratelimit()) printk(KERN_DEBUG "udp cork app bug 3\n"));
+		LIMIT_NETDEBUG(printk(KERN_DEBUG "udp cork app bug 3\n"));
 		return -EINVAL;
 	}
 
@@ -1102,7 +1102,7 @@ static int udp_checksum_init(struct sk_buff *skb, struct udphdr *uh,
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 		if (!udp_check(uh, ulen, saddr, daddr, skb->csum))
 			return 0;
-		NETDEBUG(if (net_ratelimit()) printk(KERN_DEBUG "udp v4 hw csum failure.\n"));
+		LIMIT_NETDEBUG(printk(KERN_DEBUG "udp v4 hw csum failure.\n"));
 		skb->ip_summed = CHECKSUM_NONE;
 	}
 	if (skb->ip_summed != CHECKSUM_UNNECESSARY)
@@ -1181,14 +1181,13 @@ int udp_rcv(struct sk_buff *skb)
 	return(0);
 
 short_packet:
-	NETDEBUG(if (net_ratelimit())
-		 printk(KERN_DEBUG "UDP: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n",
-			NIPQUAD(saddr),
-			ntohs(uh->source),
-			ulen,
-			len,
-			NIPQUAD(daddr),
-			ntohs(uh->dest)));
+	LIMIT_NETDEBUG(printk(KERN_DEBUG "UDP: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n",
+		       NIPQUAD(saddr),
+		       ntohs(uh->source),
+		       ulen,
+		       len,
+		       NIPQUAD(daddr),
+		       ntohs(uh->dest)));
 no_header:
 	UDP_INC_STATS_BH(UDP_MIB_INERRORS);
 	kfree_skb(skb);
@@ -1199,13 +1198,12 @@ csum_error:
 	 * RFC1122: OK. Discards the bad packet silently (as far as
 	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
 	 */
-	NETDEBUG(if (net_ratelimit())
-		 printk(KERN_DEBUG "UDP: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n",
-			NIPQUAD(saddr),
-			ntohs(uh->source),
-			NIPQUAD(daddr),
-			ntohs(uh->dest),
-			ulen));
+	LIMIT_NETDEBUG(printk(KERN_DEBUG "UDP: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n",
+		       NIPQUAD(saddr),
+		       ntohs(uh->source),
+		       NIPQUAD(daddr),
+		       ntohs(uh->dest),
+		       ulen));
 drop:
 	UDP_INC_STATS_BH(UDP_MIB_INERRORS);
 	kfree_skb(skb);