author    Jeff Garzik <jgarzik@pobox.com>  2005-08-14 23:10:26 -0400
committer Jeff Garzik <jgarzik@pobox.com>  2005-08-14 23:10:26 -0400
commit    20445cc9159089d9d6b88d7864578efb10eb6590 (patch)
tree      fbbb27fd8baa82c77ebc4e020e8ee9ddb57e3ae2 /net
parent    4339d328631aa815fe2181b9164b3690ca2db4da (diff)
parent    4c0e176dd5e4c44dd60f398518f75eedbe1a65f3 (diff)
Merge /spare/repo/netdev-2.6 branch 'ieee80211'
Diffstat (limited to 'net')
-rw-r--r--   net/bluetooth/hci_core.c                       2
-rw-r--r--   net/bluetooth/hci_event.c                      4
-rw-r--r--   net/bluetooth/lib.c                           25
-rw-r--r--   net/bluetooth/rfcomm/core.c                    4
-rw-r--r--   net/compat.c                                   9
-rw-r--r--   net/core/dev.c                                 9
-rw-r--r--   net/core/dst.c                                15
-rw-r--r--   net/core/netpoll.c                            63
-rw-r--r--   net/decnet/af_decnet.c                        11
-rw-r--r--   net/ipv4/fib_semantics.c                       9
-rw-r--r--   net/ipv4/icmp.c                                3
-rw-r--r--   net/ipv4/ip_fragment.c                         8
-rw-r--r--   net/ipv4/ip_gre.c                             21
-rw-r--r--   net/ipv4/ip_sockglue.c                         3
-rw-r--r--   net/ipv4/ipip.c                               20
-rw-r--r--   net/ipv4/ipmr.c                                6
-rw-r--r--   net/ipv4/netfilter/ip_conntrack_core.c         5
-rw-r--r--   net/ipv4/netfilter/ip_nat_standalone.c         4
-rw-r--r--   net/ipv4/tcp_ipv4.c                           14
-rw-r--r--   net/ipv4/tcp_output.c                        100
-rw-r--r--   net/ipv4/udp.c                                34
-rw-r--r--   net/ipv6/ipv6_sockglue.c                       3
-rw-r--r--   net/ipv6/sit.c                                21
-rw-r--r--   net/sunrpc/svcsock.c                           2
24 files changed, 211 insertions(+), 184 deletions(-)
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index fb5524365bc2..ffa26c10bfe8 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -299,7 +299,6 @@ struct hci_dev *hci_dev_get(int index)
         read_unlock(&hci_dev_list_lock);
         return hdev;
 }
-EXPORT_SYMBOL(hci_dev_get);
 
 /* ---- Inquiry support ---- */
 static void inquiry_cache_flush(struct hci_dev *hdev)
@@ -1042,7 +1041,6 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *p
 
         return 0;
 }
-EXPORT_SYMBOL(hci_send_cmd);
 
 /* Get data from the previously sent command */
 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index c4b592b4ef10..46367bd129c3 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1035,9 +1035,11 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
         ev->type = type;
         memcpy(ev->data, data, dlen);
 
+        bt_cb(skb)->incoming = 1;
+        do_gettimeofday(&skb->stamp);
+
         skb->pkt_type = HCI_EVENT_PKT;
         skb->dev = (void *) hdev;
         hci_send_to_sock(hdev, skb);
         kfree_skb(skb);
 }
-EXPORT_SYMBOL(hci_si_event);
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index 9efb0a093612..ee6a66979913 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -34,31 +34,6 @@
 
 #include <net/bluetooth/bluetooth.h>
 
-void bt_dump(char *pref, __u8 *buf, int count)
-{
-        char *ptr;
-        char line[100];
-        unsigned int i;
-
-        printk(KERN_INFO "%s: dump, len %d\n", pref, count);
-
-        ptr = line;
-        *ptr = 0;
-        for (i = 0; i < count; i++) {
-                ptr += sprintf(ptr, " %2.2X", buf[i]);
-
-                if (i && !((i + 1) % 20)) {
-                        printk(KERN_INFO "%s:%s\n", pref, line);
-                        ptr = line;
-                        *ptr = 0;
-                }
-        }
-
-        if (line[0])
-                printk(KERN_INFO "%s:%s\n", pref, line);
-}
-EXPORT_SYMBOL(bt_dump);
-
 void baswap(bdaddr_t *dst, bdaddr_t *src)
 {
         unsigned char *d = (unsigned char *) dst;
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index e9e6fda66f1a..27bf5047cd33 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -389,8 +389,6 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
                 rfcomm_dlc_unlock(d);
 
                 skb_queue_purge(&d->tx_queue);
-                rfcomm_session_put(s);
-
                 rfcomm_dlc_unlink(d);
         }
 
@@ -600,8 +598,6 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst
                 goto failed;
         }
 
-        rfcomm_session_hold(s);
-
         s->initiator = 1;
 
         bacpy(&addr.l2_bdaddr, dst);
diff --git a/net/compat.c b/net/compat.c
index be5d936dc423..d99ab9695893 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -91,20 +91,11 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
         } else
                 kern_msg->msg_name = NULL;
 
-        if(kern_msg->msg_iovlen > UIO_FASTIOV) {
-                kern_iov = kmalloc(kern_msg->msg_iovlen * sizeof(struct iovec),
-                                   GFP_KERNEL);
-                if(!kern_iov)
-                        return -ENOMEM;
-        }
-
         tot_len = iov_from_user_compat_to_kern(kern_iov,
                         (struct compat_iovec __user *)kern_msg->msg_iov,
                         kern_msg->msg_iovlen);
         if(tot_len >= 0)
                 kern_msg->msg_iov = kern_iov;
-        else if(kern_msg->msg_iovlen > UIO_FASTIOV)
-                kfree(kern_iov);
 
         return tot_len;
 }
diff --git a/net/core/dev.c b/net/core/dev.c
index 52a3bf7ae177..faf59b02c4bf 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1696,7 +1696,8 @@ static void net_rx_action(struct softirq_action *h)
         struct softnet_data *queue = &__get_cpu_var(softnet_data);
         unsigned long start_time = jiffies;
         int budget = netdev_budget;
+        void *have;
 
         local_irq_disable();
 
         while (!list_empty(&queue->poll_list)) {
@@ -1709,10 +1710,10 @@ static void net_rx_action(struct softirq_action *h)
 
                 dev = list_entry(queue->poll_list.next,
                                  struct net_device, poll_list);
-                netpoll_poll_lock(dev);
+                have = netpoll_poll_lock(dev);
 
                 if (dev->quota <= 0 || dev->poll(dev, &budget)) {
-                        netpoll_poll_unlock(dev);
+                        netpoll_poll_unlock(have);
                         local_irq_disable();
                         list_del(&dev->poll_list);
                         list_add_tail(&dev->poll_list, &queue->poll_list);
@@ -1721,7 +1722,7 @@ static void net_rx_action(struct softirq_action *h)
                         else
                                 dev->quota = dev->weight;
                 } else {
-                        netpoll_poll_unlock(dev);
+                        netpoll_poll_unlock(have);
                         dev_put(dev);
                         local_irq_disable();
                 }
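Note on the net/core/dev.c change: netpoll_poll_lock() now returns an opaque token that the caller hands back to netpoll_poll_unlock(), so the unlock side no longer needs to re-derive state from the device. Below is a minimal userspace sketch of that lock-token pattern, not kernel code; fake_dev, fake_poll_lock() and fake_poll_unlock() are illustrative stand-ins, with a pthread mutex standing in for the kernel poll lock.

/*
 * Sketch of the lock-token pattern: the lock routine returns an opaque
 * handle that must be passed back to the unlock routine.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_dev {
        pthread_mutex_t poll_lock;
        int poll_owner;                 /* -1 when unowned */
};

/* Returns the token for fake_poll_unlock(); NULL if no lock was taken. */
static void *fake_poll_lock(struct fake_dev *dev)
{
        if (!dev)
                return NULL;            /* nothing locked, nothing to unlock */
        pthread_mutex_lock(&dev->poll_lock);
        dev->poll_owner = 0;            /* pretend CPU 0 owns the poll */
        return dev;                     /* the opaque token */
}

static void fake_poll_unlock(void *token)
{
        struct fake_dev *dev = token;

        if (!dev)
                return;                 /* lock was never taken */
        dev->poll_owner = -1;
        pthread_mutex_unlock(&dev->poll_lock);
}

int main(void)
{
        struct fake_dev dev = { PTHREAD_MUTEX_INITIALIZER, -1 };
        void *have = fake_poll_lock(&dev);  /* mirrors have = netpoll_poll_lock(dev) */

        printf("owner while locked: %d\n", dev.poll_owner);
        fake_poll_unlock(have);             /* mirrors netpoll_poll_unlock(have) */
        printf("owner after unlock: %d\n", dev.poll_owner);
        return 0;
}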
diff --git a/net/core/dst.c b/net/core/dst.c
index fc434ade5270..334790da9f16 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -45,6 +45,7 @@ static struct timer_list dst_gc_timer =
 static void dst_run_gc(unsigned long dummy)
 {
         int delayed = 0;
+        int work_performed;
         struct dst_entry * dst, **dstp;
 
         if (!spin_trylock(&dst_lock)) {
@@ -52,9 +53,9 @@ static void dst_run_gc(unsigned long dummy)
                 return;
         }
 
-
         del_timer(&dst_gc_timer);
         dstp = &dst_garbage_list;
+        work_performed = 0;
         while ((dst = *dstp) != NULL) {
                 if (atomic_read(&dst->__refcnt)) {
                         dstp = &dst->next;
@@ -62,6 +63,7 @@ static void dst_run_gc(unsigned long dummy)
                         continue;
                 }
                 *dstp = dst->next;
+                work_performed = 1;
 
                 dst = dst_destroy(dst);
                 if (dst) {
@@ -86,9 +88,14 @@ static void dst_run_gc(unsigned long dummy)
                 dst_gc_timer_inc = DST_GC_MAX;
                 goto out;
         }
-        if ((dst_gc_timer_expires += dst_gc_timer_inc) > DST_GC_MAX)
-                dst_gc_timer_expires = DST_GC_MAX;
-        dst_gc_timer_inc += DST_GC_INC;
+        if (!work_performed) {
+                if ((dst_gc_timer_expires += dst_gc_timer_inc) > DST_GC_MAX)
+                        dst_gc_timer_expires = DST_GC_MAX;
+                dst_gc_timer_inc += DST_GC_INC;
+        } else {
+                dst_gc_timer_inc = DST_GC_INC;
+                dst_gc_timer_expires = DST_GC_MIN;
+        }
         dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
 #if RT_CACHE_DEBUG >= 2
         printk("dst_total: %d/%d %ld\n",
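Note on the net/core/dst.c change: the new work_performed flag makes the GC timer back off only when a pass freed nothing, and snap back to the minimum interval as soon as a pass does real work. A standalone sketch of that adaptive backoff follows; GC_MIN/GC_INC/GC_MAX and gc_pass() are illustrative stand-ins for the DST_GC_* constants and dst_run_gc().

/*
 * Sketch of adaptive GC backoff: grow the interval on idle passes,
 * reset it to the minimum as soon as a pass frees something.
 */
#include <stdio.h>

#define GC_MIN  1
#define GC_INC  2
#define GC_MAX 30

static int gc_expires = GC_MIN;
static int gc_inc     = GC_INC;

static void gc_pass(int work_performed)
{
        if (!work_performed) {
                if ((gc_expires += gc_inc) > GC_MAX)
                        gc_expires = GC_MAX;
                gc_inc += GC_INC;
        } else {
                gc_inc = GC_INC;
                gc_expires = GC_MIN;
        }
}

int main(void)
{
        int i;

        /* three idle passes grow the interval, one busy pass resets it */
        for (i = 0; i < 3; i++) {
                gc_pass(0);
                printf("idle pass %d: next run in %d ticks\n", i + 1, gc_expires);
        }
        gc_pass(1);
        printf("busy pass:   next run in %d ticks\n", gc_expires);
        return 0;
}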
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c327c9edadc5..a1a9a7abff50 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -33,6 +33,7 @@
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
+#define MAX_RETRIES 20000
 
 static DEFINE_SPINLOCK(skb_list_lock);
 static int nr_skbs;
@@ -248,14 +249,14 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
         int status;
         struct netpoll_info *npinfo;
 
-repeat:
-        if(!np || !np->dev || !netif_running(np->dev)) {
+        if (!np || !np->dev || !netif_running(np->dev)) {
                 __kfree_skb(skb);
                 return;
         }
 
-        /* avoid recursion */
         npinfo = np->dev->npinfo;
+
+        /* avoid recursion */
         if (npinfo->poll_owner == smp_processor_id() ||
             np->dev->xmit_lock_owner == smp_processor_id()) {
                 if (np->drop)
@@ -265,30 +266,37 @@ repeat:
                 return;
         }
 
-        spin_lock(&np->dev->xmit_lock);
-        np->dev->xmit_lock_owner = smp_processor_id();
+        do {
+                npinfo->tries--;
+                spin_lock(&np->dev->xmit_lock);
+                np->dev->xmit_lock_owner = smp_processor_id();
 
                 /*
                  * network drivers do not expect to be called if the queue is
                  * stopped.
                  */
                 if (netif_queue_stopped(np->dev)) {
+                        np->dev->xmit_lock_owner = -1;
+                        spin_unlock(&np->dev->xmit_lock);
+                        netpoll_poll(np);
+                        udelay(50);
+                        continue;
+                }
+
+                status = np->dev->hard_start_xmit(skb, np->dev);
                 np->dev->xmit_lock_owner = -1;
                 spin_unlock(&np->dev->xmit_lock);
 
-                netpoll_poll(np);
-                goto repeat;
-        }
-
-        status = np->dev->hard_start_xmit(skb, np->dev);
-        np->dev->xmit_lock_owner = -1;
-        spin_unlock(&np->dev->xmit_lock);
-
+                /* success */
+                if(!status) {
+                        npinfo->tries = MAX_RETRIES; /* reset */
+                        return;
+                }
 
                 /* transmit busy */
-        if(status) {
                 netpoll_poll(np);
-                goto repeat;
-        }
+                udelay(50);
+        } while (npinfo->tries > 0);
 }
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
@@ -349,15 +357,11 @@ static void arp_reply(struct sk_buff *skb)
         unsigned char *arp_ptr;
         int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
         u32 sip, tip;
-        unsigned long flags;
         struct sk_buff *send_skb;
         struct netpoll *np = NULL;
 
-        spin_lock_irqsave(&npinfo->rx_lock, flags);
         if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
                 np = npinfo->rx_np;
-        spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-
         if (!np)
                 return;
 
@@ -639,9 +643,11 @@ int netpoll_setup(struct netpoll *np)
                 if (!npinfo)
                         goto release;
 
+                npinfo->rx_flags = 0;
                 npinfo->rx_np = NULL;
                 npinfo->poll_lock = SPIN_LOCK_UNLOCKED;
                 npinfo->poll_owner = -1;
+                npinfo->tries = MAX_RETRIES;
                 npinfo->rx_lock = SPIN_LOCK_UNLOCKED;
         } else
                 npinfo = ndev->npinfo;
@@ -718,9 +724,16 @@ int netpoll_setup(struct netpoll *np)
                 npinfo->rx_np = np;
                 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
         }
+
+        /* fill up the skb queue */
+        refill_skbs();
+
         /* last thing to do is link it to the net device structure */
         ndev->npinfo = npinfo;
 
+        /* avoid racing with NAPI reading npinfo */
+        synchronize_rcu();
+
         return 0;
 
  release:
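Note on the net/core/netpoll.c change: the unbounded 'goto repeat' in netpoll_send_skb() becomes a do/while loop with a shared retry budget (npinfo->tries), reset to MAX_RETRIES on a successful transmit, so a wedged device can no longer spin the sender forever. A userspace sketch of that bounded-retry loop follows; try_xmit() and send_packet() are illustrative stand-ins for dev->hard_start_xmit() and netpoll_send_skb().

/*
 * Sketch of a bounded-retry transmit loop: decrement a budget each
 * attempt, reset it on success, give up when it hits zero.
 */
#include <stdio.h>

#define MAX_RETRIES 20000

static int tries = MAX_RETRIES;

/* Pretend the queue is busy for the first few attempts. */
static int try_xmit(int *busy_for)
{
        if (*busy_for > 0) {
                (*busy_for)--;
                return -1;      /* transmit busy */
        }
        return 0;               /* success */
}

static void send_packet(int busy_for)
{
        do {
                tries--;
                if (try_xmit(&busy_for) == 0) {
                        tries = MAX_RETRIES;    /* success: reset the budget */
                        printf("sent (budget reset to %d)\n", tries);
                        return;
                }
                /* busy: the kernel polls the device and udelay(50)s here */
        } while (tries > 0);

        printf("gave up, packet dropped\n");
}

int main(void)
{
        send_packet(3);         /* succeeds after three busy attempts */
        return 0;
}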
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 96a02800cd28..acdd18e6adb2 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1876,15 +1876,6 @@ static inline unsigned int dn_current_mss(struct sock *sk, int flags)
         return mss_now;
 }
 
-static int dn_error(struct sock *sk, int flags, int err)
-{
-        if (err == -EPIPE)
-                err = sock_error(sk) ? : -EPIPE;
-        if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
-                send_sig(SIGPIPE, current, 0);
-        return err;
-}
-
 static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
                       struct msghdr *msg, size_t size)
 {
@@ -2045,7 +2036,7 @@ out:
         return sent ? sent : err;
 
 out_err:
-        err = dn_error(sk, flags, err);
+        err = sk_stream_error(sk, flags, err);
         release_sock(sk);
         return err;
 }
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index c886b28ba9f5..e278cb9d0075 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -593,10 +593,13 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
                           struct hlist_head *new_laddrhash,
                           unsigned int new_size)
 {
+        struct hlist_head *old_info_hash, *old_laddrhash;
         unsigned int old_size = fib_hash_size;
-        unsigned int i;
+        unsigned int i, bytes;
 
         write_lock(&fib_info_lock);
+        old_info_hash = fib_info_hash;
+        old_laddrhash = fib_info_laddrhash;
         fib_hash_size = new_size;
 
         for (i = 0; i < old_size; i++) {
@@ -636,6 +639,10 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
         fib_info_laddrhash = new_laddrhash;
 
         write_unlock(&fib_info_lock);
+
+        bytes = old_size * sizeof(struct hlist_head *);
+        fib_hash_free(old_info_hash, bytes);
+        fib_hash_free(old_laddrhash, bytes);
 }
 
 struct fib_info *
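Note on the net/ipv4/fib_semantics.c change: fib_hash_move() now snapshots the old hash arrays while holding the write lock, publishes the new ones, and frees the old arrays only after the lock is dropped, so the old tables are reclaimed without freeing memory under the lock. A userspace sketch of that resize pattern follows; table_lock, hash_move() and the toy int tables are illustrative stand-ins, with a pthread rwlock in place of fib_info_lock and rehashing of entries omitted.

/*
 * Sketch of resize-then-free-after-unlock: capture the old table under
 * the write lock, publish the new one, free the old one outside the lock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static int *hash_table;
static unsigned int hash_size;

static void hash_move(int *new_table, unsigned int new_size)
{
        int *old_table;

        pthread_rwlock_wrlock(&table_lock);
        old_table = hash_table;         /* snapshot while writers are excluded */
        hash_table = new_table;         /* ... rehashing of entries omitted ... */
        hash_size = new_size;
        pthread_rwlock_unlock(&table_lock);

        /* free outside the lock: readers already see the new table */
        free(old_table);
}

int main(void)
{
        hash_table = calloc(16, sizeof(int));
        hash_size = 16;

        hash_move(calloc(32, sizeof(int)), 32);
        printf("resized to %u buckets\n", hash_size);

        free(hash_table);
        return 0;
}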
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 279f57abfecb..3d78464f64ea 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -936,8 +936,7 @@ int icmp_rcv(struct sk_buff *skb)
         case CHECKSUM_HW:
                 if (!(u16)csum_fold(skb->csum))
                         break;
-                NETDEBUG(if (net_ratelimit())
-                                printk(KERN_DEBUG "icmp v4 hw csum failure\n"));
+                LIMIT_NETDEBUG(printk(KERN_DEBUG "icmp v4 hw csum failure\n"));
         case CHECKSUM_NONE:
                 if ((u16)csum_fold(skb_checksum(skb, 0, skb->len, 0)))
                         goto error;
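Note on the LIMIT_NETDEBUG conversions (here and in ip_fragment.c, tcp_ipv4.c and udp.c below): the repeated NETDEBUG(if (net_ratelimit()) ...) idiom is collapsed into a single macro that wraps the debug statement in the rate-limit check. A userspace sketch of what such a macro does follows; toy_ratelimit() and LIMIT_DEBUG() are illustrative stand-ins for net_ratelimit() and LIMIT_NETDEBUG, using a toy once-per-second token bucket.

/*
 * Sketch of a rate-limited debug macro: the statement only fires when
 * the limiter allows it.
 */
#include <stdio.h>
#include <time.h>

static int toy_ratelimit(void)
{
        static time_t last;
        static int burst;
        time_t now = time(NULL);

        if (now != last) {              /* refill once per second */
                last = now;
                burst = 0;
        }
        return burst++ < 5;             /* at most 5 messages per second */
}

#define LIMIT_DEBUG(stmt) do { if (toy_ratelimit()) stmt; } while (0)

int main(void)
{
        int i;

        for (i = 0; i < 20; i++)
                LIMIT_DEBUG(printf("debug message %d\n", i)); /* only ~5 print */
        return 0;
}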
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 7f68e27eb4ea..eb377ae15305 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -377,7 +377,7 @@ static struct ipq *ip_frag_create(unsigned hash, struct iphdr *iph, u32 user)
         return ip_frag_intern(hash, qp);
 
 out_nomem:
-        NETDEBUG(if (net_ratelimit()) printk(KERN_ERR "ip_frag_create: no memory left !\n"));
+        LIMIT_NETDEBUG(printk(KERN_ERR "ip_frag_create: no memory left !\n"));
         return NULL;
 }
 
@@ -625,10 +625,8 @@ static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev)
         return head;
 
 out_nomem:
-        NETDEBUG(if (net_ratelimit())
-                 printk(KERN_ERR
-                        "IP: queue_glue: no memory for gluing queue %p\n",
-                        qp));
+        LIMIT_NETDEBUG(printk(KERN_ERR "IP: queue_glue: no memory for gluing "
+                              "queue %p\n", qp));
         goto out_fail;
 out_oversize:
         if (net_ratelimit())
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 884835522224..f0d5740d7e22 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -290,7 +290,6 @@ static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int
 
         dev_hold(dev);
         ipgre_tunnel_link(nt);
-        /* Do not decrement MOD_USE_COUNT here. */
         return nt;
 
 failed:
@@ -1277,12 +1276,28 @@ err1:
         goto out;
 }
 
-static void ipgre_fini(void)
+static void __exit ipgre_destroy_tunnels(void)
+{
+        int prio;
+
+        for (prio = 0; prio < 4; prio++) {
+                int h;
+                for (h = 0; h < HASH_SIZE; h++) {
+                        struct ip_tunnel *t;
+                        while ((t = tunnels[prio][h]) != NULL)
+                                unregister_netdevice(t->dev);
+                }
+        }
+}
+
+static void __exit ipgre_fini(void)
 {
         if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
                 printk(KERN_INFO "ipgre close: can't remove protocol\n");
 
-        unregister_netdev(ipgre_fb_tunnel_dev);
+        rtnl_lock();
+        ipgre_destroy_tunnels();
+        rtnl_unlock();
 }
 
 module_init(ipgre_init);
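Note on the tunnel teardown loops (the same shape reappears in ipip.c and sit.c below): module exit now walks every priority's hash bucket under rtnl_lock() and pops tunnels until each chain is empty; in the kernel this terminates because unregister_netdevice() unlinks the tunnel from the chain as a side effect. A userspace sketch of draining such a hash of singly-linked lists follows; struct tunnel, destroy_one() and the explicit unlink are illustrative stand-ins for the kernel structures and for unregister_netdevice().

/*
 * Sketch of draining a hash-of-lists: pop and destroy the head of each
 * chain until every bucket is empty.
 */
#include <stdio.h>
#include <stdlib.h>

#define HASH_SIZE 16

struct tunnel {
        struct tunnel *next;
        int id;
};

static struct tunnel *tunnels[4][HASH_SIZE];

/* Stand-in for unregister_netdevice(): unlink the head and free it. */
static void destroy_one(struct tunnel **head)
{
        struct tunnel *t = *head;

        *head = t->next;
        printf("destroyed tunnel %d\n", t->id);
        free(t);
}

static void destroy_tunnels(void)
{
        int prio;

        for (prio = 0; prio < 4; prio++) {
                int h;
                for (h = 0; h < HASH_SIZE; h++) {
                        while (tunnels[prio][h] != NULL)
                                destroy_one(&tunnels[prio][h]);
                }
        }
}

int main(void)
{
        struct tunnel *t = malloc(sizeof(*t));

        t->next = NULL;
        t->id = 42;
        tunnels[1][3] = t;

        destroy_tunnels();
        return 0;
}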
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index fc7c481d0d79..ff4bd067b397 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -848,6 +848,9 @@ mc_msf_out:
 
         case IP_IPSEC_POLICY:
         case IP_XFRM_POLICY:
+                err = -EPERM;
+                if (!capable(CAP_NET_ADMIN))
+                        break;
                 err = xfrm_user_policy(sk, optname, optval, optlen);
                 break;
 
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index c3947cd566b7..c05c1df0bb04 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -255,7 +255,6 @@ static struct ip_tunnel * ipip_tunnel_locate(struct ip_tunnel_parm *parms, int c
 
         dev_hold(dev);
         ipip_tunnel_link(nt);
-        /* Do not decrement MOD_USE_COUNT here. */
         return nt;
 
 failed:
@@ -920,12 +919,29 @@ static int __init ipip_init(void)
         goto out;
 }
 
+static void __exit ipip_destroy_tunnels(void)
+{
+        int prio;
+
+        for (prio = 1; prio < 4; prio++) {
+                int h;
+                for (h = 0; h < HASH_SIZE; h++) {
+                        struct ip_tunnel *t;
+                        while ((t = tunnels[prio][h]) != NULL)
+                                unregister_netdevice(t->dev);
+                }
+        }
+}
+
 static void __exit ipip_fini(void)
 {
         if (ipip_unregister() < 0)
                 printk(KERN_INFO "ipip close: can't deregister tunnel\n");
 
-        unregister_netdev(ipip_fb_tunnel_dev);
+        rtnl_lock();
+        ipip_destroy_tunnels();
+        unregister_netdevice(ipip_fb_tunnel_dev);
+        rtnl_unlock();
 }
 
 module_init(ipip_init);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 7833d920bdba..dc806b578427 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -362,7 +362,7 @@ out:
 
 /* Fill oifs list. It is called under write locked mrt_lock. */
 
-static void ipmr_update_threshoulds(struct mfc_cache *cache, unsigned char *ttls)
+static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
 {
         int vifi;
 
@@ -727,7 +727,7 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
         if (c != NULL) {
                 write_lock_bh(&mrt_lock);
                 c->mfc_parent = mfc->mfcc_parent;
-                ipmr_update_threshoulds(c, mfc->mfcc_ttls);
+                ipmr_update_thresholds(c, mfc->mfcc_ttls);
                 if (!mrtsock)
                         c->mfc_flags |= MFC_STATIC;
                 write_unlock_bh(&mrt_lock);
@@ -744,7 +744,7 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
         c->mfc_origin=mfc->mfcc_origin.s_addr;
         c->mfc_mcastgrp=mfc->mfcc_mcastgrp.s_addr;
         c->mfc_parent=mfc->mfcc_parent;
-        ipmr_update_threshoulds(c, mfc->mfcc_ttls);
+        ipmr_update_thresholds(c, mfc->mfcc_ttls);
         if (!mrtsock)
                 c->mfc_flags |= MFC_STATIC;
 
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 86f04e41dd8e..a7f0c821a9b2 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -513,6 +513,11 @@ init_conntrack(const struct ip_conntrack_tuple *tuple,
 #ifdef CONFIG_IP_NF_CONNTRACK_MARK
                 conntrack->mark = exp->master->mark;
 #endif
+#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \
+    defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE)
+                /* this is ugly, but there is no other place where to put it */
+                conntrack->nat.masq_index = exp->master->nat.masq_index;
+#endif
                 nf_conntrack_get(&conntrack->master->ct_general);
                 CONNTRACK_STAT_INC(expect_new);
         } else {
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
index bc59d0d6e89e..91d5ea1dbbc9 100644
--- a/net/ipv4/netfilter/ip_nat_standalone.c
+++ b/net/ipv4/netfilter/ip_nat_standalone.c
@@ -102,6 +102,10 @@ ip_nat_fn(unsigned int hooknum,
                 return NF_ACCEPT;
         }
 
+        /* Don't try to NAT if this packet is not conntracked */
+        if (ct == &ip_conntrack_untracked)
+                return NF_ACCEPT;
+
         switch (ctinfo) {
         case IP_CT_RELATED:
         case IP_CT_RELATED+IP_CT_IS_REPLY:
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 62f62bb05c2a..5d91213d34c0 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1494,12 +1494,11 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                          * to destinations, already remembered
                          * to the moment of synflood.
                          */
-                        NETDEBUG(if (net_ratelimit()) \
-                                        printk(KERN_DEBUG "TCP: drop open "
-                                                          "request from %u.%u."
-                                                          "%u.%u/%u\n", \
-                                               NIPQUAD(saddr),
-                                               ntohs(skb->h.th->source)));
+                        LIMIT_NETDEBUG(printk(KERN_DEBUG "TCP: drop open "
+                                              "request from %u.%u."
+                                              "%u.%u/%u\n",
+                                              NIPQUAD(saddr),
+                                              ntohs(skb->h.th->source)));
                         dst_release(dst);
                         goto drop_and_free;
                 }
@@ -1627,8 +1626,7 @@ static int tcp_v4_checksum_init(struct sk_buff *skb)
                                   skb->nh.iph->daddr, skb->csum))
                         return 0;
 
-                NETDEBUG(if (net_ratelimit())
-                                printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
+                LIMIT_NETDEBUG(printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
                 skb->ip_summed = CHECKSUM_NONE;
         }
         if (skb->len <= 76) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e3f8ea1bfa9c..3ed6fc15815b 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -403,11 +403,9 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
         sk->sk_send_head = skb;
 }
 
-static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb)
+static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
 {
-        struct tcp_sock *tp = tcp_sk(sk);
-
-        if (skb->len <= tp->mss_cache ||
+        if (skb->len <= mss_now ||
             !(sk->sk_route_caps & NETIF_F_TSO)) {
                 /* Avoid the costly divide in the normal
                  * non-TSO case.
@@ -417,10 +415,10 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb)
         } else {
                 unsigned int factor;
 
-                factor = skb->len + (tp->mss_cache - 1);
-                factor /= tp->mss_cache;
+                factor = skb->len + (mss_now - 1);
+                factor /= mss_now;
                 skb_shinfo(skb)->tso_segs = factor;
-                skb_shinfo(skb)->tso_size = tp->mss_cache;
+                skb_shinfo(skb)->tso_size = mss_now;
         }
 }
 
@@ -429,7 +427,7 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb)
  * packet to the list. This won't be called frequently, I hope.
  * Remember, these are still headerless SKBs at this point.
  */
-static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len)
+static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct sk_buff *buff;
@@ -492,8 +490,8 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len)
         }
 
         /* Fix up tso_factor for both original and new SKB. */
-        tcp_set_skb_tso_segs(sk, skb);
-        tcp_set_skb_tso_segs(sk, buff);
+        tcp_set_skb_tso_segs(sk, skb, mss_now);
+        tcp_set_skb_tso_segs(sk, buff, mss_now);
 
         if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
                 tp->lost_out += tcp_skb_pcount(skb);
@@ -569,7 +567,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
          * factor and mss.
          */
         if (tcp_skb_pcount(skb) > 1)
-                tcp_set_skb_tso_segs(sk, skb);
+                tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));
 
         return 0;
 }
@@ -734,12 +732,14 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *sk
 /* This must be invoked the first time we consider transmitting
  * SKB onto the wire.
  */
-static inline int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb)
+static inline int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
 {
         int tso_segs = tcp_skb_pcount(skb);
 
-        if (!tso_segs) {
-                tcp_set_skb_tso_segs(sk, skb);
+        if (!tso_segs ||
+            (tso_segs > 1 &&
+             skb_shinfo(skb)->tso_size != mss_now)) {
+                tcp_set_skb_tso_segs(sk, skb, mss_now);
                 tso_segs = tcp_skb_pcount(skb);
         }
         return tso_segs;
@@ -817,7 +817,7 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
         struct tcp_sock *tp = tcp_sk(sk);
         unsigned int cwnd_quota;
 
-        tcp_init_tso_segs(sk, skb);
+        tcp_init_tso_segs(sk, skb, cur_mss);
 
         if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
                 return 0;
@@ -854,7 +854,7 @@ int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
  * know that all the data is in scatter-gather pages, and that the
  * packet has never been sent out before (and thus is not cloned).
  */
-static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len)
+static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now)
 {
         struct sk_buff *buff;
         int nlen = skb->len - len;
@@ -887,8 +887,8 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len)
         skb_split(skb, buff, len);
 
         /* Fix up tso_factor for both original and new SKB. */
-        tcp_set_skb_tso_segs(sk, skb);
-        tcp_set_skb_tso_segs(sk, buff);
+        tcp_set_skb_tso_segs(sk, skb, mss_now);
+        tcp_set_skb_tso_segs(sk, buff, mss_now);
 
         /* Link BUFF into the send queue. */
         skb_header_release(buff);
@@ -972,19 +972,18 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
         if (unlikely(sk->sk_state == TCP_CLOSE))
                 return 0;
 
-        skb = sk->sk_send_head;
-        if (unlikely(!skb))
-                return 0;
-
-        tso_segs = tcp_init_tso_segs(sk, skb);
-        cwnd_quota = tcp_cwnd_test(tp, skb);
-        if (unlikely(!cwnd_quota))
-                goto out;
-
         sent_pkts = 0;
-        while (likely(tcp_snd_wnd_test(tp, skb, mss_now))) {
+        while ((skb = sk->sk_send_head)) {
+                tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
                 BUG_ON(!tso_segs);
 
+                cwnd_quota = tcp_cwnd_test(tp, skb);
+                if (!cwnd_quota)
+                        break;
+
+                if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
+                        break;
+
                 if (tso_segs == 1) {
                         if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
                                                      (tcp_skb_is_last(sk, skb) ?
@@ -1006,11 +1005,11 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
                                 limit = skb->len - trim;
                         }
                         if (skb->len > limit) {
-                                if (tso_fragment(sk, skb, limit))
+                                if (tso_fragment(sk, skb, limit, mss_now))
                                         break;
                         }
                 } else if (unlikely(skb->len > mss_now)) {
-                        if (unlikely(tcp_fragment(sk, skb, mss_now)))
+                        if (unlikely(tcp_fragment(sk, skb, mss_now, mss_now)))
                                 break;
                 }
 
@@ -1026,27 +1025,12 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 
                 tcp_minshall_update(tp, mss_now, skb);
                 sent_pkts++;
-
-                /* Do not optimize this to use tso_segs. If we chopped up
-                 * the packet above, tso_segs will no longer be valid.
-                 */
-                cwnd_quota -= tcp_skb_pcount(skb);
-
-                BUG_ON(cwnd_quota < 0);
-                if (!cwnd_quota)
-                        break;
-
-                skb = sk->sk_send_head;
-                if (!skb)
-                        break;
-                tso_segs = tcp_init_tso_segs(sk, skb);
         }
 
         if (likely(sent_pkts)) {
                 tcp_cwnd_validate(sk, tp);
                 return 0;
         }
-out:
         return !tp->packets_out && sk->sk_send_head;
 }
 
@@ -1076,7 +1060,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
 
         BUG_ON(!skb || skb->len < mss_now);
 
-        tso_segs = tcp_init_tso_segs(sk, skb);
+        tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
         cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);
 
         if (likely(cwnd_quota)) {
@@ -1093,11 +1077,11 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
                                 limit = skb->len - trim;
                         }
                         if (skb->len > limit) {
-                                if (unlikely(tso_fragment(sk, skb, limit)))
+                                if (unlikely(tso_fragment(sk, skb, limit, mss_now)))
                                         return;
                         }
                 } else if (unlikely(skb->len > mss_now)) {
-                        if (unlikely(tcp_fragment(sk, skb, mss_now)))
+                        if (unlikely(tcp_fragment(sk, skb, mss_now, mss_now)))
                                 return;
                 }
 
@@ -1386,15 +1370,21 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 
         if (skb->len > cur_mss) {
                 int old_factor = tcp_skb_pcount(skb);
-                int new_factor;
+                int diff;
 
-                if (tcp_fragment(sk, skb, cur_mss))
+                if (tcp_fragment(sk, skb, cur_mss, cur_mss))
                         return -ENOMEM; /* We'll try again later. */
 
                 /* New SKB created, account for it. */
-                new_factor = tcp_skb_pcount(skb);
-                tp->packets_out -= old_factor - new_factor;
-                tp->packets_out += tcp_skb_pcount(skb->next);
+                diff = old_factor - tcp_skb_pcount(skb) -
+                       tcp_skb_pcount(skb->next);
+                tp->packets_out -= diff;
+
+                if (diff > 0) {
+                        tp->fackets_out -= diff;
+                        if ((int)tp->fackets_out < 0)
+                                tp->fackets_out = 0;
+                }
         }
 
         /* Collapse two adjacent packets if worthwhile and we can. */
@@ -1991,7 +1981,7 @@ int tcp_write_wakeup(struct sock *sk)
                     skb->len > mss) {
                         seg_size = min(seg_size, mss);
                         TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
-                        if (tcp_fragment(sk, skb, seg_size))
+                        if (tcp_fragment(sk, skb, seg_size, mss))
                                 return -1;
                         /* SWS override triggered forced fragmentation.
                          * Disable TSO, the connection is too sick. */
@@ -2000,7 +1990,7 @@ int tcp_write_wakeup(struct sock *sk)
                                 sk->sk_route_caps &= ~NETIF_F_TSO;
                         }
                 } else if (!tcp_skb_pcount(skb))
-                        tcp_set_skb_tso_segs(sk, skb);
+                        tcp_set_skb_tso_segs(sk, skb, mss);
 
                 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
                 TCP_SKB_CB(skb)->when = tcp_time_stamp;
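Note on the net/ipv4/tcp_output.c change: tcp_set_skb_tso_segs() now takes mss_now from the caller instead of reading tp->mss_cache, and computes the TSO factor as the ceiling of len/mss, skipping the divide in the common single-segment case. A standalone sketch of that arithmetic follows; tso_segs() is an illustrative stand-in, not the kernel function.

/*
 * Sketch of the TSO segment-count computation: ceil(len / mss), with
 * the divide avoided when the data fits in one segment.
 */
#include <assert.h>
#include <stdio.h>

static unsigned int tso_segs(unsigned int len, unsigned int mss_now)
{
        if (len <= mss_now)
                return 1;                       /* avoid the costly divide */
        return (len + mss_now - 1) / mss_now;   /* factor = ceil(len / mss) */
}

int main(void)
{
        assert(tso_segs(1000, 1460) == 1);      /* fits in one segment */
        assert(tso_segs(1460, 1460) == 1);      /* exactly one mss */
        assert(tso_segs(1461, 1460) == 2);      /* one byte over -> two segments */
        assert(tso_segs(65535, 1460) == 45);

        printf("65535 bytes at mss 1460 -> %u segments\n",
               tso_segs(65535, 1460));
        return 0;
}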
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 7c24e64b443f..dc4d07357e3a 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -628,7 +628,7 @@ back_from_confirm:
                 /* ... which is an evident application bug. --ANK */
                 release_sock(sk);
 
-                NETDEBUG(if (net_ratelimit()) printk(KERN_DEBUG "udp cork app bug 2\n"));
+                LIMIT_NETDEBUG(printk(KERN_DEBUG "udp cork app bug 2\n"));
                 err = -EINVAL;
                 goto out;
         }
@@ -693,7 +693,7 @@ static int udp_sendpage(struct sock *sk, struct page *page, int offset,
         if (unlikely(!up->pending)) {
                 release_sock(sk);
 
-                NETDEBUG(if (net_ratelimit()) printk(KERN_DEBUG "udp cork app bug 3\n"));
+                LIMIT_NETDEBUG(printk(KERN_DEBUG "udp cork app bug 3\n"));
                 return -EINVAL;
         }
 
@@ -1102,7 +1102,7 @@ static int udp_checksum_init(struct sk_buff *skb, struct udphdr *uh,
                 skb->ip_summed = CHECKSUM_UNNECESSARY;
                 if (!udp_check(uh, ulen, saddr, daddr, skb->csum))
                         return 0;
-                NETDEBUG(if (net_ratelimit()) printk(KERN_DEBUG "udp v4 hw csum failure.\n"));
+                LIMIT_NETDEBUG(printk(KERN_DEBUG "udp v4 hw csum failure.\n"));
                 skb->ip_summed = CHECKSUM_NONE;
         }
         if (skb->ip_summed != CHECKSUM_UNNECESSARY)
@@ -1181,14 +1181,13 @@ int udp_rcv(struct sk_buff *skb)
         return(0);
 
 short_packet:
-        NETDEBUG(if (net_ratelimit())
-                 printk(KERN_DEBUG "UDP: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n",
-                        NIPQUAD(saddr),
-                        ntohs(uh->source),
-                        ulen,
-                        len,
-                        NIPQUAD(daddr),
-                        ntohs(uh->dest)));
+        LIMIT_NETDEBUG(printk(KERN_DEBUG "UDP: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n",
+                              NIPQUAD(saddr),
+                              ntohs(uh->source),
+                              ulen,
+                              len,
+                              NIPQUAD(daddr),
+                              ntohs(uh->dest)));
 no_header:
         UDP_INC_STATS_BH(UDP_MIB_INERRORS);
         kfree_skb(skb);
@@ -1199,13 +1198,12 @@ csum_error:
          * RFC1122: OK. Discards the bad packet silently (as far as
          * the network is concerned, anyway) as per 4.1.3.4 (MUST).
          */
-        NETDEBUG(if (net_ratelimit())
-                 printk(KERN_DEBUG "UDP: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n",
-                        NIPQUAD(saddr),
-                        ntohs(uh->source),
-                        NIPQUAD(daddr),
-                        ntohs(uh->dest),
-                        ulen));
+        LIMIT_NETDEBUG(printk(KERN_DEBUG "UDP: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n",
+                              NIPQUAD(saddr),
+                              ntohs(uh->source),
+                              NIPQUAD(daddr),
+                              ntohs(uh->dest),
+                              ulen));
 drop:
         UDP_INC_STATS_BH(UDP_MIB_INERRORS);
         kfree_skb(skb);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index f3ef4c38d315..3bc144a79fa5 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -504,6 +504,9 @@ done:
504 break; 504 break;
505 case IPV6_IPSEC_POLICY: 505 case IPV6_IPSEC_POLICY:
506 case IPV6_XFRM_POLICY: 506 case IPV6_XFRM_POLICY:
507 retv = -EPERM;
508 if (!capable(CAP_NET_ADMIN))
509 break;
507 retv = xfrm_user_policy(sk, optname, optval, optlen); 510 retv = xfrm_user_policy(sk, optname, optval, optlen);
508 break; 511 break;
509 512
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index b788f55e139b..e553e5b80d6e 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -195,7 +195,6 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct ip_tunnel_parm *parms, int
         dev_hold(dev);
 
         ipip6_tunnel_link(nt);
-        /* Do not decrement MOD_USE_COUNT here. */
         return nt;
 
 failed:
@@ -794,10 +793,28 @@ static struct net_protocol sit_protocol = {
         .err_handler    = ipip6_err,
 };
 
+static void __exit sit_destroy_tunnels(void)
+{
+        int prio;
+
+        for (prio = 1; prio < 4; prio++) {
+                int h;
+                for (h = 0; h < HASH_SIZE; h++) {
+                        struct ip_tunnel *t;
+                        while ((t = tunnels[prio][h]) != NULL)
+                                unregister_netdevice(t->dev);
+                }
+        }
+}
+
 void __exit sit_cleanup(void)
 {
         inet_del_protocol(&sit_protocol, IPPROTO_IPV6);
-        unregister_netdev(ipip6_fb_tunnel_dev);
+
+        rtnl_lock();
+        sit_destroy_tunnels();
+        unregister_netdevice(ipip6_fb_tunnel_dev);
+        rtnl_unlock();
 }
 
 int __init sit_init(void)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 56db8f13e6cb..d0c3120d0233 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -586,7 +586,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
         }
         if (skb->stamp.tv_sec == 0) {
                 skb->stamp.tv_sec = xtime.tv_sec;
-                skb->stamp.tv_usec = xtime.tv_nsec * 1000;
+                skb->stamp.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
                 /* Don't enable netstamp, sunrpc doesn't
                    need that much accuracy */
         }
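Note on the net/sunrpc/svcsock.c change: tv_nsec holds nanoseconds, so converting it to the microseconds a struct timeval expects means dividing by NSEC_PER_USEC (1000); the old code multiplied by 1000 instead, producing an out-of-range tv_usec. A tiny sketch of the arithmetic follows, with an illustrative sample value.

/*
 * Sketch of the unit fix: ns -> us is a divide by 1000, not a multiply.
 */
#include <stdio.h>

#define NSEC_PER_USEC 1000L

int main(void)
{
        long tv_nsec = 250000000L;                 /* 0.25 s in nanoseconds */

        long wrong_usec = tv_nsec * 1000;          /* the old, buggy scaling */
        long right_usec = tv_nsec / NSEC_PER_USEC; /* the corrected conversion */

        printf("wrong: %ld \"usec\" (far beyond the 0..999999 tv_usec range)\n",
               wrong_usec);
        printf("right: %ld usec\n", right_usec);
        return 0;
}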