author    David Woodhouse <dwmw2@shinybook.infradead.org>  2005-08-17 09:37:55 -0400
committer David Woodhouse <dwmw2@shinybook.infradead.org>  2005-08-17 09:37:55 -0400
commit    327b6b08d6ab3bf5488120ba02ed2fe06b09efe6 (patch)
tree      592b0ebc2f5f18dac0bdc0fd6ba87d2c8b07b232 /net
parent    c973b112c76c9d8fd042991128f218a738cc8d0a (diff)
parent    2ad56496627630ebc99f06af5f81ca23e17e014e (diff)
Merge with master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Diffstat (limited to 'net')
-rw-r--r--  net/compat.c            |  9
-rw-r--r--  net/core/dev.c          |  9
-rw-r--r--  net/core/netpoll.c      | 63
-rw-r--r--  net/decnet/af_decnet.c  | 11
-rw-r--r--  net/ipv4/tcp_output.c   | 14
-rw-r--r--  net/sunrpc/svcsock.c    |  2
-rw-r--r--  net/sunrpc/xdr.c        |  1
7 files changed, 56 insertions, 53 deletions
diff --git a/net/compat.c b/net/compat.c
index be5d936dc423..d99ab9695893 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -91,20 +91,11 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
 	} else
 		kern_msg->msg_name = NULL;
 
-	if(kern_msg->msg_iovlen > UIO_FASTIOV) {
-		kern_iov = kmalloc(kern_msg->msg_iovlen * sizeof(struct iovec),
-				   GFP_KERNEL);
-		if(!kern_iov)
-			return -ENOMEM;
-	}
-
 	tot_len = iov_from_user_compat_to_kern(kern_iov,
 			(struct compat_iovec __user *)kern_msg->msg_iov,
 			kern_msg->msg_iovlen);
 	if(tot_len >= 0)
 		kern_msg->msg_iov = kern_iov;
-	else if(kern_msg->msg_iovlen > UIO_FASTIOV)
-		kfree(kern_iov);
 
 	return tot_len;
 }
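This hunk removes the oversized-iovec kmalloc() (and the matching error-path kfree()) from verify_compat_iovec(), which now assumes the caller passes a kern_iov buffer already sized for msg_iovlen. A minimal sketch of the caller-side pattern that implies; the surrounding names and call shape are illustrative, not taken from this merge:

	/* Hypothetical caller: owns the iovec allocation around the call. */
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	long err;

	if (msg.msg_iovlen > UIO_FASTIOV) {
		iov = kmalloc(msg.msg_iovlen * sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return -ENOMEM;
	}
	err = verify_compat_iovec(&msg, iov, address, mode);
	/* ... use iov for the transfer ... */
	if (iov != iovstack)
		kfree(iov);	/* caller frees on every exit path */

Moving ownership to the caller removes the asymmetry where the function allocated on entry but freed only on the tot_len < 0 path.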
diff --git a/net/core/dev.c b/net/core/dev.c
index 52a3bf7ae177..faf59b02c4bf 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1696,7 +1696,8 @@ static void net_rx_action(struct softirq_action *h)
 	struct softnet_data *queue = &__get_cpu_var(softnet_data);
 	unsigned long start_time = jiffies;
 	int budget = netdev_budget;
+	void *have;
 
 	local_irq_disable();
 
 	while (!list_empty(&queue->poll_list)) {
@@ -1709,10 +1710,10 @@ static void net_rx_action(struct softirq_action *h)
 
 		dev = list_entry(queue->poll_list.next,
 				 struct net_device, poll_list);
-		netpoll_poll_lock(dev);
+		have = netpoll_poll_lock(dev);
 
 		if (dev->quota <= 0 || dev->poll(dev, &budget)) {
-			netpoll_poll_unlock(dev);
+			netpoll_poll_unlock(have);
 			local_irq_disable();
 			list_del(&dev->poll_list);
 			list_add_tail(&dev->poll_list, &queue->poll_list);
@@ -1721,7 +1722,7 @@ static void net_rx_action(struct softirq_action *h)
 		else
 			dev->quota = dev->weight;
 	} else {
-		netpoll_poll_unlock(dev);
+		netpoll_poll_unlock(have);
 		dev_put(dev);
 		local_irq_disable();
 	}
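net_rx_action() now keeps the cookie returned by netpoll_poll_lock() and hands it back to netpoll_poll_unlock(), rather than letting unlock re-derive state from the device. A simplified sketch of the inline pair these call sites assume (a reconstruction, not quoted from this merge):

	static inline void *netpoll_poll_lock(struct net_device *dev)
	{
		struct netpoll_info *npinfo = dev->npinfo;

		if (npinfo) {
			spin_lock(&npinfo->poll_lock);
			npinfo->poll_owner = smp_processor_id();
		}
		return npinfo;	/* opaque cookie for the matching unlock */
	}

	static inline void netpoll_poll_unlock(void *have)
	{
		struct netpoll_info *npinfo = have;

		if (npinfo) {
			npinfo->poll_owner = -1;
			spin_unlock(&npinfo->poll_lock);
		}
	}

Because unlock works on the saved cookie, it stays correct even if dev->npinfo changes between lock and unlock; the synchronize_rcu() added in netpoll_setup() below covers the other half of that race.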
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c327c9edadc5..a1a9a7abff50 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -33,6 +33,7 @@
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
+#define MAX_RETRIES 20000
 
 static DEFINE_SPINLOCK(skb_list_lock);
 static int nr_skbs;
@@ -248,14 +249,14 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 	int status;
 	struct netpoll_info *npinfo;
 
-repeat:
-	if(!np || !np->dev || !netif_running(np->dev)) {
+	if (!np || !np->dev || !netif_running(np->dev)) {
 		__kfree_skb(skb);
 		return;
 	}
 
-	/* avoid recursion */
 	npinfo = np->dev->npinfo;
+
+	/* avoid recursion */
 	if (npinfo->poll_owner == smp_processor_id() ||
 	    np->dev->xmit_lock_owner == smp_processor_id()) {
 		if (np->drop)
@@ -265,30 +266,37 @@ repeat:
 		return;
 	}
 
-	spin_lock(&np->dev->xmit_lock);
-	np->dev->xmit_lock_owner = smp_processor_id();
-
-	/*
-	 * network drivers do not expect to be called if the queue is
-	 * stopped.
-	 */
-	if (netif_queue_stopped(np->dev)) {
-		np->dev->xmit_lock_owner = -1;
-		spin_unlock(&np->dev->xmit_lock);
-
-		netpoll_poll(np);
-		goto repeat;
-	}
-
-	status = np->dev->hard_start_xmit(skb, np->dev);
-	np->dev->xmit_lock_owner = -1;
-	spin_unlock(&np->dev->xmit_lock);
-
-	/* transmit busy */
-	if(status) {
-		netpoll_poll(np);
-		goto repeat;
-	}
+	do {
+		npinfo->tries--;
+		spin_lock(&np->dev->xmit_lock);
+		np->dev->xmit_lock_owner = smp_processor_id();
+
+		/*
+		 * network drivers do not expect to be called if the queue is
+		 * stopped.
+		 */
+		if (netif_queue_stopped(np->dev)) {
+			np->dev->xmit_lock_owner = -1;
+			spin_unlock(&np->dev->xmit_lock);
+			netpoll_poll(np);
+			udelay(50);
+			continue;
+		}
+
+		status = np->dev->hard_start_xmit(skb, np->dev);
+		np->dev->xmit_lock_owner = -1;
+		spin_unlock(&np->dev->xmit_lock);
+
+		/* success */
+		if(!status) {
+			npinfo->tries = MAX_RETRIES; /* reset */
+			return;
+		}
+
+		/* transmit busy */
+		netpoll_poll(np);
+		udelay(50);
+	} while (npinfo->tries > 0);
 }
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
@@ -349,15 +357,11 @@ static void arp_reply(struct sk_buff *skb)
 	unsigned char *arp_ptr;
 	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
 	u32 sip, tip;
-	unsigned long flags;
 	struct sk_buff *send_skb;
 	struct netpoll *np = NULL;
 
-	spin_lock_irqsave(&npinfo->rx_lock, flags);
 	if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
 		np = npinfo->rx_np;
-	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-
 	if (!np)
 		return;
 
@@ -639,9 +643,11 @@ int netpoll_setup(struct netpoll *np)
 		if (!npinfo)
 			goto release;
 
+		npinfo->rx_flags = 0;
 		npinfo->rx_np = NULL;
 		npinfo->poll_lock = SPIN_LOCK_UNLOCKED;
 		npinfo->poll_owner = -1;
+		npinfo->tries = MAX_RETRIES;
 		npinfo->rx_lock = SPIN_LOCK_UNLOCKED;
 	} else
 		npinfo = ndev->npinfo;
@@ -718,9 +724,16 @@ int netpoll_setup(struct netpoll *np)
 		npinfo->rx_np = np;
 		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}
+
+	/* fill up the skb queue */
+	refill_skbs();
+
 	/* last thing to do is link it to the net device structure */
 	ndev->npinfo = npinfo;
 
+	/* avoid racing with NAPI reading npinfo */
+	synchronize_rcu();
+
 	return 0;
 
 release:
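Taken together, the netpoll.c hunks turn the unbounded goto-repeat transmit loop into a bounded retry loop: each pass decrements npinfo->tries and backs off with udelay(50), and a successful hard_start_xmit() resets the budget. At 50 µs per retry, MAX_RETRIES of 20000 caps the busy-wait at roughly one second (20000 × 50 µs = 1 s) before the packet is abandoned. The new call sites imply two extra fields in struct netpoll_info; a hedged sketch of the structure as this code expects it (field order illustrative, only fields referenced in this file shown):

	struct netpoll_info {
		spinlock_t poll_lock;
		int poll_owner;		/* CPU holding poll_lock, or -1 */
		int tries;		/* remaining transmit retries */
		int rx_flags;		/* zeroed in netpoll_setup() */
		spinlock_t rx_lock;
		struct netpoll *rx_np;	/* netpoll registered for rx */
	};

The arp_reply() hunk drops the rx_lock round-trip around the rx_np lookup, and netpoll_setup() now pre-fills the skb pool with refill_skbs() and issues synchronize_rcu() after publishing ndev->npinfo, so concurrent NAPI pollers never observe a half-initialized structure.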
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 96a02800cd28..acdd18e6adb2 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1876,15 +1876,6 @@ static inline unsigned int dn_current_mss(struct sock *sk, int flags)
 	return mss_now;
 }
 
-static int dn_error(struct sock *sk, int flags, int err)
-{
-	if (err == -EPIPE)
-		err = sock_error(sk) ? : -EPIPE;
-	if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
-		send_sig(SIGPIPE, current, 0);
-	return err;
-}
-
 static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
 		      struct msghdr *msg, size_t size)
 {
@@ -2045,7 +2036,7 @@ out:
 	return sent ? sent : err;
 
 out_err:
-	err = dn_error(sk, flags, err);
+	err = sk_stream_error(sk, flags, err);
 	release_sock(sk);
 	return err;
 }
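The deleted dn_error() duplicated the generic sk_stream_error() helper line for line, so dn_sendmsg()'s error path can call the shared version with no change in behavior. For comparison, the shared helper of this era (net/core/stream.c) reads essentially as follows; reproduced from memory, so treat it as approximate:

	int sk_stream_error(struct sock *sk, int flags, int err)
	{
		if (err == -EPIPE)
			err = sock_error(sk) ? : -EPIPE;
		if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
			send_sig(SIGPIPE, current, 0);
		return err;
	}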
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7d076f0db100..3ed6fc15815b 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1370,15 +1370,21 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 
 	if (skb->len > cur_mss) {
 		int old_factor = tcp_skb_pcount(skb);
-		int new_factor;
+		int diff;
 
 		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
 			return -ENOMEM; /* We'll try again later. */
 
 		/* New SKB created, account for it. */
-		new_factor = tcp_skb_pcount(skb);
-		tp->packets_out -= old_factor - new_factor;
-		tp->packets_out += tcp_skb_pcount(skb->next);
+		diff = old_factor - tcp_skb_pcount(skb) -
+		       tcp_skb_pcount(skb->next);
+		tp->packets_out -= diff;
+
+		if (diff > 0) {
+			tp->fackets_out -= diff;
+			if ((int)tp->fackets_out < 0)
+				tp->fackets_out = 0;
+		}
 	}
 
 	/* Collapse two adjacent packets if worthwhile and we can. */
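The retransmit path previously adjusted tp->packets_out in two separate steps and never touched tp->fackets_out. The rewrite folds the adjustment into one signed diff and clamps the FACK counter. Worked example with hypothetical numbers: a 3-segment skb (old_factor = 3) that tcp_fragment() splits into a 1-segment head and a 2-segment tail gives diff = 3 - 1 - 2 = 0, so nothing changes; if re-segmentation leaves the two halves accounting for only two segments in total, diff = 1, packets_out drops by one, and fackets_out shrinks by the same amount, floored at zero so the unsigned counter cannot wrap.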
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 56db8f13e6cb..d0c3120d0233 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -586,7 +586,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
 	}
 	if (skb->stamp.tv_sec == 0) {
 		skb->stamp.tv_sec = xtime.tv_sec;
-		skb->stamp.tv_usec = xtime.tv_nsec * 1000;
+		skb->stamp.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
 		/* Don't enable netstamp, sunrpc doesn't
 		   need that much accuracy */
 	}
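The old assignment multiplied nanoseconds by 1000 where it needed to divide, inflating tv_usec by a factor of one million. For example, xtime.tv_nsec = 500000000 (half a second) formerly produced tv_usec = 500000000000, while the corrected xtime.tv_nsec / NSEC_PER_USEC (NSEC_PER_USEC being 1000) yields the intended 500000.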
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 8a4d9c106af1..fde16f40a581 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -993,6 +993,7 @@ xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
 		return -EINVAL;
 	} else {
 		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
+		    desc->array_len > desc->array_maxlen ||
 		    (unsigned long) base + 4 + desc->array_len *
 		    desc->elem_size > buf->len)
 			return -EINVAL;
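The extra comparison rejects a decoded array_len larger than the caller-supplied array_maxlen before that length feeds the base + 4 + array_len * elem_size bound check. Without it, the multiplication can wrap on 32-bit hosts: for instance, array_len = 0x40000000 with elem_size = 4 overflows to 0, so the old check passes and decoding then walks far beyond buf->len. Bounding array_len first keeps the product, and therefore the length test, meaningful.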