about summary refs log tree commit diff stats
path: root/net/ipv6/udp.c
diff options
context:
space:
mode:
author: H. Peter Anvin <hpa@zytor.com> 2010-04-29 19:53:17 -0400
committer: H. Peter Anvin <hpa@zytor.com> 2010-04-29 19:53:17 -0400
commitd9c5841e22231e4e49fd0a1004164e6fce59b7a6 (patch)
treee1f589c46b3ff79bbe7b1b2469f6362f94576da6 /net/ipv6/udp.c
parentb701a47ba48b698976fb2fe05fb285b0edc1d26a (diff)
parent5967ed87ade85a421ef814296c3c7f182b08c225 (diff)
Merge branch 'x86/asm' into x86/atomic
Merge reason: Conflict between LOCK_PREFIX_HERE and relative alternatives pointers

Resolved Conflicts:
	arch/x86/include/asm/alternative.h
	arch/x86/kernel/alternative.c

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'net/ipv6/udp.c')
-rw-r--r--  net/ipv6/udp.c  55
1 file changed, 31 insertions, 24 deletions
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 69ebdbe78c47..90824852f598 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -34,6 +34,7 @@
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/skbuff.h> 36#include <linux/skbuff.h>
37#include <linux/slab.h>
37#include <asm/uaccess.h> 38#include <asm/uaccess.h>
38 39
39#include <net/ndisc.h> 40#include <net/ndisc.h>
@@ -258,8 +259,8 @@ static struct sock *__udp6_lib_lookup(struct net *net,
258 if (hslot->count < hslot2->count) 259 if (hslot->count < hslot2->count)
259 goto begin; 260 goto begin;
260 261
261 result = udp6_lib_lookup2(net, &in6addr_any, sport, 262 result = udp6_lib_lookup2(net, saddr, sport,
262 daddr, hnum, dif, 263 &in6addr_any, hnum, dif,
263 hslot2, slot2); 264 hslot2, slot2);
264 } 265 }
265 rcu_read_unlock(); 266 rcu_read_unlock();
@@ -322,7 +323,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
322 struct ipv6_pinfo *np = inet6_sk(sk); 323 struct ipv6_pinfo *np = inet6_sk(sk);
323 struct inet_sock *inet = inet_sk(sk); 324 struct inet_sock *inet = inet_sk(sk);
324 struct sk_buff *skb; 325 struct sk_buff *skb;
325 unsigned int ulen, copied; 326 unsigned int ulen;
326 int peeked; 327 int peeked;
327 int err; 328 int err;
328 int is_udplite = IS_UDPLITE(sk); 329 int is_udplite = IS_UDPLITE(sk);
@@ -341,10 +342,9 @@ try_again:
341 goto out; 342 goto out;
342 343
343 ulen = skb->len - sizeof(struct udphdr); 344 ulen = skb->len - sizeof(struct udphdr);
344 copied = len; 345 if (len > ulen)
345 if (copied > ulen) 346 len = ulen;
346 copied = ulen; 347 else if (len < ulen)
347 else if (copied < ulen)
348 msg->msg_flags |= MSG_TRUNC; 348 msg->msg_flags |= MSG_TRUNC;
349 349
350 is_udp4 = (skb->protocol == htons(ETH_P_IP)); 350 is_udp4 = (skb->protocol == htons(ETH_P_IP));
@@ -355,14 +355,14 @@ try_again:
355 * coverage checksum (UDP-Lite), do it before the copy. 355 * coverage checksum (UDP-Lite), do it before the copy.
356 */ 356 */
357 357
358 if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { 358 if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
359 if (udp_lib_checksum_complete(skb)) 359 if (udp_lib_checksum_complete(skb))
360 goto csum_copy_err; 360 goto csum_copy_err;
361 } 361 }
362 362
363 if (skb_csum_unnecessary(skb)) 363 if (skb_csum_unnecessary(skb))
364 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), 364 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
365 msg->msg_iov, copied ); 365 msg->msg_iov,len);
366 else { 366 else {
367 err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); 367 err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
368 if (err == -EINVAL) 368 if (err == -EINVAL)
@@ -411,7 +411,7 @@ try_again:
411 datagram_recv_ctl(sk, msg, skb); 411 datagram_recv_ctl(sk, msg, skb);
412 } 412 }
413 413
414 err = copied; 414 err = len;
415 if (flags & MSG_TRUNC) 415 if (flags & MSG_TRUNC)
416 err = ulen; 416 err = ulen;
417 417
@@ -584,16 +584,20 @@ static void flush_stack(struct sock **stack, unsigned int count,
584 bh_lock_sock(sk); 584 bh_lock_sock(sk);
585 if (!sock_owned_by_user(sk)) 585 if (!sock_owned_by_user(sk))
586 udpv6_queue_rcv_skb(sk, skb1); 586 udpv6_queue_rcv_skb(sk, skb1);
587 else 587 else if (sk_add_backlog(sk, skb1)) {
588 sk_add_backlog(sk, skb1); 588 kfree_skb(skb1);
589 bh_unlock_sock(sk);
590 goto drop;
591 }
589 bh_unlock_sock(sk); 592 bh_unlock_sock(sk);
590 } else { 593 continue;
591 atomic_inc(&sk->sk_drops);
592 UDP6_INC_STATS_BH(sock_net(sk),
593 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
594 UDP6_INC_STATS_BH(sock_net(sk),
595 UDP_MIB_INERRORS, IS_UDPLITE(sk));
596 } 594 }
595drop:
596 atomic_inc(&sk->sk_drops);
597 UDP6_INC_STATS_BH(sock_net(sk),
598 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
599 UDP6_INC_STATS_BH(sock_net(sk),
600 UDP_MIB_INERRORS, IS_UDPLITE(sk));
597 } 601 }
598} 602}
599/* 603/*
@@ -681,12 +685,11 @@ static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh,
681int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, 685int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
682 int proto) 686 int proto)
683{ 687{
688 struct net *net = dev_net(skb->dev);
684 struct sock *sk; 689 struct sock *sk;
685 struct udphdr *uh; 690 struct udphdr *uh;
686 struct net_device *dev = skb->dev;
687 struct in6_addr *saddr, *daddr; 691 struct in6_addr *saddr, *daddr;
688 u32 ulen = 0; 692 u32 ulen = 0;
689 struct net *net = dev_net(skb->dev);
690 693
691 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 694 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
692 goto short_packet; 695 goto short_packet;
@@ -745,7 +748,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
745 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, 748 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
746 proto == IPPROTO_UDPLITE); 749 proto == IPPROTO_UDPLITE);
747 750
748 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev); 751 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
749 752
750 kfree_skb(skb); 753 kfree_skb(skb);
751 return 0; 754 return 0;
@@ -756,8 +759,12 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
756 bh_lock_sock(sk); 759 bh_lock_sock(sk);
757 if (!sock_owned_by_user(sk)) 760 if (!sock_owned_by_user(sk))
758 udpv6_queue_rcv_skb(sk, skb); 761 udpv6_queue_rcv_skb(sk, skb);
759 else 762 else if (sk_add_backlog(sk, skb)) {
760 sk_add_backlog(sk, skb); 763 atomic_inc(&sk->sk_drops);
764 bh_unlock_sock(sk);
765 sock_put(sk);
766 goto discard;
767 }
761 bh_unlock_sock(sk); 768 bh_unlock_sock(sk);
762 sock_put(sk); 769 sock_put(sk);
763 return 0; 770 return 0;
@@ -1396,7 +1403,7 @@ static struct udp_seq_afinfo udp6_seq_afinfo = {
1396 }, 1403 },
1397}; 1404};
1398 1405
1399int udp6_proc_init(struct net *net) 1406int __net_init udp6_proc_init(struct net *net)
1400{ 1407{
1401 return udp_proc_register(net, &udp6_seq_afinfo); 1408 return udp_proc_register(net, &udp6_seq_afinfo);
1402} 1409}