Diffstat (limited to 'net/ipv6/udp.c')
-rw-r--r--	net/ipv6/udp.c	50
1 file changed, 28 insertions(+), 22 deletions(-)
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 69ebdbe78c47..3c0c9c755c92 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -322,7 +322,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct inet_sock *inet = inet_sk(sk);
 	struct sk_buff *skb;
-	unsigned int ulen, copied;
+	unsigned int ulen;
 	int peeked;
 	int err;
 	int is_udplite = IS_UDPLITE(sk);
@@ -341,10 +341,9 @@ try_again:
 		goto out;
 
 	ulen = skb->len - sizeof(struct udphdr);
-	copied = len;
-	if (copied > ulen)
-		copied = ulen;
-	else if (copied < ulen)
+	if (len > ulen)
+		len = ulen;
+	else if (len < ulen)
 		msg->msg_flags |= MSG_TRUNC;
 
 	is_udp4 = (skb->protocol == htons(ETH_P_IP));
@@ -355,14 +354,14 @@ try_again:
 	 * coverage checksum (UDP-Lite), do it before the copy.
 	 */
 
-	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
+	if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
 		if (udp_lib_checksum_complete(skb))
 			goto csum_copy_err;
 	}
 
 	if (skb_csum_unnecessary(skb))
 		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
-					      msg->msg_iov, copied );
+					      msg->msg_iov,len);
 	else {
 		err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
 		if (err == -EINVAL)
@@ -411,7 +410,7 @@ try_again:
 			datagram_recv_ctl(sk, msg, skb);
 	}
 
-	err = copied;
+	err = len;
 	if (flags & MSG_TRUNC)
 		err = ulen;
 
@@ -584,16 +583,20 @@ static void flush_stack(struct sock **stack, unsigned int count,
 			bh_lock_sock(sk);
 			if (!sock_owned_by_user(sk))
 				udpv6_queue_rcv_skb(sk, skb1);
-			else
-				sk_add_backlog(sk, skb1);
+			else if (sk_add_backlog(sk, skb1)) {
+				kfree_skb(skb1);
+				bh_unlock_sock(sk);
+				goto drop;
+			}
 			bh_unlock_sock(sk);
-		} else {
-			atomic_inc(&sk->sk_drops);
-			UDP6_INC_STATS_BH(sock_net(sk),
-					UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
-			UDP6_INC_STATS_BH(sock_net(sk),
-					UDP_MIB_INERRORS, IS_UDPLITE(sk));
+			continue;
 		}
+drop:
+		atomic_inc(&sk->sk_drops);
+		UDP6_INC_STATS_BH(sock_net(sk),
+				UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
+		UDP6_INC_STATS_BH(sock_net(sk),
+				UDP_MIB_INERRORS, IS_UDPLITE(sk));
 	}
 }
 /*
@@ -681,12 +684,11 @@ static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh,
 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 		   int proto)
 {
+	struct net *net = dev_net(skb->dev);
 	struct sock *sk;
 	struct udphdr *uh;
-	struct net_device *dev = skb->dev;
 	struct in6_addr *saddr, *daddr;
 	u32 ulen = 0;
-	struct net *net = dev_net(skb->dev);
 
 	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
 		goto short_packet;
@@ -745,7 +747,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
 			proto == IPPROTO_UDPLITE);
 
-	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
+	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
 
 	kfree_skb(skb);
 	return 0;
@@ -756,8 +758,12 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		udpv6_queue_rcv_skb(sk, skb);
-	else
-		sk_add_backlog(sk, skb);
+	else if (sk_add_backlog(sk, skb)) {
+		atomic_inc(&sk->sk_drops);
+		bh_unlock_sock(sk);
+		sock_put(sk);
+		goto discard;
+	}
 	bh_unlock_sock(sk);
 	sock_put(sk);
 	return 0;
@@ -1396,7 +1402,7 @@ static struct udp_seq_afinfo udp6_seq_afinfo = {
 	},
 };
 
-int udp6_proc_init(struct net *net)
+int __net_init udp6_proc_init(struct net *net)
 {
 	return udp_proc_register(net, &udp6_seq_afinfo);
 }
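
The substance of this diff is the switch from an unconditional sk_add_backlog() to one whose return value is checked: the socket backlog is now size-limited, so when it is full both flush_stack() and __udp6_lib_rcv() free the skb and account the drop (the flush_stack() drop path increments sk->sk_drops plus the UDP_MIB_RCVBUFERRORS and UDP_MIB_INERRORS counters) instead of queueing without bound. The userspace sketch below models only that pattern; toy_sock, toy_add_backlog() and toy_receive() are invented names for illustration, not kernel APIs.

/*
 * Minimal userspace sketch of the bounded-backlog pattern used above.
 * All names here (toy_sock, toy_add_backlog, toy_receive) are invented
 * for illustration; the kernel code uses sk_add_backlog() and checks
 * its return value instead of queueing unconditionally.
 */
#include <stdbool.h>
#include <stdio.h>

#define BACKLOG_LIMIT 4		/* stand-in for the per-socket backlog limit */

struct toy_sock {
	bool owned_by_user;	/* models sock_owned_by_user() */
	int backlog_len;	/* packets currently sitting in the backlog */
	int drops;		/* models sk->sk_drops */
};

/* Returns 0 on success, -1 when the backlog is already full. */
static int toy_add_backlog(struct toy_sock *sk)
{
	if (sk->backlog_len >= BACKLOG_LIMIT)
		return -1;
	sk->backlog_len++;
	return 0;
}

static void toy_receive(struct toy_sock *sk, int pkt)
{
	if (!sk->owned_by_user) {
		printf("pkt %d: delivered directly\n", pkt);
	} else if (toy_add_backlog(sk)) {
		/* Backlog full: drop and account, as the patched paths do. */
		sk->drops++;
		printf("pkt %d: dropped, backlog full\n", pkt);
	} else {
		printf("pkt %d: queued to backlog\n", pkt);
	}
}

int main(void)
{
	struct toy_sock sk = { .owned_by_user = true };

	for (int i = 0; i < 6; i++)
		toy_receive(&sk, i);

	printf("backlog=%d drops=%d\n", sk.backlog_len, sk.drops);
	return 0;
}

Built with a plain cc backlog_sketch.c, the first four packets are queued and the last two are dropped and counted, mirroring what the patched receive paths do once a socket's backlog limit is reached.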