Diffstat (limited to 'net/ipv6/udp.c')
-rw-r--r--  net/ipv6/udp.c | 33 +++++++++++++++++++++------------
1 file changed, 21 insertions(+), 12 deletions(-)
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 52b8347ae3b2..90824852f598 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -34,6 +34,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/skbuff.h>
+#include <linux/slab.h>
 #include <asm/uaccess.h>
 
 #include <net/ndisc.h>
@@ -258,8 +259,8 @@ static struct sock *__udp6_lib_lookup(struct net *net,
 			if (hslot->count < hslot2->count)
 				goto begin;
 
-			result = udp6_lib_lookup2(net, &in6addr_any, sport,
-						  daddr, hnum, dif,
+			result = udp6_lib_lookup2(net, saddr, sport,
+						  &in6addr_any, hnum, dif,
 						  hslot2, slot2);
 		}
 		rcu_read_unlock();
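
The hunk above corrects the argument order in the fallback pass of the two-level UDP socket lookup. As the corrected call shows, udp6_lib_lookup2() takes the packet's source address and port first and the destination address to score against second; the fallback pass searches the hash bucket keyed on the wildcard address, so it should be handed the real saddr together with &in6addr_any, not &in6addr_any as the source and daddr as the destination. Below is a small stand-alone C sketch of this kind of two-pass, score-based lookup; the structures, names and scoring weights are illustrative assumptions, not the kernel's implementation.

#include <stdio.h>
#include <string.h>

#define WILDCARD "::"

/* hypothetical bound-socket record, not a kernel structure */
struct usock {
	const char *laddr;	/* bound local address, WILDCARD if unbound */
	const char *raddr;	/* connected peer address, NULL if unconnected */
	int lport;		/* bound local port */
};

/* Score a candidate for a packet from (saddr) to (daddr, dport):
 * negative means "no match", more specific bindings score higher. */
static int score(const struct usock *s, const char *saddr,
		 const char *daddr, int dport)
{
	int sc = 0;

	if (s->lport != dport)
		return -1;
	if (strcmp(s->laddr, WILDCARD)) {
		if (strcmp(s->laddr, daddr))
			return -1;
		sc += 2;
	}
	if (s->raddr) {
		if (strcmp(s->raddr, saddr))
			return -1;
		sc += 2;
	}
	return sc;
}

/* best-scoring socket in one hash bucket, or NULL if nothing matches */
static const struct usock *lookup(const struct usock *bucket, int n,
				  const char *saddr, const char *daddr,
				  int dport)
{
	const struct usock *best = NULL;
	int best_sc = -1;

	for (int i = 0; i < n; i++) {
		int sc = score(&bucket[i], saddr, daddr, dport);
		if (sc > best_sc) {
			best_sc = sc;
			best = &bucket[i];
		}
	}
	return best;
}

int main(void)
{
	/* bucket keyed on the packet's destination address ... */
	struct usock exact[] = { { "2001:db8::1", "2001:db8::99", 53 } };
	/* ... and the bucket keyed on the wildcard address */
	struct usock any[] = { { WILDCARD, NULL, 53 } };
	const char *saddr = "2001:db8::2", *daddr = "2001:db8::1";

	const struct usock *sk = lookup(exact, 1, saddr, daddr, 53);
	if (!sk)
		/* second pass: same source address, wildcard destination */
		sk = lookup(any, 1, saddr, WILDCARD, 53);

	printf("matched socket bound to %s\n", sk ? sk->laddr : "(none)");
	return 0;
}

Run as-is, the first pass rejects the connected socket in the exact-address bucket because its peer does not match the packet's source, and the second pass finds the wildcard-bound socket, roughly the behaviour the corrected argument order is after.
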
@@ -583,16 +584,20 @@ static void flush_stack(struct sock **stack, unsigned int count,
 			bh_lock_sock(sk);
 			if (!sock_owned_by_user(sk))
 				udpv6_queue_rcv_skb(sk, skb1);
-			else
-				sk_add_backlog(sk, skb1);
+			else if (sk_add_backlog(sk, skb1)) {
+				kfree_skb(skb1);
+				bh_unlock_sock(sk);
+				goto drop;
+			}
 			bh_unlock_sock(sk);
-		} else {
-			atomic_inc(&sk->sk_drops);
-			UDP6_INC_STATS_BH(sock_net(sk),
-					UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
-			UDP6_INC_STATS_BH(sock_net(sk),
-					UDP_MIB_INERRORS, IS_UDPLITE(sk));
+			continue;
 		}
+drop:
+		atomic_inc(&sk->sk_drops);
+		UDP6_INC_STATS_BH(sock_net(sk),
+				UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
+		UDP6_INC_STATS_BH(sock_net(sk),
+				UDP_MIB_INERRORS, IS_UDPLITE(sk));
 	}
 }
 /*
@@ -754,8 +759,12 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		udpv6_queue_rcv_skb(sk, skb);
-	else
-		sk_add_backlog(sk, skb);
+	else if (sk_add_backlog(sk, skb)) {
+		atomic_inc(&sk->sk_drops);
+		bh_unlock_sock(sk);
+		sock_put(sk);
+		goto discard;
+	}
 	bh_unlock_sock(sk);
 	sock_put(sk);
 	return 0;
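
The last two hunks make the same change on the multicast (flush_stack()) and unicast (__udp6_lib_rcv()) receive paths: sk_add_backlog() can now refuse a packet when the socket backlog is already full, so its return value has to be checked. flush_stack() frees the clone and falls through to the shared drop accounting (sk_drops, UDP_MIB_RCVBUFERRORS, UDP_MIB_INERRORS), while __udp6_lib_rcv() bumps sk_drops, releases the socket and jumps to its discard label. A minimal user-space sketch of that bounded-backlog pattern follows; all names and the counting are illustrative assumptions, not kernel API.

#include <stdio.h>

/* hypothetical stand-in for the socket state involved here */
struct sock_sketch {
	int owned_by_user;	/* is the socket lock held by a process? */
	unsigned int backlog;	/* packets parked while the lock was held */
	unsigned int limit;	/* cap, roughly analogous to sk_rcvbuf */
	unsigned long drops;	/* mirrors the sk_drops accounting */
};

/* Returns 0 on success, nonzero when the backlog is full -- the return
 * convention the patched callers now check. */
static int add_backlog(struct sock_sketch *sk)
{
	if (sk->backlog >= sk->limit)
		return 1;
	sk->backlog++;
	return 0;
}

static void rcv_one(struct sock_sketch *sk)
{
	if (!sk->owned_by_user)
		return;			/* fast path: deliver directly */
	if (add_backlog(sk))
		sk->drops++;		/* full: drop and account, as the
					 * new error branches do */
}

int main(void)
{
	struct sock_sketch sk = { .owned_by_user = 1, .limit = 3 };

	for (int i = 0; i < 5; i++)
		rcv_one(&sk);
	printf("backlogged=%u dropped=%lu\n", sk.backlog, sk.drops);
	return 0;
}

Compiled and run, the sketch parks packets up to the limit and counts the remainder as drops, mirroring the accounting that the new error branches add instead of queueing without bound.
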