aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv6
diff options
context:
space:
mode:
authorBenjamin LaHaise <bcrl@kvack.org>2012-04-27 04:23:21 -0400
committerDavid S. Miller <davem@davemloft.net>2012-04-28 22:21:50 -0400
commitf7ad74fef3af6c6e2ef7f01c5589d77fe7db3d7c (patch)
treefc0276ddd7983b0e346ce08104c7165a0b710295 /net/ipv6
parenta319726af99d3da87f942a4ae95a4f6527badf4a (diff)
net/ipv6/udp: UDP encapsulation: break backlog_rcv into __udpv6_queue_rcv_skb
This is the first step in reworking the IPv6 UDP code to be structured more like the IPv4 UDP code. This patch creates __udpv6_queue_rcv_skb() with the equivalent semantics to __udp_queue_rcv_skb(), and wires it up to the backlog_rcv method. Signed-off-by: Benjamin LaHaise <bcrl@kvack.org> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6')
-rw-r--r--net/ipv6/udp.c42
1 file changed, 27 insertions, 15 deletions
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d39bbc9e0622..6c0367ff7be7 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -496,6 +496,28 @@ out:
496 sock_put(sk); 496 sock_put(sk);
497} 497}
498 498
499static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
500{
501 int rc;
502
503 if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
504 sock_rps_save_rxhash(sk, skb);
505
506 rc = sock_queue_rcv_skb(sk, skb);
507 if (rc < 0) {
508 int is_udplite = IS_UDPLITE(sk);
509
510 /* Note that an ENOMEM error is charged twice */
511 if (rc == -ENOMEM)
512 UDP6_INC_STATS_BH(sock_net(sk),
513 UDP_MIB_RCVBUFERRORS, is_udplite);
514 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
515 kfree_skb(skb);
516 return -1;
517 }
518 return 0;
519}
520
499static __inline__ void udpv6_err(struct sk_buff *skb, 521static __inline__ void udpv6_err(struct sk_buff *skb,
500 struct inet6_skb_parm *opt, u8 type, 522 struct inet6_skb_parm *opt, u8 type,
501 u8 code, int offset, __be32 info ) 523 u8 code, int offset, __be32 info )
@@ -503,15 +525,12 @@ static __inline__ void udpv6_err(struct sk_buff *skb,
503 __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table); 525 __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
504} 526}
505 527
506int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) 528int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
507{ 529{
508 struct udp_sock *up = udp_sk(sk); 530 struct udp_sock *up = udp_sk(sk);
509 int rc; 531 int rc;
510 int is_udplite = IS_UDPLITE(sk); 532 int is_udplite = IS_UDPLITE(sk);
511 533
512 if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
513 sock_rps_save_rxhash(sk, skb);
514
515 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 534 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
516 goto drop; 535 goto drop;
517 536
@@ -540,19 +559,12 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
540 } 559 }
541 560
542 skb_dst_drop(skb); 561 skb_dst_drop(skb);
543 rc = sock_queue_rcv_skb(sk, skb);
544 if (rc < 0) {
545 /* Note that an ENOMEM error is charged twice */
546 if (rc == -ENOMEM)
547 UDP6_INC_STATS_BH(sock_net(sk),
548 UDP_MIB_RCVBUFERRORS, is_udplite);
549 goto drop_no_sk_drops_inc;
550 }
551 562
552 return 0; 563 rc = __udpv6_queue_rcv_skb(sk, skb);
564
565 return rc;
553drop: 566drop:
554 atomic_inc(&sk->sk_drops); 567 atomic_inc(&sk->sk_drops);
555drop_no_sk_drops_inc:
556 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 568 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
557 kfree_skb(skb); 569 kfree_skb(skb);
558 return -1; 570 return -1;
@@ -1471,7 +1483,7 @@ struct proto udpv6_prot = {
1471 .getsockopt = udpv6_getsockopt, 1483 .getsockopt = udpv6_getsockopt,
1472 .sendmsg = udpv6_sendmsg, 1484 .sendmsg = udpv6_sendmsg,
1473 .recvmsg = udpv6_recvmsg, 1485 .recvmsg = udpv6_recvmsg,
1474 .backlog_rcv = udpv6_queue_rcv_skb, 1486 .backlog_rcv = __udpv6_queue_rcv_skb,
1475 .hash = udp_lib_hash, 1487 .hash = udp_lib_hash,
1476 .unhash = udp_lib_unhash, 1488 .unhash = udp_lib_unhash,
1477 .rehash = udp_v6_rehash, 1489 .rehash = udp_v6_rehash,