author		Eric Dumazet <eric.dumazet@gmail.com>	2009-11-08 05:18:52 -0500
committer	David S. Miller <davem@davemloft.net>	2009-11-08 23:53:09 -0500
commit		a1ab77f97ed03f5dae66ae4c64375beffab83772 (patch)
tree		04ad6bf5def42fc415b85f3e0fe1638e64b26eea /net/ipv6/udp.c
parent		1240d1373cd7f874dd0f3057c3e9643e71ef75c6 (diff)
ipv6: udp: Optimise multicast reception
IPv6 UDP multicast rx path is a bit complex and can hold a spinlock
for a long time.

Using a small stack of socket pointers (32 or 64 entries, depending on
word size) helps perform the expensive operations (skb_clone(),
udpv6_queue_rcv_skb()) outside of the lock in most cases.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
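The core idea generalises beyond this patch: while the lock is held, only
record which sockets should receive the datagram; the copying and queueing
happen after the lock is dropped. Below is a minimal userspace sketch of
that two-phase pattern; struct receiver, multicast_deliver() and
deliver_copy() are invented for illustration, and the refcounting the
kernel needs (sock_hold()/sock_put(), since a socket may vanish once the
lock is released) is omitted because these receivers live on the stack.

	#include <pthread.h>
	#include <stdio.h>

	#define STACK_SIZE 32

	struct receiver {
		int id;
		struct receiver *next;
	};

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Stand-in for the expensive skb_clone() + queueing work. */
	static void deliver_copy(struct receiver *r, const char *msg)
	{
		printf("receiver %d gets a copy of \"%s\"\n", r->id, msg);
	}

	static void multicast_deliver(struct receiver *head, const char *msg)
	{
		struct receiver *stack[STACK_SIZE];
		unsigned int i, count = 0;

		/* Phase 1: under the lock, just collect the targets. */
		pthread_mutex_lock(&list_lock);
		for (struct receiver *r = head; r && count < STACK_SIZE;
		     r = r->next)
			stack[count++] = r;
		pthread_mutex_unlock(&list_lock);

		/* Phase 2: the costly per-receiver work runs unlocked. */
		for (i = 0; i < count; i++)
			deliver_copy(stack[i], msg);
	}

	int main(void)
	{
		struct receiver c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

		multicast_deliver(&a, "hello");
		return 0;
	}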
Diffstat (limited to 'net/ipv6/udp.c')
-rw-r--r--	net/ipv6/udp.c	71
1 file changed, 47 insertions(+), 24 deletions(-)
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index f580cf925112..948e823d70c2 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -569,6 +569,27 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
 	return NULL;
 }
 
+static void flush_stack(struct sock **stack, unsigned int count,
+			struct sk_buff *skb, unsigned int final)
+{
+	unsigned int i;
+	struct sock *sk;
+	struct sk_buff *skb1;
+
+	for (i = 0; i < count; i++) {
+		skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
+
+		if (skb1) {
+			sk = stack[i];
+			bh_lock_sock(sk);
+			if (!sock_owned_by_user(sk))
+				udpv6_queue_rcv_skb(sk, skb1);
+			else
+				sk_add_backlog(sk, skb1);
+			bh_unlock_sock(sk);
+		}
+	}
+}
 /*
  * Note: called only from the BH handler context,
  * so we don't need to lock the hashes.
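A note on the new flush_stack() helper above: its final argument selects
the single index that may consume the original skb instead of a clone;
every other socket gets its own skb_clone(). The two call sites added in
the next hunk rely on this:

	/* Mid-walk overflow flush: ~0 matches no index, so every socket
	 * gets a clone and skb survives for the sockets still to come. */
	flush_stack(stack, count, skb, ~0);

	/* Final flush: the last socket consumes the original skb,
	 * saving one clone. */
	flush_stack(stack, count, skb, count - 1);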
@@ -577,41 +598,43 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 		struct in6_addr *saddr, struct in6_addr *daddr,
 		struct udp_table *udptable)
 {
-	struct sock *sk, *sk2;
+	struct sock *sk, *stack[256 / sizeof(struct sock *)];
 	const struct udphdr *uh = udp_hdr(skb);
 	struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest));
 	int dif;
+	unsigned int i, count = 0;
 
 	spin_lock(&hslot->lock);
 	sk = sk_nulls_head(&hslot->head);
 	dif = inet6_iif(skb);
 	sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
-	if (!sk) {
-		kfree_skb(skb);
-		goto out;
-	}
-
-	sk2 = sk;
-	while ((sk2 = udp_v6_mcast_next(net, sk_nulls_next(sk2), uh->dest, daddr,
-					uh->source, saddr, dif))) {
-		struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
-		if (buff) {
-			bh_lock_sock(sk2);
-			if (!sock_owned_by_user(sk2))
-				udpv6_queue_rcv_skb(sk2, buff);
-			else
-				sk_add_backlog(sk2, buff);
-			bh_unlock_sock(sk2);
+	while (sk) {
+		stack[count++] = sk;
+		sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr,
+				       uh->source, saddr, dif);
+		if (unlikely(count == ARRAY_SIZE(stack))) {
+			if (!sk)
+				break;
+			flush_stack(stack, count, skb, ~0);
+			count = 0;
+		}
 		}
 	}
-	bh_lock_sock(sk);
-	if (!sock_owned_by_user(sk))
-		udpv6_queue_rcv_skb(sk, skb);
-	else
-		sk_add_backlog(sk, skb);
-	bh_unlock_sock(sk);
-out:
+	/*
+	 * before releasing the lock, we must take reference on sockets
+	 */
+	for (i = 0; i < count; i++)
+		sock_hold(stack[i]);
+
 	spin_unlock(&hslot->lock);
+
+	if (count) {
+		flush_stack(stack, count, skb, count - 1);
+
+		for (i = 0; i < count; i++)
+			sock_put(stack[i]);
+	} else {
+		kfree_skb(skb);
+	}
 	return 0;
 }
 
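One design note on sizing: the on-stack array is declared as
stack[256 / sizeof(struct sock *)], i.e. 256 bytes' worth of pointers
regardless of word size: 32 entries on a 64-bit kernel, 64 on a 32-bit
one, which is where the "32 or 64 entries" in the commit message comes
from. A standalone check (using void * as a stand-in, since object
pointers share a size on common Linux ABIs):

	#include <stdio.h>

	int main(void)
	{
		/* 256 bytes of pointers: 32 entries on LP64, 64 on ILP32 */
		printf("%zu entries\n", 256 / sizeof(void *));
		return 0;
	}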