author    Denis V. Lunev <den@openvz.org>       2008-02-29 14:16:08 -0500
committer David S. Miller <davem@davemloft.net> 2008-02-29 14:16:08 -0500
commit    b7e729c4b4778aac4dbbec9dc070acde93071f4d
tree      31c84099f0821e0929407ba2a2e704fd933e1ec1 /net/ipv6/icmp.c
parent    1e3cf6834e7db1eac94314338c9e30c2103ac409
[ICMP]: Store sock rather than socket for ICMP flow control.
Basically, there is no difference in what we store, socket or sock. However, sock looks better, as there will be one less dereference on the fast path.

Signed-off-by: Denis V. Lunev <den@openvz.org>
Acked-by: Daniel Lezcano <dlezcano@fr.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
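For illustration only, a minimal standalone sketch of the access pattern being changed (the struct bodies and helper names below are simplified stand-ins, not the kernel definitions): keeping a per-CPU struct socket * forces the fast path to chase ->sk on every use, whereas storing the struct sock * directly hands back the pointer with no extra dereference.

/* Illustrative sketch only: stand-in types, not the kernel's. */
struct sock   { int placeholder; };
struct socket { struct sock *sk; };

static struct socket *icmpv6_socket;	/* before: the socket pointer is stored */
static struct sock   *icmpv6_sk;	/* after:  the sock pointer is stored   */

static struct sock *fast_path_before(void)
{
	return icmpv6_socket->sk;	/* extra ->sk dereference on every use */
}

static struct sock *fast_path_after(void)
{
	return icmpv6_sk;		/* already the sock, nothing to chase */
}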
Diffstat (limited to 'net/ipv6/icmp.c')
-rw-r--r--  net/ipv6/icmp.c | 25
1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index b9b13a77ba30..875bdc725dc4 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -80,8 +80,8 @@ EXPORT_SYMBOL(icmpv6msg_statistics);
  *
  *	On SMP we have one ICMP socket per-cpu.
  */
-static DEFINE_PER_CPU(struct socket *, __icmpv6_socket) = NULL;
-#define icmpv6_socket	__get_cpu_var(__icmpv6_socket)
+static DEFINE_PER_CPU(struct sock *, __icmpv6_sk) = NULL;
+#define icmpv6_sk	__get_cpu_var(__icmpv6_sk)
 
 static int icmpv6_rcv(struct sk_buff *skb);
 
@@ -94,7 +94,7 @@ static __inline__ int icmpv6_xmit_lock(void)
 {
 	local_bh_disable();
 
-	if (unlikely(!spin_trylock(&icmpv6_socket->sk->sk_lock.slock))) {
+	if (unlikely(!spin_trylock(&icmpv6_sk->sk_lock.slock))) {
 		/* This can happen if the output path (f.e. SIT or
 		 * ip6ip6 tunnel) signals dst_link_failure() for an
 		 * outgoing ICMP6 packet.
@@ -107,7 +107,7 @@ static __inline__ int icmpv6_xmit_lock(void)
 
 static __inline__ void icmpv6_xmit_unlock(void)
 {
-	spin_unlock_bh(&icmpv6_socket->sk->sk_lock.slock);
+	spin_unlock_bh(&icmpv6_sk->sk_lock.slock);
 }
 
 /*
@@ -392,7 +392,7 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
 	if (icmpv6_xmit_lock())
 		return;
 
-	sk = icmpv6_socket->sk;
+	sk = icmpv6_sk;
 	np = inet6_sk(sk);
 
 	if (!icmpv6_xrlim_allow(sk, type, &fl))
@@ -538,7 +538,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 	if (icmpv6_xmit_lock())
 		return;
 
-	sk = icmpv6_socket->sk;
+	sk = icmpv6_sk;
 	np = inet6_sk(sk);
 
 	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
@@ -776,7 +776,7 @@ drop_no_count:
 }
 
 /*
- * Special lock-class for __icmpv6_socket:
+ * Special lock-class for __icmpv6_sk:
  */
 static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
 
@@ -786,8 +786,9 @@ int __init icmpv6_init(void)
 	int err, i, j;
 
 	for_each_possible_cpu(i) {
+		struct socket *sock;
 		err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
-				       &per_cpu(__icmpv6_socket, i));
+				       &sock);
 		if (err < 0) {
 			printk(KERN_ERR
 			       "Failed to initialize the ICMP6 control socket "
@@ -796,12 +797,12 @@ int __init icmpv6_init(void)
 			goto fail;
 		}
 
-		sk = per_cpu(__icmpv6_socket, i)->sk;
+		per_cpu(__icmpv6_sk, i) = sk = sock->sk;
 		sk->sk_allocation = GFP_ATOMIC;
 		/*
 		 * Split off their lock-class, because sk->sk_dst_lock
 		 * gets used from softirqs, which is safe for
-		 * __icmpv6_socket (because those never get directly used
+		 * __icmpv6_sk (because those never get directly used
 		 * via userspace syscalls), but unsafe for normal sockets.
 		 */
 		lockdep_set_class(&sk->sk_dst_lock,
@@ -829,7 +830,7 @@ int __init icmpv6_init(void)
 	for (j = 0; j < i; j++) {
 		if (!cpu_possible(j))
 			continue;
-		sock_release(per_cpu(__icmpv6_socket, j));
+		sock_release(per_cpu(__icmpv6_sk, j)->sk_socket);
 	}
 
 	return err;
@@ -840,7 +841,7 @@ void icmpv6_cleanup(void)
 	int i;
 
 	for_each_possible_cpu(i) {
-		sock_release(per_cpu(__icmpv6_socket, i));
+		sock_release(per_cpu(__icmpv6_sk, i)->sk_socket);
 	}
 	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
 }