aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2006-07-03 03:25:12 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-07-03 18:27:07 -0400
commita09785a2414afb261d9f719d544742af4300df22 (patch)
treea9fcbd82e9610cb77e7a2e1338ab901876dbcea8
parentda21f24dd73954c2ed0cd39a698e2c9916c05d71 (diff)
[PATCH] lockdep: annotate af_unix locking
Teach special (recursive) locking code to the lock validator. Also splits af_unix's sk_receive_queue.lock class from the other networking skb-queue locks. Has no effect on non-lockdep kernels.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--include/net/af_unix.h3
-rw-r--r--net/unix/af_unix.c12
2 files changed, 14 insertions, 1 deletions
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 5ba72d95280c..2fec827c8801 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -67,6 +67,9 @@ struct unix_skb_parms {
 #define unix_state_rlock(s)	spin_lock(&unix_sk(s)->lock)
 #define unix_state_runlock(s)	spin_unlock(&unix_sk(s)->lock)
 #define unix_state_wlock(s)	spin_lock(&unix_sk(s)->lock)
+#define unix_state_wlock_nested(s) \
+				spin_lock_nested(&unix_sk(s)->lock, \
+				SINGLE_DEPTH_NESTING)
 #define unix_state_wunlock(s)	spin_unlock(&unix_sk(s)->lock)
 
 #ifdef __KERNEL__
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index aca650109425..e9a287bc3142 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -565,6 +565,14 @@ static struct proto unix_proto = {
 	.obj_size = sizeof(struct unix_sock),
 };
 
+/*
+ * AF_UNIX sockets do not interact with hardware, hence they
+ * dont trigger interrupts - so it's safe for them to have
+ * bh-unsafe locking for their sk_receive_queue.lock. Split off
+ * this special lock-class by reinitializing the spinlock key:
+ */
+static struct lock_class_key af_unix_sk_receive_queue_lock_key;
+
 static struct sock * unix_create1(struct socket *sock)
 {
 	struct sock *sk = NULL;
@@ -580,6 +588,8 @@ static struct sock * unix_create1(struct socket *sock)
 	atomic_inc(&unix_nr_socks);
 
 	sock_init_data(sock,sk);
+	lockdep_set_class(&sk->sk_receive_queue.lock,
+				&af_unix_sk_receive_queue_lock_key);
 
 	sk->sk_write_space	= unix_write_space;
 	sk->sk_max_ack_backlog	= sysctl_unix_max_dgram_qlen;
@@ -1045,7 +1055,7 @@ restart:
 		goto out_unlock;
 	}
 
-	unix_state_wlock(sk);
+	unix_state_wlock_nested(sk);
 
 	if (sk->sk_state != st) {
 		unix_state_wunlock(sk);