Diffstat (limited to 'net')
 net/8021q/vlan.c         |  11
 net/core/skbuff.c        |   7
 net/core/sock.c          | 111
 net/ipv4/route.c         |  26
 net/ipv4/tcp_ipv4.c      |   4
 net/ipv4/tcp_minisocks.c |   2
 net/netlink/af_netlink.c |   8
 net/sunrpc/rpc_pipe.c    |   8
 net/unix/af_unix.c       |  12
 9 files changed, 159 insertions(+), 30 deletions(-)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 3948949a609a..458031bfff55 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -364,6 +364,14 @@ static void vlan_transfer_operstate(const struct net_device *dev, struct net_dev
 	}
 }
 
+/*
+ * vlan network devices have devices nesting below it, and are a special
+ * "super class" of normal network devices; split their locks off into a
+ * separate class since they always nest.
+ */
+static struct lock_class_key vlan_netdev_xmit_lock_key;
+
+
 /* Attach a VLAN device to a mac address (ie Ethernet Card).
  * Returns the device that was created, or NULL if there was
  * an error of some kind.
@@ -460,6 +468,7 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
 
 	new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name,
 			       vlan_setup);
+
 	if (new_dev == NULL)
 		goto out_unlock;
 
@@ -518,6 +527,8 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
 	if (register_netdevice(new_dev))
 		goto out_free_newdev;
 
+	lockdep_set_class(&new_dev->_xmit_lock, &vlan_netdev_xmit_lock_key);
+
 	new_dev->iflink = real_dev->ifindex;
 	vlan_transfer_operstate(real_dev, new_dev);
 	linkwatch_fire_event(new_dev); /* _MUST_ call rfc2863_policy() */
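
The two vlan.c hunks above show the basic lockdep recipe for stacked network devices: declare one static struct lock_class_key per device type, then point the nested device's lock at that key right after registration, so a transmit path that holds the real device's lock and then the VLAN device's lock is seen as two classes nesting rather than one class recursing. A minimal sketch of the same pattern for a hypothetical stacked driver (names are illustrative, not part of this patch):

	/* one lock class for every device of this virtual type */
	static struct lock_class_key mydev_xmit_lock_key;

	static int mydev_register(struct net_device *dev)
	{
		int err = register_netdevice(dev);

		if (err)
			return err;
		/* move dev->_xmit_lock out of the generic netdev lock class */
		lockdep_set_class(&dev->_xmit_lock, &mydev_xmit_lock_key);
		return 0;
	}
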
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7cfbdb215ba2..44f6a181a754 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -71,6 +71,13 @@ static kmem_cache_t *skbuff_head_cache __read_mostly;
 static kmem_cache_t *skbuff_fclone_cache __read_mostly;
 
 /*
+ * lockdep: lock class key used by skb_queue_head_init():
+ */
+struct lock_class_key skb_queue_lock_key;
+
+EXPORT_SYMBOL(skb_queue_lock_key);
+
+/*
  * Keep out-of-line to prevent kernel bloat.
  * __builtin_return_address is not used because it is not always
  * reliable.
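
skb_queue_lock_key is exported so that the inline skb_queue_head_init() in include/linux/skbuff.h (outside this net/-only diffstat) can put every skb queue spinlock into one shared class instead of creating a new class at each call site. A sketch of how that initializer would presumably use the key (assumed companion change, not shown in this diff):

	extern struct lock_class_key skb_queue_lock_key;

	static inline void skb_queue_head_init(struct sk_buff_head *list)
	{
		spin_lock_init(&list->lock);
		lockdep_set_class(&list->lock, &skb_queue_lock_key);
		list->prev = list->next = (struct sk_buff *)list;
		list->qlen = 0;
	}
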
diff --git a/net/core/sock.c b/net/core/sock.c
index 533b9317144b..51fcfbc041a7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -129,6 +129,53 @@
 #include <net/tcp.h>
 #endif
 
+/*
+ * Each address family might have different locking rules, so we have
+ * one slock key per address family:
+ */
+static struct lock_class_key af_family_keys[AF_MAX];
+static struct lock_class_key af_family_slock_keys[AF_MAX];
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/*
+ * Make lock validator output more readable. (we pre-construct these
+ * strings build-time, so that runtime initialization of socket
+ * locks is fast):
+ */
+static const char *af_family_key_strings[AF_MAX+1] = {
+  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
+  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
+  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
+  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
+  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
+  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
+  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
+  "sk_lock-21"       , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
+  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
+  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-29"          ,
+  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_MAX"
+};
+static const char *af_family_slock_key_strings[AF_MAX+1] = {
+  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
+  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
+  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
+  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
+  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
+  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
+  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
+  "slock-21"       , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
+  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
+  "slock-27"       , "slock-28"          , "slock-29"          ,
+  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_MAX"
+};
+#endif
+
+/*
+ * sk_callback_lock locking rules are per-address-family,
+ * so split the lock classes by using a per-AF key:
+ */
+static struct lock_class_key af_callback_keys[AF_MAX];
+
 /* Take into consideration the size of the struct sk_buff overhead in the
  * determination of these values, since that is non-constant across
  * platforms. This makes socket queueing behavior and performance
@@ -237,9 +284,16 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb)
 	skb->dev = NULL;
 
 	bh_lock_sock(sk);
-	if (!sock_owned_by_user(sk))
+	if (!sock_owned_by_user(sk)) {
+		/*
+		 * trylock + unlock semantics:
+		 */
+		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
+
 		rc = sk->sk_backlog_rcv(sk, skb);
-	else
+
+		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+	} else
 		sk_add_backlog(sk, skb);
 	bh_unlock_sock(sk);
 out:
@@ -749,6 +803,33 @@ lenout:
 	return 0;
 }
 
+/*
+ * Initialize an sk_lock.
+ *
+ * (We also register the sk_lock with the lock validator.)
+ */
+static void inline sock_lock_init(struct sock *sk)
+{
+	spin_lock_init(&sk->sk_lock.slock);
+	sk->sk_lock.owner = NULL;
+	init_waitqueue_head(&sk->sk_lock.wq);
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)&sk->sk_lock, sizeof(sk->sk_lock));
+
+	/*
+	 * Mark both the sk_lock and the sk_lock.slock as a
+	 * per-address-family lock class:
+	 */
+	lockdep_set_class_and_name(&sk->sk_lock.slock,
+				   af_family_slock_keys + sk->sk_family,
+				   af_family_slock_key_strings[sk->sk_family]);
+	lockdep_init_map(&sk->sk_lock.dep_map,
+			 af_family_key_strings[sk->sk_family],
+			 af_family_keys + sk->sk_family);
+}
+
 /**
  *	sk_alloc - All socket objects are allocated here
  *	@family: protocol family
@@ -848,6 +929,8 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 
 		rwlock_init(&newsk->sk_dst_lock);
 		rwlock_init(&newsk->sk_callback_lock);
+		lockdep_set_class(&newsk->sk_callback_lock,
+				  af_callback_keys + newsk->sk_family);
 
 		newsk->sk_dst_cache	= NULL;
 		newsk->sk_wmem_queued	= 0;
@@ -1422,6 +1505,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 
 	rwlock_init(&sk->sk_dst_lock);
 	rwlock_init(&sk->sk_callback_lock);
+	lockdep_set_class(&sk->sk_callback_lock,
+			  af_callback_keys + sk->sk_family);
 
 	sk->sk_state_change	=	sock_def_wakeup;
 	sk->sk_data_ready	=	sock_def_readable;
@@ -1449,24 +1534,34 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 void fastcall lock_sock(struct sock *sk)
 {
 	might_sleep();
-	spin_lock_bh(&(sk->sk_lock.slock));
+	spin_lock_bh(&sk->sk_lock.slock);
 	if (sk->sk_lock.owner)
 		__lock_sock(sk);
 	sk->sk_lock.owner = (void *)1;
-	spin_unlock_bh(&(sk->sk_lock.slock));
+	spin_unlock(&sk->sk_lock.slock);
+	/*
+	 * The sk_lock has mutex_lock() semantics here:
+	 */
+	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+	local_bh_enable();
 }
 
 EXPORT_SYMBOL(lock_sock);
 
 void fastcall release_sock(struct sock *sk)
 {
-	spin_lock_bh(&(sk->sk_lock.slock));
+	/*
+	 * The sk_lock has mutex_unlock() semantics:
+	 */
+	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+
+	spin_lock_bh(&sk->sk_lock.slock);
 	if (sk->sk_backlog.tail)
 		__release_sock(sk);
 	sk->sk_lock.owner = NULL;
-	if (waitqueue_active(&(sk->sk_lock.wq)))
-		wake_up(&(sk->sk_lock.wq));
-	spin_unlock_bh(&(sk->sk_lock.slock));
+	if (waitqueue_active(&sk->sk_lock.wq))
+		wake_up(&sk->sk_lock.wq);
+	spin_unlock_bh(&sk->sk_lock.slock);
 }
 EXPORT_SYMBOL(release_sock);
 
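
The sock.c changes do two things: sk_lock, its slock and sk_callback_lock are split into one lock class per address family (so, say, an AF_NETLINK socket locked inside an AF_INET socket's code path is seen as two classes nesting, not a self-deadlock), and lock_sock()/release_sock() gain explicit mutex_acquire()/mutex_release() annotations so lockdep tracks the sleeping sk_lock as if it were a mutex, with sk_receive_skb() annotated as the trylock case. The same annotation idiom works for any hand-rolled, mutex-like construct; a rough sketch under that assumption (illustrative type and helpers, not from this patch), where the dep_map would be set up with lockdep_init_map() exactly as sock_lock_init() does above:

	struct my_lock {
		spinlock_t		slock;		/* protects 'owner' */
		void			*owner;
		struct lockdep_map	dep_map;	/* lockdep identity */
	};

	static void my_lock_acquire(struct my_lock *l)
	{
		/* like mutex_lock(): record the acquire for lockdep */
		mutex_acquire(&l->dep_map, 0, 0, _RET_IP_);
		spin_lock_bh(&l->slock);
		l->owner = (void *)1;
		spin_unlock_bh(&l->slock);
	}

	static void my_lock_release(struct my_lock *l)
	{
		/* like mutex_unlock(): record the release for lockdep */
		mutex_release(&l->dep_map, 1, _RET_IP_);
		spin_lock_bh(&l->slock);
		l->owner = NULL;
		spin_unlock_bh(&l->slock);
	}
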
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index da44fabf4dc5..2dc6dbb28467 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -205,21 +205,27 @@ __u8 ip_tos2prio[16] = {
 struct rt_hash_bucket {
 	struct rtable	*chain;
 };
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
+	defined(CONFIG_PROVE_LOCKING)
 /*
  * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
  * The size of this table is a power of two and depends on the number of CPUS.
+ * (on lockdep we have a quite big spinlock_t, so keep the size down there)
  */
-#if NR_CPUS >= 32
-#define RT_HASH_LOCK_SZ	4096
-#elif NR_CPUS >= 16
-#define RT_HASH_LOCK_SZ	2048
-#elif NR_CPUS >= 8
-#define RT_HASH_LOCK_SZ	1024
-#elif NR_CPUS >= 4
-#define RT_HASH_LOCK_SZ	512
+#ifdef CONFIG_LOCKDEP
+# define RT_HASH_LOCK_SZ	256
 #else
-#define RT_HASH_LOCK_SZ	256
+# if NR_CPUS >= 32
+#  define RT_HASH_LOCK_SZ	4096
+# elif NR_CPUS >= 16
+#  define RT_HASH_LOCK_SZ	2048
+# elif NR_CPUS >= 8
+#  define RT_HASH_LOCK_SZ	1024
+# elif NR_CPUS >= 4
+#  define RT_HASH_LOCK_SZ	512
+# else
+#  define RT_HASH_LOCK_SZ	256
+# endif
 #endif
 
 static spinlock_t	*rt_hash_locks;
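
The route.c hunk is purely about memory: with CONFIG_LOCKDEP every spinlock_t carries an embedded struct lockdep_map, so the per-CPU-scaled hash-lock table would grow considerably, and 256 entries is plenty when the kernel is built for lock validation rather than raw SMP scalability. A rough back-of-the-envelope illustration (sizes are illustrative only and vary by architecture and config):

	without lockdep: spinlock_t ~  4 bytes  -> 4096 locks ~  16 KB
	with lockdep:    spinlock_t ~ 64 bytes  -> 4096 locks ~ 256 KB
	                                        ->  256 locks ~  16 KB
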
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 8355b729fa95..5a886e6efbbe 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -90,7 +90,7 @@ static struct socket *tcp_socket;
 void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
 
 struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
-	.lhash_lock  = RW_LOCK_UNLOCKED,
+	.lhash_lock  = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock),
 	.lhash_users = ATOMIC_INIT(0),
 	.lhash_wait  = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
 };
@@ -1090,7 +1090,7 @@ process:
 
 	skb->dev = NULL;
 
-	bh_lock_sock(sk);
+	bh_lock_sock_nested(sk);
 	ret = 0;
 	if (!sock_owned_by_user(sk)) {
 #ifdef CONFIG_NET_DMA
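
Two separate annotations appear in tcp_ipv4.c. __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock) replaces the anonymous RW_LOCK_UNLOCKED initializer so this statically initialized rwlock gets its own lock class, with the variable name reused as the class name in lockdep reports. bh_lock_sock_nested() takes the socket slock in tcp_v4_rcv() as a one-level nested acquisition, so that grabbing a socket's slock in the softirq receive path while another slock of the same class may already be held elsewhere in the stack is not flagged as recursive locking. Its definition presumably lives in include/net/sock.h (not part of this net/-only diff) along these lines:

	#define bh_lock_sock_nested(__sk) \
		spin_lock_nested(&((__sk)->sk_lock.slock), SINGLE_DEPTH_NESTING)
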
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index e0851697ad5e..0ccb7cb22b15 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -40,7 +40,7 @@ int sysctl_tcp_abort_on_overflow;
 struct inet_timewait_death_row tcp_death_row = {
 	.sysctl_max_tw_buckets = NR_FILE * 2,
 	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
-	.death_lock	= SPIN_LOCK_UNLOCKED,
+	.death_lock	= __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
 	.hashinfo	= &tcp_hashinfo,
 	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
 					    (unsigned long)&tcp_death_row),
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 70cee82a98bf..55c0adc8f115 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -156,7 +156,7 @@ static void netlink_sock_destruct(struct sock *sk)
 
 static void netlink_table_grab(void)
 {
-	write_lock_bh(&nl_table_lock);
+	write_lock_irq(&nl_table_lock);
 
 	if (atomic_read(&nl_table_users)) {
 		DECLARE_WAITQUEUE(wait, current);
@@ -166,9 +166,9 @@ static void netlink_table_grab(void)
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			if (atomic_read(&nl_table_users) == 0)
 				break;
-			write_unlock_bh(&nl_table_lock);
+			write_unlock_irq(&nl_table_lock);
 			schedule();
-			write_lock_bh(&nl_table_lock);
+			write_lock_irq(&nl_table_lock);
 		}
 
 		__set_current_state(TASK_RUNNING);
@@ -178,7 +178,7 @@ static void netlink_table_grab(void)
 
 static __inline__ void netlink_table_ungrab(void)
 {
-	write_unlock_bh(&nl_table_lock);
+	write_unlock_irq(&nl_table_lock);
 	wake_up(&nl_table_wait);
 }
 
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 6db6006616c6..dc6cb93c8830 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -515,7 +515,7 @@ rpc_depopulate(struct dentry *parent)
 	struct dentry *dentry, *dvec[10];
 	int n = 0;
 
-	mutex_lock(&dir->i_mutex);
+	mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD);
 repeat:
 	spin_lock(&dcache_lock);
 	list_for_each_safe(pos, next, &parent->d_subdirs) {
@@ -631,7 +631,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd)
 	if ((error = rpc_lookup_parent(path, nd)) != 0)
 		return ERR_PTR(error);
 	dir = nd->dentry->d_inode;
-	mutex_lock(&dir->i_mutex);
+	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
 	dentry = lookup_one_len(nd->last.name, nd->dentry, nd->last.len);
 	if (IS_ERR(dentry))
 		goto out_err;
@@ -693,7 +693,7 @@ rpc_rmdir(char *path)
 	if ((error = rpc_lookup_parent(path, &nd)) != 0)
 		return error;
 	dir = nd.dentry->d_inode;
-	mutex_lock(&dir->i_mutex);
+	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
 	dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len);
 	if (IS_ERR(dentry)) {
 		error = PTR_ERR(dentry);
@@ -754,7 +754,7 @@ rpc_unlink(char *path)
 	if ((error = rpc_lookup_parent(path, &nd)) != 0)
 		return error;
 	dir = nd.dentry->d_inode;
-	mutex_lock(&dir->i_mutex);
+	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
 	dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len);
 	if (IS_ERR(dentry)) {
 		error = PTR_ERR(dentry);
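
All four rpc_pipe.c hunks use the same device: mutex_lock_nested() takes an explicit subclass, so holding a parent directory's i_mutex and a child's at the same time is classified as parent/child nesting rather than recursion on the single i_mutex class; I_MUTEX_PARENT and I_MUTEX_CHILD are the conventional VFS subclasses for that. A sketch of the generic convention (hypothetical helper, not from this patch):

	/* hypothetical helper: take a parent and a child i_mutex together */
	static void lock_parent_child(struct inode *dir, struct inode *inode)
	{
		mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
	}
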
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index aca650109425..e9a287bc3142 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -565,6 +565,14 @@ static struct proto unix_proto = {
 	.obj_size = sizeof(struct unix_sock),
 };
 
+/*
+ * AF_UNIX sockets do not interact with hardware, hence they
+ * dont trigger interrupts - so it's safe for them to have
+ * bh-unsafe locking for their sk_receive_queue.lock. Split off
+ * this special lock-class by reinitializing the spinlock key:
+ */
+static struct lock_class_key af_unix_sk_receive_queue_lock_key;
+
 static struct sock * unix_create1(struct socket *sock)
 {
 	struct sock *sk = NULL;
@@ -580,6 +588,8 @@ static struct sock * unix_create1(struct socket *sock)
 	atomic_inc(&unix_nr_socks);
 
 	sock_init_data(sock,sk);
+	lockdep_set_class(&sk->sk_receive_queue.lock,
+				&af_unix_sk_receive_queue_lock_key);
 
 	sk->sk_write_space	= unix_write_space;
 	sk->sk_max_ack_backlog	= sysctl_unix_max_dgram_qlen;
@@ -1045,7 +1055,7 @@ restart:
 		goto out_unlock;
 	}
 
-	unix_state_wlock(sk);
+	unix_state_wlock_nested(sk);
 
 	if (sk->sk_state != st) {
 		unix_state_wunlock(sk);
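
The af_unix.c changes again split and annotate rather than alter behavior: the receive-queue lock gets its own class because AF_UNIX sockets never touch it from hardware interrupt context, and unix_state_wlock_nested() marks the connect() path, where the state locks of two different AF_UNIX sockets (the connecting socket and its peer) are held at once, as a one-level nesting instead of a same-class deadlock. Whenever two locks of the same class must be held together in a well-defined order, the same idiom applies; a generic sketch assuming plain spinlocks and a hypothetical struct foo:

	static void lock_pair(struct foo *a, struct foo *b)
	{
		/* impose a stable order (e.g. by address) to avoid ABBA deadlocks */
		if (a > b)
			swap(a, b);
		spin_lock(&a->lock);
		spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
	}
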