author     Ingo Molnar <mingo@elte.hu>              2006-07-03 03:25:35 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-07-03 18:27:10 -0400
commit     a5b5bb9a053a973c23b867738c074acb3e80c0a0
tree       8eb78d458cfb309c566d6aaafb25639723920bcd
parent     0afffc723c8041a005134099847ac2a2fd0316a0
[PATCH] lockdep: annotate sk_locks
Teach sk_lock semantics to the lock validator. In the softirq path the
slock has mutex_trylock()+mutex_unlock() semantics; in the process-context
lock_sock() case it has mutex_lock()/mutex_unlock() semantics.

Thus we treat sock_owned_by_user() flagged areas as exclusion areas too,
not just those areas covered by a held sk_lock.slock.

Effect on non-lockdep kernels: minimal; sock_lock_init() has been turned
into an inline function.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  include/net/sock.h |  20
-rw-r--r--  net/core/sock.c    |  97
2 files changed, 98 insertions(+), 19 deletions(-)
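Viewed from the caller's side, the two exclusion regions described in the commit message look as follows. This is an illustrative sketch, not part of the patch: the example_* wrappers are hypothetical, while lock_sock(), release_sock(), bh_lock_sock() and friends are the stock socket-locking API that the diff below annotates.

```c
#include <net/sock.h>

/*
 * Process context: lock_sock()/release_sock() now look like
 * mutex_lock()/mutex_unlock() to the lock validator.
 */
static void example_process_context(struct sock *sk)
{
	lock_sock(sk);		/* annotated as mutex_acquire() in the patch below */
	/* ... modify socket state owned by process context ... */
	release_sock(sk);	/* annotated as mutex_release() in the patch below */
}

/*
 * Softirq context: the backlog-receive path is modelled as
 * mutex_trylock()+mutex_unlock(), and a socket flagged by
 * sock_owned_by_user() counts as an exclusion region too.
 */
static int example_softirq_context(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		rc = sk->sk_backlog_rcv(sk, skb);	/* runs "inside the mutex" */
	else
		sk_add_backlog(sk, skb);		/* processed later by release_sock() */
	bh_unlock_sock(sk);
	return rc;
}
```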
```diff
diff --git a/include/net/sock.h b/include/net/sock.h
index 0969fb60d6ea..324b3ea233d6 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -44,6 +44,7 @@
 #include <linux/timer.h>
 #include <linux/cache.h>
 #include <linux/module.h>
+#include <linux/lockdep.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>	/* struct sk_buff */
 #include <linux/security.h>
@@ -78,18 +79,17 @@ typedef struct {
 	spinlock_t		slock;
 	struct sock_iocb	*owner;
 	wait_queue_head_t	wq;
+	/*
+	 * We express the mutex-alike socket_lock semantics
+	 * to the lock validator by explicitly managing
+	 * the slock as a lock variant (in addition to
+	 * the slock itself):
+	 */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
 } socket_lock_t;
 
-extern struct lock_class_key af_family_keys[AF_MAX];
-
-#define sock_lock_init(__sk) \
-do {	spin_lock_init(&((__sk)->sk_lock.slock)); \
-	lockdep_set_class(&(__sk)->sk_lock.slock, \
-			  af_family_keys + (__sk)->sk_family); \
-	(__sk)->sk_lock.owner = NULL; \
-	init_waitqueue_head(&((__sk)->sk_lock.wq)); \
-} while(0)
-
 struct sock;
 struct proto;
 
```
```diff
diff --git a/net/core/sock.c b/net/core/sock.c
index 0b4d5d25b23c..51fcfbc041a7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -133,7 +133,42 @@
  * Each address family might have different locking rules, so we have
  * one slock key per address family:
  */
-struct lock_class_key af_family_keys[AF_MAX];
+static struct lock_class_key af_family_keys[AF_MAX];
+static struct lock_class_key af_family_slock_keys[AF_MAX];
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/*
+ * Make lock validator output more readable. (we pre-construct these
+ * strings build-time, so that runtime initialization of socket
+ * locks is fast):
+ */
+static const char *af_family_key_strings[AF_MAX+1] = {
+  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
+  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
+  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
+  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
+  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
+  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
+  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
+  "sk_lock-21"       , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
+  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
+  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-29"          ,
+  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_MAX"
+};
+static const char *af_family_slock_key_strings[AF_MAX+1] = {
+  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
+  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
+  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
+  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
+  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
+  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
+  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
+  "slock-21"       , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
+  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
+  "slock-27"       , "slock-28"          , "slock-29"          ,
+  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_MAX"
+};
+#endif
 
 /*
  * sk_callback_lock locking rules are per-address-family,
@@ -249,9 +284,16 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb)
 	skb->dev = NULL;
 
 	bh_lock_sock(sk);
-	if (!sock_owned_by_user(sk))
+	if (!sock_owned_by_user(sk)) {
+		/*
+		 * trylock + unlock semantics:
+		 */
+		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
+
 		rc = sk->sk_backlog_rcv(sk, skb);
-	else
+
+		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+	} else
 		sk_add_backlog(sk, skb);
 	bh_unlock_sock(sk);
 out:
@@ -761,6 +803,33 @@ lenout:
 	return 0;
 }
 
+/*
+ * Initialize an sk_lock.
+ *
+ * (We also register the sk_lock with the lock validator.)
+ */
+static void inline sock_lock_init(struct sock *sk)
+{
+	spin_lock_init(&sk->sk_lock.slock);
+	sk->sk_lock.owner = NULL;
+	init_waitqueue_head(&sk->sk_lock.wq);
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)&sk->sk_lock, sizeof(sk->sk_lock));
+
+	/*
+	 * Mark both the sk_lock and the sk_lock.slock as a
+	 * per-address-family lock class:
+	 */
+	lockdep_set_class_and_name(&sk->sk_lock.slock,
+				   af_family_slock_keys + sk->sk_family,
+				   af_family_slock_key_strings[sk->sk_family]);
+	lockdep_init_map(&sk->sk_lock.dep_map,
+			 af_family_key_strings[sk->sk_family],
+			 af_family_keys + sk->sk_family);
+}
+
 /**
  *	sk_alloc -	All socket objects are allocated here
  *	@family: protocol family
@@ -1465,24 +1534,34 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 void fastcall lock_sock(struct sock *sk)
 {
 	might_sleep();
-	spin_lock_bh(&(sk->sk_lock.slock));
+	spin_lock_bh(&sk->sk_lock.slock);
 	if (sk->sk_lock.owner)
 		__lock_sock(sk);
 	sk->sk_lock.owner = (void *)1;
-	spin_unlock_bh(&(sk->sk_lock.slock));
+	spin_unlock(&sk->sk_lock.slock);
+	/*
+	 * The sk_lock has mutex_lock() semantics here:
+	 */
+	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+	local_bh_enable();
 }
 
 EXPORT_SYMBOL(lock_sock);
 
 void fastcall release_sock(struct sock *sk)
 {
-	spin_lock_bh(&(sk->sk_lock.slock));
+	/*
+	 * The sk_lock has mutex_unlock() semantics:
+	 */
+	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+
+	spin_lock_bh(&sk->sk_lock.slock);
 	if (sk->sk_backlog.tail)
 		__release_sock(sk);
 	sk->sk_lock.owner = NULL;
-	if (waitqueue_active(&(sk->sk_lock.wq)))
-		wake_up(&(sk->sk_lock.wq));
-	spin_unlock_bh(&(sk->sk_lock.slock));
+	if (waitqueue_active(&sk->sk_lock.wq))
+		wake_up(&sk->sk_lock.wq);
+	spin_unlock_bh(&sk->sk_lock.slock);
 }
 EXPORT_SYMBOL(release_sock);
 
```
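The same annotation pattern generalizes to other hand-rolled, mutex-like constructs. Below is a minimal sketch of reusing it outside the socket code, under the assumptions the patch itself makes (lockdep of this era: three-argument lockdep_init_map(), mutex_acquire()/mutex_release() annotation macros). The my_lock type and its functions are hypothetical and only mirror what sock_lock_init(), lock_sock() and release_sock() do above.

```c
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Hypothetical mutex-like construct, analogous to socket_lock_t. */
struct my_lock {
	spinlock_t	slock;		/* short-term protection of "owned"      */
	int		owned;		/* "held by process context" flag        */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;	/* what the lock validator tracks        */
#endif
};

static struct lock_class_key my_lock_key;

static inline void my_lock_init(struct my_lock *l)
{
	spin_lock_init(&l->slock);
	l->owned = 0;
	/* Register the mutex-like part with the validator: */
	lockdep_init_map(&l->dep_map, "my_lock", &my_lock_key);
}

static inline void my_lock_acquire(struct my_lock *l)
{
	spin_lock(&l->slock);
	l->owned = 1;
	spin_unlock(&l->slock);
	/* mutex_lock() semantics, mirroring lock_sock(): */
	mutex_acquire(&l->dep_map, 0, 0, _RET_IP_);
}

static inline void my_lock_release(struct my_lock *l)
{
	/* mutex_unlock() semantics, mirroring release_sock(): */
	mutex_release(&l->dep_map, 1, _RET_IP_);
	spin_lock(&l->slock);
	l->owned = 0;
	spin_unlock(&l->slock);
}
```

As in the patch, the dep_map member only exists under CONFIG_DEBUG_LOCK_ALLOC; the annotation macros compile away to no-ops otherwise, so non-lockdep builds are unaffected.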