author     Arjan van de Ven <arjan@infradead.org>    2006-03-21 01:33:17 -0500
committer  David S. Miller <davem@davemloft.net>     2006-03-21 01:33:17 -0500
commit     4a3e2f711a00a1feb72ae12fdc749da10179d185 (patch)
tree       76ced9d3270dea4b864da71fa1d4415d2e3c8b11
parent     d4ccd08cdfa8d34f4d25b62041343c52fc79385f (diff)
[NET] sem2mutex: net/
Semaphore to mutex conversion.

The conversion was generated via scripts, and the result was validated
automatically via a script as well.

Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
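For reference, the substitution applied in every file below follows one fixed pattern: a DECLARE_MUTEX() semaphore becomes a DEFINE_MUTEX() mutex, down()/up() become mutex_lock()/mutex_unlock(), down_interruptible() becomes mutex_lock_interruptible(), and down_trylock() becomes mutex_trylock() with the test inverted, since down_trylock() returns non-zero on failure while mutex_trylock() returns non-zero on success (see the net/unix/garbage.c hunk). The sketch below only illustrates that mapping; it is not part of the patch, and example_mutex and the example_* functions are made-up names.

/* Illustrative sketch of the sem2mutex pattern; example_mutex and the
 * example_* functions are hypothetical and do not appear in this patch. */
#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(example_mutex);	/* was: static DECLARE_MUTEX(example_sem); */

static void example_serialized(void)
{
	mutex_lock(&example_mutex);	/* was: down(&example_sem); */
	/* ... critical section ... */
	mutex_unlock(&example_mutex);	/* was: up(&example_sem); */
}

static int example_interruptible(void)
{
	/* was: if (down_interruptible(&example_sem) != 0) */
	if (mutex_lock_interruptible(&example_mutex) != 0)
		return -EINTR;
	/* ... */
	mutex_unlock(&example_mutex);
	return 0;
}

static void example_try(void)
{
	/* was: if (down_trylock(&example_sem)) return;
	 * Note the inverted test: down_trylock() returns non-zero on failure,
	 * mutex_trylock() returns non-zero (1) on success. */
	if (!mutex_trylock(&example_mutex))
		return;
	/* ... */
	mutex_unlock(&example_mutex);
}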
-rw-r--r--   include/net/xfrm.h              |  3
-rw-r--r--   net/atm/ioctl.c                 | 15
-rw-r--r--   net/bluetooth/rfcomm/core.c     |  8
-rw-r--r--   net/core/dev.c                  |  7
-rw-r--r--   net/core/flow.c                 |  7
-rw-r--r--   net/ipv4/ipcomp.c               | 17
-rw-r--r--   net/ipv4/netfilter/ip_queue.c   | 11
-rw-r--r--   net/ipv4/xfrm4_tunnel.c         | 11
-rw-r--r--   net/ipv6/ipcomp6.c              | 15
-rw-r--r--   net/ipv6/netfilter/ip6_queue.c  | 11
-rw-r--r--   net/ipv6/xfrm6_tunnel.c         | 11
-rw-r--r--   net/key/af_key.c                |  4
-rw-r--r--   net/netfilter/nf_sockopt.c      | 25
-rw-r--r--   net/socket.c                    | 31
-rw-r--r--   net/sunrpc/cache.c              | 17
-rw-r--r--   net/sunrpc/sched.c              | 11
-rw-r--r--   net/unix/garbage.c              |  7
-rw-r--r--   net/xfrm/xfrm_policy.c          |  4
-rw-r--r--   net/xfrm/xfrm_user.c            |  4
19 files changed, 118 insertions, 101 deletions
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 156f52ef8a91..786371365f2b 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -11,6 +11,7 @@
 #include <linux/crypto.h>
 #include <linux/pfkeyv2.h>
 #include <linux/in6.h>
+#include <linux/mutex.h>
 
 #include <net/sock.h>
 #include <net/dst.h>
@@ -24,7 +25,7 @@ extern struct sock *xfrm_nl;
 extern u32 sysctl_xfrm_aevent_etime;
 extern u32 sysctl_xfrm_aevent_rseqth;
 
-extern struct semaphore xfrm_cfg_sem;
+extern struct mutex xfrm_cfg_mutex;
 
 /* Organization of SPD aka "XFRM rules"
    ------------------------------------
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index eb109af7eb4a..851cfa6312af 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -18,6 +18,7 @@
 #include <linux/atmmpc.h>
 #include <net/atmclip.h>
 #include <linux/atmlec.h>
+#include <linux/mutex.h>
 #include <asm/ioctls.h>
 
 #include "resources.h"
@@ -25,22 +26,22 @@
 #include "common.h"
 
 
-static DECLARE_MUTEX(ioctl_mutex);
+static DEFINE_MUTEX(ioctl_mutex);
 static LIST_HEAD(ioctl_list);
 
 
 void register_atm_ioctl(struct atm_ioctl *ioctl)
 {
-	down(&ioctl_mutex);
+	mutex_lock(&ioctl_mutex);
 	list_add_tail(&ioctl->list, &ioctl_list);
-	up(&ioctl_mutex);
+	mutex_unlock(&ioctl_mutex);
 }
 
 void deregister_atm_ioctl(struct atm_ioctl *ioctl)
 {
-	down(&ioctl_mutex);
+	mutex_lock(&ioctl_mutex);
 	list_del(&ioctl->list);
-	up(&ioctl_mutex);
+	mutex_unlock(&ioctl_mutex);
 }
 
 EXPORT_SYMBOL(register_atm_ioctl);
@@ -137,7 +138,7 @@ int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 
 	error = -ENOIOCTLCMD;
 
-	down(&ioctl_mutex);
+	mutex_lock(&ioctl_mutex);
 	list_for_each(pos, &ioctl_list) {
 		struct atm_ioctl * ic = list_entry(pos, struct atm_ioctl, list);
 		if (try_module_get(ic->owner)) {
@@ -147,7 +148,7 @@ int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 				break;
 		}
 	}
-	up(&ioctl_mutex);
+	mutex_unlock(&ioctl_mutex);
 
 	if (error != -ENOIOCTLCMD)
 		goto done;
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 5b4253c61f62..e99010ce8bb2 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -37,6 +37,8 @@
 #include <linux/wait.h>
 #include <linux/device.h>
 #include <linux/net.h>
+#include <linux/mutex.h>
+
 #include <net/sock.h>
 #include <asm/uaccess.h>
 #include <asm/unaligned.h>
@@ -57,9 +59,9 @@ static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU;
 
 static struct task_struct *rfcomm_thread;
 
-static DECLARE_MUTEX(rfcomm_sem);
-#define rfcomm_lock()	down(&rfcomm_sem);
-#define rfcomm_unlock()	up(&rfcomm_sem);
+static DEFINE_MUTEX(rfcomm_mutex);
+#define rfcomm_lock()	mutex_lock(&rfcomm_mutex)
+#define rfcomm_unlock()	mutex_unlock(&rfcomm_mutex)
 
 static unsigned long rfcomm_event;
 
diff --git a/net/core/dev.c b/net/core/dev.c
index ee044097f7f2..08dec6eb922b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -81,6 +81,7 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/mutex.h>
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/socket.h>
@@ -2931,7 +2932,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
  * 2) Since we run with the RTNL semaphore not held, we can sleep
  * safely in order to wait for the netdev refcnt to drop to zero.
  */
-static DECLARE_MUTEX(net_todo_run_mutex);
+static DEFINE_MUTEX(net_todo_run_mutex);
 void netdev_run_todo(void)
 {
 	struct list_head list = LIST_HEAD_INIT(list);
@@ -2939,7 +2940,7 @@ void netdev_run_todo(void)
 
 
 	/* Need to guard against multiple cpu's getting out of order. */
-	down(&net_todo_run_mutex);
+	mutex_lock(&net_todo_run_mutex);
 
 	/* Not safe to do outside the semaphore. We must not return
 	 * until all unregister events invoked by the local processor
@@ -2996,7 +2997,7 @@ void netdev_run_todo(void)
 	}
 
 out:
-	up(&net_todo_run_mutex);
+	mutex_unlock(&net_todo_run_mutex);
 }
 
 /**
diff --git a/net/core/flow.c b/net/core/flow.c
index c4f25385029f..55789f832eda 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -20,6 +20,7 @@
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/mutex.h>
 #include <net/flow.h>
 #include <asm/atomic.h>
 #include <asm/semaphore.h>
@@ -287,11 +288,11 @@ static void flow_cache_flush_per_cpu(void *data)
 void flow_cache_flush(void)
 {
 	struct flow_flush_info info;
-	static DECLARE_MUTEX(flow_flush_sem);
+	static DEFINE_MUTEX(flow_flush_sem);
 
 	/* Don't want cpus going down or up during this. */
 	lock_cpu_hotplug();
-	down(&flow_flush_sem);
+	mutex_lock(&flow_flush_sem);
 	atomic_set(&info.cpuleft, num_online_cpus());
 	init_completion(&info.completion);
 
@@ -301,7 +302,7 @@ void flow_cache_flush(void)
 	local_bh_enable();
 
 	wait_for_completion(&info.completion);
-	up(&flow_flush_sem);
+	mutex_unlock(&flow_flush_sem);
 	unlock_cpu_hotplug();
 }
 
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index d64e2ec8da7b..c95020f7c81e 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -24,6 +24,7 @@
 #include <linux/list.h>
 #include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
+#include <linux/mutex.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
 #include <net/icmp.h>
@@ -36,7 +37,7 @@ struct ipcomp_tfms {
 	int users;
 };
 
-static DECLARE_MUTEX(ipcomp_resource_sem);
+static DEFINE_MUTEX(ipcomp_resource_mutex);
 static void **ipcomp_scratches;
 static int ipcomp_scratch_users;
 static LIST_HEAD(ipcomp_tfms_list);
@@ -253,7 +254,7 @@ error:
 }
 
 /*
- * Must be protected by xfrm_cfg_sem. State and tunnel user references are
+ * Must be protected by xfrm_cfg_mutex. State and tunnel user references are
  * always incremented on success.
 */
 static int ipcomp_tunnel_attach(struct xfrm_state *x)
@@ -411,9 +412,9 @@ static void ipcomp_destroy(struct xfrm_state *x)
 	if (!ipcd)
 		return;
 	xfrm_state_delete_tunnel(x);
-	down(&ipcomp_resource_sem);
+	mutex_lock(&ipcomp_resource_mutex);
 	ipcomp_free_data(ipcd);
-	up(&ipcomp_resource_sem);
+	mutex_unlock(&ipcomp_resource_mutex);
 	kfree(ipcd);
 }
 
@@ -440,14 +441,14 @@ static int ipcomp_init_state(struct xfrm_state *x)
 	if (x->props.mode)
 		x->props.header_len += sizeof(struct iphdr);
 
-	down(&ipcomp_resource_sem);
+	mutex_lock(&ipcomp_resource_mutex);
 	if (!ipcomp_alloc_scratches())
 		goto error;
 
 	ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name);
 	if (!ipcd->tfms)
 		goto error;
-	up(&ipcomp_resource_sem);
+	mutex_unlock(&ipcomp_resource_mutex);
 
 	if (x->props.mode) {
 		err = ipcomp_tunnel_attach(x);
@@ -464,10 +465,10 @@ out:
 	return err;
 
 error_tunnel:
-	down(&ipcomp_resource_sem);
+	mutex_lock(&ipcomp_resource_mutex);
 error:
 	ipcomp_free_data(ipcd);
-	up(&ipcomp_resource_sem);
+	mutex_unlock(&ipcomp_resource_mutex);
 	kfree(ipcd);
 	goto out;
 }
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 08f80e2ea2aa..1655866c55b9 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -35,6 +35,7 @@
 #include <linux/sysctl.h>
 #include <linux/proc_fs.h>
 #include <linux/security.h>
+#include <linux/mutex.h>
 #include <net/sock.h>
 #include <net/route.h>
 
@@ -61,7 +62,7 @@ static unsigned int queue_dropped = 0;
 static unsigned int queue_user_dropped = 0;
 static struct sock *ipqnl;
 static LIST_HEAD(queue_list);
-static DECLARE_MUTEX(ipqnl_sem);
+static DEFINE_MUTEX(ipqnl_mutex);
 
 static void
 ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
@@ -539,7 +540,7 @@ ipq_rcv_sk(struct sock *sk, int len)
 	struct sk_buff *skb;
 	unsigned int qlen;
 
-	down(&ipqnl_sem);
+	mutex_lock(&ipqnl_mutex);
 
 	for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
 		skb = skb_dequeue(&sk->sk_receive_queue);
@@ -547,7 +548,7 @@ ipq_rcv_sk(struct sock *sk, int len)
 		kfree_skb(skb);
 	}
 
-	up(&ipqnl_sem);
+	mutex_unlock(&ipqnl_mutex);
 }
 
 static int
@@ -708,8 +709,8 @@ cleanup_sysctl:
 
 cleanup_ipqnl:
 	sock_release(ipqnl->sk_socket);
-	down(&ipqnl_sem);
-	up(&ipqnl_sem);
+	mutex_lock(&ipqnl_mutex);
+	mutex_unlock(&ipqnl_mutex);
 
 cleanup_netlink_notifier:
 	netlink_unregister_notifier(&ipq_nl_notifier);
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
index afbb0d4cc305..b08d56b117f8 100644
--- a/net/ipv4/xfrm4_tunnel.c
+++ b/net/ipv4/xfrm4_tunnel.c
@@ -5,6 +5,7 @@
 
 #include <linux/skbuff.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
 #include <net/protocol.h>
@@ -26,19 +27,19 @@ static int ipip_xfrm_rcv(struct xfrm_state *x, struct xfrm_decap_state *decap, s
 }
 
 static struct xfrm_tunnel *ipip_handler;
-static DECLARE_MUTEX(xfrm4_tunnel_sem);
+static DEFINE_MUTEX(xfrm4_tunnel_mutex);
 
 int xfrm4_tunnel_register(struct xfrm_tunnel *handler)
 {
 	int ret;
 
-	down(&xfrm4_tunnel_sem);
+	mutex_lock(&xfrm4_tunnel_mutex);
 	ret = 0;
 	if (ipip_handler != NULL)
 		ret = -EINVAL;
 	if (!ret)
 		ipip_handler = handler;
-	up(&xfrm4_tunnel_sem);
+	mutex_unlock(&xfrm4_tunnel_mutex);
 
 	return ret;
 }
@@ -49,13 +50,13 @@ int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler)
 {
 	int ret;
 
-	down(&xfrm4_tunnel_sem);
+	mutex_lock(&xfrm4_tunnel_mutex);
 	ret = 0;
 	if (ipip_handler != handler)
 		ret = -EINVAL;
 	if (!ret)
 		ipip_handler = NULL;
-	up(&xfrm4_tunnel_sem);
+	mutex_unlock(&xfrm4_tunnel_mutex);
 
 	synchronize_net();
 
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 6107592fbd8c..3c7b324cd20c 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -50,6 +50,7 @@
 #include <net/protocol.h>
 #include <linux/ipv6.h>
 #include <linux/icmpv6.h>
+#include <linux/mutex.h>
 
 struct ipcomp6_tfms {
 	struct list_head list;
@@ -57,7 +58,7 @@ struct ipcomp6_tfms {
 	int users;
 };
 
-static DECLARE_MUTEX(ipcomp6_resource_sem);
+static DEFINE_MUTEX(ipcomp6_resource_mutex);
 static void **ipcomp6_scratches;
 static int ipcomp6_scratch_users;
 static LIST_HEAD(ipcomp6_tfms_list);
@@ -405,9 +406,9 @@ static void ipcomp6_destroy(struct xfrm_state *x)
 	if (!ipcd)
 		return;
 	xfrm_state_delete_tunnel(x);
-	down(&ipcomp6_resource_sem);
+	mutex_lock(&ipcomp6_resource_mutex);
 	ipcomp6_free_data(ipcd);
-	up(&ipcomp6_resource_sem);
+	mutex_unlock(&ipcomp6_resource_mutex);
 	kfree(ipcd);
 
 	xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr);
@@ -436,14 +437,14 @@ static int ipcomp6_init_state(struct xfrm_state *x)
 	if (x->props.mode)
 		x->props.header_len += sizeof(struct ipv6hdr);
 
-	down(&ipcomp6_resource_sem);
+	mutex_lock(&ipcomp6_resource_mutex);
 	if (!ipcomp6_alloc_scratches())
 		goto error;
 
 	ipcd->tfms = ipcomp6_alloc_tfms(x->calg->alg_name);
 	if (!ipcd->tfms)
 		goto error;
-	up(&ipcomp6_resource_sem);
+	mutex_unlock(&ipcomp6_resource_mutex);
 
 	if (x->props.mode) {
 		err = ipcomp6_tunnel_attach(x);
@@ -459,10 +460,10 @@ static int ipcomp6_init_state(struct xfrm_state *x)
 out:
 	return err;
 error_tunnel:
-	down(&ipcomp6_resource_sem);
+	mutex_lock(&ipcomp6_resource_mutex);
 error:
 	ipcomp6_free_data(ipcd);
-	up(&ipcomp6_resource_sem);
+	mutex_unlock(&ipcomp6_resource_mutex);
 	kfree(ipcd);
 
 	goto out;
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index af0635084df8..344eab3b5da8 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -35,6 +35,7 @@
 #include <linux/spinlock.h>
 #include <linux/sysctl.h>
 #include <linux/proc_fs.h>
+#include <linux/mutex.h>
 #include <net/sock.h>
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
@@ -65,7 +66,7 @@ static unsigned int queue_dropped = 0;
 static unsigned int queue_user_dropped = 0;
 static struct sock *ipqnl;
 static LIST_HEAD(queue_list);
-static DECLARE_MUTEX(ipqnl_sem);
+static DEFINE_MUTEX(ipqnl_mutex);
 
 static void
 ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
@@ -537,7 +538,7 @@ ipq_rcv_sk(struct sock *sk, int len)
 	struct sk_buff *skb;
 	unsigned int qlen;
 
-	down(&ipqnl_sem);
+	mutex_lock(&ipqnl_mutex);
 
 	for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
 		skb = skb_dequeue(&sk->sk_receive_queue);
@@ -545,7 +546,7 @@ ipq_rcv_sk(struct sock *sk, int len)
 		kfree_skb(skb);
 	}
 
-	up(&ipqnl_sem);
+	mutex_unlock(&ipqnl_mutex);
 }
 
 static int
@@ -704,8 +705,8 @@ cleanup_sysctl:
 
 cleanup_ipqnl:
 	sock_release(ipqnl->sk_socket);
-	down(&ipqnl_sem);
-	up(&ipqnl_sem);
+	mutex_lock(&ipqnl_mutex);
+	mutex_unlock(&ipqnl_mutex);
 
 cleanup_netlink_notifier:
 	netlink_unregister_notifier(&ipq_nl_notifier);
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 8cfc58b96fc2..08f9abbdf1d7 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -31,6 +31,7 @@
 #include <net/protocol.h>
 #include <linux/ipv6.h>
 #include <linux/icmpv6.h>
+#include <linux/mutex.h>
 
 #ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG
 # define X6TDEBUG	3
@@ -357,19 +358,19 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct xfrm_decap_state *dec
 }
 
 static struct xfrm6_tunnel *xfrm6_tunnel_handler;
-static DECLARE_MUTEX(xfrm6_tunnel_sem);
+static DEFINE_MUTEX(xfrm6_tunnel_mutex);
 
 int xfrm6_tunnel_register(struct xfrm6_tunnel *handler)
 {
 	int ret;
 
-	down(&xfrm6_tunnel_sem);
+	mutex_lock(&xfrm6_tunnel_mutex);
 	ret = 0;
 	if (xfrm6_tunnel_handler != NULL)
 		ret = -EINVAL;
 	if (!ret)
 		xfrm6_tunnel_handler = handler;
-	up(&xfrm6_tunnel_sem);
+	mutex_unlock(&xfrm6_tunnel_mutex);
 
 	return ret;
 }
@@ -380,13 +381,13 @@ int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler)
 {
 	int ret;
 
-	down(&xfrm6_tunnel_sem);
+	mutex_lock(&xfrm6_tunnel_mutex);
 	ret = 0;
 	if (xfrm6_tunnel_handler != handler)
 		ret = -EINVAL;
 	if (!ret)
 		xfrm6_tunnel_handler = NULL;
-	up(&xfrm6_tunnel_sem);
+	mutex_unlock(&xfrm6_tunnel_mutex);
 
 	synchronize_net();
 
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 520fe70d0993..859582275cab 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3080,9 +3080,9 @@ static int pfkey_sendmsg(struct kiocb *kiocb,
 	if (!hdr)
 		goto out;
 
-	down(&xfrm_cfg_sem);
+	mutex_lock(&xfrm_cfg_mutex);
 	err = pfkey_process(sk, skb, hdr);
-	up(&xfrm_cfg_sem);
+	mutex_unlock(&xfrm_cfg_mutex);
 
 out:
 	if (err && hdr && pfkey_error(hdr, err, sk) == 0)
diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
index 61a833a9caa6..0e5c5e204799 100644
--- a/net/netfilter/nf_sockopt.c
+++ b/net/netfilter/nf_sockopt.c
@@ -4,6 +4,7 @@
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/netfilter.h>
+#include <linux/mutex.h>
 #include <net/sock.h>
 
 #include "nf_internals.h"
@@ -11,7 +12,7 @@
 /* Sockopts only registered and called from user context, so
    net locking would be overkill. Also, [gs]etsockopt calls may
    sleep. */
-static DECLARE_MUTEX(nf_sockopt_mutex);
+static DEFINE_MUTEX(nf_sockopt_mutex);
 static LIST_HEAD(nf_sockopts);
 
 /* Do exclusive ranges overlap? */
@@ -26,7 +27,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
 	struct list_head *i;
 	int ret = 0;
 
-	if (down_interruptible(&nf_sockopt_mutex) != 0)
+	if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
 		return -EINTR;
 
 	list_for_each(i, &nf_sockopts) {
@@ -48,7 +49,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
 
 	list_add(&reg->list, &nf_sockopts);
 out:
-	up(&nf_sockopt_mutex);
+	mutex_unlock(&nf_sockopt_mutex);
 	return ret;
 }
 EXPORT_SYMBOL(nf_register_sockopt);
@@ -57,18 +58,18 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
 {
 	/* No point being interruptible: we're probably in cleanup_module() */
  restart:
-	down(&nf_sockopt_mutex);
+	mutex_lock(&nf_sockopt_mutex);
 	if (reg->use != 0) {
 		/* To be woken by nf_sockopt call... */
 		/* FIXME: Stuart Young's name appears gratuitously. */
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		reg->cleanup_task = current;
-		up(&nf_sockopt_mutex);
+		mutex_unlock(&nf_sockopt_mutex);
 		schedule();
 		goto restart;
 	}
 	list_del(&reg->list);
-	up(&nf_sockopt_mutex);
+	mutex_unlock(&nf_sockopt_mutex);
 }
 EXPORT_SYMBOL(nf_unregister_sockopt);
 
@@ -80,7 +81,7 @@ static int nf_sockopt(struct sock *sk, int pf, int val,
 	struct nf_sockopt_ops *ops;
 	int ret;
 
-	if (down_interruptible(&nf_sockopt_mutex) != 0)
+	if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
 		return -EINTR;
 
 	list_for_each(i, &nf_sockopts) {
@@ -90,7 +91,7 @@ static int nf_sockopt(struct sock *sk, int pf, int val,
 				if (val >= ops->get_optmin
 				    && val < ops->get_optmax) {
 					ops->use++;
-					up(&nf_sockopt_mutex);
+					mutex_unlock(&nf_sockopt_mutex);
 					ret = ops->get(sk, val, opt, len);
 					goto out;
 				}
@@ -98,22 +99,22 @@ static int nf_sockopt(struct sock *sk, int pf, int val,
 				if (val >= ops->set_optmin
 				    && val < ops->set_optmax) {
 					ops->use++;
-					up(&nf_sockopt_mutex);
+					mutex_unlock(&nf_sockopt_mutex);
 					ret = ops->set(sk, val, opt, *len);
 					goto out;
 				}
 			}
 		}
 	}
-	up(&nf_sockopt_mutex);
+	mutex_unlock(&nf_sockopt_mutex);
 	return -ENOPROTOOPT;
 
 out:
-	down(&nf_sockopt_mutex);
+	mutex_lock(&nf_sockopt_mutex);
 	ops->use--;
 	if (ops->cleanup_task)
 		wake_up_process(ops->cleanup_task);
-	up(&nf_sockopt_mutex);
+	mutex_unlock(&nf_sockopt_mutex);
 	return ret;
 }
 
diff --git a/net/socket.c b/net/socket.c
index 510ae18d220a..e3c21d5ec288 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -68,6 +68,7 @@
 #include <linux/netdevice.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/mutex.h>
 #include <linux/wanrouter.h>
 #include <linux/if_bridge.h>
 #include <linux/if_frad.h>
@@ -826,36 +827,36 @@ static ssize_t sock_aio_write(struct kiocb *iocb, const char __user *ubuf,
  * with module unload.
  */
 
-static DECLARE_MUTEX(br_ioctl_mutex);
+static DEFINE_MUTEX(br_ioctl_mutex);
 static int (*br_ioctl_hook)(unsigned int cmd, void __user *arg) = NULL;
 
 void brioctl_set(int (*hook)(unsigned int, void __user *))
 {
-	down(&br_ioctl_mutex);
+	mutex_lock(&br_ioctl_mutex);
 	br_ioctl_hook = hook;
-	up(&br_ioctl_mutex);
+	mutex_unlock(&br_ioctl_mutex);
 }
 EXPORT_SYMBOL(brioctl_set);
 
-static DECLARE_MUTEX(vlan_ioctl_mutex);
+static DEFINE_MUTEX(vlan_ioctl_mutex);
 static int (*vlan_ioctl_hook)(void __user *arg);
 
 void vlan_ioctl_set(int (*hook)(void __user *))
 {
-	down(&vlan_ioctl_mutex);
+	mutex_lock(&vlan_ioctl_mutex);
 	vlan_ioctl_hook = hook;
-	up(&vlan_ioctl_mutex);
+	mutex_unlock(&vlan_ioctl_mutex);
 }
 EXPORT_SYMBOL(vlan_ioctl_set);
 
-static DECLARE_MUTEX(dlci_ioctl_mutex);
+static DEFINE_MUTEX(dlci_ioctl_mutex);
 static int (*dlci_ioctl_hook)(unsigned int, void __user *);
 
 void dlci_ioctl_set(int (*hook)(unsigned int, void __user *))
 {
-	down(&dlci_ioctl_mutex);
+	mutex_lock(&dlci_ioctl_mutex);
 	dlci_ioctl_hook = hook;
-	up(&dlci_ioctl_mutex);
+	mutex_unlock(&dlci_ioctl_mutex);
 }
 EXPORT_SYMBOL(dlci_ioctl_set);
 
@@ -899,10 +900,10 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 			if (!br_ioctl_hook)
 				request_module("bridge");
 
-			down(&br_ioctl_mutex);
+			mutex_lock(&br_ioctl_mutex);
 			if (br_ioctl_hook)
 				err = br_ioctl_hook(cmd, argp);
-			up(&br_ioctl_mutex);
+			mutex_unlock(&br_ioctl_mutex);
 			break;
 		case SIOCGIFVLAN:
 		case SIOCSIFVLAN:
@@ -910,10 +911,10 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 			if (!vlan_ioctl_hook)
 				request_module("8021q");
 
-			down(&vlan_ioctl_mutex);
+			mutex_lock(&vlan_ioctl_mutex);
 			if (vlan_ioctl_hook)
 				err = vlan_ioctl_hook(argp);
-			up(&vlan_ioctl_mutex);
+			mutex_unlock(&vlan_ioctl_mutex);
 			break;
 		case SIOCGIFDIVERT:
 		case SIOCSIFDIVERT:
@@ -927,9 +928,9 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 				request_module("dlci");
 
 			if (dlci_ioctl_hook) {
-				down(&dlci_ioctl_mutex);
+				mutex_lock(&dlci_ioctl_mutex);
 				err = dlci_ioctl_hook(cmd, argp);
-				up(&dlci_ioctl_mutex);
+				mutex_unlock(&dlci_ioctl_mutex);
 			}
 			break;
 		default:
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index dcaa0c4453ff..0acccfeeb284 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -26,6 +26,7 @@
 #include <linux/proc_fs.h>
 #include <linux/net.h>
 #include <linux/workqueue.h>
+#include <linux/mutex.h>
 #include <asm/ioctls.h>
 #include <linux/sunrpc/types.h>
 #include <linux/sunrpc/cache.h>
@@ -532,7 +533,7 @@ void cache_clean_deferred(void *owner)
  */
 
 static DEFINE_SPINLOCK(queue_lock);
-static DECLARE_MUTEX(queue_io_sem);
+static DEFINE_MUTEX(queue_io_mutex);
 
 struct cache_queue {
 	struct list_head list;
@@ -561,7 +562,7 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
 	if (count == 0)
 		return 0;
 
-	down(&queue_io_sem); /* protect against multiple concurrent
+	mutex_lock(&queue_io_mutex); /* protect against multiple concurrent
 			      * readers on this file */
 again:
 	spin_lock(&queue_lock);
@@ -574,7 +575,7 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
 	}
 	if (rp->q.list.next == &cd->queue) {
 		spin_unlock(&queue_lock);
-		up(&queue_io_sem);
+		mutex_unlock(&queue_io_mutex);
 		BUG_ON(rp->offset);
 		return 0;
 	}
@@ -621,11 +622,11 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
 	}
 	if (err == -EAGAIN)
 		goto again;
-	up(&queue_io_sem);
+	mutex_unlock(&queue_io_mutex);
 	return err ? err : count;
 }
 
-static char write_buf[8192]; /* protected by queue_io_sem */
+static char write_buf[8192]; /* protected by queue_io_mutex */
 
 static ssize_t
 cache_write(struct file *filp, const char __user *buf, size_t count,
@@ -639,10 +640,10 @@ cache_write(struct file *filp, const char __user *buf, size_t count,
 	if (count >= sizeof(write_buf))
 		return -EINVAL;
 
-	down(&queue_io_sem);
+	mutex_lock(&queue_io_mutex);
 
 	if (copy_from_user(write_buf, buf, count)) {
-		up(&queue_io_sem);
+		mutex_unlock(&queue_io_mutex);
 		return -EFAULT;
 	}
 	write_buf[count] = '\0';
@@ -651,7 +652,7 @@ cache_write(struct file *filp, const char __user *buf, size_t count,
 	else
 		err = -EINVAL;
 
-	up(&queue_io_sem);
+	mutex_unlock(&queue_io_mutex);
 	return err ? err : count;
 }
 
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index e838d042f7f5..dff07795bd16 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -18,6 +18,7 @@
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
+#include <linux/mutex.h>
 
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/xprt.h>
@@ -62,7 +63,7 @@ static LIST_HEAD(all_tasks);
 /*
  * rpciod-related stuff
  */
-static DECLARE_MUTEX(rpciod_sema);
+static DEFINE_MUTEX(rpciod_mutex);
 static unsigned int rpciod_users;
 static struct workqueue_struct *rpciod_workqueue;
 
@@ -1047,7 +1048,7 @@ rpciod_up(void)
 	struct workqueue_struct *wq;
 	int error = 0;
 
-	down(&rpciod_sema);
+	mutex_lock(&rpciod_mutex);
 	dprintk("rpciod_up: users %d\n", rpciod_users);
 	rpciod_users++;
 	if (rpciod_workqueue)
@@ -1070,14 +1071,14 @@ rpciod_up(void)
 	rpciod_workqueue = wq;
 	error = 0;
 out:
-	up(&rpciod_sema);
+	mutex_unlock(&rpciod_mutex);
 	return error;
 }
 
 void
 rpciod_down(void)
 {
-	down(&rpciod_sema);
+	mutex_lock(&rpciod_mutex);
 	dprintk("rpciod_down sema %d\n", rpciod_users);
 	if (rpciod_users) {
 		if (--rpciod_users)
@@ -1094,7 +1095,7 @@ rpciod_down(void)
 	destroy_workqueue(rpciod_workqueue);
 	rpciod_workqueue = NULL;
  out:
-	up(&rpciod_sema);
+	mutex_unlock(&rpciod_mutex);
 }
 
 #ifdef RPC_DEBUG
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 411802bd4d37..746c2f4a5fa6 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -76,6 +76,7 @@
 #include <linux/netdevice.h>
 #include <linux/file.h>
 #include <linux/proc_fs.h>
+#include <linux/mutex.h>
 
 #include <net/sock.h>
 #include <net/af_unix.h>
@@ -169,7 +170,7 @@ static void maybe_unmark_and_push(struct sock *x)
 
 void unix_gc(void)
 {
-	static DECLARE_MUTEX(unix_gc_sem);
+	static DEFINE_MUTEX(unix_gc_sem);
 	int i;
 	struct sock *s;
 	struct sk_buff_head hitlist;
@@ -179,7 +180,7 @@ void unix_gc(void)
 	 * Avoid a recursive GC.
 	 */
 
-	if (down_trylock(&unix_gc_sem))
+	if (!mutex_trylock(&unix_gc_sem))
 		return;
 
 	spin_lock(&unix_table_lock);
@@ -308,5 +309,5 @@
 	 */
 
 	__skb_queue_purge(&hitlist);
-	up(&unix_gc_sem);
+	mutex_unlock(&unix_gc_sem);
 }
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b46079263e8b..f5eae9febd26 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -26,8 +26,8 @@
 #include <net/xfrm.h>
 #include <net/ip.h>
 
-DECLARE_MUTEX(xfrm_cfg_sem);
-EXPORT_SYMBOL(xfrm_cfg_sem);
+DEFINE_MUTEX(xfrm_cfg_mutex);
+EXPORT_SYMBOL(xfrm_cfg_mutex);
 
 static DEFINE_RWLOCK(xfrm_policy_lock);
 
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 7b1acd995168..4a7120a7e10f 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1486,9 +1486,9 @@ static void xfrm_netlink_rcv(struct sock *sk, int len)
 	unsigned int qlen = 0;
 
 	do {
-		down(&xfrm_cfg_sem);
+		mutex_lock(&xfrm_cfg_mutex);
 		netlink_run_queue(sk, &qlen, &xfrm_user_rcv_msg);
-		up(&xfrm_cfg_sem);
+		mutex_unlock(&xfrm_cfg_mutex);
 
 	} while (qlen);
 }