Diffstat (limited to 'fs/lockd/host.c')
-rw-r--r--	fs/lockd/host.c	| 325
1 file changed, 246 insertions(+), 79 deletions(-)
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index a0d0b58ce7a4..fb24a9730345 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -27,46 +27,60 @@
 #define NLM_HOST_EXPIRE	((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ)
 #define NLM_HOST_COLLECT	((nrhosts > NLM_HOST_MAX)? 120 * HZ : 60 * HZ)
 
-static struct nlm_host *	nlm_hosts[NLM_HOST_NRHASH];
+static struct hlist_head	nlm_hosts[NLM_HOST_NRHASH];
 static unsigned long		next_gc;
 static int			nrhosts;
 static DEFINE_MUTEX(nlm_host_mutex);
 
 
 static void			nlm_gc_hosts(void);
+static struct nsm_handle *	__nsm_find(const struct sockaddr_in *,
+					const char *, int, int);
 
 /*
  * Find an NLM server handle in the cache. If there is none, create it.
  */
 struct nlm_host *
-nlmclnt_lookup_host(struct sockaddr_in *sin, int proto, int version)
+nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version,
+			const char *hostname, int hostname_len)
 {
-	return nlm_lookup_host(0, sin, proto, version);
+	return nlm_lookup_host(0, sin, proto, version,
+			       hostname, hostname_len);
 }
 
 /*
  * Find an NLM client handle in the cache. If there is none, create it.
  */
 struct nlm_host *
-nlmsvc_lookup_host(struct svc_rqst *rqstp)
+nlmsvc_lookup_host(struct svc_rqst *rqstp,
+			const char *hostname, int hostname_len)
 {
 	return nlm_lookup_host(1, &rqstp->rq_addr,
-			       rqstp->rq_prot, rqstp->rq_vers);
+			       rqstp->rq_prot, rqstp->rq_vers,
+			       hostname, hostname_len);
 }
 
 /*
  * Common host lookup routine for server & client
  */
 struct nlm_host *
-nlm_lookup_host(int server, struct sockaddr_in *sin,
-					int proto, int version)
+nlm_lookup_host(int server, const struct sockaddr_in *sin,
+					int proto, int version,
+					const char *hostname,
+					int hostname_len)
 {
-	struct nlm_host	*host, **hp;
-	u32		addr;
+	struct hlist_head *chain;
+	struct hlist_node *pos;
+	struct nlm_host	*host;
+	struct nsm_handle *nsm = NULL;
 	int		hash;
 
-	dprintk("lockd: nlm_lookup_host(%08x, p=%d, v=%d)\n",
-			(unsigned)(sin? ntohl(sin->sin_addr.s_addr) : 0), proto, version);
+	dprintk("lockd: nlm_lookup_host(%u.%u.%u.%u, p=%d, v=%d, my role=%s, name=%.*s)\n",
+			NIPQUAD(sin->sin_addr.s_addr), proto, version,
+			server? "server" : "client",
+			hostname_len,
+			hostname? hostname : "<none>");
+
 
 	hash = NLM_ADDRHASH(sin->sin_addr.s_addr);
 
@@ -76,7 +90,22 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
 	if (time_after_eq(jiffies, next_gc))
 		nlm_gc_hosts();
 
-	for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
+	/* We may keep several nlm_host objects for a peer, because each
+	 * nlm_host is identified by
+	 *	(address, protocol, version, server/client)
+	 * We could probably simplify this a little by putting all those
+	 * different NLM rpc_clients into one single nlm_host object.
+	 * This would allow us to have one nlm_host per address.
+	 */
+	chain = &nlm_hosts[hash];
+	hlist_for_each_entry(host, pos, chain, h_hash) {
+		if (!nlm_cmp_addr(&host->h_addr, sin))
+			continue;
+
+		/* See if we have an NSM handle for this client */
+		if (!nsm)
+			nsm = host->h_nsmhandle;
+
 		if (host->h_proto != proto)
 			continue;
 		if (host->h_version != version)
@@ -84,28 +113,30 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
 		if (host->h_server != server)
 			continue;
 
-		if (nlm_cmp_addr(&host->h_addr, sin)) {
-			if (hp != nlm_hosts + hash) {
-				*hp = host->h_next;
-				host->h_next = nlm_hosts[hash];
-				nlm_hosts[hash] = host;
-			}
-			nlm_get_host(host);
-			mutex_unlock(&nlm_host_mutex);
-			return host;
-		}
-	}
+		/* Move to head of hash chain. */
+		hlist_del(&host->h_hash);
+		hlist_add_head(&host->h_hash, chain);
 
-	/* Ooops, no host found, create it */
-	dprintk("lockd: creating host entry\n");
+		nlm_get_host(host);
+		goto out;
+	}
+	if (nsm)
+		atomic_inc(&nsm->sm_count);
 
-	host = kzalloc(sizeof(*host), GFP_KERNEL);
-	if (!host)
-		goto nohost;
+	host = NULL;
 
-	addr = sin->sin_addr.s_addr;
-	sprintf(host->h_name, "%u.%u.%u.%u", NIPQUAD(addr));
+	/* Sadly, the host isn't in our hash table yet. See if
+	 * we have an NSM handle for it. If not, create one.
+	 */
+	if (!nsm && !(nsm = nsm_find(sin, hostname, hostname_len)))
+		goto out;
 
+	host = kzalloc(sizeof(*host), GFP_KERNEL);
+	if (!host) {
+		nsm_release(nsm);
+		goto out;
+	}
+	host->h_name	   = nsm->sm_name;
 	host->h_addr = *sin;
 	host->h_addr.sin_port = 0;	/* ouch! */
 	host->h_version = version;
@@ -119,9 +150,9 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
 	init_rwsem(&host->h_rwsem);
 	host->h_state      = 0;			/* pseudo NSM state */
 	host->h_nsmstate   = 0;			/* real NSM state */
+	host->h_nsmhandle  = nsm;
 	host->h_server	   = server;
-	host->h_next       = nlm_hosts[hash];
-	nlm_hosts[hash]    = host;
+	hlist_add_head(&host->h_hash, chain);
 	INIT_LIST_HEAD(&host->h_lockowners);
 	spin_lock_init(&host->h_lock);
 	INIT_LIST_HEAD(&host->h_granted);
@@ -130,35 +161,39 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
 	if (++nrhosts > NLM_HOST_MAX)
 		next_gc = 0;
 
-nohost:
+out:
 	mutex_unlock(&nlm_host_mutex);
 	return host;
 }
 
-struct nlm_host *
-nlm_find_client(void)
+/*
+ * Destroy a host
+ */
+static void
+nlm_destroy_host(struct nlm_host *host)
 {
-	/* find a nlm_host for a client for which h_killed == 0.
-	 * and return it
+	struct rpc_clnt	*clnt;
+
+	BUG_ON(!list_empty(&host->h_lockowners));
+	BUG_ON(atomic_read(&host->h_count));
+
+	/*
+	 * Release NSM handle and unmonitor host.
 	 */
-	int hash;
-	mutex_lock(&nlm_host_mutex);
-	for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) {
-		struct nlm_host *host, **hp;
-		for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
-			if (host->h_server &&
-			    host->h_killed == 0) {
-				nlm_get_host(host);
-				mutex_unlock(&nlm_host_mutex);
-				return host;
-			}
+	nsm_unmonitor(host);
+
+	if ((clnt = host->h_rpcclnt) != NULL) {
+		if (atomic_read(&clnt->cl_users)) {
+			printk(KERN_WARNING
+				"lockd: active RPC handle\n");
+			clnt->cl_dead = 1;
+		} else {
+			rpc_destroy_client(host->h_rpcclnt);
 		}
 	}
-	mutex_unlock(&nlm_host_mutex);
-	return NULL;
+	kfree(host);
 }
 
-
 /*
  * Create the NLM RPC client for an NLM peer
  */
@@ -260,22 +295,82 @@ void nlm_release_host(struct nlm_host *host)
 }
 
 /*
+ * We were notified that the host indicated by address &sin
+ * has rebooted.
+ * Release all resources held by that peer.
+ */
+void nlm_host_rebooted(const struct sockaddr_in *sin,
+				const char *hostname, int hostname_len,
+				u32 new_state)
+{
+	struct hlist_head *chain;
+	struct hlist_node *pos;
+	struct nsm_handle *nsm;
+	struct nlm_host	*host;
+
+	dprintk("lockd: nlm_host_rebooted(%s, %u.%u.%u.%u)\n",
+			hostname, NIPQUAD(sin->sin_addr));
+
+	/* Find the NSM handle for this peer */
+	if (!(nsm = __nsm_find(sin, hostname, hostname_len, 0)))
+		return;
+
+	/* When reclaiming locks on this peer, make sure that
+	 * we set up a new notification */
+	nsm->sm_monitored = 0;
+
+	/* Mark all hosts tied to this NSM state as having rebooted.
+	 * We run the loop repeatedly, because we drop the host table
+	 * lock for this.
+	 * To avoid processing a host several times, we match the nsmstate.
+	 */
+again:	mutex_lock(&nlm_host_mutex);
+	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
+		hlist_for_each_entry(host, pos, chain, h_hash) {
+			if (host->h_nsmhandle == nsm
+			 && host->h_nsmstate != new_state) {
+				host->h_nsmstate = new_state;
+				host->h_state++;
+
+				nlm_get_host(host);
+				mutex_unlock(&nlm_host_mutex);
+
+				if (host->h_server) {
+					/* We're server for this guy, just ditch
+					 * all the locks he held. */
+					nlmsvc_free_host_resources(host);
+				} else {
+					/* He's the server, initiate lock recovery. */
+					nlmclnt_recovery(host);
+				}
+
+				nlm_release_host(host);
+				goto again;
+			}
+		}
+	}
+
+	mutex_unlock(&nlm_host_mutex);
+}
+
+/*
  * Shut down the hosts module.
  * Note that this routine is called only at server shutdown time.
  */
 void
 nlm_shutdown_hosts(void)
 {
+	struct hlist_head *chain;
+	struct hlist_node *pos;
 	struct nlm_host	*host;
-	int		i;
 
 	dprintk("lockd: shutting down host module\n");
 	mutex_lock(&nlm_host_mutex);
 
 	/* First, make all hosts eligible for gc */
 	dprintk("lockd: nuking all hosts...\n");
-	for (i = 0; i < NLM_HOST_NRHASH; i++) {
-		for (host = nlm_hosts[i]; host; host = host->h_next)
+	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
+		hlist_for_each_entry(host, pos, chain, h_hash)
 			host->h_expires = jiffies - 1;
 	}
 
@@ -287,8 +382,8 @@ nlm_shutdown_hosts(void)
 	if (nrhosts) {
 		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
 		dprintk("lockd: %d hosts left:\n", nrhosts);
-		for (i = 0; i < NLM_HOST_NRHASH; i++) {
-			for (host = nlm_hosts[i]; host; host = host->h_next) {
+		for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
+			hlist_for_each_entry(host, pos, chain, h_hash) {
 				dprintk("       %s (cnt %d use %d exp %ld)\n",
 					host->h_name, atomic_read(&host->h_count),
 					host->h_inuse, host->h_expires);
@@ -305,45 +400,32 @@ nlm_shutdown_hosts(void)
 static void
 nlm_gc_hosts(void)
 {
-	struct nlm_host	**q, *host;
-	struct rpc_clnt	*clnt;
-	int		i;
+	struct hlist_head *chain;
+	struct hlist_node *pos, *next;
+	struct nlm_host	*host;
 
 	dprintk("lockd: host garbage collection\n");
-	for (i = 0; i < NLM_HOST_NRHASH; i++) {
-		for (host = nlm_hosts[i]; host; host = host->h_next)
+	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
+		hlist_for_each_entry(host, pos, chain, h_hash)
 			host->h_inuse = 0;
 	}
 
 	/* Mark all hosts that hold locks, blocks or shares */
 	nlmsvc_mark_resources();
 
-	for (i = 0; i < NLM_HOST_NRHASH; i++) {
-		q = &nlm_hosts[i];
-		while ((host = *q) != NULL) {
+	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
+		hlist_for_each_entry_safe(host, pos, next, chain, h_hash) {
 			if (atomic_read(&host->h_count) || host->h_inuse
 			 || time_before(jiffies, host->h_expires)) {
 				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
 					host->h_name, atomic_read(&host->h_count),
 					host->h_inuse, host->h_expires);
-				q = &host->h_next;
 				continue;
 			}
 			dprintk("lockd: delete host %s\n", host->h_name);
-			*q = host->h_next;
-			/* Don't unmonitor hosts that have been invalidated */
-			if (host->h_monitored && !host->h_killed)
-				nsm_unmonitor(host);
-			if ((clnt = host->h_rpcclnt) != NULL) {
-				if (atomic_read(&clnt->cl_users)) {
-					printk(KERN_WARNING
-					       "lockd: active RPC handle\n");
-					clnt->cl_dead = 1;
-				} else {
-					rpc_destroy_client(host->h_rpcclnt);
-				}
-			}
-			kfree(host);
+			hlist_del_init(&host->h_hash);
+
+			nlm_destroy_host(host);
 			nrhosts--;
 		}
 	}
@@ -351,3 +433,88 @@ nlm_gc_hosts(void)
 
 	next_gc = jiffies + NLM_HOST_COLLECT;
 }
+
+
+/*
+ * Manage NSM handles
+ */
+static LIST_HEAD(nsm_handles);
+static DEFINE_MUTEX(nsm_mutex);
+
+static struct nsm_handle *
+__nsm_find(const struct sockaddr_in *sin,
+		const char *hostname, int hostname_len,
+		int create)
+{
+	struct nsm_handle *nsm = NULL;
+	struct list_head *pos;
+
+	if (!sin)
+		return NULL;
+
+	if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
+		if (printk_ratelimit()) {
+			printk(KERN_WARNING "Invalid hostname \"%.*s\" "
+					    "in NFS lock request\n",
+				hostname_len, hostname);
+		}
+		return NULL;
+	}
+
+	mutex_lock(&nsm_mutex);
+	list_for_each(pos, &nsm_handles) {
+		nsm = list_entry(pos, struct nsm_handle, sm_link);
+
+		if (hostname && nsm_use_hostnames) {
+			if (strlen(nsm->sm_name) != hostname_len
+			 || memcmp(nsm->sm_name, hostname, hostname_len))
+				continue;
+		} else if (!nlm_cmp_addr(&nsm->sm_addr, sin))
+			continue;
+		atomic_inc(&nsm->sm_count);
+		goto out;
+	}
+
+	if (!create) {
+		nsm = NULL;
+		goto out;
+	}
+
+	nsm = kzalloc(sizeof(*nsm) + hostname_len + 1, GFP_KERNEL);
+	if (nsm != NULL) {
+		nsm->sm_addr = *sin;
+		nsm->sm_name = (char *) (nsm + 1);
+		memcpy(nsm->sm_name, hostname, hostname_len);
+		nsm->sm_name[hostname_len] = '\0';
+		atomic_set(&nsm->sm_count, 1);
+
+		list_add(&nsm->sm_link, &nsm_handles);
+	}
+
+out:
+	mutex_unlock(&nsm_mutex);
+	return nsm;
+}
+
+struct nsm_handle *
+nsm_find(const struct sockaddr_in *sin, const char *hostname, int hostname_len)
+{
+	return __nsm_find(sin, hostname, hostname_len, 1);
+}
+
+/*
+ * Release an NSM handle
+ */
+void
+nsm_release(struct nsm_handle *nsm)
+{
+	if (!nsm)
+		return;
+	if (atomic_dec_and_test(&nsm->sm_count)) {
+		mutex_lock(&nsm_mutex);
+		if (atomic_read(&nsm->sm_count) == 0) {
+			list_del(&nsm->sm_link);
+			kfree(nsm);
+		}
+		mutex_unlock(&nsm_mutex);
+	}
+}
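
A note on the lookup rewrite above: nlm_lookup_host() now walks an hlist hash chain and, on a hit, moves the entry to the head of its chain with hlist_del()/hlist_add_head(), so recently used hosts are found first on later lookups. The following stand-alone C sketch shows the same move-to-front discipline outside the kernel; the struct and function names here are made up for illustration, and a hand-rolled next/pprev pair stands in for hlist_node.

#include <stddef.h>

#define NRHASH 32

struct entry {
	struct entry *next;
	struct entry **pprev;	/* address of the pointer that links to us */
	unsigned int key;
};

static struct entry *table[NRHASH];

static unsigned int hash_key(unsigned int key)
{
	return key % NRHASH;
}

static void unlink_entry(struct entry *e)
{
	*e->pprev = e->next;
	if (e->next)
		e->next->pprev = e->pprev;
}

static void link_head(struct entry *e, struct entry **head)
{
	e->next = *head;
	if (e->next)
		e->next->pprev = &e->next;
	*head = e;
	e->pprev = head;
}

/* Look up key; on a hit, move the entry to the front of its chain,
 * as nlm_lookup_host() now does with hlist_del()/hlist_add_head(). */
struct entry *lookup(unsigned int key)
{
	struct entry **head = &table[hash_key(key)];
	struct entry *e;

	for (e = *head; e != NULL; e = e->next) {
		if (e->key != key)
			continue;
		unlink_entry(e);
		link_head(e, head);
		return e;
	}
	return NULL;
}

The pprev trick is what lets an entry unlink itself in O(1) without knowing whether it is currently the chain head, which is exactly why hlist suits hash buckets.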
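The new nlm_host_rebooted() also shows a restart-scan pattern worth calling out: it must drop nlm_host_mutex before calling nlmsvc_free_host_resources() or nlmclnt_recovery(), which invalidates the iteration, so it jumps back to the again: label and relies on the h_nsmstate match to skip hosts already handled. Reduced to a hypothetical user-space sketch (all names illustrative; the real code additionally pins the entry with nlm_get_host() before unlocking so it cannot be freed):

#include <pthread.h>

struct node {
	struct node *next;
	int state;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *nodes;

/* Stands in for blocking work (lock recovery, resource teardown)
 * that must run without table_lock held. */
static void slow_work(struct node *n)
{
	(void) n;
}

void process_all(int new_state)
{
	struct node *n;

again:
	pthread_mutex_lock(&table_lock);
	for (n = nodes; n != NULL; n = n->next) {
		if (n->state == new_state)
			continue;	/* handled in an earlier pass */
		n->state = new_state;
		/* The kernel takes a reference here so the entry cannot
		 * vanish once the lock is dropped; this sketch omits that. */
		pthread_mutex_unlock(&table_lock);
		slow_work(n);
		goto again;		/* iterator is stale; rescan */
	}
	pthread_mutex_unlock(&table_lock);
}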
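Finally, the nsm_release() added in the last hunk uses a common refcounting idiom: atomic_dec_and_test() outside the lock, then a re-check of the count under nsm_mutex before unlinking and freeing, because a concurrent __nsm_find() may take a new reference in the window between the decrement and the lock. A minimal user-space analogue, assuming C11 atomics and pthreads (all names illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct handle {
	atomic_int count;
	struct handle *next, *prev;	/* linked into a global list */
};

static pthread_mutex_t handles_lock = PTHREAD_MUTEX_INITIALIZER;

void handle_release(struct handle *h)
{
	if (!h)
		return;
	/* Fast path: drop our reference without taking the lock.
	 * atomic_fetch_sub() returns the old value, so == 1 means
	 * we brought the count to zero (like atomic_dec_and_test). */
	if (atomic_fetch_sub(&h->count, 1) == 1) {
		pthread_mutex_lock(&handles_lock);
		/* Re-check under the lock: a concurrent lookup may have
		 * re-referenced the handle after our decrement. */
		if (atomic_load(&h->count) == 0) {
			h->prev->next = h->next;
			h->next->prev = h->prev;
			free(h);
		}
		pthread_mutex_unlock(&handles_lock);
	}
}

This is only safe because lookups take their new reference while holding the list lock, as __nsm_find() does its atomic_inc() under nsm_mutex; a resurrection outside the lock would still race with the free.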