author	Jeff Layton <jlayton@redhat.com>	2008-12-23 15:21:33 -0500
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2008-12-23 15:21:33 -0500
commit	df94f000c46c055cf439f5b92807cd827557ffbc (patch)
tree	c3b224d3ae7c07a5edc3fc804a6d91c8cbdd5cc1 /fs/lockd
parent	2de59872a7842143f4507832e7c1f5123c47feb7 (diff)
lockd: convert reclaimer thread to kthread interface
My understanding is that there is a push to turn the kernel_thread interface into a non-exported symbol and move all kernel threads to use the kthread API. This patch changes lockd to use kthread_run to spawn the reclaimer thread.

I've made the assumption here that the extra module references taken when we spawn this thread are unnecessary, and removed them. I've also added a KERN_ERR printk that pops if the thread can't be spawned, to warn the admin that the locks won't be reclaimed.

In the future, it would be nice to be able to notify userspace that locks have been lost (probably by implementing SIGLOST), and to add some good policies about how long we should keep trying to reclaim the locks.

Finally, I removed a comment about memory leaks that I believe is obsolete, and added a new one to clarify the result of sending a SIGKILL to the reclaimer thread. As best I can tell, doing so doesn't actually cause a memory leak.

I consider this patch 2.6.29 material.

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
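For context, a minimal sketch of the conversion pattern the message describes, under the assumption of made-up my_worker/spawn_worker names (not the actual lockd code): kernel_thread() returned a pid, and callers had to daemonize() and manage module references themselves, while kthread_run() names the thread at creation and reports failure through an ERR_PTR-encoded task_struct pointer:

/*
 * Hypothetical illustration of the kernel_thread -> kthread_run
 * conversion; my_worker, spawn_worker and the message text are
 * invented for this sketch and are not part of the patch.
 */
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/kernel.h>

static int my_worker(void *data)
{
	/* thread body; kthread_run() starts it immediately,
	 * so no daemonize() call is needed */
	return 0;
}

static void spawn_worker(void *data, const char *name)
{
	struct task_struct *task;

	/* kthread_run() = kthread_create() + wake_up_process() */
	task = kthread_run(my_worker, data, "%s-worker", name);
	if (IS_ERR(task))
		printk(KERN_ERR "unable to spawn worker thread (%ld)\n",
				PTR_ERR(task));
}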
Diffstat (limited to 'fs/lockd')
-rw-r--r--	fs/lockd/clntlock.c	21
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 8307dd64bf46..94d42cc4e393 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -14,6 +14,7 @@
 #include <linux/sunrpc/svc.h>
 #include <linux/lockd/lockd.h>
 #include <linux/smp_lock.h>
+#include <linux/kthread.h>
 
 #define NLMDBG_FACILITY NLMDBG_CLIENT
 
@@ -191,11 +192,15 @@ __be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock)
 void
 nlmclnt_recovery(struct nlm_host *host)
 {
+	struct task_struct *task;
+
 	if (!host->h_reclaiming++) {
 		nlm_get_host(host);
-		__module_get(THIS_MODULE);
-		if (kernel_thread(reclaimer, host, CLONE_FS | CLONE_FILES) < 0)
-			module_put(THIS_MODULE);
+		task = kthread_run(reclaimer, host, "%s-reclaim", host->h_name);
+		if (IS_ERR(task))
+			printk(KERN_ERR "lockd: unable to spawn reclaimer "
+				"thread. Locks for %s won't be reclaimed! "
+				"(%ld)\n", host->h_name, PTR_ERR(task));
 	}
 }
 
@@ -207,7 +212,6 @@ reclaimer(void *ptr)
 	struct file_lock *fl, *next;
 	u32 nsmstate;
 
-	daemonize("%s-reclaim", host->h_name);
 	allow_signal(SIGKILL);
 
 	down_write(&host->h_rwsem);
@@ -233,7 +237,12 @@ restart:
 	list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) {
 		list_del_init(&fl->fl_u.nfs_fl.list);
 
-		/* Why are we leaking memory here? --okir */
+		/*
+		 * sending this thread a SIGKILL will result in any unreclaimed
+		 * locks being removed from the h_granted list. This means that
+		 * the kernel will not attempt to reclaim them again if a new
+		 * reclaimer thread is spawned for this host.
+		 */
 		if (signalled())
 			continue;
 		if (nlmclnt_reclaim(host, fl) != 0)
@@ -261,5 +270,5 @@ restart:
 	nlm_release_host(host);
 	lockd_down();
 	unlock_kernel();
-	module_put_and_exit(0);
+	return 0;
 }
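For reference, the signal handling the new comment documents follows the usual kthread pattern: kernel threads ignore signals by default, allow_signal() opts the thread in to SIGKILL, and the sunrpc-provided signalled() macro (a wrapper around signal_pending(current)) is polled in the loop, so a delivered SIGKILL makes the remaining entries get skipped rather than reclaimed. A minimal sketch under those assumptions, with a hypothetical sketch_thread rather than the reclaimer itself:

/*
 * Hypothetical sketch of the allow_signal()/signal_pending() pattern
 * used by the reclaimer; sketch_thread and its loop body are invented.
 */
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/signal.h>

static int sketch_thread(void *data)
{
	int i;

	allow_signal(SIGKILL);	/* kthreads block signals by default */

	for (i = 0; i < 100; i++) {
		/* once SIGKILL is pending, skip the remaining items */
		if (signal_pending(current))
			continue;
		/* ... per-item work would go here ... */
	}
	return 0;
}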