Diffstat (limited to 'fs/nfs')
 fs/nfs/callback.c | 93 ++++++++++++++++++++++++------------------------
 fs/nfs/symlink.c  |  1 -
 2 files changed, 48 insertions(+), 46 deletions(-)
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 66648dd92d97..5606ae3d72d3 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -15,6 +15,7 @@
 #include <linux/nfs_fs.h>
 #include <linux/mutex.h>
 #include <linux/freezer.h>
+#include <linux/kthread.h>
 
 #include <net/inet_sock.h>
 
@@ -27,9 +28,7 @@
 struct nfs_callback_data {
         unsigned int users;
         struct svc_serv *serv;
-        pid_t pid;
-        struct completion started;
-        struct completion stopped;
+        struct task_struct *task;
 };
 
 static struct nfs_callback_data nfs_callback_info;
@@ -57,48 +56,44 @@ module_param_call(callback_tcpport, param_set_port, param_get_int,
 /*
  * This is the callback kernel thread.
  */
-static void nfs_callback_svc(struct svc_rqst *rqstp)
+static int
+nfs_callback_svc(void *vrqstp)
 {
-        int err;
+        int err, preverr = 0;
+        struct svc_rqst *rqstp = vrqstp;
 
-        __module_get(THIS_MODULE);
-        lock_kernel();
-
-        nfs_callback_info.pid = current->pid;
-        daemonize("nfsv4-svc");
-        /* Process request with signals blocked, but allow SIGKILL. */
-        allow_signal(SIGKILL);
         set_freezable();
 
-        complete(&nfs_callback_info.started);
-
-        for(;;) {
-                if (signalled()) {
-                        if (nfs_callback_info.users == 0)
-                                break;
-                        flush_signals(current);
-                }
+        /*
+         * FIXME: do we really need to run this under the BKL? If so, please
+         * add a comment about what it's intended to protect.
+         */
+        lock_kernel();
+        while (!kthread_should_stop()) {
                 /*
                  * Listen for a request on the socket
                  */
                 err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
-                if (err == -EAGAIN || err == -EINTR)
+                if (err == -EAGAIN || err == -EINTR) {
+                        preverr = err;
                         continue;
+                }
                 if (err < 0) {
-                        printk(KERN_WARNING
-                                        "%s: terminating on error %d\n",
-                                        __FUNCTION__, -err);
-                        break;
+                        if (err != preverr) {
+                                printk(KERN_WARNING "%s: unexpected error "
+                                        "from svc_recv (%d)\n", __func__, err);
+                                preverr = err;
+                        }
+                        schedule_timeout_uninterruptible(HZ);
+                        continue;
                 }
+                preverr = err;
                 svc_process(rqstp);
         }
-
-        flush_signals(current);
-        svc_exit_thread(rqstp);
-        nfs_callback_info.pid = 0;
-        complete(&nfs_callback_info.stopped);
         unlock_kernel();
-        module_put_and_exit(0);
+        nfs_callback_info.task = NULL;
+        svc_exit_thread(rqstp);
+        return 0;
 }
 
 /*
@@ -107,14 +102,13 @@ static void nfs_callback_svc(struct svc_rqst *rqstp)
 int nfs_callback_up(void)
 {
         struct svc_serv *serv = NULL;
+        struct svc_rqst *rqstp;
         int ret = 0;
 
         lock_kernel();
         mutex_lock(&nfs_callback_mutex);
-        if (nfs_callback_info.users++ || nfs_callback_info.pid != 0)
+        if (nfs_callback_info.users++ || nfs_callback_info.task != NULL)
                 goto out;
-        init_completion(&nfs_callback_info.started);
-        init_completion(&nfs_callback_info.stopped);
         serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, NULL);
         ret = -ENOMEM;
         if (!serv)
@@ -127,15 +121,28 @@ int nfs_callback_up(void)
         nfs_callback_tcpport = ret;
         dprintk("Callback port = 0x%x\n", nfs_callback_tcpport);
 
-        ret = svc_create_thread(nfs_callback_svc, serv);
-        if (ret < 0)
+        rqstp = svc_prepare_thread(serv, &serv->sv_pools[0]);
+        if (IS_ERR(rqstp)) {
+                ret = PTR_ERR(rqstp);
                 goto out_err;
+        }
+
+        svc_sock_update_bufs(serv);
         nfs_callback_info.serv = serv;
-        wait_for_completion(&nfs_callback_info.started);
+
+        nfs_callback_info.task = kthread_run(nfs_callback_svc, rqstp,
+                                             "nfsv4-svc");
+        if (IS_ERR(nfs_callback_info.task)) {
+                ret = PTR_ERR(nfs_callback_info.task);
+                nfs_callback_info.serv = NULL;
+                nfs_callback_info.task = NULL;
+                svc_exit_thread(rqstp);
+                goto out_err;
+        }
 out:
         /*
          * svc_create creates the svc_serv with sv_nrthreads == 1, and then
-         * svc_create_thread increments that. So we need to call svc_destroy
+         * svc_prepare_thread increments that. So we need to call svc_destroy
          * on both success and failure so that the refcount is 1 when the
          * thread exits.
          */
@@ -152,19 +159,15 @@ out_err:
 }
 
 /*
- * Kill the server process if it is not already up.
+ * Kill the server process if it is not already down.
  */
 void nfs_callback_down(void)
 {
         lock_kernel();
         mutex_lock(&nfs_callback_mutex);
         nfs_callback_info.users--;
-        do {
-                if (nfs_callback_info.users != 0 || nfs_callback_info.pid == 0)
-                        break;
-                if (kill_proc(nfs_callback_info.pid, SIGKILL, 1) < 0)
-                        break;
-        } while (wait_for_completion_timeout(&nfs_callback_info.stopped, 5*HZ) == 0);
+        if (nfs_callback_info.users == 0 && nfs_callback_info.task != NULL)
+                kthread_stop(nfs_callback_info.task);
         mutex_unlock(&nfs_callback_mutex);
         unlock_kernel();
 }
diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c
index 83e865a16ad1..412738dbfbc7 100644
--- a/fs/nfs/symlink.c
+++ b/fs/nfs/symlink.c
@@ -10,7 +10,6 @@
  * nfs symlink handling code
  */
 
-#define NFS_NEED_XDR_TYPES
 #include <linux/time.h>
 #include <linux/errno.h>
 #include <linux/sunrpc/clnt.h>
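
For readers less familiar with the kthread API that this patch converts to, here is a minimal sketch of the same lifecycle pattern in an ordinary kernel module. It is illustrative only: demo_thread, demo_task, and the module boilerplate are hypothetical names, not part of this patch. The point is the kthread_run() / kthread_should_stop() / kthread_stop() handshake that nfs_callback_svc(), nfs_callback_up(), and nfs_callback_down() now follow.

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

/* Hypothetical demo module, not part of this patch. */
static struct task_struct *demo_task;

static int demo_thread(void *data)
{
        /* Loop until another thread calls kthread_stop() on this task. */
        while (!kthread_should_stop()) {
                /* ... do one unit of work, e.g. service a request ... */
                msleep_interruptible(1000);
        }
        /* Returning ends the thread; kthread_stop() collects this value. */
        return 0;
}

static int __init demo_init(void)
{
        /* kthread_run() creates and wakes the thread in one step. */
        demo_task = kthread_run(demo_thread, NULL, "demo-thread");
        if (IS_ERR(demo_task))
                return PTR_ERR(demo_task);
        return 0;
}

static void __exit demo_exit(void)
{
        /* Blocks until demo_thread() notices the stop request and returns. */
        kthread_stop(demo_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because kthread_stop() blocks until the target thread returns, the patched nfs_callback_down() no longer needs the old stopped completion or the kill_proc()/signal retry loop.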