Diffstat (limited to 'fs/nfs')
35 files changed, 819 insertions, 532 deletions
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 59e5673b4597..a43d07e7b924 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -95,8 +95,7 @@ config ROOT_NFS | |||
95 | Most people say N here. | 95 | Most people say N here. |
96 | 96 | ||
97 | config NFS_FSCACHE | 97 | config NFS_FSCACHE |
98 | bool "Provide NFS client caching support (EXPERIMENTAL)" | 98 | bool "Provide NFS client caching support" |
99 | depends on EXPERIMENTAL | ||
100 | depends on NFS_FS=m && FSCACHE || NFS_FS=y && FSCACHE=y | 99 | depends on NFS_FS=m && FSCACHE || NFS_FS=y && FSCACHE=y |
101 | help | 100 | help |
102 | Say Y here if you want NFS data to be cached locally on disc through | 101 | Say Y here if you want NFS data to be cached locally on disc through |
diff --git a/fs/nfs/cache_lib.c b/fs/nfs/cache_lib.c
index b4ffd0146ea6..84690319e625 100644
--- a/fs/nfs/cache_lib.c
+++ b/fs/nfs/cache_lib.c
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/moduleparam.h> | 10 | #include <linux/moduleparam.h> |
11 | #include <linux/mount.h> | 11 | #include <linux/mount.h> |
12 | #include <linux/namei.h> | 12 | #include <linux/namei.h> |
13 | #include <linux/slab.h> | ||
13 | #include <linux/sunrpc/cache.h> | 14 | #include <linux/sunrpc/cache.h> |
14 | #include <linux/sunrpc/rpc_pipe_fs.h> | 15 | #include <linux/sunrpc/rpc_pipe_fs.h> |
15 | 16 | ||
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 73ab220354df..36dfdae95123 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -118,7 +118,6 @@ nfs4_callback_up(struct svc_serv *serv) | |||
118 | dprintk("NFS: Callback listener port = %u (af %u)\n", | 118 | dprintk("NFS: Callback listener port = %u (af %u)\n", |
119 | nfs_callback_tcpport, PF_INET); | 119 | nfs_callback_tcpport, PF_INET); |
120 | 120 | ||
121 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
122 | ret = svc_create_xprt(serv, "tcp", PF_INET6, | 121 | ret = svc_create_xprt(serv, "tcp", PF_INET6, |
123 | nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); | 122 | nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); |
124 | if (ret > 0) { | 123 | if (ret > 0) { |
@@ -129,7 +128,6 @@ nfs4_callback_up(struct svc_serv *serv) | |||
129 | ret = 0; | 128 | ret = 0; |
130 | else | 129 | else |
131 | goto out_err; | 130 | goto out_err; |
132 | #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ | ||
133 | 131 | ||
134 | return svc_prepare_thread(serv, &serv->sv_pools[0]); | 132 | return svc_prepare_thread(serv, &serv->sv_pools[0]); |
135 | 133 | ||
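Note: the hunk above removes the CONFIG_IPV6 compile-time guards around creating the PF_INET6 callback transport, so the attempt is always made and an unusable address family becomes a run-time condition (the retained "ret = 0; else goto out_err;" context suggests the failure is tolerated rather than fatal). A minimal userspace sketch of that pattern, not kernel code:

/*
 * Userspace model: always try the IPv6 listener, treat "address family
 * not supported" as non-fatal at run time instead of ifdef'ing it out.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET6, SOCK_STREAM, 0);

        if (fd >= 0) {
                printf("IPv6 listener available\n");
                close(fd);
        } else if (errno == EAFNOSUPPORT) {
                /* No IPv6 on this host: continue with IPv4 only */
                printf("IPv6 not supported, IPv4 only\n");
        } else {
                perror("socket");
                return 1;
        }
        return 0;
}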
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index d4036be0b589..85a7cfd1b8dd 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -119,6 +119,14 @@ struct cb_recallanyargs { | |||
119 | }; | 119 | }; |
120 | 120 | ||
121 | extern unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy); | 121 | extern unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy); |
122 | |||
123 | struct cb_recallslotargs { | ||
124 | struct sockaddr *crsa_addr; | ||
125 | uint32_t crsa_target_max_slots; | ||
126 | }; | ||
127 | extern unsigned nfs4_callback_recallslot(struct cb_recallslotargs *args, | ||
128 | void *dummy); | ||
129 | |||
122 | #endif /* CONFIG_NFS_V4_1 */ | 130 | #endif /* CONFIG_NFS_V4_1 */ |
123 | 131 | ||
124 | extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res); | 132 | extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res); |
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index defa9b4c470e..a08770a7e857 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -7,6 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | #include <linux/nfs4.h> | 8 | #include <linux/nfs4.h> |
9 | #include <linux/nfs_fs.h> | 9 | #include <linux/nfs_fs.h> |
10 | #include <linux/slab.h> | ||
10 | #include "nfs4_fs.h" | 11 | #include "nfs4_fs.h" |
11 | #include "callback.h" | 12 | #include "callback.h" |
12 | #include "delegation.h" | 13 | #include "delegation.h" |
@@ -143,44 +144,49 @@ int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const n | |||
143 | * Return success if the sequenceID is one more than what we last saw on | 144 | * Return success if the sequenceID is one more than what we last saw on |
144 | * this slot, accounting for wraparound. Increments the slot's sequence. | 145 | * this slot, accounting for wraparound. Increments the slot's sequence. |
145 | * | 146 | * |
146 | * We don't yet implement a duplicate request cache, so at this time | 147 | * We don't yet implement a duplicate request cache, instead we set the |
147 | * we will log replays, and process them as if we had not seen them before, | 148 | * back channel ca_maxresponsesize_cached to zero. This is OK for now |
148 | * but we don't bump the sequence in the slot. Not too worried about it, | ||
149 | * since we only currently implement idempotent callbacks anyway. | 149 | * since we only currently implement idempotent callbacks anyway. |
150 | * | 150 | * |
151 | * We have a single slot backchannel at this time, so we don't bother | 151 | * We have a single slot backchannel at this time, so we don't bother |
152 | * checking the used_slots bit array on the table. The lower layer guarantees | 152 | * checking the used_slots bit array on the table. The lower layer guarantees |
153 | * a single outstanding callback request at a time. | 153 | * a single outstanding callback request at a time. |
154 | */ | 154 | */ |
155 | static int | 155 | static __be32 |
156 | validate_seqid(struct nfs4_slot_table *tbl, u32 slotid, u32 seqid) | 156 | validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args) |
157 | { | 157 | { |
158 | struct nfs4_slot *slot; | 158 | struct nfs4_slot *slot; |
159 | 159 | ||
160 | dprintk("%s enter. slotid %d seqid %d\n", | 160 | dprintk("%s enter. slotid %d seqid %d\n", |
161 | __func__, slotid, seqid); | 161 | __func__, args->csa_slotid, args->csa_sequenceid); |
162 | 162 | ||
163 | if (slotid > NFS41_BC_MAX_CALLBACKS) | 163 | if (args->csa_slotid > NFS41_BC_MAX_CALLBACKS) |
164 | return htonl(NFS4ERR_BADSLOT); | 164 | return htonl(NFS4ERR_BADSLOT); |
165 | 165 | ||
166 | slot = tbl->slots + slotid; | 166 | slot = tbl->slots + args->csa_slotid; |
167 | dprintk("%s slot table seqid: %d\n", __func__, slot->seq_nr); | 167 | dprintk("%s slot table seqid: %d\n", __func__, slot->seq_nr); |
168 | 168 | ||
169 | /* Normal */ | 169 | /* Normal */ |
170 | if (likely(seqid == slot->seq_nr + 1)) { | 170 | if (likely(args->csa_sequenceid == slot->seq_nr + 1)) { |
171 | slot->seq_nr++; | 171 | slot->seq_nr++; |
172 | return htonl(NFS4_OK); | 172 | return htonl(NFS4_OK); |
173 | } | 173 | } |
174 | 174 | ||
175 | /* Replay */ | 175 | /* Replay */ |
176 | if (seqid == slot->seq_nr) { | 176 | if (args->csa_sequenceid == slot->seq_nr) { |
177 | dprintk("%s seqid %d is a replay - no DRC available\n", | 177 | dprintk("%s seqid %d is a replay\n", |
178 | __func__, seqid); | 178 | __func__, args->csa_sequenceid); |
179 | return htonl(NFS4_OK); | 179 | /* Signal process_op to set this error on next op */ |
180 | if (args->csa_cachethis == 0) | ||
181 | return htonl(NFS4ERR_RETRY_UNCACHED_REP); | ||
182 | |||
183 | /* The ca_maxresponsesize_cached is 0 with no DRC */ | ||
184 | else if (args->csa_cachethis == 1) | ||
185 | return htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE); | ||
180 | } | 186 | } |
181 | 187 | ||
182 | /* Wraparound */ | 188 | /* Wraparound */ |
183 | if (seqid == 1 && (slot->seq_nr + 1) == 0) { | 189 | if (args->csa_sequenceid == 1 && (slot->seq_nr + 1) == 0) { |
184 | slot->seq_nr = 1; | 190 | slot->seq_nr = 1; |
185 | return htonl(NFS4_OK); | 191 | return htonl(NFS4_OK); |
186 | } | 192 | } |
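Note: the replay branch above is the interesting part of this hunk: without a duplicate request cache, a replayed CB_SEQUENCE that asked for caching cannot be satisfied, so csa_cachethis selects between the two NFSv4.1 replay errors. A standalone plain-C model of the slot state machine (not kernel code; the error values are the RFC 5661 numbers, and the "misordered" fallthrough is an assumption since the diff does not show the tail of the function):

#include <stdint.h>
#include <stdio.h>

#define NFS4_OK                         0
#define NFS4ERR_SEQ_MISORDERED          10063
#define NFS4ERR_REP_TOO_BIG_TO_CACHE    10067
#define NFS4ERR_RETRY_UNCACHED_REP      10068

struct slot { uint32_t seq_nr; };

static int validate_seqid_model(struct slot *slot, uint32_t seqid, int cachethis)
{
        if (seqid == slot->seq_nr + 1) {                /* normal case */
                slot->seq_nr++;
                return NFS4_OK;
        }
        if (seqid == slot->seq_nr)                      /* replay, no DRC */
                return cachethis ? NFS4ERR_REP_TOO_BIG_TO_CACHE
                                 : NFS4ERR_RETRY_UNCACHED_REP;
        if (seqid == 1 && slot->seq_nr + 1 == 0) {      /* wraparound */
                slot->seq_nr = 1;
                return NFS4_OK;
        }
        return NFS4ERR_SEQ_MISORDERED;                  /* assumed fallthrough */
}

int main(void)
{
        struct slot s = { .seq_nr = 5 };

        printf("%d\n", validate_seqid_model(&s, 6, 0)); /* 0: accepted, seq_nr now 6 */
        printf("%d\n", validate_seqid_model(&s, 6, 0)); /* 10068: uncached replay */
        printf("%d\n", validate_seqid_model(&s, 6, 1)); /* 10067: cachethis replay */
        return 0;
}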
@@ -225,27 +231,87 @@ validate_seqid(struct nfs4_slot_table *tbl, u32 slotid, u32 seqid) | |||
225 | return NULL; | 231 | return NULL; |
226 | } | 232 | } |
227 | 233 | ||
228 | /* FIXME: referring calls should be processed */ | 234 | /* |
229 | unsigned nfs4_callback_sequence(struct cb_sequenceargs *args, | 235 | * For each referring call triple, check the session's slot table for |
236 | * a match. If the slot is in use and the sequence numbers match, the | ||
237 | * client is still waiting for a response to the original request. | ||
238 | */ | ||
239 | static bool referring_call_exists(struct nfs_client *clp, | ||
240 | uint32_t nrclists, | ||
241 | struct referring_call_list *rclists) | ||
242 | { | ||
243 | bool status = 0; | ||
244 | int i, j; | ||
245 | struct nfs4_session *session; | ||
246 | struct nfs4_slot_table *tbl; | ||
247 | struct referring_call_list *rclist; | ||
248 | struct referring_call *ref; | ||
249 | |||
250 | /* | ||
251 | * XXX When client trunking is implemented, this becomes | ||
252 | * a session lookup from within the loop | ||
253 | */ | ||
254 | session = clp->cl_session; | ||
255 | tbl = &session->fc_slot_table; | ||
256 | |||
257 | for (i = 0; i < nrclists; i++) { | ||
258 | rclist = &rclists[i]; | ||
259 | if (memcmp(session->sess_id.data, | ||
260 | rclist->rcl_sessionid.data, | ||
261 | NFS4_MAX_SESSIONID_LEN) != 0) | ||
262 | continue; | ||
263 | |||
264 | for (j = 0; j < rclist->rcl_nrefcalls; j++) { | ||
265 | ref = &rclist->rcl_refcalls[j]; | ||
266 | |||
267 | dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u " | ||
268 | "slotid %u\n", __func__, | ||
269 | ((u32 *)&rclist->rcl_sessionid.data)[0], | ||
270 | ((u32 *)&rclist->rcl_sessionid.data)[1], | ||
271 | ((u32 *)&rclist->rcl_sessionid.data)[2], | ||
272 | ((u32 *)&rclist->rcl_sessionid.data)[3], | ||
273 | ref->rc_sequenceid, ref->rc_slotid); | ||
274 | |||
275 | spin_lock(&tbl->slot_tbl_lock); | ||
276 | status = (test_bit(ref->rc_slotid, tbl->used_slots) && | ||
277 | tbl->slots[ref->rc_slotid].seq_nr == | ||
278 | ref->rc_sequenceid); | ||
279 | spin_unlock(&tbl->slot_tbl_lock); | ||
280 | if (status) | ||
281 | goto out; | ||
282 | } | ||
283 | } | ||
284 | |||
285 | out: | ||
286 | return status; | ||
287 | } | ||
288 | |||
289 | __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, | ||
230 | struct cb_sequenceres *res) | 290 | struct cb_sequenceres *res) |
231 | { | 291 | { |
232 | struct nfs_client *clp; | 292 | struct nfs_client *clp; |
233 | int i, status; | 293 | int i; |
234 | 294 | __be32 status; | |
235 | for (i = 0; i < args->csa_nrclists; i++) | ||
236 | kfree(args->csa_rclists[i].rcl_refcalls); | ||
237 | kfree(args->csa_rclists); | ||
238 | 295 | ||
239 | status = htonl(NFS4ERR_BADSESSION); | 296 | status = htonl(NFS4ERR_BADSESSION); |
240 | clp = find_client_with_session(args->csa_addr, 4, &args->csa_sessionid); | 297 | clp = find_client_with_session(args->csa_addr, 4, &args->csa_sessionid); |
241 | if (clp == NULL) | 298 | if (clp == NULL) |
242 | goto out; | 299 | goto out; |
243 | 300 | ||
244 | status = validate_seqid(&clp->cl_session->bc_slot_table, | 301 | status = validate_seqid(&clp->cl_session->bc_slot_table, args); |
245 | args->csa_slotid, args->csa_sequenceid); | ||
246 | if (status) | 302 | if (status) |
247 | goto out_putclient; | 303 | goto out_putclient; |
248 | 304 | ||
305 | /* | ||
306 | * Check for pending referring calls. If a match is found, a | ||
307 | * related callback was received before the response to the original | ||
308 | * call. | ||
309 | */ | ||
310 | if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) { | ||
311 | status = htonl(NFS4ERR_DELAY); | ||
312 | goto out_putclient; | ||
313 | } | ||
314 | |||
249 | memcpy(&res->csr_sessionid, &args->csa_sessionid, | 315 | memcpy(&res->csr_sessionid, &args->csa_sessionid, |
250 | sizeof(res->csr_sessionid)); | 316 | sizeof(res->csr_sessionid)); |
251 | res->csr_sequenceid = args->csa_sequenceid; | 317 | res->csr_sequenceid = args->csa_sequenceid; |
@@ -256,15 +322,23 @@ unsigned nfs4_callback_sequence(struct cb_sequenceargs *args, | |||
256 | out_putclient: | 322 | out_putclient: |
257 | nfs_put_client(clp); | 323 | nfs_put_client(clp); |
258 | out: | 324 | out: |
259 | dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); | 325 | for (i = 0; i < args->csa_nrclists; i++) |
260 | res->csr_status = status; | 326 | kfree(args->csa_rclists[i].rcl_refcalls); |
261 | return res->csr_status; | 327 | kfree(args->csa_rclists); |
328 | |||
329 | if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) | ||
330 | res->csr_status = 0; | ||
331 | else | ||
332 | res->csr_status = status; | ||
333 | dprintk("%s: exit with status = %d res->csr_status %d\n", __func__, | ||
334 | ntohl(status), ntohl(res->csr_status)); | ||
335 | return status; | ||
262 | } | 336 | } |
263 | 337 | ||
264 | unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy) | 338 | __be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy) |
265 | { | 339 | { |
266 | struct nfs_client *clp; | 340 | struct nfs_client *clp; |
267 | int status; | 341 | __be32 status; |
268 | fmode_t flags = 0; | 342 | fmode_t flags = 0; |
269 | 343 | ||
270 | status = htonl(NFS4ERR_OP_NOT_IN_SESSION); | 344 | status = htonl(NFS4ERR_OP_NOT_IN_SESSION); |
@@ -289,4 +363,40 @@ out: | |||
289 | dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); | 363 | dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); |
290 | return status; | 364 | return status; |
291 | } | 365 | } |
366 | |||
367 | /* Reduce the fore channel's max_slots to the target value */ | ||
368 | __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy) | ||
369 | { | ||
370 | struct nfs_client *clp; | ||
371 | struct nfs4_slot_table *fc_tbl; | ||
372 | __be32 status; | ||
373 | |||
374 | status = htonl(NFS4ERR_OP_NOT_IN_SESSION); | ||
375 | clp = nfs_find_client(args->crsa_addr, 4); | ||
376 | if (clp == NULL) | ||
377 | goto out; | ||
378 | |||
379 | dprintk("NFS: CB_RECALL_SLOT request from %s target max slots %d\n", | ||
380 | rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR), | ||
381 | args->crsa_target_max_slots); | ||
382 | |||
383 | fc_tbl = &clp->cl_session->fc_slot_table; | ||
384 | |||
385 | status = htonl(NFS4ERR_BAD_HIGH_SLOT); | ||
386 | if (args->crsa_target_max_slots > fc_tbl->max_slots || | ||
387 | args->crsa_target_max_slots < 1) | ||
388 | goto out_putclient; | ||
389 | |||
390 | status = htonl(NFS4_OK); | ||
391 | if (args->crsa_target_max_slots == fc_tbl->max_slots) | ||
392 | goto out_putclient; | ||
393 | |||
394 | fc_tbl->target_max_slots = args->crsa_target_max_slots; | ||
395 | nfs41_handle_recall_slot(clp); | ||
396 | out_putclient: | ||
397 | nfs_put_client(clp); /* balance nfs_find_client */ | ||
398 | out: | ||
399 | dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); | ||
400 | return status; | ||
401 | } | ||
292 | #endif /* CONFIG_NFS_V4_1 */ | 402 | #endif /* CONFIG_NFS_V4_1 */ |
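Note: the new CB_RECALL_SLOT handler accepts a target only inside [1, current fore-channel max], treats an equal target as a no-op, and otherwise records it and hands the recall to the state manager. A compact plain-C restatement of that decision, not kernel code (NFS4ERR_BAD_HIGH_SLOT is 10077 per RFC 5661):

#include <stdint.h>
#include <stdio.h>

#define NFS4_OK                 0
#define NFS4ERR_BAD_HIGH_SLOT   10077

static int recall_slot_model(uint32_t max_slots, uint32_t target, int *recall)
{
        *recall = 0;
        if (target < 1 || target > max_slots)
                return NFS4ERR_BAD_HIGH_SLOT;
        if (target < max_slots)
                *recall = 1;    /* shrink the fore channel asynchronously */
        return NFS4_OK;
}

int main(void)
{
        int recall;

        printf("%d\n", recall_slot_model(16, 32, &recall));            /* 10077 */
        printf("%d\n", recall_slot_model(16, 16, &recall));            /* 0, no-op */
        printf("%d %d\n", recall_slot_model(16, 8, &recall), recall);  /* 0 1 */
        return 0;
}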
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 8e1a2511c8be..05af212f0edf 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/sunrpc/svc.h> | 9 | #include <linux/sunrpc/svc.h> |
10 | #include <linux/nfs4.h> | 10 | #include <linux/nfs4.h> |
11 | #include <linux/nfs_fs.h> | 11 | #include <linux/nfs_fs.h> |
12 | #include <linux/slab.h> | ||
12 | #include "nfs4_fs.h" | 13 | #include "nfs4_fs.h" |
13 | #include "callback.h" | 14 | #include "callback.h" |
14 | 15 | ||
@@ -24,10 +25,14 @@ | |||
24 | #define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \ | 25 | #define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \ |
25 | 4 + 1 + 3) | 26 | 4 + 1 + 3) |
26 | #define CB_OP_RECALLANY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) | 27 | #define CB_OP_RECALLANY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) |
28 | #define CB_OP_RECALLSLOT_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) | ||
27 | #endif /* CONFIG_NFS_V4_1 */ | 29 | #endif /* CONFIG_NFS_V4_1 */ |
28 | 30 | ||
29 | #define NFSDBG_FACILITY NFSDBG_CALLBACK | 31 | #define NFSDBG_FACILITY NFSDBG_CALLBACK |
30 | 32 | ||
33 | /* Internal error code */ | ||
34 | #define NFS4ERR_RESOURCE_HDR 11050 | ||
35 | |||
31 | typedef __be32 (*callback_process_op_t)(void *, void *); | 36 | typedef __be32 (*callback_process_op_t)(void *, void *); |
32 | typedef __be32 (*callback_decode_arg_t)(struct svc_rqst *, struct xdr_stream *, void *); | 37 | typedef __be32 (*callback_decode_arg_t)(struct svc_rqst *, struct xdr_stream *, void *); |
33 | typedef __be32 (*callback_encode_res_t)(struct svc_rqst *, struct xdr_stream *, void *); | 38 | typedef __be32 (*callback_encode_res_t)(struct svc_rqst *, struct xdr_stream *, void *); |
@@ -173,7 +178,7 @@ static __be32 decode_op_hdr(struct xdr_stream *xdr, unsigned int *op) | |||
173 | __be32 *p; | 178 | __be32 *p; |
174 | p = read_buf(xdr, 4); | 179 | p = read_buf(xdr, 4); |
175 | if (unlikely(p == NULL)) | 180 | if (unlikely(p == NULL)) |
176 | return htonl(NFS4ERR_RESOURCE); | 181 | return htonl(NFS4ERR_RESOURCE_HDR); |
177 | *op = ntohl(*p); | 182 | *op = ntohl(*p); |
178 | return 0; | 183 | return 0; |
179 | } | 184 | } |
@@ -215,10 +220,10 @@ out: | |||
215 | 220 | ||
216 | #if defined(CONFIG_NFS_V4_1) | 221 | #if defined(CONFIG_NFS_V4_1) |
217 | 222 | ||
218 | static unsigned decode_sessionid(struct xdr_stream *xdr, | 223 | static __be32 decode_sessionid(struct xdr_stream *xdr, |
219 | struct nfs4_sessionid *sid) | 224 | struct nfs4_sessionid *sid) |
220 | { | 225 | { |
221 | uint32_t *p; | 226 | __be32 *p; |
222 | int len = NFS4_MAX_SESSIONID_LEN; | 227 | int len = NFS4_MAX_SESSIONID_LEN; |
223 | 228 | ||
224 | p = read_buf(xdr, len); | 229 | p = read_buf(xdr, len); |
@@ -229,12 +234,12 @@ static unsigned decode_sessionid(struct xdr_stream *xdr, | |||
229 | return 0; | 234 | return 0; |
230 | } | 235 | } |
231 | 236 | ||
232 | static unsigned decode_rc_list(struct xdr_stream *xdr, | 237 | static __be32 decode_rc_list(struct xdr_stream *xdr, |
233 | struct referring_call_list *rc_list) | 238 | struct referring_call_list *rc_list) |
234 | { | 239 | { |
235 | uint32_t *p; | 240 | __be32 *p; |
236 | int i; | 241 | int i; |
237 | unsigned status; | 242 | __be32 status; |
238 | 243 | ||
239 | status = decode_sessionid(xdr, &rc_list->rcl_sessionid); | 244 | status = decode_sessionid(xdr, &rc_list->rcl_sessionid); |
240 | if (status) | 245 | if (status) |
@@ -267,13 +272,13 @@ out: | |||
267 | return status; | 272 | return status; |
268 | } | 273 | } |
269 | 274 | ||
270 | static unsigned decode_cb_sequence_args(struct svc_rqst *rqstp, | 275 | static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp, |
271 | struct xdr_stream *xdr, | 276 | struct xdr_stream *xdr, |
272 | struct cb_sequenceargs *args) | 277 | struct cb_sequenceargs *args) |
273 | { | 278 | { |
274 | uint32_t *p; | 279 | __be32 *p; |
275 | int i; | 280 | int i; |
276 | unsigned status; | 281 | __be32 status; |
277 | 282 | ||
278 | status = decode_sessionid(xdr, &args->csa_sessionid); | 283 | status = decode_sessionid(xdr, &args->csa_sessionid); |
279 | if (status) | 284 | if (status) |
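Note: the hunks in this file convert the callback decode/encode helpers from unsigned/uint32_t to __be32, the kernel's sparse-checked "big-endian on the wire" type, so a status produced with htonl() is only ever compared against htonl() constants (as process_op() does). A userspace illustration of why mixing the two byte orders is a bug; plain uint32_t stands in for __be32 here:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define NFS4ERR_RETRY_UNCACHED_REP 10068

int main(void)
{
        uint32_t wire_status = htonl(NFS4ERR_RETRY_UNCACHED_REP);

        /* Wrong on little-endian hosts: compares network order to host order */
        printf("host-order compare:    %d\n",
               wire_status == NFS4ERR_RETRY_UNCACHED_REP);

        /* Correct: both sides in network byte order */
        printf("network-order compare: %d\n",
               wire_status == htonl(NFS4ERR_RETRY_UNCACHED_REP));
        return 0;
}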
@@ -327,11 +332,11 @@ out_free: | |||
327 | goto out; | 332 | goto out; |
328 | } | 333 | } |
329 | 334 | ||
330 | static unsigned decode_recallany_args(struct svc_rqst *rqstp, | 335 | static __be32 decode_recallany_args(struct svc_rqst *rqstp, |
331 | struct xdr_stream *xdr, | 336 | struct xdr_stream *xdr, |
332 | struct cb_recallanyargs *args) | 337 | struct cb_recallanyargs *args) |
333 | { | 338 | { |
334 | uint32_t *p; | 339 | __be32 *p; |
335 | 340 | ||
336 | args->craa_addr = svc_addr(rqstp); | 341 | args->craa_addr = svc_addr(rqstp); |
337 | p = read_buf(xdr, 4); | 342 | p = read_buf(xdr, 4); |
@@ -346,6 +351,20 @@ static unsigned decode_recallany_args(struct svc_rqst *rqstp, | |||
346 | return 0; | 351 | return 0; |
347 | } | 352 | } |
348 | 353 | ||
354 | static __be32 decode_recallslot_args(struct svc_rqst *rqstp, | ||
355 | struct xdr_stream *xdr, | ||
356 | struct cb_recallslotargs *args) | ||
357 | { | ||
358 | __be32 *p; | ||
359 | |||
360 | args->crsa_addr = svc_addr(rqstp); | ||
361 | p = read_buf(xdr, 4); | ||
362 | if (unlikely(p == NULL)) | ||
363 | return htonl(NFS4ERR_BADXDR); | ||
364 | args->crsa_target_max_slots = ntohl(*p++); | ||
365 | return 0; | ||
366 | } | ||
367 | |||
349 | #endif /* CONFIG_NFS_V4_1 */ | 368 | #endif /* CONFIG_NFS_V4_1 */ |
350 | 369 | ||
351 | static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str) | 370 | static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str) |
@@ -465,7 +484,7 @@ static __be32 encode_op_hdr(struct xdr_stream *xdr, uint32_t op, __be32 res) | |||
465 | 484 | ||
466 | p = xdr_reserve_space(xdr, 8); | 485 | p = xdr_reserve_space(xdr, 8); |
467 | if (unlikely(p == NULL)) | 486 | if (unlikely(p == NULL)) |
468 | return htonl(NFS4ERR_RESOURCE); | 487 | return htonl(NFS4ERR_RESOURCE_HDR); |
469 | *p++ = htonl(op); | 488 | *p++ = htonl(op); |
470 | *p = res; | 489 | *p = res; |
471 | return 0; | 490 | return 0; |
@@ -499,10 +518,10 @@ out: | |||
499 | 518 | ||
500 | #if defined(CONFIG_NFS_V4_1) | 519 | #if defined(CONFIG_NFS_V4_1) |
501 | 520 | ||
502 | static unsigned encode_sessionid(struct xdr_stream *xdr, | 521 | static __be32 encode_sessionid(struct xdr_stream *xdr, |
503 | const struct nfs4_sessionid *sid) | 522 | const struct nfs4_sessionid *sid) |
504 | { | 523 | { |
505 | uint32_t *p; | 524 | __be32 *p; |
506 | int len = NFS4_MAX_SESSIONID_LEN; | 525 | int len = NFS4_MAX_SESSIONID_LEN; |
507 | 526 | ||
508 | p = xdr_reserve_space(xdr, len); | 527 | p = xdr_reserve_space(xdr, len); |
@@ -513,11 +532,11 @@ static unsigned encode_sessionid(struct xdr_stream *xdr, | |||
513 | return 0; | 532 | return 0; |
514 | } | 533 | } |
515 | 534 | ||
516 | static unsigned encode_cb_sequence_res(struct svc_rqst *rqstp, | 535 | static __be32 encode_cb_sequence_res(struct svc_rqst *rqstp, |
517 | struct xdr_stream *xdr, | 536 | struct xdr_stream *xdr, |
518 | const struct cb_sequenceres *res) | 537 | const struct cb_sequenceres *res) |
519 | { | 538 | { |
520 | uint32_t *p; | 539 | __be32 *p; |
521 | unsigned status = res->csr_status; | 540 | unsigned status = res->csr_status; |
522 | 541 | ||
523 | if (unlikely(status != 0)) | 542 | if (unlikely(status != 0)) |
@@ -554,6 +573,7 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op) | |||
554 | case OP_CB_RECALL: | 573 | case OP_CB_RECALL: |
555 | case OP_CB_SEQUENCE: | 574 | case OP_CB_SEQUENCE: |
556 | case OP_CB_RECALL_ANY: | 575 | case OP_CB_RECALL_ANY: |
576 | case OP_CB_RECALL_SLOT: | ||
557 | *op = &callback_ops[op_nr]; | 577 | *op = &callback_ops[op_nr]; |
558 | break; | 578 | break; |
559 | 579 | ||
@@ -562,7 +582,6 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op) | |||
562 | case OP_CB_NOTIFY: | 582 | case OP_CB_NOTIFY: |
563 | case OP_CB_PUSH_DELEG: | 583 | case OP_CB_PUSH_DELEG: |
564 | case OP_CB_RECALLABLE_OBJ_AVAIL: | 584 | case OP_CB_RECALLABLE_OBJ_AVAIL: |
565 | case OP_CB_RECALL_SLOT: | ||
566 | case OP_CB_WANTS_CANCELLED: | 585 | case OP_CB_WANTS_CANCELLED: |
567 | case OP_CB_NOTIFY_LOCK: | 586 | case OP_CB_NOTIFY_LOCK: |
568 | return htonl(NFS4ERR_NOTSUPP); | 587 | return htonl(NFS4ERR_NOTSUPP); |
@@ -602,20 +621,18 @@ preprocess_nfs4_op(unsigned int op_nr, struct callback_op **op) | |||
602 | static __be32 process_op(uint32_t minorversion, int nop, | 621 | static __be32 process_op(uint32_t minorversion, int nop, |
603 | struct svc_rqst *rqstp, | 622 | struct svc_rqst *rqstp, |
604 | struct xdr_stream *xdr_in, void *argp, | 623 | struct xdr_stream *xdr_in, void *argp, |
605 | struct xdr_stream *xdr_out, void *resp) | 624 | struct xdr_stream *xdr_out, void *resp, int* drc_status) |
606 | { | 625 | { |
607 | struct callback_op *op = &callback_ops[0]; | 626 | struct callback_op *op = &callback_ops[0]; |
608 | unsigned int op_nr = OP_CB_ILLEGAL; | 627 | unsigned int op_nr; |
609 | __be32 status; | 628 | __be32 status; |
610 | long maxlen; | 629 | long maxlen; |
611 | __be32 res; | 630 | __be32 res; |
612 | 631 | ||
613 | dprintk("%s: start\n", __func__); | 632 | dprintk("%s: start\n", __func__); |
614 | status = decode_op_hdr(xdr_in, &op_nr); | 633 | status = decode_op_hdr(xdr_in, &op_nr); |
615 | if (unlikely(status)) { | 634 | if (unlikely(status)) |
616 | status = htonl(NFS4ERR_OP_ILLEGAL); | 635 | return status; |
617 | goto out; | ||
618 | } | ||
619 | 636 | ||
620 | dprintk("%s: minorversion=%d nop=%d op_nr=%u\n", | 637 | dprintk("%s: minorversion=%d nop=%d op_nr=%u\n", |
621 | __func__, minorversion, nop, op_nr); | 638 | __func__, minorversion, nop, op_nr); |
@@ -624,19 +641,32 @@ static __be32 process_op(uint32_t minorversion, int nop, | |||
624 | preprocess_nfs4_op(op_nr, &op); | 641 | preprocess_nfs4_op(op_nr, &op); |
625 | if (status == htonl(NFS4ERR_OP_ILLEGAL)) | 642 | if (status == htonl(NFS4ERR_OP_ILLEGAL)) |
626 | op_nr = OP_CB_ILLEGAL; | 643 | op_nr = OP_CB_ILLEGAL; |
627 | out: | 644 | if (status) |
645 | goto encode_hdr; | ||
646 | |||
647 | if (*drc_status) { | ||
648 | status = *drc_status; | ||
649 | goto encode_hdr; | ||
650 | } | ||
651 | |||
628 | maxlen = xdr_out->end - xdr_out->p; | 652 | maxlen = xdr_out->end - xdr_out->p; |
629 | if (maxlen > 0 && maxlen < PAGE_SIZE) { | 653 | if (maxlen > 0 && maxlen < PAGE_SIZE) { |
630 | if (likely(status == 0 && op->decode_args != NULL)) | 654 | status = op->decode_args(rqstp, xdr_in, argp); |
631 | status = op->decode_args(rqstp, xdr_in, argp); | 655 | if (likely(status == 0)) |
632 | if (likely(status == 0 && op->process_op != NULL)) | ||
633 | status = op->process_op(argp, resp); | 656 | status = op->process_op(argp, resp); |
634 | } else | 657 | } else |
635 | status = htonl(NFS4ERR_RESOURCE); | 658 | status = htonl(NFS4ERR_RESOURCE); |
636 | 659 | ||
660 | /* Only set by OP_CB_SEQUENCE processing */ | ||
661 | if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) { | ||
662 | *drc_status = status; | ||
663 | status = 0; | ||
664 | } | ||
665 | |||
666 | encode_hdr: | ||
637 | res = encode_op_hdr(xdr_out, op_nr, status); | 667 | res = encode_op_hdr(xdr_out, op_nr, status); |
638 | if (status == 0) | 668 | if (unlikely(res)) |
639 | status = res; | 669 | return res; |
640 | if (op->encode_res != NULL && status == 0) | 670 | if (op->encode_res != NULL && status == 0) |
641 | status = op->encode_res(rqstp, xdr_out, resp); | 671 | status = op->encode_res(rqstp, xdr_out, resp); |
642 | dprintk("%s: done, status = %d\n", __func__, ntohl(status)); | 672 | dprintk("%s: done, status = %d\n", __func__, ntohl(status)); |
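Note: the drc_status plumbing added to process_op() above means that once CB_SEQUENCE reports an uncached replay, the sequence op itself succeeds but every later operation in the compound is answered with NFS4ERR_RETRY_UNCACHED_REP without being decoded or processed. A plain-C model of that loop, not kernel code; "op 0" stands in for CB_SEQUENCE:

#include <stdio.h>

#define NFS4_OK                         0
#define NFS4ERR_RETRY_UNCACHED_REP      10068

static int process_one(int op)
{
        /* Pretend the sequence op detected an uncached replay */
        return op == 0 ? NFS4ERR_RETRY_UNCACHED_REP : NFS4_OK;
}

int main(void)
{
        int drc_status = 0;

        for (int op = 0; op < 3; op++) {
                int status;

                if (drc_status) {
                        status = drc_status;    /* skip decode and processing */
                } else {
                        status = process_one(op);
                        if (status == NFS4ERR_RETRY_UNCACHED_REP) {
                                drc_status = status;    /* remember for later ops */
                                status = 0;             /* sequence op itself is OK */
                        }
                }
                printf("op %d -> status %d\n", op, status);
        }
        return 0;
}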
@@ -652,7 +682,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r | |||
652 | struct cb_compound_hdr_res hdr_res = { NULL }; | 682 | struct cb_compound_hdr_res hdr_res = { NULL }; |
653 | struct xdr_stream xdr_in, xdr_out; | 683 | struct xdr_stream xdr_in, xdr_out; |
654 | __be32 *p; | 684 | __be32 *p; |
655 | __be32 status; | 685 | __be32 status, drc_status = 0; |
656 | unsigned int nops = 0; | 686 | unsigned int nops = 0; |
657 | 687 | ||
658 | dprintk("%s: start\n", __func__); | 688 | dprintk("%s: start\n", __func__); |
@@ -672,11 +702,18 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r | |||
672 | return rpc_system_err; | 702 | return rpc_system_err; |
673 | 703 | ||
674 | while (status == 0 && nops != hdr_arg.nops) { | 704 | while (status == 0 && nops != hdr_arg.nops) { |
675 | status = process_op(hdr_arg.minorversion, nops, | 705 | status = process_op(hdr_arg.minorversion, nops, rqstp, |
676 | rqstp, &xdr_in, argp, &xdr_out, resp); | 706 | &xdr_in, argp, &xdr_out, resp, &drc_status); |
677 | nops++; | 707 | nops++; |
678 | } | 708 | } |
679 | 709 | ||
710 | /* Buffer overflow in decode_ops_hdr or encode_ops_hdr. Return | ||
711 | * resource error in cb_compound status without returning op */ | ||
712 | if (unlikely(status == htonl(NFS4ERR_RESOURCE_HDR))) { | ||
713 | status = htonl(NFS4ERR_RESOURCE); | ||
714 | nops--; | ||
715 | } | ||
716 | |||
680 | *hdr_res.status = status; | 717 | *hdr_res.status = status; |
681 | *hdr_res.nops = htonl(nops); | 718 | *hdr_res.nops = htonl(nops); |
682 | dprintk("%s: done, status = %u\n", __func__, ntohl(status)); | 719 | dprintk("%s: done, status = %u\n", __func__, ntohl(status)); |
@@ -713,6 +750,11 @@ static struct callback_op callback_ops[] = { | |||
713 | .decode_args = (callback_decode_arg_t)decode_recallany_args, | 750 | .decode_args = (callback_decode_arg_t)decode_recallany_args, |
714 | .res_maxsize = CB_OP_RECALLANY_RES_MAXSZ, | 751 | .res_maxsize = CB_OP_RECALLANY_RES_MAXSZ, |
715 | }, | 752 | }, |
753 | [OP_CB_RECALL_SLOT] = { | ||
754 | .process_op = (callback_process_op_t)nfs4_callback_recallslot, | ||
755 | .decode_args = (callback_decode_arg_t)decode_recallslot_args, | ||
756 | .res_maxsize = CB_OP_RECALLSLOT_RES_MAXSZ, | ||
757 | }, | ||
716 | #endif /* CONFIG_NFS_V4_1 */ | 758 | #endif /* CONFIG_NFS_V4_1 */ |
717 | }; | 759 | }; |
718 | 760 | ||
@@ -741,6 +783,7 @@ struct svc_version nfs4_callback_version1 = { | |||
741 | .vs_proc = nfs4_callback_procedures1, | 783 | .vs_proc = nfs4_callback_procedures1, |
742 | .vs_xdrsize = NFS4_CALLBACK_XDRSIZE, | 784 | .vs_xdrsize = NFS4_CALLBACK_XDRSIZE, |
743 | .vs_dispatch = NULL, | 785 | .vs_dispatch = NULL, |
786 | .vs_hidden = 1, | ||
744 | }; | 787 | }; |
745 | 788 | ||
746 | struct svc_version nfs4_callback_version4 = { | 789 | struct svc_version nfs4_callback_version4 = { |
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index ee77713ce68b..a8766c4ef2e0 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/vfs.h> | 35 | #include <linux/vfs.h> |
36 | #include <linux/inet.h> | 36 | #include <linux/inet.h> |
37 | #include <linux/in6.h> | 37 | #include <linux/in6.h> |
38 | #include <linux/slab.h> | ||
38 | #include <net/ipv6.h> | 39 | #include <net/ipv6.h> |
39 | #include <linux/nfs_xdr.h> | 40 | #include <linux/nfs_xdr.h> |
40 | #include <linux/sunrpc/bc_xprt.h> | 41 | #include <linux/sunrpc/bc_xprt.h> |
@@ -164,30 +165,7 @@ error_0: | |||
164 | return ERR_PTR(err); | 165 | return ERR_PTR(err); |
165 | } | 166 | } |
166 | 167 | ||
167 | static void nfs4_shutdown_client(struct nfs_client *clp) | ||
168 | { | ||
169 | #ifdef CONFIG_NFS_V4 | ||
170 | if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state)) | ||
171 | nfs4_kill_renewd(clp); | ||
172 | BUG_ON(!RB_EMPTY_ROOT(&clp->cl_state_owners)); | ||
173 | if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state)) | ||
174 | nfs_idmap_delete(clp); | ||
175 | |||
176 | rpc_destroy_wait_queue(&clp->cl_rpcwaitq); | ||
177 | #endif | ||
178 | } | ||
179 | |||
180 | /* | ||
181 | * Destroy the NFS4 callback service | ||
182 | */ | ||
183 | static void nfs4_destroy_callback(struct nfs_client *clp) | ||
184 | { | ||
185 | #ifdef CONFIG_NFS_V4 | 168 | #ifdef CONFIG_NFS_V4 |
186 | if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state)) | ||
187 | nfs_callback_down(clp->cl_minorversion); | ||
188 | #endif /* CONFIG_NFS_V4 */ | ||
189 | } | ||
190 | |||
191 | /* | 169 | /* |
192 | * Clears/puts all minor version specific parts from an nfs_client struct | 170 | * Clears/puts all minor version specific parts from an nfs_client struct |
193 | * reverting it to minorversion 0. | 171 | * reverting it to minorversion 0. |
@@ -202,9 +180,33 @@ static void nfs4_clear_client_minor_version(struct nfs_client *clp) | |||
202 | 180 | ||
203 | clp->cl_call_sync = _nfs4_call_sync; | 181 | clp->cl_call_sync = _nfs4_call_sync; |
204 | #endif /* CONFIG_NFS_V4_1 */ | 182 | #endif /* CONFIG_NFS_V4_1 */ |
183 | } | ||
184 | |||
185 | /* | ||
186 | * Destroy the NFS4 callback service | ||
187 | */ | ||
188 | static void nfs4_destroy_callback(struct nfs_client *clp) | ||
189 | { | ||
190 | if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state)) | ||
191 | nfs_callback_down(clp->cl_minorversion); | ||
192 | } | ||
205 | 193 | ||
194 | static void nfs4_shutdown_client(struct nfs_client *clp) | ||
195 | { | ||
196 | if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state)) | ||
197 | nfs4_kill_renewd(clp); | ||
198 | nfs4_clear_client_minor_version(clp); | ||
206 | nfs4_destroy_callback(clp); | 199 | nfs4_destroy_callback(clp); |
200 | if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state)) | ||
201 | nfs_idmap_delete(clp); | ||
202 | |||
203 | rpc_destroy_wait_queue(&clp->cl_rpcwaitq); | ||
204 | } | ||
205 | #else | ||
206 | static void nfs4_shutdown_client(struct nfs_client *clp) | ||
207 | { | ||
207 | } | 208 | } |
209 | #endif /* CONFIG_NFS_V4 */ | ||
208 | 210 | ||
209 | /* | 211 | /* |
210 | * Destroy a shared client record | 212 | * Destroy a shared client record |
@@ -213,7 +215,6 @@ static void nfs_free_client(struct nfs_client *clp) | |||
213 | { | 215 | { |
214 | dprintk("--> nfs_free_client(%u)\n", clp->rpc_ops->version); | 216 | dprintk("--> nfs_free_client(%u)\n", clp->rpc_ops->version); |
215 | 217 | ||
216 | nfs4_clear_client_minor_version(clp); | ||
217 | nfs4_shutdown_client(clp); | 218 | nfs4_shutdown_client(clp); |
218 | 219 | ||
219 | nfs_fscache_release_client_cookie(clp); | 220 | nfs_fscache_release_client_cookie(clp); |
@@ -1293,7 +1294,8 @@ static int nfs4_init_server(struct nfs_server *server, | |||
1293 | 1294 | ||
1294 | /* Initialise the client representation from the mount data */ | 1295 | /* Initialise the client representation from the mount data */ |
1295 | server->flags = data->flags; | 1296 | server->flags = data->flags; |
1296 | server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR; | 1297 | server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR| |
1298 | NFS_CAP_POSIX_LOCK; | ||
1297 | server->options = data->options; | 1299 | server->options = data->options; |
1298 | 1300 | ||
1299 | /* Get a client record */ | 1301 | /* Get a client record */ |
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 2563bebc4c67..15671245c6ee 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/kthread.h> | 10 | #include <linux/kthread.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
13 | #include <linux/slab.h> | ||
13 | #include <linux/smp_lock.h> | 14 | #include <linux/smp_lock.h> |
14 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
15 | 16 | ||
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 944b627ec6e1..69e7b8140122 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -71,4 +71,10 @@ static inline int nfs_inode_return_delegation(struct inode *inode) | |||
71 | } | 71 | } |
72 | #endif | 72 | #endif |
73 | 73 | ||
74 | static inline int nfs_have_delegated_attributes(struct inode *inode) | ||
75 | { | ||
76 | return nfs_have_delegation(inode, FMODE_READ) && | ||
77 | !(NFS_I(inode)->cache_validity & NFS_INO_REVAL_FORCED); | ||
78 | } | ||
79 | |||
74 | #endif | 80 | #endif |
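Note: the new nfs_have_delegated_attributes() helper centralizes the test that later hunks in this patch switch to (nfs_access_get_cached() in dir.c and nfs_attribute_timeout() in inode.c): cached attributes may bypass the timeout only while a read delegation is held and no forced revalidation is pending. A small plain-C model of the flag logic, not kernel code; FMODE_READ matches the kernel value, the NFS_INO_REVAL_FORCED bit value is illustrative:

#include <stdio.h>

#define FMODE_READ              0x1
#define NFS_INO_REVAL_FORCED    0x2     /* illustrative bit value */

struct inode_model {
        unsigned int delegation_mode;   /* mode of the delegation held, if any */
        unsigned long cache_validity;
};

static int have_delegated_attributes(const struct inode_model *i)
{
        return (i->delegation_mode & FMODE_READ) &&
               !(i->cache_validity & NFS_INO_REVAL_FORCED);
}

int main(void)
{
        struct inode_model a = { FMODE_READ, 0 };
        struct inode_model b = { FMODE_READ, NFS_INO_REVAL_FORCED };

        printf("%d %d\n", have_delegated_attributes(&a),       /* 1: trust the cache */
                          have_delegated_attributes(&b));      /* 0: revalidate */
        return 0;
}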
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 2c5ace4f00a7..be46f26c9a56 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -560,7 +560,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
560 | desc->entry = &my_entry; | 560 | desc->entry = &my_entry; |
561 | 561 | ||
562 | nfs_block_sillyrename(dentry); | 562 | nfs_block_sillyrename(dentry); |
563 | res = nfs_revalidate_mapping_nolock(inode, filp->f_mapping); | 563 | res = nfs_revalidate_mapping(inode, filp->f_mapping); |
564 | if (res < 0) | 564 | if (res < 0) |
565 | goto out; | 565 | goto out; |
566 | 566 | ||
@@ -1025,12 +1025,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry | |||
1025 | res = NULL; | 1025 | res = NULL; |
1026 | goto out; | 1026 | goto out; |
1027 | /* This turned out not to be a regular file */ | 1027 | /* This turned out not to be a regular file */ |
1028 | case -EISDIR: | ||
1028 | case -ENOTDIR: | 1029 | case -ENOTDIR: |
1029 | goto no_open; | 1030 | goto no_open; |
1030 | case -ELOOP: | 1031 | case -ELOOP: |
1031 | if (!(nd->intent.open.flags & O_NOFOLLOW)) | 1032 | if (!(nd->intent.open.flags & O_NOFOLLOW)) |
1032 | goto no_open; | 1033 | goto no_open; |
1033 | /* case -EISDIR: */ | ||
1034 | /* case -EINVAL: */ | 1034 | /* case -EINVAL: */ |
1035 | default: | 1035 | default: |
1036 | goto out; | 1036 | goto out; |
@@ -1615,6 +1615,7 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1615 | goto out; | 1615 | goto out; |
1616 | 1616 | ||
1617 | new_dentry = dentry; | 1617 | new_dentry = dentry; |
1618 | rehash = NULL; | ||
1618 | new_inode = NULL; | 1619 | new_inode = NULL; |
1619 | } | 1620 | } |
1620 | } | 1621 | } |
@@ -1788,7 +1789,7 @@ static int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, str | |||
1788 | cache = nfs_access_search_rbtree(inode, cred); | 1789 | cache = nfs_access_search_rbtree(inode, cred); |
1789 | if (cache == NULL) | 1790 | if (cache == NULL) |
1790 | goto out; | 1791 | goto out; |
1791 | if (!nfs_have_delegation(inode, FMODE_READ) && | 1792 | if (!nfs_have_delegated_attributes(inode) && |
1792 | !time_in_range_open(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo)) | 1793 | !time_in_range_open(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo)) |
1793 | goto out_stale; | 1794 | goto out_stale; |
1794 | res->jiffies = cache->jiffies; | 1795 | res->jiffies = cache->jiffies; |
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index e1d415e97849..ad4cd31d6050 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -44,6 +44,7 @@ | |||
44 | #include <linux/file.h> | 44 | #include <linux/file.h> |
45 | #include <linux/pagemap.h> | 45 | #include <linux/pagemap.h> |
46 | #include <linux/kref.h> | 46 | #include <linux/kref.h> |
47 | #include <linux/slab.h> | ||
47 | 48 | ||
48 | #include <linux/nfs_fs.h> | 49 | #include <linux/nfs_fs.h> |
49 | #include <linux/nfs_page.h> | 50 | #include <linux/nfs_page.h> |
@@ -342,6 +343,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq, | |||
342 | data->res.fattr = &data->fattr; | 343 | data->res.fattr = &data->fattr; |
343 | data->res.eof = 0; | 344 | data->res.eof = 0; |
344 | data->res.count = bytes; | 345 | data->res.count = bytes; |
346 | nfs_fattr_init(&data->fattr); | ||
345 | msg.rpc_argp = &data->args; | 347 | msg.rpc_argp = &data->args; |
346 | msg.rpc_resp = &data->res; | 348 | msg.rpc_resp = &data->res; |
347 | 349 | ||
@@ -575,6 +577,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq) | |||
575 | data->res.count = 0; | 577 | data->res.count = 0; |
576 | data->res.fattr = &data->fattr; | 578 | data->res.fattr = &data->fattr; |
577 | data->res.verf = &data->verf; | 579 | data->res.verf = &data->verf; |
580 | nfs_fattr_init(&data->fattr); | ||
578 | 581 | ||
579 | NFS_PROTO(data->inode)->commit_setup(data, &msg); | 582 | NFS_PROTO(data->inode)->commit_setup(data, &msg); |
580 | 583 | ||
@@ -766,6 +769,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq, | |||
766 | data->res.fattr = &data->fattr; | 769 | data->res.fattr = &data->fattr; |
767 | data->res.count = bytes; | 770 | data->res.count = bytes; |
768 | data->res.verf = &data->verf; | 771 | data->res.verf = &data->verf; |
772 | nfs_fattr_init(&data->fattr); | ||
769 | 773 | ||
770 | task_setup_data.task = &data->task; | 774 | task_setup_data.task = &data->task; |
771 | task_setup_data.callback_data = data; | 775 | task_setup_data.callback_data = data; |
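Note: the three hunks in this file add nfs_fattr_init() before each direct-I/O RPC is sent, so the reply's attribute structure starts from a known-empty state and fields the server did not return are not mistaken for fresh attributes. A minimal userspace model of the "reset the validity mask before reusing the result buffer" idea, not kernel code; the field and flag names are hypothetical:

#include <stdio.h>
#include <string.h>

#define ATTR_VALID_SIZE 0x1     /* hypothetical validity bit */

struct fattr_model {
        unsigned int valid;     /* which fields below are meaningful */
        long long size;
};

static void fattr_init(struct fattr_model *f)
{
        memset(f, 0, sizeof(*f));       /* nothing valid until the reply fills it in */
}

int main(void)
{
        struct fattr_model f;

        fattr_init(&f);                 /* reset before issuing the request */
        /* ... the RPC runs; a short reply may leave f untouched ... */
        if (f.valid & ATTR_VALID_SIZE)
                printf("size %lld\n", f.size);
        else
                printf("no attributes returned, keep the cached ones\n");
        return 0;
}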
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index 95e1ca765d47..76fd235d0024 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/hash.h> | 9 | #include <linux/hash.h> |
10 | #include <linux/string.h> | 10 | #include <linux/string.h> |
11 | #include <linux/kmod.h> | 11 | #include <linux/kmod.h> |
12 | #include <linux/slab.h> | ||
12 | #include <linux/module.h> | 13 | #include <linux/module.h> |
13 | #include <linux/socket.h> | 14 | #include <linux/socket.h> |
14 | #include <linux/seq_file.h> | 15 | #include <linux/seq_file.h> |
@@ -36,6 +37,19 @@ struct nfs_dns_ent { | |||
36 | }; | 37 | }; |
37 | 38 | ||
38 | 39 | ||
40 | static void nfs_dns_ent_update(struct cache_head *cnew, | ||
41 | struct cache_head *ckey) | ||
42 | { | ||
43 | struct nfs_dns_ent *new; | ||
44 | struct nfs_dns_ent *key; | ||
45 | |||
46 | new = container_of(cnew, struct nfs_dns_ent, h); | ||
47 | key = container_of(ckey, struct nfs_dns_ent, h); | ||
48 | |||
49 | memcpy(&new->addr, &key->addr, key->addrlen); | ||
50 | new->addrlen = key->addrlen; | ||
51 | } | ||
52 | |||
39 | static void nfs_dns_ent_init(struct cache_head *cnew, | 53 | static void nfs_dns_ent_init(struct cache_head *cnew, |
40 | struct cache_head *ckey) | 54 | struct cache_head *ckey) |
41 | { | 55 | { |
@@ -49,8 +63,7 @@ static void nfs_dns_ent_init(struct cache_head *cnew, | |||
49 | new->hostname = kstrndup(key->hostname, key->namelen, GFP_KERNEL); | 63 | new->hostname = kstrndup(key->hostname, key->namelen, GFP_KERNEL); |
50 | if (new->hostname) { | 64 | if (new->hostname) { |
51 | new->namelen = key->namelen; | 65 | new->namelen = key->namelen; |
52 | memcpy(&new->addr, &key->addr, key->addrlen); | 66 | nfs_dns_ent_update(cnew, ckey); |
53 | new->addrlen = key->addrlen; | ||
54 | } else { | 67 | } else { |
55 | new->namelen = 0; | 68 | new->namelen = 0; |
56 | new->addrlen = 0; | 69 | new->addrlen = 0; |
@@ -234,7 +247,7 @@ static struct cache_detail nfs_dns_resolve = { | |||
234 | .cache_show = nfs_dns_show, | 247 | .cache_show = nfs_dns_show, |
235 | .match = nfs_dns_match, | 248 | .match = nfs_dns_match, |
236 | .init = nfs_dns_ent_init, | 249 | .init = nfs_dns_ent_init, |
237 | .update = nfs_dns_ent_init, | 250 | .update = nfs_dns_ent_update, |
238 | .alloc = nfs_dns_ent_alloc, | 251 | .alloc = nfs_dns_ent_alloc, |
239 | }; | 252 | }; |
240 | 253 | ||
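Note: splitting nfs_dns_ent_update() out of nfs_dns_ent_init() matters because the sunrpc cache calls .init when a brand-new entry is created (so the hostname must be duplicated) but .update when an existing entry receives fresh data (only the resolved address should change). A schematic plain-C version with simplified types, not the kernel structures:

#define _POSIX_C_SOURCE 200809L
#include <stdlib.h>
#include <string.h>

struct dns_ent_model {
        char *hostname;         /* owned by the entry */
        size_t namelen;
        char addr[16];          /* resolved address bytes (simplified) */
        size_t addrlen;
};

/* .update: refresh only the volatile part, the address */
static void dns_update(struct dns_ent_model *dst, const struct dns_ent_model *key)
{
        memcpy(dst->addr, key->addr, key->addrlen);
        dst->addrlen = key->addrlen;
}

/* .init: a new entry also needs its own copy of the lookup key */
static void dns_init(struct dns_ent_model *dst, const struct dns_ent_model *key)
{
        dst->hostname = strndup(key->hostname, key->namelen);
        if (dst->hostname) {
                dst->namelen = key->namelen;
                dns_update(dst, key);
        } else {
                dst->namelen = 0;
                dst->addrlen = 0;
        }
}

int main(void)
{
        struct dns_ent_model key = { "example.org", 11, {127, 0, 0, 1}, 4 };
        struct dns_ent_model ent = { 0 };

        dns_init(&ent, &key);   /* new entry: copy name and address */
        dns_update(&ent, &key); /* later refresh: address only */
        free(ent.hostname);
        return 0;
}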
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 6b891328f332..8d965bddb87e 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -24,9 +24,9 @@ | |||
24 | #include <linux/nfs_fs.h> | 24 | #include <linux/nfs_fs.h> |
25 | #include <linux/nfs_mount.h> | 25 | #include <linux/nfs_mount.h> |
26 | #include <linux/mm.h> | 26 | #include <linux/mm.h> |
27 | #include <linux/slab.h> | ||
28 | #include <linux/pagemap.h> | 27 | #include <linux/pagemap.h> |
29 | #include <linux/aio.h> | 28 | #include <linux/aio.h> |
29 | #include <linux/gfp.h> | ||
30 | 30 | ||
31 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
32 | #include <asm/system.h> | 32 | #include <asm/system.h> |
@@ -123,11 +123,11 @@ nfs_file_open(struct inode *inode, struct file *filp) | |||
123 | filp->f_path.dentry->d_parent->d_name.name, | 123 | filp->f_path.dentry->d_parent->d_name.name, |
124 | filp->f_path.dentry->d_name.name); | 124 | filp->f_path.dentry->d_name.name); |
125 | 125 | ||
126 | nfs_inc_stats(inode, NFSIOS_VFSOPEN); | ||
126 | res = nfs_check_flags(filp->f_flags); | 127 | res = nfs_check_flags(filp->f_flags); |
127 | if (res) | 128 | if (res) |
128 | return res; | 129 | return res; |
129 | 130 | ||
130 | nfs_inc_stats(inode, NFSIOS_VFSOPEN); | ||
131 | res = nfs_open(inode, filp); | 131 | res = nfs_open(inode, filp); |
132 | return res; | 132 | return res; |
133 | } | 133 | } |
@@ -237,9 +237,9 @@ nfs_file_flush(struct file *file, fl_owner_t id) | |||
237 | dentry->d_parent->d_name.name, | 237 | dentry->d_parent->d_name.name, |
238 | dentry->d_name.name); | 238 | dentry->d_name.name); |
239 | 239 | ||
240 | nfs_inc_stats(inode, NFSIOS_VFSFLUSH); | ||
240 | if ((file->f_mode & FMODE_WRITE) == 0) | 241 | if ((file->f_mode & FMODE_WRITE) == 0) |
241 | return 0; | 242 | return 0; |
242 | nfs_inc_stats(inode, NFSIOS_VFSFLUSH); | ||
243 | 243 | ||
244 | /* Flush writes to the server and return any errors */ | 244 | /* Flush writes to the server and return any errors */ |
245 | return nfs_do_fsync(ctx, inode); | 245 | return nfs_do_fsync(ctx, inode); |
@@ -262,9 +262,11 @@ nfs_file_read(struct kiocb *iocb, const struct iovec *iov, | |||
262 | (unsigned long) count, (unsigned long) pos); | 262 | (unsigned long) count, (unsigned long) pos); |
263 | 263 | ||
264 | result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping); | 264 | result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping); |
265 | nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, count); | 265 | if (!result) { |
266 | if (!result) | ||
267 | result = generic_file_aio_read(iocb, iov, nr_segs, pos); | 266 | result = generic_file_aio_read(iocb, iov, nr_segs, pos); |
267 | if (result > 0) | ||
268 | nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, result); | ||
269 | } | ||
268 | return result; | 270 | return result; |
269 | } | 271 | } |
270 | 272 | ||
@@ -282,8 +284,11 @@ nfs_file_splice_read(struct file *filp, loff_t *ppos, | |||
282 | (unsigned long) count, (unsigned long long) *ppos); | 284 | (unsigned long) count, (unsigned long long) *ppos); |
283 | 285 | ||
284 | res = nfs_revalidate_mapping(inode, filp->f_mapping); | 286 | res = nfs_revalidate_mapping(inode, filp->f_mapping); |
285 | if (!res) | 287 | if (!res) { |
286 | res = generic_file_splice_read(filp, ppos, pipe, count, flags); | 288 | res = generic_file_splice_read(filp, ppos, pipe, count, flags); |
289 | if (res > 0) | ||
290 | nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, res); | ||
291 | } | ||
287 | return res; | 292 | return res; |
288 | } | 293 | } |
289 | 294 | ||
@@ -486,6 +491,9 @@ static int nfs_release_page(struct page *page, gfp_t gfp) | |||
486 | { | 491 | { |
487 | dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page); | 492 | dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page); |
488 | 493 | ||
494 | /* Only do I/O if gfp is a superset of GFP_KERNEL */ | ||
495 | if ((gfp & GFP_KERNEL) == GFP_KERNEL) | ||
496 | nfs_wb_page(page->mapping->host, page); | ||
489 | /* If PagePrivate() is set, then the page is not freeable */ | 497 | /* If PagePrivate() is set, then the page is not freeable */ |
490 | if (PagePrivate(page)) | 498 | if (PagePrivate(page)) |
491 | return 0; | 499 | return 0; |
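Note: the releasepage hunk above only starts writeback when the allocation that is trying to free the page could itself block on filesystem I/O; the test is that gfp contains every bit of GFP_KERNEL ("a superset of GFP_KERNEL", per the added comment). A small plain-C bitmask demo of that superset test; the flag values are illustrative, not the real kernel ones:

#include <stdio.h>

#define __GFP_WAIT      0x1     /* illustrative values */
#define __GFP_IO        0x2
#define __GFP_FS        0x4
#define GFP_KERNEL      (__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_NOFS        (__GFP_WAIT | __GFP_IO)

static int may_do_fs_io(unsigned int gfp)
{
        /* superset test: every GFP_KERNEL bit must be present */
        return (gfp & GFP_KERNEL) == GFP_KERNEL;
}

int main(void)
{
        printf("GFP_KERNEL: %d\n", may_do_fs_io(GFP_KERNEL));   /* 1: safe to write back */
        printf("GFP_NOFS:   %d\n", may_do_fs_io(GFP_NOFS));     /* 0: skip the writeback */
        return 0;
}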
@@ -594,6 +602,7 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov, | |||
594 | { | 602 | { |
595 | struct dentry * dentry = iocb->ki_filp->f_path.dentry; | 603 | struct dentry * dentry = iocb->ki_filp->f_path.dentry; |
596 | struct inode * inode = dentry->d_inode; | 604 | struct inode * inode = dentry->d_inode; |
605 | unsigned long written = 0; | ||
597 | ssize_t result; | 606 | ssize_t result; |
598 | size_t count = iov_length(iov, nr_segs); | 607 | size_t count = iov_length(iov, nr_segs); |
599 | 608 | ||
@@ -620,14 +629,18 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov, | |||
620 | if (!count) | 629 | if (!count) |
621 | goto out; | 630 | goto out; |
622 | 631 | ||
623 | nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count); | ||
624 | result = generic_file_aio_write(iocb, iov, nr_segs, pos); | 632 | result = generic_file_aio_write(iocb, iov, nr_segs, pos); |
633 | if (result > 0) | ||
634 | written = result; | ||
635 | |||
625 | /* Return error values for O_DSYNC and IS_SYNC() */ | 636 | /* Return error values for O_DSYNC and IS_SYNC() */ |
626 | if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) { | 637 | if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) { |
627 | int err = nfs_do_fsync(nfs_file_open_context(iocb->ki_filp), inode); | 638 | int err = nfs_do_fsync(nfs_file_open_context(iocb->ki_filp), inode); |
628 | if (err < 0) | 639 | if (err < 0) |
629 | result = err; | 640 | result = err; |
630 | } | 641 | } |
642 | if (result > 0) | ||
643 | nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written); | ||
631 | out: | 644 | out: |
632 | return result; | 645 | return result; |
633 | 646 | ||
@@ -642,6 +655,7 @@ static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe, | |||
642 | { | 655 | { |
643 | struct dentry *dentry = filp->f_path.dentry; | 656 | struct dentry *dentry = filp->f_path.dentry; |
644 | struct inode *inode = dentry->d_inode; | 657 | struct inode *inode = dentry->d_inode; |
658 | unsigned long written = 0; | ||
645 | ssize_t ret; | 659 | ssize_t ret; |
646 | 660 | ||
647 | dprintk("NFS splice_write(%s/%s, %lu@%llu)\n", | 661 | dprintk("NFS splice_write(%s/%s, %lu@%llu)\n", |
@@ -652,14 +666,17 @@ static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe, | |||
652 | * The combination of splice and an O_APPEND destination is disallowed. | 666 | * The combination of splice and an O_APPEND destination is disallowed. |
653 | */ | 667 | */ |
654 | 668 | ||
655 | nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count); | ||
656 | |||
657 | ret = generic_file_splice_write(pipe, filp, ppos, count, flags); | 669 | ret = generic_file_splice_write(pipe, filp, ppos, count, flags); |
670 | if (ret > 0) | ||
671 | written = ret; | ||
672 | |||
658 | if (ret >= 0 && nfs_need_sync_write(filp, inode)) { | 673 | if (ret >= 0 && nfs_need_sync_write(filp, inode)) { |
659 | int err = nfs_do_fsync(nfs_file_open_context(filp), inode); | 674 | int err = nfs_do_fsync(nfs_file_open_context(filp), inode); |
660 | if (err < 0) | 675 | if (err < 0) |
661 | ret = err; | 676 | ret = err; |
662 | } | 677 | } |
678 | if (ret > 0) | ||
679 | nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written); | ||
663 | return ret; | 680 | return ret; |
664 | } | 681 | } |
665 | 682 | ||
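Note: the read, splice_read, write and splice_write hunks in this file all move the NFSIOS_NORMALREADBYTES / NFSIOS_NORMALWRITTENBYTES accounting from "bytes requested" to "bytes actually transferred", counted only after the generic I/O call succeeds, so short transfers no longer inflate the counters. A tiny userspace analogue with a hypothetical counter, not kernel code:

#include <stdio.h>
#include <unistd.h>

static unsigned long long read_bytes_counter;   /* stands in for NFSIOS_NORMALREADBYTES */

static ssize_t counted_read(int fd, void *buf, size_t count)
{
        ssize_t result = read(fd, buf, count);

        /* account only what was really transferred, and only on success */
        if (result > 0)
                read_bytes_counter += (unsigned long long)result;
        return result;
}

int main(void)
{
        char buf[4096];

        (void)counted_read(0, buf, sizeof(buf));        /* may legitimately be short */
        printf("bytes read so far: %llu\n", read_bytes_counter);
        return 0;
}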
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index fa588006588d..a6b16ed93229 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/nfs_fs_sb.h> | 17 | #include <linux/nfs_fs_sb.h> |
18 | #include <linux/in6.h> | 18 | #include <linux/in6.h> |
19 | #include <linux/seq_file.h> | 19 | #include <linux/seq_file.h> |
20 | #include <linux/slab.h> | ||
20 | 21 | ||
21 | #include "internal.h" | 22 | #include "internal.h" |
22 | #include "iostat.h" | 23 | #include "iostat.h" |
@@ -354,12 +355,11 @@ void nfs_fscache_reset_inode_cookie(struct inode *inode) | |||
354 | */ | 355 | */ |
355 | int nfs_fscache_release_page(struct page *page, gfp_t gfp) | 356 | int nfs_fscache_release_page(struct page *page, gfp_t gfp) |
356 | { | 357 | { |
357 | struct nfs_inode *nfsi = NFS_I(page->mapping->host); | ||
358 | struct fscache_cookie *cookie = nfsi->fscache; | ||
359 | |||
360 | BUG_ON(!cookie); | ||
361 | |||
362 | if (PageFsCache(page)) { | 358 | if (PageFsCache(page)) { |
359 | struct nfs_inode *nfsi = NFS_I(page->mapping->host); | ||
360 | struct fscache_cookie *cookie = nfsi->fscache; | ||
361 | |||
362 | BUG_ON(!cookie); | ||
363 | dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n", | 363 | dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n", |
364 | cookie, page, nfsi); | 364 | cookie, page, nfsi); |
365 | 365 | ||
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index faa091865ad0..50a56edca0b5 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/vfs.h> | 36 | #include <linux/vfs.h> |
37 | #include <linux/inet.h> | 37 | #include <linux/inet.h> |
38 | #include <linux/nfs_xdr.h> | 38 | #include <linux/nfs_xdr.h> |
39 | #include <linux/slab.h> | ||
39 | 40 | ||
40 | #include <asm/system.h> | 41 | #include <asm/system.h> |
41 | #include <asm/uaccess.h> | 42 | #include <asm/uaccess.h> |
@@ -97,22 +98,6 @@ u64 nfs_compat_user_ino64(u64 fileid) | |||
97 | return ino; | 98 | return ino; |
98 | } | 99 | } |
99 | 100 | ||
100 | int nfs_write_inode(struct inode *inode, int sync) | ||
101 | { | ||
102 | int ret; | ||
103 | |||
104 | if (sync) { | ||
105 | ret = filemap_fdatawait(inode->i_mapping); | ||
106 | if (ret == 0) | ||
107 | ret = nfs_commit_inode(inode, FLUSH_SYNC); | ||
108 | } else | ||
109 | ret = nfs_commit_inode(inode, 0); | ||
110 | if (ret >= 0) | ||
111 | return 0; | ||
112 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); | ||
113 | return ret; | ||
114 | } | ||
115 | |||
116 | void nfs_clear_inode(struct inode *inode) | 101 | void nfs_clear_inode(struct inode *inode) |
117 | { | 102 | { |
118 | /* | 103 | /* |
@@ -130,16 +115,12 @@ void nfs_clear_inode(struct inode *inode) | |||
130 | */ | 115 | */ |
131 | int nfs_sync_mapping(struct address_space *mapping) | 116 | int nfs_sync_mapping(struct address_space *mapping) |
132 | { | 117 | { |
133 | int ret; | 118 | int ret = 0; |
134 | 119 | ||
135 | if (mapping->nrpages == 0) | 120 | if (mapping->nrpages != 0) { |
136 | return 0; | 121 | unmap_mapping_range(mapping, 0, 0, 0); |
137 | unmap_mapping_range(mapping, 0, 0, 0); | 122 | ret = nfs_wb_all(mapping->host); |
138 | ret = filemap_write_and_wait(mapping); | 123 | } |
139 | if (ret != 0) | ||
140 | goto out; | ||
141 | ret = nfs_wb_all(mapping->host); | ||
142 | out: | ||
143 | return ret; | 124 | return ret; |
144 | } | 125 | } |
145 | 126 | ||
@@ -511,17 +492,11 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) | |||
511 | int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME; | 492 | int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME; |
512 | int err; | 493 | int err; |
513 | 494 | ||
514 | /* | 495 | /* Flush out writes to the server in order to update c/mtime. */ |
515 | * Flush out writes to the server in order to update c/mtime. | ||
516 | * | ||
517 | * Hold the i_mutex to suspend application writes temporarily; | ||
518 | * this prevents long-running writing applications from blocking | ||
519 | * nfs_wb_nocommit. | ||
520 | */ | ||
521 | if (S_ISREG(inode->i_mode)) { | 496 | if (S_ISREG(inode->i_mode)) { |
522 | mutex_lock(&inode->i_mutex); | 497 | err = filemap_write_and_wait(inode->i_mapping); |
523 | nfs_wb_nocommit(inode); | 498 | if (err) |
524 | mutex_unlock(&inode->i_mutex); | 499 | goto out; |
525 | } | 500 | } |
526 | 501 | ||
527 | /* | 502 | /* |
@@ -545,6 +520,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) | |||
545 | generic_fillattr(inode, stat); | 520 | generic_fillattr(inode, stat); |
546 | stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode)); | 521 | stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode)); |
547 | } | 522 | } |
523 | out: | ||
548 | return err; | 524 | return err; |
549 | } | 525 | } |
550 | 526 | ||
@@ -574,14 +550,14 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync) | |||
574 | nfs_revalidate_inode(server, inode); | 550 | nfs_revalidate_inode(server, inode); |
575 | } | 551 | } |
576 | 552 | ||
577 | static struct nfs_open_context *alloc_nfs_open_context(struct vfsmount *mnt, struct dentry *dentry, struct rpc_cred *cred) | 553 | static struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct rpc_cred *cred) |
578 | { | 554 | { |
579 | struct nfs_open_context *ctx; | 555 | struct nfs_open_context *ctx; |
580 | 556 | ||
581 | ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); | 557 | ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); |
582 | if (ctx != NULL) { | 558 | if (ctx != NULL) { |
583 | ctx->path.dentry = dget(dentry); | 559 | ctx->path = *path; |
584 | ctx->path.mnt = mntget(mnt); | 560 | path_get(&ctx->path); |
585 | ctx->cred = get_rpccred(cred); | 561 | ctx->cred = get_rpccred(cred); |
586 | ctx->state = NULL; | 562 | ctx->state = NULL; |
587 | ctx->lockowner = current->files; | 563 | ctx->lockowner = current->files; |
@@ -620,11 +596,6 @@ void put_nfs_open_context(struct nfs_open_context *ctx) | |||
620 | __put_nfs_open_context(ctx, 0); | 596 | __put_nfs_open_context(ctx, 0); |
621 | } | 597 | } |
622 | 598 | ||
623 | static void put_nfs_open_context_sync(struct nfs_open_context *ctx) | ||
624 | { | ||
625 | __put_nfs_open_context(ctx, 1); | ||
626 | } | ||
627 | |||
628 | /* | 599 | /* |
629 | * Ensure that mmap has a recent RPC credential for use when writing out | 600 | * Ensure that mmap has a recent RPC credential for use when writing out |
630 | * shared pages | 601 | * shared pages |
@@ -652,10 +623,10 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_c | |||
652 | list_for_each_entry(pos, &nfsi->open_files, list) { | 623 | list_for_each_entry(pos, &nfsi->open_files, list) { |
653 | if (cred != NULL && pos->cred != cred) | 624 | if (cred != NULL && pos->cred != cred) |
654 | continue; | 625 | continue; |
655 | if ((pos->mode & mode) == mode) { | 626 | if ((pos->mode & (FMODE_READ|FMODE_WRITE)) != mode) |
656 | ctx = get_nfs_open_context(pos); | 627 | continue; |
657 | break; | 628 | ctx = get_nfs_open_context(pos); |
658 | } | 629 | break; |
659 | } | 630 | } |
660 | spin_unlock(&inode->i_lock); | 631 | spin_unlock(&inode->i_lock); |
661 | return ctx; | 632 | return ctx; |
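Note: the old test "(pos->mode & mode) == mode" accepted any open context whose mode was a superset of the requested one, so a read-write context could satisfy a read-only lookup; the new test requires the FMODE_READ|FMODE_WRITE bits to match exactly. A tiny plain-C comparison of the two predicates, with FMODE_* values as in the kernel:

#include <stdio.h>

#define FMODE_READ      0x1
#define FMODE_WRITE     0x2

int main(void)
{
        unsigned int ctx_mode = FMODE_READ | FMODE_WRITE;       /* an O_RDWR context */
        unsigned int want = FMODE_READ;                         /* lookup for read only */

        /* old: superset match, the read-write context is accepted */
        printf("superset match: %d\n", (ctx_mode & want) == want);

        /* new: the read/write bits must match exactly, so it is skipped */
        printf("exact match:    %d\n", (ctx_mode & (FMODE_READ | FMODE_WRITE)) == want);
        return 0;
}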
@@ -671,7 +642,7 @@ static void nfs_file_clear_open_context(struct file *filp) | |||
671 | spin_lock(&inode->i_lock); | 642 | spin_lock(&inode->i_lock); |
672 | list_move_tail(&ctx->list, &NFS_I(inode)->open_files); | 643 | list_move_tail(&ctx->list, &NFS_I(inode)->open_files); |
673 | spin_unlock(&inode->i_lock); | 644 | spin_unlock(&inode->i_lock); |
674 | put_nfs_open_context_sync(ctx); | 645 | __put_nfs_open_context(ctx, filp->f_flags & O_DIRECT ? 0 : 1); |
675 | } | 646 | } |
676 | } | 647 | } |
677 | 648 | ||
@@ -686,7 +657,7 @@ int nfs_open(struct inode *inode, struct file *filp) | |||
686 | cred = rpc_lookup_cred(); | 657 | cred = rpc_lookup_cred(); |
687 | if (IS_ERR(cred)) | 658 | if (IS_ERR(cred)) |
688 | return PTR_ERR(cred); | 659 | return PTR_ERR(cred); |
689 | ctx = alloc_nfs_open_context(filp->f_path.mnt, filp->f_path.dentry, cred); | 660 | ctx = alloc_nfs_open_context(&filp->f_path, cred); |
690 | put_rpccred(cred); | 661 | put_rpccred(cred); |
691 | if (ctx == NULL) | 662 | if (ctx == NULL) |
692 | return -ENOMEM; | 663 | return -ENOMEM; |
@@ -759,7 +730,7 @@ int nfs_attribute_timeout(struct inode *inode) | |||
759 | { | 730 | { |
760 | struct nfs_inode *nfsi = NFS_I(inode); | 731 | struct nfs_inode *nfsi = NFS_I(inode); |
761 | 732 | ||
762 | if (nfs_have_delegation(inode, FMODE_READ)) | 733 | if (nfs_have_delegated_attributes(inode)) |
763 | return 0; | 734 | return 0; |
764 | return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo); | 735 | return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo); |
765 | } | 736 | } |
@@ -779,7 +750,7 @@ int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode) | |||
779 | return __nfs_revalidate_inode(server, inode); | 750 | return __nfs_revalidate_inode(server, inode); |
780 | } | 751 | } |
781 | 752 | ||
782 | static int nfs_invalidate_mapping_nolock(struct inode *inode, struct address_space *mapping) | 753 | static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping) |
783 | { | 754 | { |
784 | struct nfs_inode *nfsi = NFS_I(inode); | 755 | struct nfs_inode *nfsi = NFS_I(inode); |
785 | 756 | ||
@@ -800,49 +771,10 @@ static int nfs_invalidate_mapping_nolock(struct inode *inode, struct address_spa | |||
800 | return 0; | 771 | return 0; |
801 | } | 772 | } |
802 | 773 | ||
803 | static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping) | ||
804 | { | ||
805 | int ret = 0; | ||
806 | |||
807 | mutex_lock(&inode->i_mutex); | ||
808 | if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_DATA) { | ||
809 | ret = nfs_sync_mapping(mapping); | ||
810 | if (ret == 0) | ||
811 | ret = nfs_invalidate_mapping_nolock(inode, mapping); | ||
812 | } | ||
813 | mutex_unlock(&inode->i_mutex); | ||
814 | return ret; | ||
815 | } | ||
816 | |||
817 | /** | ||
818 | * nfs_revalidate_mapping_nolock - Revalidate the pagecache | ||
819 | * @inode - pointer to host inode | ||
820 | * @mapping - pointer to mapping | ||
821 | */ | ||
822 | int nfs_revalidate_mapping_nolock(struct inode *inode, struct address_space *mapping) | ||
823 | { | ||
824 | struct nfs_inode *nfsi = NFS_I(inode); | ||
825 | int ret = 0; | ||
826 | |||
827 | if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE) | ||
828 | || nfs_attribute_timeout(inode) || NFS_STALE(inode)) { | ||
829 | ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode); | ||
830 | if (ret < 0) | ||
831 | goto out; | ||
832 | } | ||
833 | if (nfsi->cache_validity & NFS_INO_INVALID_DATA) | ||
834 | ret = nfs_invalidate_mapping_nolock(inode, mapping); | ||
835 | out: | ||
836 | return ret; | ||
837 | } | ||
838 | |||
839 | /** | 774 | /** |
840 | * nfs_revalidate_mapping - Revalidate the pagecache | 775 | * nfs_revalidate_mapping - Revalidate the pagecache |
841 | * @inode - pointer to host inode | 776 | * @inode - pointer to host inode |
842 | * @mapping - pointer to mapping | 777 | * @mapping - pointer to mapping |
843 | * | ||
844 | * This version of the function will take the inode->i_mutex and attempt to | ||
845 | * flush out all dirty data if it needs to invalidate the page cache. | ||
846 | */ | 778 | */ |
847 | int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping) | 779 | int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping) |
848 | { | 780 | { |
@@ -1261,8 +1193,10 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) | |||
1261 | 1193 | ||
1262 | if (fattr->valid & NFS_ATTR_FATTR_MODE) { | 1194 | if (fattr->valid & NFS_ATTR_FATTR_MODE) { |
1263 | if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) { | 1195 | if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) { |
1196 | umode_t newmode = inode->i_mode & S_IFMT; | ||
1197 | newmode |= fattr->mode & S_IALLUGO; | ||
1198 | inode->i_mode = newmode; | ||
1264 | invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; | 1199 | invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; |
1265 | inode->i_mode = fattr->mode; | ||
1266 | } | 1200 | } |
1267 | } else if (server->caps & NFS_CAP_MODE) | 1201 | } else if (server->caps & NFS_CAP_MODE) |
1268 | invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR | 1202 | invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR |
@@ -1418,6 +1352,7 @@ static void init_once(void *foo) | |||
1418 | INIT_LIST_HEAD(&nfsi->access_cache_inode_lru); | 1352 | INIT_LIST_HEAD(&nfsi->access_cache_inode_lru); |
1419 | INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC); | 1353 | INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC); |
1420 | nfsi->npages = 0; | 1354 | nfsi->npages = 0; |
1355 | nfsi->ncommit = 0; | ||
1421 | atomic_set(&nfsi->silly_count, 1); | 1356 | atomic_set(&nfsi->silly_count, 1); |
1422 | INIT_HLIST_HEAD(&nfsi->silly_list); | 1357 | INIT_HLIST_HEAD(&nfsi->silly_list); |
1423 | init_waitqueue_head(&nfsi->waitqueue); | 1358 | init_waitqueue_head(&nfsi->waitqueue); |
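The nfs_update_inode() hunk above stops assigning the server-supplied mode wholesale and instead merges only the permission bits into i_mode, so the locally known file-type bits can never be clobbered by a GETATTR reply. A minimal user-space sketch of that masking follows; the input values are hypothetical and S_IALLUGO is written out since it is a kernel-internal macro.

#include <stdio.h>
#include <sys/stat.h>

/* Keep the inode's file-type bits (S_IFMT) and take only the
 * permission bits (roughly 07777) from the server-reported mode,
 * mirroring the masking done in nfs_update_inode() above. */
static mode_t merge_mode(mode_t i_mode, mode_t fattr_mode)
{
        mode_t newmode = i_mode & S_IFMT;               /* preserve file type */
        newmode |= fattr_mode &
                   (S_ISUID|S_ISGID|S_ISVTX|S_IRWXU|S_IRWXG|S_IRWXO);
        return newmode;
}

int main(void)
{
        /* hypothetical inputs: a regular file whose server-side mode
         * carries a bogus directory type bit */
        mode_t merged = merge_mode(S_IFREG | 0644, S_IFDIR | 0755);
        printf("merged mode: %o\n", (unsigned)merged);  /* prints 100755 */
        return 0;
}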
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 29e464d23b32..11f82f03c5de 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h | |||
@@ -211,7 +211,7 @@ extern int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask); | |||
211 | extern struct workqueue_struct *nfsiod_workqueue; | 211 | extern struct workqueue_struct *nfsiod_workqueue; |
212 | extern struct inode *nfs_alloc_inode(struct super_block *sb); | 212 | extern struct inode *nfs_alloc_inode(struct super_block *sb); |
213 | extern void nfs_destroy_inode(struct inode *); | 213 | extern void nfs_destroy_inode(struct inode *); |
214 | extern int nfs_write_inode(struct inode *,int); | 214 | extern int nfs_write_inode(struct inode *, struct writeback_control *); |
215 | extern void nfs_clear_inode(struct inode *); | 215 | extern void nfs_clear_inode(struct inode *); |
216 | #ifdef CONFIG_NFS_V4 | 216 | #ifdef CONFIG_NFS_V4 |
217 | extern void nfs4_clear_inode(struct inode *); | 217 | extern void nfs4_clear_inode(struct inode *); |
diff --git a/fs/nfs/iostat.h b/fs/nfs/iostat.h index 46d779abafd3..1d8d5c813b01 100644 --- a/fs/nfs/iostat.h +++ b/fs/nfs/iostat.h | |||
@@ -57,12 +57,12 @@ static inline void nfs_add_fscache_stats(struct inode *inode, | |||
57 | } | 57 | } |
58 | #endif | 58 | #endif |
59 | 59 | ||
60 | static inline struct nfs_iostats *nfs_alloc_iostats(void) | 60 | static inline struct nfs_iostats __percpu *nfs_alloc_iostats(void) |
61 | { | 61 | { |
62 | return alloc_percpu(struct nfs_iostats); | 62 | return alloc_percpu(struct nfs_iostats); |
63 | } | 63 | } |
64 | 64 | ||
65 | static inline void nfs_free_iostats(struct nfs_iostats *stats) | 65 | static inline void nfs_free_iostats(struct nfs_iostats __percpu *stats) |
66 | { | 66 | { |
67 | if (stats != NULL) | 67 | if (stats != NULL) |
68 | free_percpu(stats); | 68 | free_percpu(stats); |
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c index 0adefc40cc89..59047f8d7d72 100644 --- a/fs/nfs/mount_clnt.c +++ b/fs/nfs/mount_clnt.c | |||
@@ -120,7 +120,7 @@ static struct { | |||
120 | { .status = MNT3ERR_INVAL, .errno = -EINVAL, }, | 120 | { .status = MNT3ERR_INVAL, .errno = -EINVAL, }, |
121 | { .status = MNT3ERR_NAMETOOLONG, .errno = -ENAMETOOLONG, }, | 121 | { .status = MNT3ERR_NAMETOOLONG, .errno = -ENAMETOOLONG, }, |
122 | { .status = MNT3ERR_NOTSUPP, .errno = -ENOTSUPP, }, | 122 | { .status = MNT3ERR_NOTSUPP, .errno = -ENOTSUPP, }, |
123 | { .status = MNT3ERR_SERVERFAULT, .errno = -ESERVERFAULT, }, | 123 | { .status = MNT3ERR_SERVERFAULT, .errno = -EREMOTEIO, }, |
124 | }; | 124 | }; |
125 | 125 | ||
126 | struct mountres { | 126 | struct mountres { |
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index 40c766782891..7888cf36022d 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c | |||
@@ -8,6 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/dcache.h> | 10 | #include <linux/dcache.h> |
11 | #include <linux/gfp.h> | ||
11 | #include <linux/mount.h> | 12 | #include <linux/mount.h> |
12 | #include <linux/namei.h> | 13 | #include <linux/namei.h> |
13 | #include <linux/nfs_fs.h> | 14 | #include <linux/nfs_fs.h> |
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index 5e078b222b4e..81cf14257916 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/param.h> | 12 | #include <linux/param.h> |
13 | #include <linux/time.h> | 13 | #include <linux/time.h> |
14 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
15 | #include <linux/slab.h> | ||
16 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
17 | #include <linux/string.h> | 16 | #include <linux/string.h> |
18 | #include <linux/in.h> | 17 | #include <linux/in.h> |
@@ -699,7 +698,7 @@ static struct { | |||
699 | { NFSERR_BAD_COOKIE, -EBADCOOKIE }, | 698 | { NFSERR_BAD_COOKIE, -EBADCOOKIE }, |
700 | { NFSERR_NOTSUPP, -ENOTSUPP }, | 699 | { NFSERR_NOTSUPP, -ENOTSUPP }, |
701 | { NFSERR_TOOSMALL, -ETOOSMALL }, | 700 | { NFSERR_TOOSMALL, -ETOOSMALL }, |
702 | { NFSERR_SERVERFAULT, -ESERVERFAULT }, | 701 | { NFSERR_SERVERFAULT, -EREMOTEIO }, |
703 | { NFSERR_BADTYPE, -EBADTYPE }, | 702 | { NFSERR_BADTYPE, -EBADTYPE }, |
704 | { NFSERR_JUKEBOX, -EJUKEBOX }, | 703 | { NFSERR_JUKEBOX, -EJUKEBOX }, |
705 | { -1, -EIO } | 704 | { -1, -EIO } |
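The nfs2xdr.c error table above (and its NFSv4 counterpart further down) is a small array of { nfs status, negative errno } pairs ended by a { -1, -EIO } sentinel; the series changes the server-fault entries to map to -EREMOTEIO instead of -ESERVERFAULT so the value is an errno recognised outside the kernel. A user-space sketch of that table-driven lookup, with only a subset of entries and illustrative status numbers:

#include <errno.h>
#include <stdio.h>

struct err_map { int status; int err; };

/* Scan until the status matches or the {-1, -EIO} sentinel is hit. */
static const struct err_map errtbl[] = {
        {     0,  0          },
        { 10006, -EREMOTEIO  },  /* server fault, mapped to -EREMOTEIO here */
        { 10003, -EBADMSG    },  /* placeholder entry                       */
        {    -1, -EIO        },  /* sentinel: anything unknown becomes -EIO */
};

static int stat_to_errno(int status)
{
        int i;

        for (i = 0; errtbl[i].status != -1; i++)
                if (errtbl[i].status == status)
                        return errtbl[i].err;
        return errtbl[i].err;    /* sentinel value */
}

int main(void)
{
        printf("%d %d\n", stat_to_errno(10006), stat_to_errno(12345));
        return 0;
}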
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c index bac60515a4b3..d150ae0c5ecd 100644 --- a/fs/nfs/nfs3acl.c +++ b/fs/nfs/nfs3acl.c | |||
@@ -1,4 +1,5 @@ | |||
1 | #include <linux/fs.h> | 1 | #include <linux/fs.h> |
2 | #include <linux/gfp.h> | ||
2 | #include <linux/nfs.h> | 3 | #include <linux/nfs.h> |
3 | #include <linux/nfs3.h> | 4 | #include <linux/nfs3.h> |
4 | #include <linux/nfs_fs.h> | 5 | #include <linux/nfs_fs.h> |
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 3f8881d1a050..e701002694e5 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
11 | #include <linux/string.h> | 11 | #include <linux/string.h> |
12 | #include <linux/sunrpc/clnt.h> | 12 | #include <linux/sunrpc/clnt.h> |
13 | #include <linux/slab.h> | ||
13 | #include <linux/nfs.h> | 14 | #include <linux/nfs.h> |
14 | #include <linux/nfs3.h> | 15 | #include <linux/nfs3.h> |
15 | #include <linux/nfs_fs.h> | 16 | #include <linux/nfs_fs.h> |
@@ -22,14 +23,14 @@ | |||
22 | 23 | ||
23 | #define NFSDBG_FACILITY NFSDBG_PROC | 24 | #define NFSDBG_FACILITY NFSDBG_PROC |
24 | 25 | ||
25 | /* A wrapper to handle the EJUKEBOX error message */ | 26 | /* A wrapper to handle the EJUKEBOX and EKEYEXPIRED error messages */ |
26 | static int | 27 | static int |
27 | nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) | 28 | nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) |
28 | { | 29 | { |
29 | int res; | 30 | int res; |
30 | do { | 31 | do { |
31 | res = rpc_call_sync(clnt, msg, flags); | 32 | res = rpc_call_sync(clnt, msg, flags); |
32 | if (res != -EJUKEBOX) | 33 | if (res != -EJUKEBOX && res != -EKEYEXPIRED) |
33 | break; | 34 | break; |
34 | schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME); | 35 | schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME); |
35 | res = -ERESTARTSYS; | 36 | res = -ERESTARTSYS; |
@@ -42,9 +43,10 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) | |||
42 | static int | 43 | static int |
43 | nfs3_async_handle_jukebox(struct rpc_task *task, struct inode *inode) | 44 | nfs3_async_handle_jukebox(struct rpc_task *task, struct inode *inode) |
44 | { | 45 | { |
45 | if (task->tk_status != -EJUKEBOX) | 46 | if (task->tk_status != -EJUKEBOX && task->tk_status != -EKEYEXPIRED) |
46 | return 0; | 47 | return 0; |
47 | nfs_inc_stats(inode, NFSIOS_DELAY); | 48 | if (task->tk_status == -EJUKEBOX) |
49 | nfs_inc_stats(inode, NFSIOS_DELAY); | ||
48 | task->tk_status = 0; | 50 | task->tk_status = 0; |
49 | rpc_restart_call(task); | 51 | rpc_restart_call(task); |
50 | rpc_delay(task, NFS_JUKEBOX_RETRY_TIME); | 52 | rpc_delay(task, NFS_JUKEBOX_RETRY_TIME); |
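The nfs3proc.c hunks above extend the existing EJUKEBOX handling so that -EKEYEXPIRED (an RPCSEC_GSS credential whose key has expired) is also retried after a delay rather than surfaced to the application. A rough user-space analogue of the synchronous wrapper; rpc_call() is a stand-in for rpc_call_sync(), the EJUKEBOX value is illustrative, and the retry count is bounded only for the demonstration (the kernel keeps retrying until the task is killed).

#include <errno.h>
#include <stdio.h>
#include <time.h>

#define EJUKEBOX        528     /* illustrative; kernel-internal value     */
#define RETRY_DELAY_SEC 5       /* stand-in for NFS_JUKEBOX_RETRY_TIME     */

static int calls;
static int rpc_call(void)       /* pretend the first attempt hits an expired key */
{
        return calls++ ? 0 : -EKEYEXPIRED;
}

static int rpc_wrapper(int max_tries)
{
        int res = -EINTR;       /* kernel returns -ERESTARTSYS if interrupted */

        while (max_tries--) {
                res = rpc_call();
                if (res != -EJUKEBOX && res != -EKEYEXPIRED)
                        break;                  /* real result, pass it up  */
                struct timespec ts = { RETRY_DELAY_SEC, 0 };
                nanosleep(&ts, NULL);           /* back off, then try again */
                res = -EINTR;
        }
        return res;
}

int main(void)
{
        printf("wrapper result: %d\n", rpc_wrapper(3));
        return 0;
}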
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 5fe5492fbd29..56a86f6ac8b5 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c | |||
@@ -9,7 +9,6 @@ | |||
9 | #include <linux/param.h> | 9 | #include <linux/param.h> |
10 | #include <linux/time.h> | 10 | #include <linux/time.h> |
11 | #include <linux/mm.h> | 11 | #include <linux/mm.h> |
12 | #include <linux/slab.h> | ||
13 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
14 | #include <linux/string.h> | 13 | #include <linux/string.h> |
15 | #include <linux/in.h> | 14 | #include <linux/in.h> |
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 865265bdca03..a187200a7aac 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h | |||
@@ -46,6 +46,7 @@ enum nfs4_client_state { | |||
46 | NFS4CLNT_DELEGRETURN, | 46 | NFS4CLNT_DELEGRETURN, |
47 | NFS4CLNT_SESSION_RESET, | 47 | NFS4CLNT_SESSION_RESET, |
48 | NFS4CLNT_SESSION_DRAINING, | 48 | NFS4CLNT_SESSION_DRAINING, |
49 | NFS4CLNT_RECALL_SLOT, | ||
49 | }; | 50 | }; |
50 | 51 | ||
51 | /* | 52 | /* |
@@ -146,6 +147,7 @@ enum { | |||
146 | NFS_O_RDWR_STATE, /* OPEN stateid has read/write state */ | 147 | NFS_O_RDWR_STATE, /* OPEN stateid has read/write state */ |
147 | NFS_STATE_RECLAIM_REBOOT, /* OPEN stateid server rebooted */ | 148 | NFS_STATE_RECLAIM_REBOOT, /* OPEN stateid server rebooted */ |
148 | NFS_STATE_RECLAIM_NOGRACE, /* OPEN stateid needs to recover state */ | 149 | NFS_STATE_RECLAIM_NOGRACE, /* OPEN stateid needs to recover state */ |
150 | NFS_STATE_POSIX_LOCKS, /* Posix locks are supported */ | ||
149 | }; | 151 | }; |
150 | 152 | ||
151 | struct nfs4_state { | 153 | struct nfs4_state { |
@@ -277,7 +279,9 @@ extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t); | |||
277 | extern void nfs4_schedule_state_recovery(struct nfs_client *); | 279 | extern void nfs4_schedule_state_recovery(struct nfs_client *); |
278 | extern void nfs4_schedule_state_manager(struct nfs_client *); | 280 | extern void nfs4_schedule_state_manager(struct nfs_client *); |
279 | extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state); | 281 | extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state); |
282 | extern int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state); | ||
280 | extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags); | 283 | extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags); |
284 | extern void nfs41_handle_recall_slot(struct nfs_client *clp); | ||
281 | extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); | 285 | extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); |
282 | extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl); | 286 | extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl); |
283 | extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t); | 287 | extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t); |
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c index fa3408f20112..f071d12c613b 100644 --- a/fs/nfs/nfs4namespace.c +++ b/fs/nfs/nfs4namespace.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/mount.h> | 11 | #include <linux/mount.h> |
12 | #include <linux/namei.h> | 12 | #include <linux/namei.h> |
13 | #include <linux/nfs_fs.h> | 13 | #include <linux/nfs_fs.h> |
14 | #include <linux/slab.h> | ||
14 | #include <linux/string.h> | 15 | #include <linux/string.h> |
15 | #include <linux/sunrpc/clnt.h> | 16 | #include <linux/sunrpc/clnt.h> |
16 | #include <linux/vfs.h> | 17 | #include <linux/vfs.h> |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 198d51d17c13..638067007c65 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/delay.h> | 39 | #include <linux/delay.h> |
40 | #include <linux/errno.h> | 40 | #include <linux/errno.h> |
41 | #include <linux/string.h> | 41 | #include <linux/string.h> |
42 | #include <linux/slab.h> | ||
42 | #include <linux/sunrpc/clnt.h> | 43 | #include <linux/sunrpc/clnt.h> |
43 | #include <linux/nfs.h> | 44 | #include <linux/nfs.h> |
44 | #include <linux/nfs4.h> | 45 | #include <linux/nfs4.h> |
@@ -249,19 +250,15 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, | |||
249 | if (state == NULL) | 250 | if (state == NULL) |
250 | break; | 251 | break; |
251 | nfs4_state_mark_reclaim_nograce(clp, state); | 252 | nfs4_state_mark_reclaim_nograce(clp, state); |
252 | case -NFS4ERR_STALE_CLIENTID: | 253 | goto do_state_recovery; |
253 | case -NFS4ERR_STALE_STATEID: | 254 | case -NFS4ERR_STALE_STATEID: |
254 | case -NFS4ERR_EXPIRED: | 255 | if (state == NULL) |
255 | nfs4_schedule_state_recovery(clp); | ||
256 | ret = nfs4_wait_clnt_recover(clp); | ||
257 | if (ret == 0) | ||
258 | exception->retry = 1; | ||
259 | #if !defined(CONFIG_NFS_V4_1) | ||
260 | break; | ||
261 | #else /* !defined(CONFIG_NFS_V4_1) */ | ||
262 | if (!nfs4_has_session(server->nfs_client)) | ||
263 | break; | 256 | break; |
264 | /* FALLTHROUGH */ | 257 | nfs4_state_mark_reclaim_reboot(clp, state); |
258 | case -NFS4ERR_STALE_CLIENTID: | ||
259 | case -NFS4ERR_EXPIRED: | ||
260 | goto do_state_recovery; | ||
261 | #if defined(CONFIG_NFS_V4_1) | ||
265 | case -NFS4ERR_BADSESSION: | 262 | case -NFS4ERR_BADSESSION: |
266 | case -NFS4ERR_BADSLOT: | 263 | case -NFS4ERR_BADSLOT: |
267 | case -NFS4ERR_BAD_HIGH_SLOT: | 264 | case -NFS4ERR_BAD_HIGH_SLOT: |
@@ -274,7 +271,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, | |||
274 | nfs4_schedule_state_recovery(clp); | 271 | nfs4_schedule_state_recovery(clp); |
275 | exception->retry = 1; | 272 | exception->retry = 1; |
276 | break; | 273 | break; |
277 | #endif /* !defined(CONFIG_NFS_V4_1) */ | 274 | #endif /* defined(CONFIG_NFS_V4_1) */ |
278 | case -NFS4ERR_FILE_OPEN: | 275 | case -NFS4ERR_FILE_OPEN: |
279 | if (exception->timeout > HZ) { | 276 | if (exception->timeout > HZ) { |
280 | /* We have retried a decent amount, time to | 277 | /* We have retried a decent amount, time to |
@@ -285,6 +282,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, | |||
285 | } | 282 | } |
286 | case -NFS4ERR_GRACE: | 283 | case -NFS4ERR_GRACE: |
287 | case -NFS4ERR_DELAY: | 284 | case -NFS4ERR_DELAY: |
285 | case -EKEYEXPIRED: | ||
288 | ret = nfs4_delay(server->client, &exception->timeout); | 286 | ret = nfs4_delay(server->client, &exception->timeout); |
289 | if (ret != 0) | 287 | if (ret != 0) |
290 | break; | 288 | break; |
@@ -293,6 +291,12 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, | |||
293 | } | 291 | } |
294 | /* We failed to handle the error */ | 292 | /* We failed to handle the error */ |
295 | return nfs4_map_errors(ret); | 293 | return nfs4_map_errors(ret); |
294 | do_state_recovery: | ||
295 | nfs4_schedule_state_recovery(clp); | ||
296 | ret = nfs4_wait_clnt_recover(clp); | ||
297 | if (ret == 0) | ||
298 | exception->retry = 1; | ||
299 | return ret; | ||
296 | } | 300 | } |
297 | 301 | ||
298 | 302 | ||
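The reshaped nfs4_handle_exception() above funnels the stateid and clientid errors into a single do_state_recovery tail and treats -EKEYEXPIRED like -NFS4ERR_DELAY; the contract with its callers is unchanged, namely that they reissue the RPC while exception.retry remains set. A small user-space sketch of that contract, with made-up error codes and trivial delay()/recover() stand-ins:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct nfs4_exception { bool retry; };

enum { ERR_DELAY = 1, ERR_STALE_CLIENTID = 2 };

static void delay(void)   { /* a real client would sleep with backoff here */ }
static int  recover(void) { return 0; /* pretend state recovery succeeded  */ }

/* Decide whether an error is retried after a delay, retried after
 * state recovery, or returned to the caller. */
static int handle_exception(int err, struct nfs4_exception *exc)
{
        exc->retry = false;
        switch (err) {
        case 0:
                return 0;
        case ERR_DELAY:                  /* transient: wait and retry      */
                delay();
                exc->retry = true;
                return 0;
        case ERR_STALE_CLIENTID:         /* recover state before retrying  */
                if (recover() == 0)
                        exc->retry = true;
                return 0;
        }
        return -EIO;                     /* unhandled: give up             */
}

int main(void)
{
        struct nfs4_exception exc = { false };
        int status;

        do {
                /* a real caller issues the RPC here and feeds its status in;
                 * we pretend the first call fails and the retry succeeds */
                int rpc_err = exc.retry ? 0 : ERR_STALE_CLIENTID;
                status = handle_exception(rpc_err, &exc);
        } while (exc.retry);
        printf("final status: %d\n", status);
        return 0;
}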
@@ -416,7 +420,8 @@ static void nfs41_sequence_done(struct nfs_client *clp, | |||
416 | clp->cl_last_renewal = timestamp; | 420 | clp->cl_last_renewal = timestamp; |
417 | spin_unlock(&clp->cl_lock); | 421 | spin_unlock(&clp->cl_lock); |
418 | /* Check sequence flags */ | 422 | /* Check sequence flags */ |
419 | nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); | 423 | if (atomic_read(&clp->cl_count) > 1) |
424 | nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); | ||
420 | } | 425 | } |
421 | out: | 426 | out: |
422 | /* The session may be reset by one of the error handlers. */ | 427 | /* The session may be reset by one of the error handlers. */ |
@@ -722,8 +727,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path, | |||
722 | p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid); | 727 | p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid); |
723 | if (p->o_arg.seqid == NULL) | 728 | if (p->o_arg.seqid == NULL) |
724 | goto err_free; | 729 | goto err_free; |
725 | p->path.mnt = mntget(path->mnt); | 730 | path_get(path); |
726 | p->path.dentry = dget(path->dentry); | 731 | p->path = *path; |
727 | p->dir = parent; | 732 | p->dir = parent; |
728 | p->owner = sp; | 733 | p->owner = sp; |
729 | atomic_inc(&sp->so_count); | 734 | atomic_inc(&sp->so_count); |
@@ -1161,7 +1166,7 @@ static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state | |||
1161 | int err; | 1166 | int err; |
1162 | do { | 1167 | do { |
1163 | err = _nfs4_do_open_reclaim(ctx, state); | 1168 | err = _nfs4_do_open_reclaim(ctx, state); |
1164 | if (err != -NFS4ERR_DELAY) | 1169 | if (err != -NFS4ERR_DELAY && err != -EKEYEXPIRED) |
1165 | break; | 1170 | break; |
1166 | nfs4_handle_exception(server, err, &exception); | 1171 | nfs4_handle_exception(server, err, &exception); |
1167 | } while (exception.retry); | 1172 | } while (exception.retry); |
@@ -1518,6 +1523,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data) | |||
1518 | nfs_post_op_update_inode(dir, o_res->dir_attr); | 1523 | nfs_post_op_update_inode(dir, o_res->dir_attr); |
1519 | } else | 1524 | } else |
1520 | nfs_refresh_inode(dir, o_res->dir_attr); | 1525 | nfs_refresh_inode(dir, o_res->dir_attr); |
1526 | if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) | ||
1527 | server->caps &= ~NFS_CAP_POSIX_LOCK; | ||
1521 | if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { | 1528 | if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { |
1522 | status = _nfs4_proc_open_confirm(data); | 1529 | status = _nfs4_proc_open_confirm(data); |
1523 | if (status != 0) | 1530 | if (status != 0) |
@@ -1580,6 +1587,7 @@ static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state | |||
1580 | goto out; | 1587 | goto out; |
1581 | case -NFS4ERR_GRACE: | 1588 | case -NFS4ERR_GRACE: |
1582 | case -NFS4ERR_DELAY: | 1589 | case -NFS4ERR_DELAY: |
1590 | case -EKEYEXPIRED: | ||
1583 | nfs4_handle_exception(server, err, &exception); | 1591 | nfs4_handle_exception(server, err, &exception); |
1584 | err = 0; | 1592 | err = 0; |
1585 | } | 1593 | } |
@@ -1658,6 +1666,8 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in | |||
1658 | status = PTR_ERR(state); | 1666 | status = PTR_ERR(state); |
1659 | if (IS_ERR(state)) | 1667 | if (IS_ERR(state)) |
1660 | goto err_opendata_put; | 1668 | goto err_opendata_put; |
1669 | if (server->caps & NFS_CAP_POSIX_LOCK) | ||
1670 | set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); | ||
1661 | nfs4_opendata_put(opendata); | 1671 | nfs4_opendata_put(opendata); |
1662 | nfs4_put_state_owner(sp); | 1672 | nfs4_put_state_owner(sp); |
1663 | *res = state; | 1673 | *res = state; |
@@ -1940,8 +1950,8 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait) | |||
1940 | calldata->res.seqid = calldata->arg.seqid; | 1950 | calldata->res.seqid = calldata->arg.seqid; |
1941 | calldata->res.server = server; | 1951 | calldata->res.server = server; |
1942 | calldata->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE; | 1952 | calldata->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE; |
1943 | calldata->path.mnt = mntget(path->mnt); | 1953 | path_get(path); |
1944 | calldata->path.dentry = dget(path->dentry); | 1954 | calldata->path = *path; |
1945 | 1955 | ||
1946 | msg.rpc_argp = &calldata->arg, | 1956 | msg.rpc_argp = &calldata->arg, |
1947 | msg.rpc_resp = &calldata->res, | 1957 | msg.rpc_resp = &calldata->res, |
@@ -2060,8 +2070,7 @@ nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, st | |||
2060 | case -EDQUOT: | 2070 | case -EDQUOT: |
2061 | case -ENOSPC: | 2071 | case -ENOSPC: |
2062 | case -EROFS: | 2072 | case -EROFS: |
2063 | lookup_instantiate_filp(nd, (struct dentry *)state, NULL); | 2073 | return PTR_ERR(state); |
2064 | return 1; | ||
2065 | default: | 2074 | default: |
2066 | goto out_drop; | 2075 | goto out_drop; |
2067 | } | 2076 | } |
@@ -3141,10 +3150,19 @@ static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_messa | |||
3141 | * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special | 3150 | * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special |
3142 | * standalone procedure for queueing an asynchronous RENEW. | 3151 | * standalone procedure for queueing an asynchronous RENEW. |
3143 | */ | 3152 | */ |
3153 | static void nfs4_renew_release(void *data) | ||
3154 | { | ||
3155 | struct nfs_client *clp = data; | ||
3156 | |||
3157 | if (atomic_read(&clp->cl_count) > 1) | ||
3158 | nfs4_schedule_state_renewal(clp); | ||
3159 | nfs_put_client(clp); | ||
3160 | } | ||
3161 | |||
3144 | static void nfs4_renew_done(struct rpc_task *task, void *data) | 3162 | static void nfs4_renew_done(struct rpc_task *task, void *data) |
3145 | { | 3163 | { |
3146 | struct nfs_client *clp = (struct nfs_client *)task->tk_msg.rpc_argp; | 3164 | struct nfs_client *clp = data; |
3147 | unsigned long timestamp = (unsigned long)data; | 3165 | unsigned long timestamp = task->tk_start; |
3148 | 3166 | ||
3149 | if (task->tk_status < 0) { | 3167 | if (task->tk_status < 0) { |
3150 | /* Unless we're shutting down, schedule state recovery! */ | 3168 | /* Unless we're shutting down, schedule state recovery! */ |
@@ -3160,6 +3178,7 @@ static void nfs4_renew_done(struct rpc_task *task, void *data) | |||
3160 | 3178 | ||
3161 | static const struct rpc_call_ops nfs4_renew_ops = { | 3179 | static const struct rpc_call_ops nfs4_renew_ops = { |
3162 | .rpc_call_done = nfs4_renew_done, | 3180 | .rpc_call_done = nfs4_renew_done, |
3181 | .rpc_release = nfs4_renew_release, | ||
3163 | }; | 3182 | }; |
3164 | 3183 | ||
3165 | int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred) | 3184 | int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred) |
@@ -3170,8 +3189,10 @@ int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred) | |||
3170 | .rpc_cred = cred, | 3189 | .rpc_cred = cred, |
3171 | }; | 3190 | }; |
3172 | 3191 | ||
3192 | if (!atomic_inc_not_zero(&clp->cl_count)) | ||
3193 | return -EIO; | ||
3173 | return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT, | 3194 | return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT, |
3174 | &nfs4_renew_ops, (void *)jiffies); | 3195 | &nfs4_renew_ops, clp); |
3175 | } | 3196 | } |
3176 | 3197 | ||
3177 | int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) | 3198 | int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) |
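The asynchronous RENEW path now pins the nfs_client with atomic_inc_not_zero() before queueing the RPC and drops that reference from the new nfs4_renew_release() callback, so the client cannot be freed while the RENEW is still in flight (the same pattern is applied to the v4.1 SEQUENCE ping further down). A user-space C11 sketch of the take-a-reference-only-if-still-live idiom; the names are stand-ins, not the kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct client { atomic_int count; };

/* Equivalent of atomic_inc_not_zero(): take a reference only if the
 * object has not already dropped to zero (i.e. is being torn down). */
static bool get_client_if_live(struct client *clp)
{
        int old = atomic_load(&clp->count);

        while (old != 0)
                if (atomic_compare_exchange_weak(&clp->count, &old, old + 1))
                        return true;
        return false;
}

static void put_client(struct client *clp)
{
        if (atomic_fetch_sub(&clp->count, 1) == 1)
                printf("last reference dropped: free the client\n");
}

int main(void)
{
        struct client clp = { .count = 1 };

        if (get_client_if_live(&clp)) {  /* before queueing the async RPC */
                /* ... the RENEW runs asynchronously ... */
                put_client(&clp);        /* from the rpc_release callback */
        }
        put_client(&clp);                /* the original owner's reference */
        return 0;
}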
@@ -3422,15 +3443,14 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, | |||
3422 | if (state == NULL) | 3443 | if (state == NULL) |
3423 | break; | 3444 | break; |
3424 | nfs4_state_mark_reclaim_nograce(clp, state); | 3445 | nfs4_state_mark_reclaim_nograce(clp, state); |
3425 | case -NFS4ERR_STALE_CLIENTID: | 3446 | goto do_state_recovery; |
3426 | case -NFS4ERR_STALE_STATEID: | 3447 | case -NFS4ERR_STALE_STATEID: |
3448 | if (state == NULL) | ||
3449 | break; | ||
3450 | nfs4_state_mark_reclaim_reboot(clp, state); | ||
3451 | case -NFS4ERR_STALE_CLIENTID: | ||
3427 | case -NFS4ERR_EXPIRED: | 3452 | case -NFS4ERR_EXPIRED: |
3428 | rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); | 3453 | goto do_state_recovery; |
3429 | nfs4_schedule_state_recovery(clp); | ||
3430 | if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) | ||
3431 | rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); | ||
3432 | task->tk_status = 0; | ||
3433 | return -EAGAIN; | ||
3434 | #if defined(CONFIG_NFS_V4_1) | 3454 | #if defined(CONFIG_NFS_V4_1) |
3435 | case -NFS4ERR_BADSESSION: | 3455 | case -NFS4ERR_BADSESSION: |
3436 | case -NFS4ERR_BADSLOT: | 3456 | case -NFS4ERR_BADSLOT: |
@@ -3449,6 +3469,7 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, | |||
3449 | if (server) | 3469 | if (server) |
3450 | nfs_inc_server_stats(server, NFSIOS_DELAY); | 3470 | nfs_inc_server_stats(server, NFSIOS_DELAY); |
3451 | case -NFS4ERR_GRACE: | 3471 | case -NFS4ERR_GRACE: |
3472 | case -EKEYEXPIRED: | ||
3452 | rpc_delay(task, NFS4_POLL_RETRY_MAX); | 3473 | rpc_delay(task, NFS4_POLL_RETRY_MAX); |
3453 | task->tk_status = 0; | 3474 | task->tk_status = 0; |
3454 | return -EAGAIN; | 3475 | return -EAGAIN; |
@@ -3458,6 +3479,13 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, | |||
3458 | } | 3479 | } |
3459 | task->tk_status = nfs4_map_errors(task->tk_status); | 3480 | task->tk_status = nfs4_map_errors(task->tk_status); |
3460 | return 0; | 3481 | return 0; |
3482 | do_state_recovery: | ||
3483 | rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); | ||
3484 | nfs4_schedule_state_recovery(clp); | ||
3485 | if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) | ||
3486 | rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); | ||
3487 | task->tk_status = 0; | ||
3488 | return -EAGAIN; | ||
3461 | } | 3489 | } |
3462 | 3490 | ||
3463 | static int | 3491 | static int |
@@ -3554,6 +3582,7 @@ int nfs4_proc_setclientid_confirm(struct nfs_client *clp, struct rpc_cred *cred) | |||
3554 | case -NFS4ERR_RESOURCE: | 3582 | case -NFS4ERR_RESOURCE: |
3555 | /* The IBM lawyers misread another document! */ | 3583 | /* The IBM lawyers misread another document! */ |
3556 | case -NFS4ERR_DELAY: | 3584 | case -NFS4ERR_DELAY: |
3585 | case -EKEYEXPIRED: | ||
3557 | err = nfs4_delay(clp->cl_rpcclient, &timeout); | 3586 | err = nfs4_delay(clp->cl_rpcclient, &timeout); |
3558 | } | 3587 | } |
3559 | } while (err == 0); | 3588 | } while (err == 0); |
@@ -4088,6 +4117,28 @@ static const struct rpc_call_ops nfs4_recover_lock_ops = { | |||
4088 | .rpc_release = nfs4_lock_release, | 4117 | .rpc_release = nfs4_lock_release, |
4089 | }; | 4118 | }; |
4090 | 4119 | ||
4120 | static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) | ||
4121 | { | ||
4122 | struct nfs_client *clp = server->nfs_client; | ||
4123 | struct nfs4_state *state = lsp->ls_state; | ||
4124 | |||
4125 | switch (error) { | ||
4126 | case -NFS4ERR_ADMIN_REVOKED: | ||
4127 | case -NFS4ERR_BAD_STATEID: | ||
4128 | case -NFS4ERR_EXPIRED: | ||
4129 | if (new_lock_owner != 0 || | ||
4130 | (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) | ||
4131 | nfs4_state_mark_reclaim_nograce(clp, state); | ||
4132 | lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; | ||
4133 | break; | ||
4134 | case -NFS4ERR_STALE_STATEID: | ||
4135 | if (new_lock_owner != 0 || | ||
4136 | (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) | ||
4137 | nfs4_state_mark_reclaim_reboot(clp, state); | ||
4138 | lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; | ||
4139 | }; | ||
4140 | } | ||
4141 | |||
4091 | static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) | 4142 | static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) |
4092 | { | 4143 | { |
4093 | struct nfs4_lockdata *data; | 4144 | struct nfs4_lockdata *data; |
@@ -4126,6 +4177,9 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f | |||
4126 | ret = nfs4_wait_for_completion_rpc_task(task); | 4177 | ret = nfs4_wait_for_completion_rpc_task(task); |
4127 | if (ret == 0) { | 4178 | if (ret == 0) { |
4128 | ret = data->rpc_status; | 4179 | ret = data->rpc_status; |
4180 | if (ret) | ||
4181 | nfs4_handle_setlk_error(data->server, data->lsp, | ||
4182 | data->arg.new_lock_owner, ret); | ||
4129 | } else | 4183 | } else |
4130 | data->cancelled = 1; | 4184 | data->cancelled = 1; |
4131 | rpc_put_task(task); | 4185 | rpc_put_task(task); |
@@ -4144,7 +4198,7 @@ static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request | |||
4144 | if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) | 4198 | if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) |
4145 | return 0; | 4199 | return 0; |
4146 | err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); | 4200 | err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); |
4147 | if (err != -NFS4ERR_DELAY) | 4201 | if (err != -NFS4ERR_DELAY && err != -EKEYEXPIRED) |
4148 | break; | 4202 | break; |
4149 | nfs4_handle_exception(server, err, &exception); | 4203 | nfs4_handle_exception(server, err, &exception); |
4150 | } while (exception.retry); | 4204 | } while (exception.retry); |
@@ -4169,6 +4223,7 @@ static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request | |||
4169 | goto out; | 4223 | goto out; |
4170 | case -NFS4ERR_GRACE: | 4224 | case -NFS4ERR_GRACE: |
4171 | case -NFS4ERR_DELAY: | 4225 | case -NFS4ERR_DELAY: |
4226 | case -EKEYEXPIRED: | ||
4172 | nfs4_handle_exception(server, err, &exception); | 4227 | nfs4_handle_exception(server, err, &exception); |
4173 | err = 0; | 4228 | err = 0; |
4174 | } | 4229 | } |
@@ -4181,8 +4236,11 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock | |||
4181 | { | 4236 | { |
4182 | struct nfs_inode *nfsi = NFS_I(state->inode); | 4237 | struct nfs_inode *nfsi = NFS_I(state->inode); |
4183 | unsigned char fl_flags = request->fl_flags; | 4238 | unsigned char fl_flags = request->fl_flags; |
4184 | int status; | 4239 | int status = -ENOLCK; |
4185 | 4240 | ||
4241 | if ((fl_flags & FL_POSIX) && | ||
4242 | !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) | ||
4243 | goto out; | ||
4186 | /* Is this a delegated open? */ | 4244 | /* Is this a delegated open? */ |
4187 | status = nfs4_set_lock_state(state, request); | 4245 | status = nfs4_set_lock_state(state, request); |
4188 | if (status != 0) | 4246 | if (status != 0) |
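With the two open-path hunks above, a server that omits NFS4_OPEN_RESULT_LOCKTYPE_POSIX from its OPEN reply causes the client to clear NFS_CAP_POSIX_LOCK, and later POSIX byte-range lock requests against such a server now fail up front with -ENOLCK instead of being sent over the wire. A compact user-space sketch of that gating; the flag and capability values are illustrative.

#include <errno.h>
#include <stdio.h>

#define OPEN_RESULT_LOCKTYPE_POSIX  0x04   /* illustrative flag value     */
#define CAP_POSIX_LOCK              0x01   /* illustrative capability bit */

static unsigned server_caps = CAP_POSIX_LOCK;  /* assume support initially */

static void handle_open_reply(unsigned rflags)
{
        if ((rflags & OPEN_RESULT_LOCKTYPE_POSIX) == 0)
                server_caps &= ~CAP_POSIX_LOCK;    /* server opted out      */
}

static int set_posix_lock(void)
{
        if (!(server_caps & CAP_POSIX_LOCK))
                return -ENOLCK;                    /* refuse before any RPC */
        return 0;                                  /* would issue LOCK here */
}

int main(void)
{
        handle_open_reply(0);                      /* reply without the flag */
        printf("lock result: %d\n", set_posix_lock());
        return 0;
}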
@@ -4317,6 +4375,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl) | |||
4317 | err = 0; | 4375 | err = 0; |
4318 | goto out; | 4376 | goto out; |
4319 | case -NFS4ERR_DELAY: | 4377 | case -NFS4ERR_DELAY: |
4378 | case -EKEYEXPIRED: | ||
4320 | break; | 4379 | break; |
4321 | } | 4380 | } |
4322 | err = nfs4_handle_exception(server, err, &exception); | 4381 | err = nfs4_handle_exception(server, err, &exception); |
@@ -4462,7 +4521,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) | |||
4462 | 4521 | ||
4463 | status = rpc_call_sync(clp->cl_rpcclient, &msg, 0); | 4522 | status = rpc_call_sync(clp->cl_rpcclient, &msg, 0); |
4464 | 4523 | ||
4465 | if (status != NFS4ERR_CLID_INUSE) | 4524 | if (status != -NFS4ERR_CLID_INUSE) |
4466 | break; | 4525 | break; |
4467 | 4526 | ||
4468 | if (signalled()) | 4527 | if (signalled()) |
@@ -4516,6 +4575,7 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) | |||
4516 | switch (task->tk_status) { | 4575 | switch (task->tk_status) { |
4517 | case -NFS4ERR_DELAY: | 4576 | case -NFS4ERR_DELAY: |
4518 | case -NFS4ERR_GRACE: | 4577 | case -NFS4ERR_GRACE: |
4578 | case -EKEYEXPIRED: | ||
4519 | dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); | 4579 | dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); |
4520 | rpc_delay(task, NFS4_POLL_RETRY_MIN); | 4580 | rpc_delay(task, NFS4_POLL_RETRY_MIN); |
4521 | task->tk_status = 0; | 4581 | task->tk_status = 0; |
@@ -4573,26 +4633,32 @@ int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) | |||
4573 | /* | 4633 | /* |
4574 | * Reset a slot table | 4634 | * Reset a slot table |
4575 | */ | 4635 | */ |
4576 | static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, int max_slots, | 4636 | static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs, |
4577 | int old_max_slots, int ivalue) | 4637 | int ivalue) |
4578 | { | 4638 | { |
4639 | struct nfs4_slot *new = NULL; | ||
4579 | int i; | 4640 | int i; |
4580 | int ret = 0; | 4641 | int ret = 0; |
4581 | 4642 | ||
4582 | dprintk("--> %s: max_reqs=%u, tbl %p\n", __func__, max_slots, tbl); | 4643 | dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__, |
4644 | max_reqs, tbl->max_slots); | ||
4583 | 4645 | ||
4584 | /* | 4646 | /* Does the newly negotiated max_reqs match the existing slot table? */ |
4585 | * Until we have dynamic slot table adjustment, insist | 4647 | if (max_reqs != tbl->max_slots) { |
4586 | * upon the same slot table size | 4648 | ret = -ENOMEM; |
4587 | */ | 4649 | new = kmalloc(max_reqs * sizeof(struct nfs4_slot), |
4588 | if (max_slots != old_max_slots) { | 4650 | GFP_KERNEL); |
4589 | dprintk("%s reset slot table does't match old\n", | 4651 | if (!new) |
4590 | __func__); | 4652 | goto out; |
4591 | ret = -EINVAL; /*XXX NFS4ERR_REQ_TOO_BIG ? */ | 4653 | ret = 0; |
4592 | goto out; | 4654 | kfree(tbl->slots); |
4593 | } | 4655 | } |
4594 | spin_lock(&tbl->slot_tbl_lock); | 4656 | spin_lock(&tbl->slot_tbl_lock); |
4595 | for (i = 0; i < max_slots; ++i) | 4657 | if (new) { |
4658 | tbl->slots = new; | ||
4659 | tbl->max_slots = max_reqs; | ||
4660 | } | ||
4661 | for (i = 0; i < tbl->max_slots; ++i) | ||
4596 | tbl->slots[i].seq_nr = ivalue; | 4662 | tbl->slots[i].seq_nr = ivalue; |
4597 | spin_unlock(&tbl->slot_tbl_lock); | 4663 | spin_unlock(&tbl->slot_tbl_lock); |
4598 | dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, | 4664 | dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, |
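nfs4_reset_slot_table() above no longer insists that the newly negotiated max_reqs equal the existing table size: when they differ it allocates a replacement slot array and installs it under slot_tbl_lock, then reinitialises every sequence number. A user-space analogue of that resize-and-swap pattern, slightly reordered so the old array is freed after the swap; struct slot and the pthread mutex stand in for the kernel types.

#include <pthread.h>
#include <stdlib.h>

struct slot { unsigned seq_nr; };

struct slot_table {
        pthread_mutex_t lock;
        struct slot    *slots;
        unsigned        max_slots;
};

static int reset_slot_table(struct slot_table *tbl, unsigned max_reqs,
                            unsigned ivalue)
{
        struct slot *new = NULL, *old = NULL;
        unsigned i;

        if (max_reqs != tbl->max_slots) {
                new = calloc(max_reqs, sizeof(*new));   /* allocate unlocked */
                if (!new)
                        return -1;
        }
        pthread_mutex_lock(&tbl->lock);
        if (new) {
                old = tbl->slots;                       /* swap in new table */
                tbl->slots = new;
                tbl->max_slots = max_reqs;
        }
        for (i = 0; i < tbl->max_slots; i++)
                tbl->slots[i].seq_nr = ivalue;          /* reset sequencing  */
        pthread_mutex_unlock(&tbl->lock);
        free(old);                                      /* safe off the lock */
        return 0;
}

int main(void)
{
        struct slot_table tbl = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };

        return reset_slot_table(&tbl, 16, 1);   /* grow from 0 to 16 slots */
}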
@@ -4610,16 +4676,12 @@ static int nfs4_reset_slot_tables(struct nfs4_session *session) | |||
4610 | int status; | 4676 | int status; |
4611 | 4677 | ||
4612 | status = nfs4_reset_slot_table(&session->fc_slot_table, | 4678 | status = nfs4_reset_slot_table(&session->fc_slot_table, |
4613 | session->fc_attrs.max_reqs, | 4679 | session->fc_attrs.max_reqs, 1); |
4614 | session->fc_slot_table.max_slots, | ||
4615 | 1); | ||
4616 | if (status) | 4680 | if (status) |
4617 | return status; | 4681 | return status; |
4618 | 4682 | ||
4619 | status = nfs4_reset_slot_table(&session->bc_slot_table, | 4683 | status = nfs4_reset_slot_table(&session->bc_slot_table, |
4620 | session->bc_attrs.max_reqs, | 4684 | session->bc_attrs.max_reqs, 0); |
4621 | session->bc_slot_table.max_slots, | ||
4622 | 0); | ||
4623 | return status; | 4685 | return status; |
4624 | } | 4686 | } |
4625 | 4687 | ||
@@ -4760,16 +4822,14 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args) | |||
4760 | args->fc_attrs.headerpadsz = 0; | 4822 | args->fc_attrs.headerpadsz = 0; |
4761 | args->fc_attrs.max_rqst_sz = mxrqst_sz; | 4823 | args->fc_attrs.max_rqst_sz = mxrqst_sz; |
4762 | args->fc_attrs.max_resp_sz = mxresp_sz; | 4824 | args->fc_attrs.max_resp_sz = mxresp_sz; |
4763 | args->fc_attrs.max_resp_sz_cached = mxresp_sz; | ||
4764 | args->fc_attrs.max_ops = NFS4_MAX_OPS; | 4825 | args->fc_attrs.max_ops = NFS4_MAX_OPS; |
4765 | args->fc_attrs.max_reqs = session->clp->cl_rpcclient->cl_xprt->max_reqs; | 4826 | args->fc_attrs.max_reqs = session->clp->cl_rpcclient->cl_xprt->max_reqs; |
4766 | 4827 | ||
4767 | dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " | 4828 | dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " |
4768 | "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", | 4829 | "max_ops=%u max_reqs=%u\n", |
4769 | __func__, | 4830 | __func__, |
4770 | args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, | 4831 | args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, |
4771 | args->fc_attrs.max_resp_sz_cached, args->fc_attrs.max_ops, | 4832 | args->fc_attrs.max_ops, args->fc_attrs.max_reqs); |
4772 | args->fc_attrs.max_reqs); | ||
4773 | 4833 | ||
4774 | /* Back channel attributes */ | 4834 | /* Back channel attributes */ |
4775 | args->bc_attrs.headerpadsz = 0; | 4835 | args->bc_attrs.headerpadsz = 0; |
@@ -4978,7 +5038,16 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) | |||
4978 | &res, args.sa_cache_this, 1); | 5038 | &res, args.sa_cache_this, 1); |
4979 | } | 5039 | } |
4980 | 5040 | ||
4981 | void nfs41_sequence_call_done(struct rpc_task *task, void *data) | 5041 | static void nfs41_sequence_release(void *data) |
5042 | { | ||
5043 | struct nfs_client *clp = (struct nfs_client *)data; | ||
5044 | |||
5045 | if (atomic_read(&clp->cl_count) > 1) | ||
5046 | nfs4_schedule_state_renewal(clp); | ||
5047 | nfs_put_client(clp); | ||
5048 | } | ||
5049 | |||
5050 | static void nfs41_sequence_call_done(struct rpc_task *task, void *data) | ||
4982 | { | 5051 | { |
4983 | struct nfs_client *clp = (struct nfs_client *)data; | 5052 | struct nfs_client *clp = (struct nfs_client *)data; |
4984 | 5053 | ||
@@ -4986,6 +5055,8 @@ void nfs41_sequence_call_done(struct rpc_task *task, void *data) | |||
4986 | 5055 | ||
4987 | if (task->tk_status < 0) { | 5056 | if (task->tk_status < 0) { |
4988 | dprintk("%s ERROR %d\n", __func__, task->tk_status); | 5057 | dprintk("%s ERROR %d\n", __func__, task->tk_status); |
5058 | if (atomic_read(&clp->cl_count) == 1) | ||
5059 | goto out; | ||
4989 | 5060 | ||
4990 | if (_nfs4_async_handle_error(task, NULL, clp, NULL) | 5061 | if (_nfs4_async_handle_error(task, NULL, clp, NULL) |
4991 | == -EAGAIN) { | 5062 | == -EAGAIN) { |
@@ -4994,7 +5065,7 @@ void nfs41_sequence_call_done(struct rpc_task *task, void *data) | |||
4994 | } | 5065 | } |
4995 | } | 5066 | } |
4996 | dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); | 5067 | dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); |
4997 | 5068 | out: | |
4998 | kfree(task->tk_msg.rpc_argp); | 5069 | kfree(task->tk_msg.rpc_argp); |
4999 | kfree(task->tk_msg.rpc_resp); | 5070 | kfree(task->tk_msg.rpc_resp); |
5000 | 5071 | ||
@@ -5019,6 +5090,7 @@ static void nfs41_sequence_prepare(struct rpc_task *task, void *data) | |||
5019 | static const struct rpc_call_ops nfs41_sequence_ops = { | 5090 | static const struct rpc_call_ops nfs41_sequence_ops = { |
5020 | .rpc_call_done = nfs41_sequence_call_done, | 5091 | .rpc_call_done = nfs41_sequence_call_done, |
5021 | .rpc_call_prepare = nfs41_sequence_prepare, | 5092 | .rpc_call_prepare = nfs41_sequence_prepare, |
5093 | .rpc_release = nfs41_sequence_release, | ||
5022 | }; | 5094 | }; |
5023 | 5095 | ||
5024 | static int nfs41_proc_async_sequence(struct nfs_client *clp, | 5096 | static int nfs41_proc_async_sequence(struct nfs_client *clp, |
@@ -5031,12 +5103,14 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, | |||
5031 | .rpc_cred = cred, | 5103 | .rpc_cred = cred, |
5032 | }; | 5104 | }; |
5033 | 5105 | ||
5106 | if (!atomic_inc_not_zero(&clp->cl_count)) | ||
5107 | return -EIO; | ||
5034 | args = kzalloc(sizeof(*args), GFP_KERNEL); | 5108 | args = kzalloc(sizeof(*args), GFP_KERNEL); |
5035 | if (!args) | ||
5036 | return -ENOMEM; | ||
5037 | res = kzalloc(sizeof(*res), GFP_KERNEL); | 5109 | res = kzalloc(sizeof(*res), GFP_KERNEL); |
5038 | if (!res) { | 5110 | if (!args || !res) { |
5039 | kfree(args); | 5111 | kfree(args); |
5112 | kfree(res); | ||
5113 | nfs_put_client(clp); | ||
5040 | return -ENOMEM; | 5114 | return -ENOMEM; |
5041 | } | 5115 | } |
5042 | res->sr_slotid = NFS4_MAX_SLOT_TABLE; | 5116 | res->sr_slotid = NFS4_MAX_SLOT_TABLE; |
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c index 0156c01c212c..d87f10327b72 100644 --- a/fs/nfs/nfs4renewd.c +++ b/fs/nfs/nfs4renewd.c | |||
@@ -36,11 +36,6 @@ | |||
36 | * as an rpc_task, not a real kernel thread, so it always runs in rpciod's | 36 | * as an rpc_task, not a real kernel thread, so it always runs in rpciod's |
37 | * context. There is one renewd per nfs_server. | 37 | * context. There is one renewd per nfs_server. |
38 | * | 38 | * |
39 | * TODO: If the send queue gets backlogged (e.g., if the server goes down), | ||
40 | * we will keep filling the queue with periodic RENEW requests. We need a | ||
41 | * mechanism for ensuring that if renewd successfully sends off a request, | ||
42 | * then it only wakes up when the request is finished. Maybe use the | ||
43 | * child task framework of the RPC layer? | ||
44 | */ | 39 | */ |
45 | 40 | ||
46 | #include <linux/mm.h> | 41 | #include <linux/mm.h> |
@@ -63,7 +58,7 @@ nfs4_renew_state(struct work_struct *work) | |||
63 | struct nfs_client *clp = | 58 | struct nfs_client *clp = |
64 | container_of(work, struct nfs_client, cl_renewd.work); | 59 | container_of(work, struct nfs_client, cl_renewd.work); |
65 | struct rpc_cred *cred; | 60 | struct rpc_cred *cred; |
66 | long lease, timeout; | 61 | long lease; |
67 | unsigned long last, now; | 62 | unsigned long last, now; |
68 | 63 | ||
69 | ops = nfs4_state_renewal_ops[clp->cl_minorversion]; | 64 | ops = nfs4_state_renewal_ops[clp->cl_minorversion]; |
@@ -75,7 +70,6 @@ nfs4_renew_state(struct work_struct *work) | |||
75 | lease = clp->cl_lease_time; | 70 | lease = clp->cl_lease_time; |
76 | last = clp->cl_last_renewal; | 71 | last = clp->cl_last_renewal; |
77 | now = jiffies; | 72 | now = jiffies; |
78 | timeout = (2 * lease) / 3 + (long)last - (long)now; | ||
79 | /* Are we close to a lease timeout? */ | 73 | /* Are we close to a lease timeout? */ |
80 | if (time_after(now, last + lease/3)) { | 74 | if (time_after(now, last + lease/3)) { |
81 | cred = ops->get_state_renewal_cred_locked(clp); | 75 | cred = ops->get_state_renewal_cred_locked(clp); |
@@ -90,19 +84,15 @@ nfs4_renew_state(struct work_struct *work) | |||
90 | /* Queue an asynchronous RENEW. */ | 84 | /* Queue an asynchronous RENEW. */ |
91 | ops->sched_state_renewal(clp, cred); | 85 | ops->sched_state_renewal(clp, cred); |
92 | put_rpccred(cred); | 86 | put_rpccred(cred); |
87 | goto out_exp; | ||
93 | } | 88 | } |
94 | timeout = (2 * lease) / 3; | 89 | } else { |
95 | spin_lock(&clp->cl_lock); | ||
96 | } else | ||
97 | dprintk("%s: failed to call renewd. Reason: lease not expired \n", | 90 | dprintk("%s: failed to call renewd. Reason: lease not expired \n", |
98 | __func__); | 91 | __func__); |
99 | if (timeout < 5 * HZ) /* safeguard */ | 92 | spin_unlock(&clp->cl_lock); |
100 | timeout = 5 * HZ; | 93 | } |
101 | dprintk("%s: requeueing work. Lease period = %ld\n", | 94 | nfs4_schedule_state_renewal(clp); |
102 | __func__, (timeout + HZ - 1) / HZ); | 95 | out_exp: |
103 | cancel_delayed_work(&clp->cl_renewd); | ||
104 | schedule_delayed_work(&clp->cl_renewd, timeout); | ||
105 | spin_unlock(&clp->cl_lock); | ||
106 | nfs_expire_unreferenced_delegations(clp); | 96 | nfs_expire_unreferenced_delegations(clp); |
107 | out: | 97 | out: |
108 | dprintk("%s: done\n", __func__); | 98 | dprintk("%s: done\n", __func__); |
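After this cleanup nfs4_renew_state() no longer computes and requeues its own timeout: it either queues an asynchronous RENEW once a third of the lease period has elapsed since the last renewal, or does nothing, and in both cases rescheduling is left to nfs4_schedule_state_renewal(). A tiny sketch of that elapsed-lease check, using plain seconds instead of jiffies and an assumed 90-second lease.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Renew once more than lease/3 has passed since the last renewal. */
static bool lease_needs_renewal(time_t now, time_t last_renewal, long lease)
{
        return now > last_renewal + lease / 3;
}

int main(void)
{
        time_t last = 1000;
        long lease = 90;                                /* assumed lease   */

        printf("%d %d\n",
               lease_needs_renewal(1020, last, lease),  /* 0: too early    */
               lease_needs_renewal(1031, last, lease)); /* 1: renew now    */
        return 0;
}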
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 6d263ed79e92..6c5ed51f105e 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -901,7 +901,7 @@ void nfs4_schedule_state_recovery(struct nfs_client *clp) | |||
901 | nfs4_schedule_state_manager(clp); | 901 | nfs4_schedule_state_manager(clp); |
902 | } | 902 | } |
903 | 903 | ||
904 | static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) | 904 | int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) |
905 | { | 905 | { |
906 | 906 | ||
907 | set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); | 907 | set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); |
@@ -1249,26 +1249,65 @@ static int nfs4_reclaim_lease(struct nfs_client *clp) | |||
1249 | } | 1249 | } |
1250 | 1250 | ||
1251 | #ifdef CONFIG_NFS_V4_1 | 1251 | #ifdef CONFIG_NFS_V4_1 |
1252 | void nfs41_handle_recall_slot(struct nfs_client *clp) | ||
1253 | { | ||
1254 | set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); | ||
1255 | nfs4_schedule_state_recovery(clp); | ||
1256 | } | ||
1257 | |||
1258 | static void nfs4_reset_all_state(struct nfs_client *clp) | ||
1259 | { | ||
1260 | if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { | ||
1261 | clp->cl_boot_time = CURRENT_TIME; | ||
1262 | nfs4_state_start_reclaim_nograce(clp); | ||
1263 | nfs4_schedule_state_recovery(clp); | ||
1264 | } | ||
1265 | } | ||
1266 | |||
1267 | static void nfs41_handle_server_reboot(struct nfs_client *clp) | ||
1268 | { | ||
1269 | if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { | ||
1270 | nfs4_state_start_reclaim_reboot(clp); | ||
1271 | nfs4_schedule_state_recovery(clp); | ||
1272 | } | ||
1273 | } | ||
1274 | |||
1275 | static void nfs41_handle_state_revoked(struct nfs_client *clp) | ||
1276 | { | ||
1277 | /* Temporary */ | ||
1278 | nfs4_reset_all_state(clp); | ||
1279 | } | ||
1280 | |||
1281 | static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp) | ||
1282 | { | ||
1283 | /* This will need to handle layouts too */ | ||
1284 | nfs_expire_all_delegations(clp); | ||
1285 | } | ||
1286 | |||
1287 | static void nfs41_handle_cb_path_down(struct nfs_client *clp) | ||
1288 | { | ||
1289 | nfs_expire_all_delegations(clp); | ||
1290 | if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0) | ||
1291 | nfs4_schedule_state_recovery(clp); | ||
1292 | } | ||
1293 | |||
1252 | void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags) | 1294 | void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags) |
1253 | { | 1295 | { |
1254 | if (!flags) | 1296 | if (!flags) |
1255 | return; | 1297 | return; |
1256 | else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) { | 1298 | else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) |
1257 | set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); | 1299 | nfs41_handle_server_reboot(clp); |
1258 | nfs4_state_start_reclaim_reboot(clp); | 1300 | else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED | |
1259 | nfs4_schedule_state_recovery(clp); | ||
1260 | } else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED | | ||
1261 | SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED | | 1301 | SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED | |
1262 | SEQ4_STATUS_ADMIN_STATE_REVOKED | | 1302 | SEQ4_STATUS_ADMIN_STATE_REVOKED | |
1263 | SEQ4_STATUS_RECALLABLE_STATE_REVOKED | | 1303 | SEQ4_STATUS_LEASE_MOVED)) |
1264 | SEQ4_STATUS_LEASE_MOVED)) { | 1304 | nfs41_handle_state_revoked(clp); |
1265 | set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); | 1305 | else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED) |
1266 | nfs4_state_start_reclaim_nograce(clp); | 1306 | nfs41_handle_recallable_state_revoked(clp); |
1267 | nfs4_schedule_state_recovery(clp); | 1307 | else if (flags & (SEQ4_STATUS_CB_PATH_DOWN | |
1268 | } else if (flags & (SEQ4_STATUS_CB_PATH_DOWN | | ||
1269 | SEQ4_STATUS_BACKCHANNEL_FAULT | | 1308 | SEQ4_STATUS_BACKCHANNEL_FAULT | |
1270 | SEQ4_STATUS_CB_PATH_DOWN_SESSION)) | 1309 | SEQ4_STATUS_CB_PATH_DOWN_SESSION)) |
1271 | nfs_expire_all_delegations(clp); | 1310 | nfs41_handle_cb_path_down(clp); |
1272 | } | 1311 | } |
1273 | 1312 | ||
1274 | static int nfs4_reset_session(struct nfs_client *clp) | 1313 | static int nfs4_reset_session(struct nfs_client *clp) |
@@ -1285,23 +1324,52 @@ static int nfs4_reset_session(struct nfs_client *clp) | |||
1285 | 1324 | ||
1286 | memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN); | 1325 | memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN); |
1287 | status = nfs4_proc_create_session(clp); | 1326 | status = nfs4_proc_create_session(clp); |
1288 | if (status) | 1327 | if (status) { |
1289 | status = nfs4_recovery_handle_error(clp, status); | 1328 | status = nfs4_recovery_handle_error(clp, status); |
1329 | goto out; | ||
1330 | } | ||
1331 | /* create_session negotiated new slot table */ | ||
1332 | clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); | ||
1290 | 1333 | ||
1291 | out: | 1334 | /* Let the state manager reestablish state */ |
1292 | /* | 1335 | if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) |
1293 | * Let the state manager reestablish state | ||
1294 | */ | ||
1295 | if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) && | ||
1296 | status == 0) | ||
1297 | nfs41_setup_state_renewal(clp); | 1336 | nfs41_setup_state_renewal(clp); |
1298 | 1337 | out: | |
1299 | return status; | 1338 | return status; |
1300 | } | 1339 | } |
1301 | 1340 | ||
1341 | static int nfs4_recall_slot(struct nfs_client *clp) | ||
1342 | { | ||
1343 | struct nfs4_slot_table *fc_tbl = &clp->cl_session->fc_slot_table; | ||
1344 | struct nfs4_channel_attrs *fc_attrs = &clp->cl_session->fc_attrs; | ||
1345 | struct nfs4_slot *new, *old; | ||
1346 | int i; | ||
1347 | |||
1348 | nfs4_begin_drain_session(clp); | ||
1349 | new = kmalloc(fc_tbl->target_max_slots * sizeof(struct nfs4_slot), | ||
1350 | GFP_KERNEL); | ||
1351 | if (!new) | ||
1352 | return -ENOMEM; | ||
1353 | |||
1354 | spin_lock(&fc_tbl->slot_tbl_lock); | ||
1355 | for (i = 0; i < fc_tbl->target_max_slots; i++) | ||
1356 | new[i].seq_nr = fc_tbl->slots[i].seq_nr; | ||
1357 | old = fc_tbl->slots; | ||
1358 | fc_tbl->slots = new; | ||
1359 | fc_tbl->max_slots = fc_tbl->target_max_slots; | ||
1360 | fc_tbl->target_max_slots = 0; | ||
1361 | fc_attrs->max_reqs = fc_tbl->max_slots; | ||
1362 | spin_unlock(&fc_tbl->slot_tbl_lock); | ||
1363 | |||
1364 | kfree(old); | ||
1365 | nfs4_end_drain_session(clp); | ||
1366 | return 0; | ||
1367 | } | ||
1368 | |||
1302 | #else /* CONFIG_NFS_V4_1 */ | 1369 | #else /* CONFIG_NFS_V4_1 */ |
1303 | static int nfs4_reset_session(struct nfs_client *clp) { return 0; } | 1370 | static int nfs4_reset_session(struct nfs_client *clp) { return 0; } |
1304 | static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; } | 1371 | static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; } |
1372 | static int nfs4_recall_slot(struct nfs_client *clp) { return 0; } | ||
1305 | #endif /* CONFIG_NFS_V4_1 */ | 1373 | #endif /* CONFIG_NFS_V4_1 */ |
1306 | 1374 | ||
1307 | /* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors | 1375 | /* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors |
@@ -1314,6 +1382,7 @@ static void nfs4_set_lease_expired(struct nfs_client *clp, int status) | |||
1314 | case -NFS4ERR_DELAY: | 1382 | case -NFS4ERR_DELAY: |
1315 | case -NFS4ERR_CLID_INUSE: | 1383 | case -NFS4ERR_CLID_INUSE: |
1316 | case -EAGAIN: | 1384 | case -EAGAIN: |
1385 | case -EKEYEXPIRED: | ||
1317 | break; | 1386 | break; |
1318 | 1387 | ||
1319 | case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery | 1388 | case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery |
@@ -1397,6 +1466,15 @@ static void nfs4_state_manager(struct nfs_client *clp) | |||
1397 | nfs_client_return_marked_delegations(clp); | 1466 | nfs_client_return_marked_delegations(clp); |
1398 | continue; | 1467 | continue; |
1399 | } | 1468 | } |
1469 | /* Recall session slots */ | ||
1470 | if (test_and_clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state) | ||
1471 | && nfs4_has_session(clp)) { | ||
1472 | status = nfs4_recall_slot(clp); | ||
1473 | if (status < 0) | ||
1474 | goto out_error; | ||
1475 | continue; | ||
1476 | } | ||
1477 | |||
1400 | 1478 | ||
1401 | nfs4_clear_state_manager_bit(clp); | 1479 | nfs4_clear_state_manager_bit(clp); |
1402 | /* Did we race with an attempt to give us more work? */ | 1480 | /* Did we race with an attempt to give us more work? */ |
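The state-manager loop above gains a NFS4CLNT_RECALL_SLOT step: each category of pending work is a bit in cl_state, and the manager clears a bit, performs the corresponding recovery action, then restarts the loop so that bits raised in the meantime are still noticed. A user-space sketch of that test-and-clear work loop, with illustrative bit names and printouts in place of the recovery actions.

#include <stdatomic.h>
#include <stdio.h>

enum { LEASE_EXPIRED = 1 << 0, SESSION_RESET = 1 << 1, RECALL_SLOT = 1 << 2 };

static atomic_uint cl_state = RECALL_SLOT;  /* pretend a slot recall is pending */

static unsigned test_and_clear(unsigned bit)
{
        return atomic_fetch_and(&cl_state, ~bit) & bit;
}

int main(void)
{
        while (atomic_load(&cl_state)) {
                if (test_and_clear(LEASE_EXPIRED)) { puts("reclaim lease");     continue; }
                if (test_and_clear(SESSION_RESET)) { puts("reset session");     continue; }
                if (test_and_clear(RECALL_SLOT))   { puts("shrink slot table"); continue; }
        }
        return 0;
}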
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index e437fd6a819f..38f3b582e7c2 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
@@ -38,7 +38,6 @@ | |||
38 | #include <linux/param.h> | 38 | #include <linux/param.h> |
39 | #include <linux/time.h> | 39 | #include <linux/time.h> |
40 | #include <linux/mm.h> | 40 | #include <linux/mm.h> |
41 | #include <linux/slab.h> | ||
42 | #include <linux/errno.h> | 41 | #include <linux/errno.h> |
43 | #include <linux/string.h> | 42 | #include <linux/string.h> |
44 | #include <linux/in.h> | 43 | #include <linux/in.h> |
@@ -1578,6 +1577,14 @@ static void encode_create_session(struct xdr_stream *xdr, | |||
1578 | char machine_name[NFS4_MAX_MACHINE_NAME_LEN]; | 1577 | char machine_name[NFS4_MAX_MACHINE_NAME_LEN]; |
1579 | uint32_t len; | 1578 | uint32_t len; |
1580 | struct nfs_client *clp = args->client; | 1579 | struct nfs_client *clp = args->client; |
1580 | u32 max_resp_sz_cached; | ||
1581 | |||
1582 | /* | ||
1583 | * Assumes OPEN is the biggest non-idempotent compound. | ||
1584 | * 2 is the verifier. | ||
1585 | */ | ||
1586 | max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE + | ||
1587 | RPC_MAX_AUTH_SIZE + 2) * XDR_UNIT; | ||
1581 | 1588 | ||
1582 | len = scnprintf(machine_name, sizeof(machine_name), "%s", | 1589 | len = scnprintf(machine_name, sizeof(machine_name), "%s", |
1583 | clp->cl_ipaddr); | 1590 | clp->cl_ipaddr); |
@@ -1592,7 +1599,7 @@ static void encode_create_session(struct xdr_stream *xdr, | |||
1592 | *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */ | 1599 | *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */ |
1593 | *p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */ | 1600 | *p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */ |
1594 | *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */ | 1601 | *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */ |
1595 | *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz_cached); /* Max resp sz cached */ | 1602 | *p++ = cpu_to_be32(max_resp_sz_cached); /* Max resp sz cached */ |
1596 | *p++ = cpu_to_be32(args->fc_attrs.max_ops); /* max operations */ | 1603 | *p++ = cpu_to_be32(args->fc_attrs.max_ops); /* max operations */ |
1597 | *p++ = cpu_to_be32(args->fc_attrs.max_reqs); /* max requests */ | 1604 | *p++ = cpu_to_be32(args->fc_attrs.max_reqs); /* max requests */ |
1598 | *p++ = cpu_to_be32(0); /* rdmachannel_attrs */ | 1605 | *p++ = cpu_to_be32(0); /* rdmachannel_attrs */ |
@@ -4631,7 +4638,7 @@ static int decode_sequence(struct xdr_stream *xdr, | |||
4631 | * If the server returns different values for sessionID, slotID or | 4638 | * If the server returns different values for sessionID, slotID or |
4632 | * sequence number, the server is looney tunes. | 4639 | * sequence number, the server is looney tunes. |
4633 | */ | 4640 | */ |
4634 | status = -ESERVERFAULT; | 4641 | status = -EREMOTEIO; |
4635 | 4642 | ||
4636 | if (memcmp(id.data, res->sr_session->sess_id.data, | 4643 | if (memcmp(id.data, res->sr_session->sess_id.data, |
4637 | NFS4_MAX_SESSIONID_LEN)) { | 4644 | NFS4_MAX_SESSIONID_LEN)) { |
@@ -5544,6 +5551,8 @@ static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, __be32 *p, struct nf | |||
5544 | if (status != 0) | 5551 | if (status != 0) |
5545 | goto out; | 5552 | goto out; |
5546 | status = decode_delegreturn(&xdr); | 5553 | status = decode_delegreturn(&xdr); |
5554 | if (status != 0) | ||
5555 | goto out; | ||
5547 | decode_getfattr(&xdr, res->fattr, res->server, | 5556 | decode_getfattr(&xdr, res->fattr, res->server, |
5548 | !RPC_IS_ASYNC(rqstp->rq_task)); | 5557 | !RPC_IS_ASYNC(rqstp->rq_task)); |
5549 | out: | 5558 | out: |
@@ -5774,7 +5783,7 @@ static struct { | |||
5774 | { NFS4ERR_BAD_COOKIE, -EBADCOOKIE }, | 5783 | { NFS4ERR_BAD_COOKIE, -EBADCOOKIE }, |
5775 | { NFS4ERR_NOTSUPP, -ENOTSUPP }, | 5784 | { NFS4ERR_NOTSUPP, -ENOTSUPP }, |
5776 | { NFS4ERR_TOOSMALL, -ETOOSMALL }, | 5785 | { NFS4ERR_TOOSMALL, -ETOOSMALL }, |
5777 | { NFS4ERR_SERVERFAULT, -ESERVERFAULT }, | 5786 | { NFS4ERR_SERVERFAULT, -EREMOTEIO }, |
5778 | { NFS4ERR_BADTYPE, -EBADTYPE }, | 5787 | { NFS4ERR_BADTYPE, -EBADTYPE }, |
5779 | { NFS4ERR_LOCKED, -EAGAIN }, | 5788 | { NFS4ERR_LOCKED, -EAGAIN }, |
5780 | { NFS4ERR_SYMLINK, -ELOOP }, | 5789 | { NFS4ERR_SYMLINK, -ELOOP }, |
@@ -5801,7 +5810,7 @@ nfs4_stat_to_errno(int stat) | |||
5801 | } | 5810 | } |
5802 | if (stat <= 10000 || stat > 10100) { | 5811 | if (stat <= 10000 || stat > 10100) { |
5803 | /* The server is looney tunes. */ | 5812 | /* The server is looney tunes. */ |
5804 | return -ESERVERFAULT; | 5813 | return -EREMOTEIO; |
5805 | } | 5814 | } |
5806 | /* If we cannot translate the error, the recovery routines should | 5815 | /* If we cannot translate the error, the recovery routines should |
5807 | * handle it. | 5816 | * handle it. |
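The encode_create_session() change above derives the cached-reply ceiling from the size of a decoded OPEN reply plus the RPC reply header, authentication slack and a two-word verifier, all scaled by the 4-byte XDR unit. A small arithmetic sketch of that formula follows; the word counts are made-up example values, not the real NFS4_dec_open_sz, RPC_REPHDRSIZE or RPC_MAX_AUTH_SIZE constants.

#include <stdio.h>

#define XDR_UNIT 4	/* bytes per XDR word */

int main(void)
{
	/* Illustrative word counts only; the real values come from the
	 * XDR size macros in nfs4xdr.c and the sunrpc headers. */
	unsigned int nfs4_dec_open_sz = 240;	/* decoded OPEN reply */
	unsigned int rpc_rephdrsize  = 4;	/* RPC reply header */
	unsigned int rpc_max_auth_sz = 100;	/* auth slack */
	unsigned int max_resp_sz_cached;

	/* OPEN is assumed to be the biggest non-idempotent compound;
	 * the trailing "+ 2" is the verifier, as in the hunk above. */
	max_resp_sz_cached = (nfs4_dec_open_sz + rpc_rephdrsize +
			      rpc_max_auth_sz + 2) * XDR_UNIT;
	printf("ca_maxresponsesize_cached = %u bytes\n", max_resp_sz_cached);
	return 0;
}
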
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index e2975939126a..29d9d36cd5f4 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c | |||
@@ -112,12 +112,10 @@ void nfs_unlock_request(struct nfs_page *req) | |||
112 | */ | 112 | */ |
113 | int nfs_set_page_tag_locked(struct nfs_page *req) | 113 | int nfs_set_page_tag_locked(struct nfs_page *req) |
114 | { | 114 | { |
115 | struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode); | ||
116 | |||
117 | if (!nfs_lock_request_dontget(req)) | 115 | if (!nfs_lock_request_dontget(req)) |
118 | return 0; | 116 | return 0; |
119 | if (req->wb_page != NULL) | 117 | if (req->wb_page != NULL) |
120 | radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED); | 118 | radix_tree_tag_set(&NFS_I(req->wb_context->path.dentry->d_inode)->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED); |
121 | return 1; | 119 | return 1; |
122 | } | 120 | } |
123 | 121 | ||
@@ -126,10 +124,10 @@ int nfs_set_page_tag_locked(struct nfs_page *req) | |||
126 | */ | 124 | */ |
127 | void nfs_clear_page_tag_locked(struct nfs_page *req) | 125 | void nfs_clear_page_tag_locked(struct nfs_page *req) |
128 | { | 126 | { |
129 | struct inode *inode = req->wb_context->path.dentry->d_inode; | ||
130 | struct nfs_inode *nfsi = NFS_I(inode); | ||
131 | |||
132 | if (req->wb_page != NULL) { | 127 | if (req->wb_page != NULL) { |
128 | struct inode *inode = req->wb_context->path.dentry->d_inode; | ||
129 | struct nfs_inode *nfsi = NFS_I(inode); | ||
130 | |||
133 | spin_lock(&inode->i_lock); | 131 | spin_lock(&inode->i_lock); |
134 | radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED); | 132 | radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED); |
135 | nfs_unlock_request(req); | 133 | nfs_unlock_request(req); |
@@ -142,16 +140,22 @@ void nfs_clear_page_tag_locked(struct nfs_page *req) | |||
142 | * nfs_clear_request - Free up all resources allocated to the request | 140 | * nfs_clear_request - Free up all resources allocated to the request |
143 | * @req: | 141 | * @req: |
144 | * | 142 | * |
145 | * Release page resources associated with a write request after it | 143 | * Release page and open context resources associated with a read/write |
146 | * has completed. | 144 | * request after it has completed. |
147 | */ | 145 | */ |
148 | void nfs_clear_request(struct nfs_page *req) | 146 | void nfs_clear_request(struct nfs_page *req) |
149 | { | 147 | { |
150 | struct page *page = req->wb_page; | 148 | struct page *page = req->wb_page; |
149 | struct nfs_open_context *ctx = req->wb_context; | ||
150 | |||
151 | if (page != NULL) { | 151 | if (page != NULL) { |
152 | page_cache_release(page); | 152 | page_cache_release(page); |
153 | req->wb_page = NULL; | 153 | req->wb_page = NULL; |
154 | } | 154 | } |
155 | if (ctx != NULL) { | ||
156 | put_nfs_open_context(ctx); | ||
157 | req->wb_context = NULL; | ||
158 | } | ||
155 | } | 159 | } |
156 | 160 | ||
157 | 161 | ||
@@ -165,9 +169,8 @@ static void nfs_free_request(struct kref *kref) | |||
165 | { | 169 | { |
166 | struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref); | 170 | struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref); |
167 | 171 | ||
168 | /* Release struct file or cached credential */ | 172 | /* Release struct file and open context */ |
169 | nfs_clear_request(req); | 173 | nfs_clear_request(req); |
170 | put_nfs_open_context(req->wb_context); | ||
171 | nfs_page_free(req); | 174 | nfs_page_free(req); |
172 | } | 175 | } |
173 | 176 | ||
@@ -176,6 +179,12 @@ void nfs_release_request(struct nfs_page *req) | |||
176 | kref_put(&req->wb_kref, nfs_free_request); | 179 | kref_put(&req->wb_kref, nfs_free_request); |
177 | } | 180 | } |
178 | 181 | ||
182 | static int nfs_wait_bit_uninterruptible(void *word) | ||
183 | { | ||
184 | io_schedule(); | ||
185 | return 0; | ||
186 | } | ||
187 | |||
179 | /** | 188 | /** |
180 | * nfs_wait_on_request - Wait for a request to complete. | 189 | * nfs_wait_on_request - Wait for a request to complete. |
181 | * @req: request to wait upon. | 190 | * @req: request to wait upon. |
@@ -186,14 +195,9 @@ void nfs_release_request(struct nfs_page *req) | |||
186 | int | 195 | int |
187 | nfs_wait_on_request(struct nfs_page *req) | 196 | nfs_wait_on_request(struct nfs_page *req) |
188 | { | 197 | { |
189 | int ret = 0; | 198 | return wait_on_bit(&req->wb_flags, PG_BUSY, |
190 | 199 | nfs_wait_bit_uninterruptible, | |
191 | if (!test_bit(PG_BUSY, &req->wb_flags)) | 200 | TASK_UNINTERRUPTIBLE); |
192 | goto out; | ||
193 | ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY, | ||
194 | nfs_wait_bit_killable, TASK_KILLABLE); | ||
195 | out: | ||
196 | return ret; | ||
197 | } | 201 | } |
198 | 202 | ||
199 | /** | 203 | /** |
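The pagelist.c hunks fold the open-context release into nfs_clear_request(), so a request drops both its page reference and its open-context reference in one place and the kref release path no longer calls put_nfs_open_context() itself. A userspace C model of that ownership change is sketched below; all types and helpers are illustrative stand-ins, not kernel interfaces.

#include <stdio.h>

struct open_context { int refcount; };
struct page_ref { int refcount; };

struct request {
	struct page_ref *page;
	struct open_context *ctx;
};

static void put_page_ref(struct page_ref *p)
{
	if (--p->refcount == 0)
		printf("page released\n");
}

static void put_open_context(struct open_context *c)
{
	if (--c->refcount == 0)
		printf("open context released\n");
}

/* mirrors nfs_clear_request(): drop page and context, NULL the fields */
static void clear_request(struct request *req)
{
	if (req->page != NULL) {
		put_page_ref(req->page);
		req->page = NULL;
	}
	if (req->ctx != NULL) {
		put_open_context(req->ctx);
		req->ctx = NULL;
	}
}

int main(void)
{
	struct page_ref page = { .refcount = 1 };
	struct open_context ctx = { .refcount = 1 };
	struct request req = { .page = &page, .ctx = &ctx };

	clear_request(&req);	/* both references dropped exactly once */
	clear_request(&req);	/* idempotent: fields are already NULL */
	return 0;
}
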
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index ef583854d8d0..0288be80444f 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c | |||
@@ -29,7 +29,6 @@ | |||
29 | 29 | ||
30 | #include <linux/types.h> | 30 | #include <linux/types.h> |
31 | #include <linux/param.h> | 31 | #include <linux/param.h> |
32 | #include <linux/slab.h> | ||
33 | #include <linux/time.h> | 32 | #include <linux/time.h> |
34 | #include <linux/mm.h> | 33 | #include <linux/mm.h> |
35 | #include <linux/errno.h> | 34 | #include <linux/errno.h> |
@@ -47,6 +46,39 @@ | |||
47 | #define NFSDBG_FACILITY NFSDBG_PROC | 46 | #define NFSDBG_FACILITY NFSDBG_PROC |
48 | 47 | ||
49 | /* | 48 | /* |
49 | * wrapper to handle the -EKEYEXPIRED error message. This should generally | ||
50 | * only happen if using krb5 auth and a user's TGT expires. NFSv2 doesn't | ||
51 | * support the NFSERR_JUKEBOX error code, but we handle this situation in the | ||
52 | * same way that we handle that error with NFSv3. | ||
53 | */ | ||
54 | static int | ||
55 | nfs_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) | ||
56 | { | ||
57 | int res; | ||
58 | do { | ||
59 | res = rpc_call_sync(clnt, msg, flags); | ||
60 | if (res != -EKEYEXPIRED) | ||
61 | break; | ||
62 | schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME); | ||
63 | res = -ERESTARTSYS; | ||
64 | } while (!fatal_signal_pending(current)); | ||
65 | return res; | ||
66 | } | ||
67 | |||
68 | #define rpc_call_sync(clnt, msg, flags) nfs_rpc_wrapper(clnt, msg, flags) | ||
69 | |||
70 | static int | ||
71 | nfs_async_handle_expired_key(struct rpc_task *task) | ||
72 | { | ||
73 | if (task->tk_status != -EKEYEXPIRED) | ||
74 | return 0; | ||
75 | task->tk_status = 0; | ||
76 | rpc_restart_call(task); | ||
77 | rpc_delay(task, NFS_JUKEBOX_RETRY_TIME); | ||
78 | return 1; | ||
79 | } | ||
80 | |||
81 | /* | ||
50 | * Bare-bones access to getattr: this is for nfs_read_super. | 82 | * Bare-bones access to getattr: this is for nfs_read_super. |
51 | */ | 83 | */ |
52 | static int | 84 | static int |
@@ -307,6 +339,8 @@ nfs_proc_unlink_setup(struct rpc_message *msg, struct inode *dir) | |||
307 | 339 | ||
308 | static int nfs_proc_unlink_done(struct rpc_task *task, struct inode *dir) | 340 | static int nfs_proc_unlink_done(struct rpc_task *task, struct inode *dir) |
309 | { | 341 | { |
342 | if (nfs_async_handle_expired_key(task)) | ||
343 | return 0; | ||
310 | nfs_mark_for_revalidate(dir); | 344 | nfs_mark_for_revalidate(dir); |
311 | return 1; | 345 | return 1; |
312 | } | 346 | } |
@@ -560,6 +594,9 @@ nfs_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, | |||
560 | 594 | ||
561 | static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data) | 595 | static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data) |
562 | { | 596 | { |
597 | if (nfs_async_handle_expired_key(task)) | ||
598 | return -EAGAIN; | ||
599 | |||
563 | nfs_invalidate_atime(data->inode); | 600 | nfs_invalidate_atime(data->inode); |
564 | if (task->tk_status >= 0) { | 601 | if (task->tk_status >= 0) { |
565 | nfs_refresh_inode(data->inode, data->res.fattr); | 602 | nfs_refresh_inode(data->inode, data->res.fattr); |
@@ -579,6 +616,9 @@ static void nfs_proc_read_setup(struct nfs_read_data *data, struct rpc_message * | |||
579 | 616 | ||
580 | static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data) | 617 | static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data) |
581 | { | 618 | { |
619 | if (nfs_async_handle_expired_key(task)) | ||
620 | return -EAGAIN; | ||
621 | |||
582 | if (task->tk_status >= 0) | 622 | if (task->tk_status >= 0) |
583 | nfs_post_op_update_inode_force_wcc(data->inode, data->res.fattr); | 623 | nfs_post_op_update_inode_force_wcc(data->inode, data->res.fattr); |
584 | return 0; | 624 | return 0; |
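The proc.c hunks above wrap every synchronous NFSv2 call so that -EKEYEXPIRED (typically an expired krb5 TGT) is retried after a delay rather than surfaced, mirroring the NFSv3 jukebox handling, and restart asynchronous tasks the same way. A userspace C model of the synchronous retry loop follows; fake_rpc_call() and RETRY_TIME are stand-ins for rpc_call_sync() and NFS_JUKEBOX_RETRY_TIME, and the fatal-signal bail-out is only noted in a comment.

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define RETRY_TIME 1	/* seconds between retries (illustrative) */

static int attempts;

static int fake_rpc_call(void)
{
	/* fail twice with EKEYEXPIRED, then succeed */
	return (++attempts < 3) ? -EKEYEXPIRED : 0;
}

static int rpc_wrapper(void)
{
	int res;

	do {
		res = fake_rpc_call();
		if (res != -EKEYEXPIRED)
			break;
		/* the kernel code sleeps with schedule_timeout_killable()
		 * and returns -ERESTARTSYS if a fatal signal arrives */
		sleep(RETRY_TIME);
	} while (1);
	return res;
}

int main(void)
{
	printf("result=%d after %d attempts\n", rpc_wrapper(), attempts);
	return 0;
}
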
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index ce907efc5508..e01637240eeb 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <linux/vfs.h> | 48 | #include <linux/vfs.h> |
49 | #include <linux/inet.h> | 49 | #include <linux/inet.h> |
50 | #include <linux/in6.h> | 50 | #include <linux/in6.h> |
51 | #include <linux/slab.h> | ||
51 | #include <net/ipv6.h> | 52 | #include <net/ipv6.h> |
52 | #include <linux/netdevice.h> | 53 | #include <linux/netdevice.h> |
53 | #include <linux/nfs_xdr.h> | 54 | #include <linux/nfs_xdr.h> |
@@ -243,6 +244,7 @@ static int nfs_show_stats(struct seq_file *, struct vfsmount *); | |||
243 | static int nfs_get_sb(struct file_system_type *, int, const char *, void *, struct vfsmount *); | 244 | static int nfs_get_sb(struct file_system_type *, int, const char *, void *, struct vfsmount *); |
244 | static int nfs_xdev_get_sb(struct file_system_type *fs_type, | 245 | static int nfs_xdev_get_sb(struct file_system_type *fs_type, |
245 | int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt); | 246 | int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt); |
247 | static void nfs_put_super(struct super_block *); | ||
246 | static void nfs_kill_super(struct super_block *); | 248 | static void nfs_kill_super(struct super_block *); |
247 | static int nfs_remount(struct super_block *sb, int *flags, char *raw_data); | 249 | static int nfs_remount(struct super_block *sb, int *flags, char *raw_data); |
248 | 250 | ||
@@ -266,6 +268,7 @@ static const struct super_operations nfs_sops = { | |||
266 | .alloc_inode = nfs_alloc_inode, | 268 | .alloc_inode = nfs_alloc_inode, |
267 | .destroy_inode = nfs_destroy_inode, | 269 | .destroy_inode = nfs_destroy_inode, |
268 | .write_inode = nfs_write_inode, | 270 | .write_inode = nfs_write_inode, |
271 | .put_super = nfs_put_super, | ||
269 | .statfs = nfs_statfs, | 272 | .statfs = nfs_statfs, |
270 | .clear_inode = nfs_clear_inode, | 273 | .clear_inode = nfs_clear_inode, |
271 | .umount_begin = nfs_umount_begin, | 274 | .umount_begin = nfs_umount_begin, |
@@ -335,6 +338,7 @@ static const struct super_operations nfs4_sops = { | |||
335 | .alloc_inode = nfs_alloc_inode, | 338 | .alloc_inode = nfs_alloc_inode, |
336 | .destroy_inode = nfs_destroy_inode, | 339 | .destroy_inode = nfs_destroy_inode, |
337 | .write_inode = nfs_write_inode, | 340 | .write_inode = nfs_write_inode, |
341 | .put_super = nfs_put_super, | ||
338 | .statfs = nfs_statfs, | 342 | .statfs = nfs_statfs, |
339 | .clear_inode = nfs4_clear_inode, | 343 | .clear_inode = nfs4_clear_inode, |
340 | .umount_begin = nfs_umount_begin, | 344 | .umount_begin = nfs_umount_begin, |
@@ -2211,7 +2215,7 @@ static int nfs_get_sb(struct file_system_type *fs_type, | |||
2211 | } else { | 2215 | } else { |
2212 | error = nfs_bdi_register(server); | 2216 | error = nfs_bdi_register(server); |
2213 | if (error) | 2217 | if (error) |
2214 | goto error_splat_super; | 2218 | goto error_splat_bdi; |
2215 | } | 2219 | } |
2216 | 2220 | ||
2217 | if (!s->s_root) { | 2221 | if (!s->s_root) { |
@@ -2253,11 +2257,25 @@ out_err_nosb: | |||
2253 | error_splat_root: | 2257 | error_splat_root: |
2254 | dput(mntroot); | 2258 | dput(mntroot); |
2255 | error_splat_super: | 2259 | error_splat_super: |
2260 | if (server && !s->s_root) | ||
2261 | bdi_unregister(&server->backing_dev_info); | ||
2262 | error_splat_bdi: | ||
2256 | deactivate_locked_super(s); | 2263 | deactivate_locked_super(s); |
2257 | goto out; | 2264 | goto out; |
2258 | } | 2265 | } |
2259 | 2266 | ||
2260 | /* | 2267 | /* |
2268 | * Ensure that we unregister the bdi before kill_anon_super | ||
2269 | * releases the device name | ||
2270 | */ | ||
2271 | static void nfs_put_super(struct super_block *s) | ||
2272 | { | ||
2273 | struct nfs_server *server = NFS_SB(s); | ||
2274 | |||
2275 | bdi_unregister(&server->backing_dev_info); | ||
2276 | } | ||
2277 | |||
2278 | /* | ||
2261 | * Destroy an NFS2/3 superblock | 2279 | * Destroy an NFS2/3 superblock |
2262 | */ | 2280 | */ |
2263 | static void nfs_kill_super(struct super_block *s) | 2281 | static void nfs_kill_super(struct super_block *s) |
@@ -2265,7 +2283,6 @@ static void nfs_kill_super(struct super_block *s) | |||
2265 | struct nfs_server *server = NFS_SB(s); | 2283 | struct nfs_server *server = NFS_SB(s); |
2266 | 2284 | ||
2267 | kill_anon_super(s); | 2285 | kill_anon_super(s); |
2268 | bdi_unregister(&server->backing_dev_info); | ||
2269 | nfs_fscache_release_super_cookie(s); | 2286 | nfs_fscache_release_super_cookie(s); |
2270 | nfs_free_server(server); | 2287 | nfs_free_server(server); |
2271 | } | 2288 | } |
@@ -2313,7 +2330,7 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags, | |||
2313 | } else { | 2330 | } else { |
2314 | error = nfs_bdi_register(server); | 2331 | error = nfs_bdi_register(server); |
2315 | if (error) | 2332 | if (error) |
2316 | goto error_splat_super; | 2333 | goto error_splat_bdi; |
2317 | } | 2334 | } |
2318 | 2335 | ||
2319 | if (!s->s_root) { | 2336 | if (!s->s_root) { |
@@ -2350,6 +2367,9 @@ out_err_noserver: | |||
2350 | return error; | 2367 | return error; |
2351 | 2368 | ||
2352 | error_splat_super: | 2369 | error_splat_super: |
2370 | if (server && !s->s_root) | ||
2371 | bdi_unregister(&server->backing_dev_info); | ||
2372 | error_splat_bdi: | ||
2353 | deactivate_locked_super(s); | 2373 | deactivate_locked_super(s); |
2354 | dprintk("<-- nfs_xdev_get_sb() = %d [splat]\n", error); | 2374 | dprintk("<-- nfs_xdev_get_sb() = %d [splat]\n", error); |
2355 | return error; | 2375 | return error; |
@@ -2565,7 +2585,7 @@ static int nfs4_remote_get_sb(struct file_system_type *fs_type, | |||
2565 | } else { | 2585 | } else { |
2566 | error = nfs_bdi_register(server); | 2586 | error = nfs_bdi_register(server); |
2567 | if (error) | 2587 | if (error) |
2568 | goto error_splat_super; | 2588 | goto error_splat_bdi; |
2569 | } | 2589 | } |
2570 | 2590 | ||
2571 | if (!s->s_root) { | 2591 | if (!s->s_root) { |
@@ -2603,6 +2623,9 @@ out_free: | |||
2603 | error_splat_root: | 2623 | error_splat_root: |
2604 | dput(mntroot); | 2624 | dput(mntroot); |
2605 | error_splat_super: | 2625 | error_splat_super: |
2626 | if (server && !s->s_root) | ||
2627 | bdi_unregister(&server->backing_dev_info); | ||
2628 | error_splat_bdi: | ||
2606 | deactivate_locked_super(s); | 2629 | deactivate_locked_super(s); |
2607 | goto out; | 2630 | goto out; |
2608 | } | 2631 | } |
@@ -2798,7 +2821,7 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags, | |||
2798 | } else { | 2821 | } else { |
2799 | error = nfs_bdi_register(server); | 2822 | error = nfs_bdi_register(server); |
2800 | if (error) | 2823 | if (error) |
2801 | goto error_splat_super; | 2824 | goto error_splat_bdi; |
2802 | } | 2825 | } |
2803 | 2826 | ||
2804 | if (!s->s_root) { | 2827 | if (!s->s_root) { |
@@ -2834,6 +2857,9 @@ out_err_noserver: | |||
2834 | return error; | 2857 | return error; |
2835 | 2858 | ||
2836 | error_splat_super: | 2859 | error_splat_super: |
2860 | if (server && !s->s_root) | ||
2861 | bdi_unregister(&server->backing_dev_info); | ||
2862 | error_splat_bdi: | ||
2837 | deactivate_locked_super(s); | 2863 | deactivate_locked_super(s); |
2838 | dprintk("<-- nfs4_xdev_get_sb() = %d [splat]\n", error); | 2864 | dprintk("<-- nfs4_xdev_get_sb() = %d [splat]\n", error); |
2839 | return error; | 2865 | return error; |
@@ -2880,7 +2906,7 @@ static int nfs4_remote_referral_get_sb(struct file_system_type *fs_type, | |||
2880 | } else { | 2906 | } else { |
2881 | error = nfs_bdi_register(server); | 2907 | error = nfs_bdi_register(server); |
2882 | if (error) | 2908 | if (error) |
2883 | goto error_splat_super; | 2909 | goto error_splat_bdi; |
2884 | } | 2910 | } |
2885 | 2911 | ||
2886 | if (!s->s_root) { | 2912 | if (!s->s_root) { |
@@ -2916,6 +2942,9 @@ out_err_noserver: | |||
2916 | return error; | 2942 | return error; |
2917 | 2943 | ||
2918 | error_splat_super: | 2944 | error_splat_super: |
2945 | if (server && !s->s_root) | ||
2946 | bdi_unregister(&server->backing_dev_info); | ||
2947 | error_splat_bdi: | ||
2919 | deactivate_locked_super(s); | 2948 | deactivate_locked_super(s); |
2920 | dprintk("<-- nfs4_referral_get_sb() = %d [splat]\n", error); | 2949 | dprintk("<-- nfs4_referral_get_sb() = %d [splat]\n", error); |
2921 | return error; | 2950 | return error; |
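The super.c hunks register a put_super hook so the backing_dev_info is unregistered before kill_anon_super() releases the device name, and give every get_sb error path a dedicated label that unwinds the bdi registration. A minimal userspace C sketch of that labelled-unwind shape follows; every function in it is a stand-in, and only the goto structure mirrors the patch.

#include <stdio.h>

static int register_bdi(void)	 { return 0; }
static void unregister_bdi(void) { printf("bdi unregistered\n"); }
static int fill_root(void)	 { return -1; }	/* force the error path */
static void drop_super(void)	 { printf("super dropped\n"); }

static int get_sb(void)
{
	int error;

	error = register_bdi();
	if (error)
		goto error_splat_bdi;	/* nothing to unwind yet */

	error = fill_root();
	if (error)
		goto error_splat_super;

	return 0;

error_splat_super:
	unregister_bdi();		/* undo the bdi registration first */
error_splat_bdi:
	drop_super();
	return error;
}

int main(void)
{
	return get_sb() ? 1 : 0;
}
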
diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c index 412738dbfbc7..05c9e02f4153 100644 --- a/fs/nfs/symlink.c +++ b/fs/nfs/symlink.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/pagemap.h> | 19 | #include <linux/pagemap.h> |
20 | #include <linux/stat.h> | 20 | #include <linux/stat.h> |
21 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
22 | #include <linux/slab.h> | ||
23 | #include <linux/string.h> | 22 | #include <linux/string.h> |
24 | #include <linux/namei.h> | 23 | #include <linux/namei.h> |
25 | 24 | ||
@@ -50,7 +49,7 @@ static void *nfs_follow_link(struct dentry *dentry, struct nameidata *nd) | |||
50 | struct page *page; | 49 | struct page *page; |
51 | void *err; | 50 | void *err; |
52 | 51 | ||
53 | err = ERR_PTR(nfs_revalidate_mapping_nolock(inode, inode->i_mapping)); | 52 | err = ERR_PTR(nfs_revalidate_mapping(inode, inode->i_mapping)); |
54 | if (err) | 53 | if (err) |
55 | goto read_failed; | 54 | goto read_failed; |
56 | page = read_cache_page(&inode->i_data, 0, | 55 | page = read_cache_page(&inode->i_data, 0, |
diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c index 70e1fbbaaeab..ad4d2e787b20 100644 --- a/fs/nfs/sysctl.c +++ b/fs/nfs/sysctl.c | |||
@@ -15,8 +15,10 @@ | |||
15 | 15 | ||
16 | #include "callback.h" | 16 | #include "callback.h" |
17 | 17 | ||
18 | #ifdef CONFIG_NFS_V4 | ||
18 | static const int nfs_set_port_min = 0; | 19 | static const int nfs_set_port_min = 0; |
19 | static const int nfs_set_port_max = 65535; | 20 | static const int nfs_set_port_max = 65535; |
21 | #endif | ||
20 | static struct ctl_table_header *nfs_callback_sysctl_table; | 22 | static struct ctl_table_header *nfs_callback_sysctl_table; |
21 | 23 | ||
22 | static ctl_table nfs_cb_sysctls[] = { | 24 | static ctl_table nfs_cb_sysctls[] = { |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index d171696017f4..de38d63aa920 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -201,6 +201,7 @@ static int nfs_set_page_writeback(struct page *page) | |||
201 | struct inode *inode = page->mapping->host; | 201 | struct inode *inode = page->mapping->host; |
202 | struct nfs_server *nfss = NFS_SERVER(inode); | 202 | struct nfs_server *nfss = NFS_SERVER(inode); |
203 | 203 | ||
204 | page_cache_get(page); | ||
204 | if (atomic_long_inc_return(&nfss->writeback) > | 205 | if (atomic_long_inc_return(&nfss->writeback) > |
205 | NFS_CONGESTION_ON_THRESH) { | 206 | NFS_CONGESTION_ON_THRESH) { |
206 | set_bdi_congested(&nfss->backing_dev_info, | 207 | set_bdi_congested(&nfss->backing_dev_info, |
@@ -216,6 +217,7 @@ static void nfs_end_page_writeback(struct page *page) | |||
216 | struct nfs_server *nfss = NFS_SERVER(inode); | 217 | struct nfs_server *nfss = NFS_SERVER(inode); |
217 | 218 | ||
218 | end_page_writeback(page); | 219 | end_page_writeback(page); |
220 | page_cache_release(page); | ||
219 | if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) | 221 | if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) |
220 | clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); | 222 | clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); |
221 | } | 223 | } |
@@ -421,6 +423,7 @@ static void | |||
421 | nfs_mark_request_dirty(struct nfs_page *req) | 423 | nfs_mark_request_dirty(struct nfs_page *req) |
422 | { | 424 | { |
423 | __set_page_dirty_nobuffers(req->wb_page); | 425 | __set_page_dirty_nobuffers(req->wb_page); |
426 | __mark_inode_dirty(req->wb_page->mapping->host, I_DIRTY_DATASYNC); | ||
424 | } | 427 | } |
425 | 428 | ||
426 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) | 429 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) |
@@ -438,6 +441,7 @@ nfs_mark_request_commit(struct nfs_page *req) | |||
438 | radix_tree_tag_set(&nfsi->nfs_page_tree, | 441 | radix_tree_tag_set(&nfsi->nfs_page_tree, |
439 | req->wb_index, | 442 | req->wb_index, |
440 | NFS_PAGE_TAG_COMMIT); | 443 | NFS_PAGE_TAG_COMMIT); |
444 | nfsi->ncommit++; | ||
441 | spin_unlock(&inode->i_lock); | 445 | spin_unlock(&inode->i_lock); |
442 | inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); | 446 | inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); |
443 | inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE); | 447 | inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE); |
@@ -501,57 +505,6 @@ int nfs_reschedule_unstable_write(struct nfs_page *req) | |||
501 | } | 505 | } |
502 | #endif | 506 | #endif |
503 | 507 | ||
504 | /* | ||
505 | * Wait for a request to complete. | ||
506 | * | ||
507 | * Interruptible by fatal signals only. | ||
508 | */ | ||
509 | static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages) | ||
510 | { | ||
511 | struct nfs_inode *nfsi = NFS_I(inode); | ||
512 | struct nfs_page *req; | ||
513 | pgoff_t idx_end, next; | ||
514 | unsigned int res = 0; | ||
515 | int error; | ||
516 | |||
517 | if (npages == 0) | ||
518 | idx_end = ~0; | ||
519 | else | ||
520 | idx_end = idx_start + npages - 1; | ||
521 | |||
522 | next = idx_start; | ||
523 | while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) { | ||
524 | if (req->wb_index > idx_end) | ||
525 | break; | ||
526 | |||
527 | next = req->wb_index + 1; | ||
528 | BUG_ON(!NFS_WBACK_BUSY(req)); | ||
529 | |||
530 | kref_get(&req->wb_kref); | ||
531 | spin_unlock(&inode->i_lock); | ||
532 | error = nfs_wait_on_request(req); | ||
533 | nfs_release_request(req); | ||
534 | spin_lock(&inode->i_lock); | ||
535 | if (error < 0) | ||
536 | return error; | ||
537 | res++; | ||
538 | } | ||
539 | return res; | ||
540 | } | ||
541 | |||
542 | static void nfs_cancel_commit_list(struct list_head *head) | ||
543 | { | ||
544 | struct nfs_page *req; | ||
545 | |||
546 | while(!list_empty(head)) { | ||
547 | req = nfs_list_entry(head->next); | ||
548 | nfs_list_remove_request(req); | ||
549 | nfs_clear_request_commit(req); | ||
550 | nfs_inode_remove_request(req); | ||
551 | nfs_unlock_request(req); | ||
552 | } | ||
553 | } | ||
554 | |||
555 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) | 508 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) |
556 | static int | 509 | static int |
557 | nfs_need_commit(struct nfs_inode *nfsi) | 510 | nfs_need_commit(struct nfs_inode *nfsi) |
@@ -573,11 +526,17 @@ static int | |||
573 | nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages) | 526 | nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages) |
574 | { | 527 | { |
575 | struct nfs_inode *nfsi = NFS_I(inode); | 528 | struct nfs_inode *nfsi = NFS_I(inode); |
529 | int ret; | ||
576 | 530 | ||
577 | if (!nfs_need_commit(nfsi)) | 531 | if (!nfs_need_commit(nfsi)) |
578 | return 0; | 532 | return 0; |
579 | 533 | ||
580 | return nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT); | 534 | ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT); |
535 | if (ret > 0) | ||
536 | nfsi->ncommit -= ret; | ||
537 | if (nfs_need_commit(NFS_I(inode))) | ||
538 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); | ||
539 | return ret; | ||
581 | } | 540 | } |
582 | #else | 541 | #else |
583 | static inline int nfs_need_commit(struct nfs_inode *nfsi) | 542 | static inline int nfs_need_commit(struct nfs_inode *nfsi) |
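The write.c hunks above introduce an ncommit counter: marking a request for commit increments it, and nfs_scan_commit() subtracts however many requests it moved onto the commit list, so later code can cheaply ask how much unstable data an inode still has. A userspace C model of that bookkeeping follows, with stand-in structures in place of nfs_inode.

#include <stdio.h>

struct fake_inode {
	unsigned long npages;	/* requests attached to the inode */
	unsigned long ncommit;	/* of those, how many await COMMIT */
};

static void mark_request_commit(struct fake_inode *nfsi)
{
	nfsi->ncommit++;	/* mirrors the NFS_PAGE_TAG_COMMIT hunk */
}

static unsigned long scan_commit(struct fake_inode *nfsi, unsigned long max)
{
	unsigned long found = nfsi->ncommit < max ? nfsi->ncommit : max;

	nfsi->ncommit -= found;	/* mirrors "nfsi->ncommit -= ret" above */
	return found;
}

int main(void)
{
	struct fake_inode nfsi = { .npages = 8, .ncommit = 0 };
	int i;

	for (i = 0; i < 5; i++)
		mark_request_commit(&nfsi);
	printf("scanned %lu, %lu still pending\n",
	       scan_commit(&nfsi, 3), nfsi.ncommit);
	return 0;
}
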
@@ -642,9 +601,10 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode, | |||
642 | spin_lock(&inode->i_lock); | 601 | spin_lock(&inode->i_lock); |
643 | } | 602 | } |
644 | 603 | ||
645 | if (nfs_clear_request_commit(req)) | 604 | if (nfs_clear_request_commit(req) && |
646 | radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree, | 605 | radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree, |
647 | req->wb_index, NFS_PAGE_TAG_COMMIT); | 606 | req->wb_index, NFS_PAGE_TAG_COMMIT) != NULL) |
607 | NFS_I(inode)->ncommit--; | ||
648 | 608 | ||
649 | /* Okay, the request matches. Update the region */ | 609 | /* Okay, the request matches. Update the region */ |
650 | if (offset < req->wb_offset) { | 610 | if (offset < req->wb_offset) { |
@@ -703,9 +663,11 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page, | |||
703 | req = nfs_setup_write_request(ctx, page, offset, count); | 663 | req = nfs_setup_write_request(ctx, page, offset, count); |
704 | if (IS_ERR(req)) | 664 | if (IS_ERR(req)) |
705 | return PTR_ERR(req); | 665 | return PTR_ERR(req); |
666 | nfs_mark_request_dirty(req); | ||
706 | /* Update file length */ | 667 | /* Update file length */ |
707 | nfs_grow_file(page, offset, count); | 668 | nfs_grow_file(page, offset, count); |
708 | nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes); | 669 | nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes); |
670 | nfs_mark_request_dirty(req); | ||
709 | nfs_clear_page_tag_locked(req); | 671 | nfs_clear_page_tag_locked(req); |
710 | return 0; | 672 | return 0; |
711 | } | 673 | } |
@@ -782,8 +744,6 @@ int nfs_updatepage(struct file *file, struct page *page, | |||
782 | status = nfs_writepage_setup(ctx, page, offset, count); | 744 | status = nfs_writepage_setup(ctx, page, offset, count); |
783 | if (status < 0) | 745 | if (status < 0) |
784 | nfs_set_pageerror(page); | 746 | nfs_set_pageerror(page); |
785 | else | ||
786 | __set_page_dirty_nobuffers(page); | ||
787 | 747 | ||
788 | dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n", | 748 | dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n", |
789 | status, (long long)i_size_read(inode)); | 749 | status, (long long)i_size_read(inode)); |
@@ -792,13 +752,12 @@ int nfs_updatepage(struct file *file, struct page *page, | |||
792 | 752 | ||
793 | static void nfs_writepage_release(struct nfs_page *req) | 753 | static void nfs_writepage_release(struct nfs_page *req) |
794 | { | 754 | { |
755 | struct page *page = req->wb_page; | ||
795 | 756 | ||
796 | if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) { | 757 | if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) |
797 | nfs_end_page_writeback(req->wb_page); | ||
798 | nfs_inode_remove_request(req); | 758 | nfs_inode_remove_request(req); |
799 | } else | ||
800 | nfs_end_page_writeback(req->wb_page); | ||
801 | nfs_clear_page_tag_locked(req); | 759 | nfs_clear_page_tag_locked(req); |
760 | nfs_end_page_writeback(page); | ||
802 | } | 761 | } |
803 | 762 | ||
804 | static int flush_task_priority(int how) | 763 | static int flush_task_priority(int how) |
@@ -822,7 +781,6 @@ static int nfs_write_rpcsetup(struct nfs_page *req, | |||
822 | int how) | 781 | int how) |
823 | { | 782 | { |
824 | struct inode *inode = req->wb_context->path.dentry->d_inode; | 783 | struct inode *inode = req->wb_context->path.dentry->d_inode; |
825 | int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC; | ||
826 | int priority = flush_task_priority(how); | 784 | int priority = flush_task_priority(how); |
827 | struct rpc_task *task; | 785 | struct rpc_task *task; |
828 | struct rpc_message msg = { | 786 | struct rpc_message msg = { |
@@ -837,9 +795,10 @@ static int nfs_write_rpcsetup(struct nfs_page *req, | |||
837 | .callback_ops = call_ops, | 795 | .callback_ops = call_ops, |
838 | .callback_data = data, | 796 | .callback_data = data, |
839 | .workqueue = nfsiod_workqueue, | 797 | .workqueue = nfsiod_workqueue, |
840 | .flags = flags, | 798 | .flags = RPC_TASK_ASYNC, |
841 | .priority = priority, | 799 | .priority = priority, |
842 | }; | 800 | }; |
801 | int ret = 0; | ||
843 | 802 | ||
844 | /* Set up the RPC argument and reply structs | 803 | /* Set up the RPC argument and reply structs |
845 | * NB: take care not to mess about with data->commit et al. */ | 804 | * NB: take care not to mess about with data->commit et al. */ |
@@ -878,10 +837,18 @@ static int nfs_write_rpcsetup(struct nfs_page *req, | |||
878 | (unsigned long long)data->args.offset); | 837 | (unsigned long long)data->args.offset); |
879 | 838 | ||
880 | task = rpc_run_task(&task_setup_data); | 839 | task = rpc_run_task(&task_setup_data); |
881 | if (IS_ERR(task)) | 840 | if (IS_ERR(task)) { |
882 | return PTR_ERR(task); | 841 | ret = PTR_ERR(task); |
842 | goto out; | ||
843 | } | ||
844 | if (how & FLUSH_SYNC) { | ||
845 | ret = rpc_wait_for_completion_task(task); | ||
846 | if (ret == 0) | ||
847 | ret = task->tk_status; | ||
848 | } | ||
883 | rpc_put_task(task); | 849 | rpc_put_task(task); |
884 | return 0; | 850 | out: |
851 | return ret; | ||
885 | } | 852 | } |
886 | 853 | ||
887 | /* If a nfs_flush_* function fails, it should remove reqs from @head and | 854 | /* If a nfs_flush_* function fails, it should remove reqs from @head and |
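With the hunk above, the write RPC is always dispatched as an asynchronous task and FLUSH_SYNC is implemented by waiting for that task to complete and propagating its tk_status, instead of issuing a synchronous call. A userspace C model of that shape follows; run_task(), wait_for_task() and put_task() are stand-ins for rpc_run_task(), rpc_wait_for_completion_task() and rpc_put_task().

#include <stdio.h>
#include <stdlib.h>

#define FLUSH_SYNC 1

struct task { int status; };

static struct task *run_task(void)
{
	struct task *t = malloc(sizeof(*t));

	if (t != NULL)
		t->status = 0;	/* pretend the RPC succeeded */
	return t;
}

static int wait_for_task(struct task *t)
{
	(void)t;
	return 0;	/* 0 == wait completed, as with the kernel helper */
}

static void put_task(struct task *t)
{
	free(t);
}

static int write_rpcsetup(int how)
{
	struct task *task;
	int ret = 0;

	task = run_task();
	if (task == NULL)
		return -1;
	if (how & FLUSH_SYNC) {
		ret = wait_for_task(task);
		if (ret == 0)
			ret = task->status;	/* propagate tk_status */
	}
	put_task(task);
	return ret;
}

int main(void)
{
	printf("sync write returned %d\n", write_rpcsetup(FLUSH_SYNC));
	return 0;
}
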
@@ -890,9 +857,11 @@ static int nfs_write_rpcsetup(struct nfs_page *req, | |||
890 | */ | 857 | */ |
891 | static void nfs_redirty_request(struct nfs_page *req) | 858 | static void nfs_redirty_request(struct nfs_page *req) |
892 | { | 859 | { |
860 | struct page *page = req->wb_page; | ||
861 | |||
893 | nfs_mark_request_dirty(req); | 862 | nfs_mark_request_dirty(req); |
894 | nfs_end_page_writeback(req->wb_page); | ||
895 | nfs_clear_page_tag_locked(req); | 863 | nfs_clear_page_tag_locked(req); |
864 | nfs_end_page_writeback(page); | ||
896 | } | 865 | } |
897 | 866 | ||
898 | /* | 867 | /* |
@@ -1127,16 +1096,15 @@ static void nfs_writeback_release_full(void *calldata) | |||
1127 | if (nfs_write_need_commit(data)) { | 1096 | if (nfs_write_need_commit(data)) { |
1128 | memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); | 1097 | memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); |
1129 | nfs_mark_request_commit(req); | 1098 | nfs_mark_request_commit(req); |
1130 | nfs_end_page_writeback(page); | ||
1131 | dprintk(" marked for commit\n"); | 1099 | dprintk(" marked for commit\n"); |
1132 | goto next; | 1100 | goto next; |
1133 | } | 1101 | } |
1134 | dprintk(" OK\n"); | 1102 | dprintk(" OK\n"); |
1135 | remove_request: | 1103 | remove_request: |
1136 | nfs_end_page_writeback(page); | ||
1137 | nfs_inode_remove_request(req); | 1104 | nfs_inode_remove_request(req); |
1138 | next: | 1105 | next: |
1139 | nfs_clear_page_tag_locked(req); | 1106 | nfs_clear_page_tag_locked(req); |
1107 | nfs_end_page_writeback(page); | ||
1140 | } | 1108 | } |
1141 | nfs_writedata_release(calldata); | 1109 | nfs_writedata_release(calldata); |
1142 | } | 1110 | } |
@@ -1233,7 +1201,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data) | |||
1233 | 1201 | ||
1234 | 1202 | ||
1235 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) | 1203 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) |
1236 | void nfs_commitdata_release(void *data) | 1204 | static void nfs_commitdata_release(void *data) |
1237 | { | 1205 | { |
1238 | struct nfs_write_data *wdata = data; | 1206 | struct nfs_write_data *wdata = data; |
1239 | 1207 | ||
@@ -1250,7 +1218,6 @@ static int nfs_commit_rpcsetup(struct list_head *head, | |||
1250 | { | 1218 | { |
1251 | struct nfs_page *first = nfs_list_entry(head->next); | 1219 | struct nfs_page *first = nfs_list_entry(head->next); |
1252 | struct inode *inode = first->wb_context->path.dentry->d_inode; | 1220 | struct inode *inode = first->wb_context->path.dentry->d_inode; |
1253 | int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC; | ||
1254 | int priority = flush_task_priority(how); | 1221 | int priority = flush_task_priority(how); |
1255 | struct rpc_task *task; | 1222 | struct rpc_task *task; |
1256 | struct rpc_message msg = { | 1223 | struct rpc_message msg = { |
@@ -1265,7 +1232,7 @@ static int nfs_commit_rpcsetup(struct list_head *head, | |||
1265 | .callback_ops = &nfs_commit_ops, | 1232 | .callback_ops = &nfs_commit_ops, |
1266 | .callback_data = data, | 1233 | .callback_data = data, |
1267 | .workqueue = nfsiod_workqueue, | 1234 | .workqueue = nfsiod_workqueue, |
1268 | .flags = flags, | 1235 | .flags = RPC_TASK_ASYNC, |
1269 | .priority = priority, | 1236 | .priority = priority, |
1270 | }; | 1237 | }; |
1271 | 1238 | ||
@@ -1295,6 +1262,8 @@ static int nfs_commit_rpcsetup(struct list_head *head, | |||
1295 | task = rpc_run_task(&task_setup_data); | 1262 | task = rpc_run_task(&task_setup_data); |
1296 | if (IS_ERR(task)) | 1263 | if (IS_ERR(task)) |
1297 | return PTR_ERR(task); | 1264 | return PTR_ERR(task); |
1265 | if (how & FLUSH_SYNC) | ||
1266 | rpc_wait_for_completion_task(task); | ||
1298 | rpc_put_task(task); | 1267 | rpc_put_task(task); |
1299 | return 0; | 1268 | return 0; |
1300 | } | 1269 | } |
@@ -1391,7 +1360,7 @@ static const struct rpc_call_ops nfs_commit_ops = { | |||
1391 | .rpc_release = nfs_commit_release, | 1360 | .rpc_release = nfs_commit_release, |
1392 | }; | 1361 | }; |
1393 | 1362 | ||
1394 | int nfs_commit_inode(struct inode *inode, int how) | 1363 | static int nfs_commit_inode(struct inode *inode, int how) |
1395 | { | 1364 | { |
1396 | LIST_HEAD(head); | 1365 | LIST_HEAD(head); |
1397 | int res; | 1366 | int res; |
@@ -1406,92 +1375,51 @@ int nfs_commit_inode(struct inode *inode, int how) | |||
1406 | } | 1375 | } |
1407 | return res; | 1376 | return res; |
1408 | } | 1377 | } |
1409 | #else | ||
1410 | static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how) | ||
1411 | { | ||
1412 | return 0; | ||
1413 | } | ||
1414 | #endif | ||
1415 | 1378 | ||
1416 | long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how) | 1379 | static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc) |
1417 | { | 1380 | { |
1418 | struct inode *inode = mapping->host; | 1381 | struct nfs_inode *nfsi = NFS_I(inode); |
1419 | pgoff_t idx_start, idx_end; | 1382 | int flags = FLUSH_SYNC; |
1420 | unsigned int npages = 0; | 1383 | int ret = 0; |
1421 | LIST_HEAD(head); | 1384 | |
1422 | int nocommit = how & FLUSH_NOCOMMIT; | 1385 | /* Don't commit yet if this is a non-blocking flush and there are |
1423 | long pages, ret; | 1386 | * lots of outstanding writes for this mapping. |
1424 | 1387 | */ | |
1425 | /* FIXME */ | 1388 | if (wbc->sync_mode == WB_SYNC_NONE && |
1426 | if (wbc->range_cyclic) | 1389 | nfsi->ncommit <= (nfsi->npages >> 1)) |
1427 | idx_start = 0; | 1390 | goto out_mark_dirty; |
1428 | else { | 1391 | |
1429 | idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; | 1392 | if (wbc->nonblocking || wbc->for_background) |
1430 | idx_end = wbc->range_end >> PAGE_CACHE_SHIFT; | 1393 | flags = 0; |
1431 | if (idx_end > idx_start) { | 1394 | ret = nfs_commit_inode(inode, flags); |
1432 | pgoff_t l_npages = 1 + idx_end - idx_start; | 1395 | if (ret >= 0) { |
1433 | npages = l_npages; | 1396 | if (wbc->sync_mode == WB_SYNC_NONE) { |
1434 | if (sizeof(npages) != sizeof(l_npages) && | 1397 | if (ret < wbc->nr_to_write) |
1435 | (pgoff_t)npages != l_npages) | 1398 | wbc->nr_to_write -= ret; |
1436 | npages = 0; | 1399 | else |
1400 | wbc->nr_to_write = 0; | ||
1437 | } | 1401 | } |
1402 | return 0; | ||
1438 | } | 1403 | } |
1439 | how &= ~FLUSH_NOCOMMIT; | 1404 | out_mark_dirty: |
1440 | spin_lock(&inode->i_lock); | 1405 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); |
1441 | do { | ||
1442 | ret = nfs_wait_on_requests_locked(inode, idx_start, npages); | ||
1443 | if (ret != 0) | ||
1444 | continue; | ||
1445 | if (nocommit) | ||
1446 | break; | ||
1447 | pages = nfs_scan_commit(inode, &head, idx_start, npages); | ||
1448 | if (pages == 0) | ||
1449 | break; | ||
1450 | if (how & FLUSH_INVALIDATE) { | ||
1451 | spin_unlock(&inode->i_lock); | ||
1452 | nfs_cancel_commit_list(&head); | ||
1453 | ret = pages; | ||
1454 | spin_lock(&inode->i_lock); | ||
1455 | continue; | ||
1456 | } | ||
1457 | pages += nfs_scan_commit(inode, &head, 0, 0); | ||
1458 | spin_unlock(&inode->i_lock); | ||
1459 | ret = nfs_commit_list(inode, &head, how); | ||
1460 | spin_lock(&inode->i_lock); | ||
1461 | |||
1462 | } while (ret >= 0); | ||
1463 | spin_unlock(&inode->i_lock); | ||
1464 | return ret; | 1406 | return ret; |
1465 | } | 1407 | } |
1466 | 1408 | #else | |
1467 | static int __nfs_write_mapping(struct address_space *mapping, struct writeback_control *wbc, int how) | 1409 | static int nfs_commit_inode(struct inode *inode, int how) |
1468 | { | 1410 | { |
1469 | int ret; | ||
1470 | |||
1471 | ret = nfs_writepages(mapping, wbc); | ||
1472 | if (ret < 0) | ||
1473 | goto out; | ||
1474 | ret = nfs_sync_mapping_wait(mapping, wbc, how); | ||
1475 | if (ret < 0) | ||
1476 | goto out; | ||
1477 | return 0; | 1411 | return 0; |
1478 | out: | ||
1479 | __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); | ||
1480 | return ret; | ||
1481 | } | 1412 | } |
1482 | 1413 | ||
1483 | /* Two pass sync: first using WB_SYNC_NONE, then WB_SYNC_ALL */ | 1414 | static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc) |
1484 | static int nfs_write_mapping(struct address_space *mapping, int how) | ||
1485 | { | 1415 | { |
1486 | struct writeback_control wbc = { | 1416 | return 0; |
1487 | .bdi = mapping->backing_dev_info, | 1417 | } |
1488 | .sync_mode = WB_SYNC_ALL, | 1418 | #endif |
1489 | .nr_to_write = LONG_MAX, | ||
1490 | .range_start = 0, | ||
1491 | .range_end = LLONG_MAX, | ||
1492 | }; | ||
1493 | 1419 | ||
1494 | return __nfs_write_mapping(mapping, &wbc, how); | 1420 | int nfs_write_inode(struct inode *inode, struct writeback_control *wbc) |
1421 | { | ||
1422 | return nfs_commit_unstable_pages(inode, wbc); | ||
1495 | } | 1423 | } |
1496 | 1424 | ||
1497 | /* | 1425 | /* |
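nfs_commit_unstable_pages() above encodes a simple policy: a WB_SYNC_NONE flush skips the COMMIT while no more than half of the inode's requests are commit-pending, and a non-blocking or background flush that does commit drops FLUSH_SYNC so it does not wait. A userspace C model of that decision follows; the wb_control structure and the return values are simplified stand-ins for struct writeback_control and nfs_commit_inode().

#include <stdbool.h>
#include <stdio.h>

#define FLUSH_SYNC 1

struct wb_control {
	bool sync_all;		/* WB_SYNC_ALL vs WB_SYNC_NONE */
	bool nonblocking;
};

static int commit_inode(int flags)
{
	(void)flags;	/* the real code passes FLUSH_SYNC or 0 through */
	return 4;	/* pretend four requests were committed */
}

static int commit_unstable_pages(unsigned long npages, unsigned long ncommit,
				 const struct wb_control *wbc)
{
	int flags = FLUSH_SYNC;

	/* not worth a COMMIT yet: too few unstable pages on this inode */
	if (!wbc->sync_all && ncommit <= (npages >> 1))
		return 0;

	if (wbc->nonblocking)
		flags = 0;	/* fire the COMMIT but do not wait for it */
	return commit_inode(flags);
}

int main(void)
{
	struct wb_control bg = { .sync_all = false, .nonblocking = true };

	printf("committed %d (few unstable)\n",
	       commit_unstable_pages(100, 10, &bg));
	printf("committed %d (mostly unstable)\n",
	       commit_unstable_pages(100, 90, &bg));
	return 0;
}
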
@@ -1499,37 +1427,26 @@ static int nfs_write_mapping(struct address_space *mapping, int how) | |||
1499 | */ | 1427 | */ |
1500 | int nfs_wb_all(struct inode *inode) | 1428 | int nfs_wb_all(struct inode *inode) |
1501 | { | 1429 | { |
1502 | return nfs_write_mapping(inode->i_mapping, 0); | 1430 | struct writeback_control wbc = { |
1503 | } | 1431 | .sync_mode = WB_SYNC_ALL, |
1432 | .nr_to_write = LONG_MAX, | ||
1433 | .range_start = 0, | ||
1434 | .range_end = LLONG_MAX, | ||
1435 | }; | ||
1504 | 1436 | ||
1505 | int nfs_wb_nocommit(struct inode *inode) | 1437 | return sync_inode(inode, &wbc); |
1506 | { | ||
1507 | return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT); | ||
1508 | } | 1438 | } |
1509 | 1439 | ||
1510 | int nfs_wb_page_cancel(struct inode *inode, struct page *page) | 1440 | int nfs_wb_page_cancel(struct inode *inode, struct page *page) |
1511 | { | 1441 | { |
1512 | struct nfs_page *req; | 1442 | struct nfs_page *req; |
1513 | loff_t range_start = page_offset(page); | ||
1514 | loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); | ||
1515 | struct writeback_control wbc = { | ||
1516 | .bdi = page->mapping->backing_dev_info, | ||
1517 | .sync_mode = WB_SYNC_ALL, | ||
1518 | .nr_to_write = LONG_MAX, | ||
1519 | .range_start = range_start, | ||
1520 | .range_end = range_end, | ||
1521 | }; | ||
1522 | int ret = 0; | 1443 | int ret = 0; |
1523 | 1444 | ||
1524 | BUG_ON(!PageLocked(page)); | 1445 | BUG_ON(!PageLocked(page)); |
1525 | for (;;) { | 1446 | for (;;) { |
1526 | req = nfs_page_find_request(page); | 1447 | req = nfs_page_find_request(page); |
1527 | if (req == NULL) | 1448 | if (req == NULL) |
1528 | goto out; | ||
1529 | if (test_bit(PG_CLEAN, &req->wb_flags)) { | ||
1530 | nfs_release_request(req); | ||
1531 | break; | 1449 | break; |
1532 | } | ||
1533 | if (nfs_lock_request_dontget(req)) { | 1450 | if (nfs_lock_request_dontget(req)) { |
1534 | nfs_inode_remove_request(req); | 1451 | nfs_inode_remove_request(req); |
1535 | /* | 1452 | /* |
@@ -1541,55 +1458,56 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page) | |||
1541 | break; | 1458 | break; |
1542 | } | 1459 | } |
1543 | ret = nfs_wait_on_request(req); | 1460 | ret = nfs_wait_on_request(req); |
1461 | nfs_release_request(req); | ||
1544 | if (ret < 0) | 1462 | if (ret < 0) |
1545 | goto out; | 1463 | break; |
1546 | } | 1464 | } |
1547 | if (!PagePrivate(page)) | ||
1548 | return 0; | ||
1549 | ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE); | ||
1550 | out: | ||
1551 | return ret; | 1465 | return ret; |
1552 | } | 1466 | } |
1553 | 1467 | ||
1554 | static int nfs_wb_page_priority(struct inode *inode, struct page *page, | 1468 | /* |
1555 | int how) | 1469 | * Write back all requests on one page - we do this before reading it. |
1470 | */ | ||
1471 | int nfs_wb_page(struct inode *inode, struct page *page) | ||
1556 | { | 1472 | { |
1557 | loff_t range_start = page_offset(page); | 1473 | loff_t range_start = page_offset(page); |
1558 | loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); | 1474 | loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); |
1559 | struct writeback_control wbc = { | 1475 | struct writeback_control wbc = { |
1560 | .bdi = page->mapping->backing_dev_info, | ||
1561 | .sync_mode = WB_SYNC_ALL, | 1476 | .sync_mode = WB_SYNC_ALL, |
1562 | .nr_to_write = LONG_MAX, | 1477 | .nr_to_write = 0, |
1563 | .range_start = range_start, | 1478 | .range_start = range_start, |
1564 | .range_end = range_end, | 1479 | .range_end = range_end, |
1565 | }; | 1480 | }; |
1481 | struct nfs_page *req; | ||
1482 | int need_commit; | ||
1566 | int ret; | 1483 | int ret; |
1567 | 1484 | ||
1568 | do { | 1485 | while(PagePrivate(page)) { |
1569 | if (clear_page_dirty_for_io(page)) { | 1486 | if (clear_page_dirty_for_io(page)) { |
1570 | ret = nfs_writepage_locked(page, &wbc); | 1487 | ret = nfs_writepage_locked(page, &wbc); |
1571 | if (ret < 0) | 1488 | if (ret < 0) |
1572 | goto out_error; | 1489 | goto out_error; |
1573 | } else if (!PagePrivate(page)) | 1490 | } |
1491 | req = nfs_find_and_lock_request(page); | ||
1492 | if (!req) | ||
1574 | break; | 1493 | break; |
1575 | ret = nfs_sync_mapping_wait(page->mapping, &wbc, how); | 1494 | if (IS_ERR(req)) { |
1576 | if (ret < 0) | 1495 | ret = PTR_ERR(req); |
1577 | goto out_error; | 1496 | goto out_error; |
1578 | } while (PagePrivate(page)); | 1497 | } |
1498 | need_commit = test_bit(PG_CLEAN, &req->wb_flags); | ||
1499 | nfs_clear_page_tag_locked(req); | ||
1500 | if (need_commit) { | ||
1501 | ret = nfs_commit_inode(inode, FLUSH_SYNC); | ||
1502 | if (ret < 0) | ||
1503 | goto out_error; | ||
1504 | } | ||
1505 | } | ||
1579 | return 0; | 1506 | return 0; |
1580 | out_error: | 1507 | out_error: |
1581 | __mark_inode_dirty(inode, I_DIRTY_PAGES); | ||
1582 | return ret; | 1508 | return ret; |
1583 | } | 1509 | } |
1584 | 1510 | ||
1585 | /* | ||
1586 | * Write back all requests on one page - we do this before reading it. | ||
1587 | */ | ||
1588 | int nfs_wb_page(struct inode *inode, struct page* page) | ||
1589 | { | ||
1590 | return nfs_wb_page_priority(inode, page, FLUSH_STABLE); | ||
1591 | } | ||
1592 | |||
1593 | #ifdef CONFIG_MIGRATION | 1511 | #ifdef CONFIG_MIGRATION |
1594 | int nfs_migrate_page(struct address_space *mapping, struct page *newpage, | 1512 | int nfs_migrate_page(struct address_space *mapping, struct page *newpage, |
1595 | struct page *page) | 1513 | struct page *page) |
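The rewritten nfs_wb_page() above loops while the page still carries a request: it writes the page out if it is dirty, and if the remaining request is PG_CLEAN (written but awaiting COMMIT) it forces a synchronous commit before looping again. The userspace C model below mirrors that control flow only; page_state and its helpers are illustrative stand-ins, and the real code additionally locks and releases the nfs_page it finds.

#include <stdbool.h>
#include <stdio.h>

struct page_state {
	bool has_request;	/* PagePrivate() */
	bool dirty;		/* still needs a WRITE */
	bool clean_uncommitted;	/* PG_CLEAN: written, awaiting COMMIT */
};

static int writepage(struct page_state *p)
{
	p->dirty = false;
	p->clean_uncommitted = true;	/* unstable write finished */
	return 0;
}

static int commit_sync(struct page_state *p)
{
	p->clean_uncommitted = false;
	p->has_request = false;		/* request removed after commit */
	return 0;
}

static int wb_page(struct page_state *p)
{
	while (p->has_request) {
		/* flush any dirty data with an unstable WRITE first */
		if (p->dirty && writepage(p) < 0)
			return -1;
		if (!p->clean_uncommitted)
			break;	/* real code: no lockable request left */
		/* written but uncommitted: force a synchronous COMMIT */
		if (commit_sync(p) < 0)
			return -1;
	}
	return 0;
}

int main(void)
{
	struct page_state p = { true, true, false };

	printf("nfs_wb_page model returned %d\n", wb_page(&p));
	return 0;
}
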
@@ -1597,8 +1515,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage, | |||
1597 | struct nfs_page *req; | 1515 | struct nfs_page *req; |
1598 | int ret; | 1516 | int ret; |
1599 | 1517 | ||
1600 | if (PageFsCache(page)) | 1518 | nfs_fscache_release_page(page, GFP_KERNEL); |
1601 | nfs_fscache_release_page(page, GFP_KERNEL); | ||
1602 | 1519 | ||
1603 | req = nfs_find_and_lock_request(page); | 1520 | req = nfs_find_and_lock_request(page); |
1604 | ret = PTR_ERR(req); | 1521 | ret = PTR_ERR(req); |