aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChuck Lever <cel@netapp.com>2006-06-20 12:55:45 -0400
committerTrond Myklebust <Trond.Myklebust@netapp.com>2006-06-24 13:11:38 -0400
commitfedb595c66e1fbd5acafe0d43b7e95c13c936d61 (patch)
treee865ecc2399c09b7164c2f2c13a2c5c10c8d4de1
parentb1c5921c5b715c207d7fe77cd7aaafbb322f09f5 (diff)
NFS: "open code" the NFS direct write rescheduler
An NFSv3/v4 client must reschedule on-the-wire writes if the writes are UNSTABLE, and the server reboots before the client can complete a subsequent COMMIT request. To support direct asynchronous scatter-gather writes, the write rescheduler in fs/nfs/direct.c must not depend on the I/O parameters in the controlling nfs_direct_req structure. iovecs can be somewhat arbitrarily complex, so there could be an unbounded amount of information to save for a rarely encountered requirement. Refactor the direct write rescheduler so it uses information from each nfs_write_data structure to reschedule writes, instead of caching that information in the controlling nfs_direct_req structure. Signed-off-by: Chuck Lever <cel@netapp.com> Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
-rw-r--r--fs/nfs/direct.c51
1 file changed, 43 insertions, 8 deletions
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index d78c61a41ec3..7101405713e1 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -94,8 +94,8 @@ struct nfs_direct_req {
94 struct nfs_writeverf verf; /* unstable write verifier */ 94 struct nfs_writeverf verf; /* unstable write verifier */
95}; 95};
96 96
97static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync);
98static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode); 97static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
98static const struct rpc_call_ops nfs_write_direct_ops;
99 99
100static inline void get_dreq(struct nfs_direct_req *dreq) 100static inline void get_dreq(struct nfs_direct_req *dreq)
101{ 101{
@@ -435,14 +435,51 @@ static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
435#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 435#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
436static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq) 436static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
437{ 437{
438 struct list_head *pos; 438 struct inode *inode = dreq->inode;
439 struct list_head *p;
440 struct nfs_write_data *data;
439 441
440 list_splice_init(&dreq->rewrite_list, &dreq->list);
441 list_for_each(pos, &dreq->list)
442 get_dreq(dreq);
443 dreq->count = 0; 442 dreq->count = 0;
443 get_dreq(dreq);
444
445 list_for_each(p, &dreq->rewrite_list) {
446 data = list_entry(p, struct nfs_write_data, pages);
447
448 get_dreq(dreq);
449
450 /*
451 * Reset data->res.
452 */
453 nfs_fattr_init(&data->fattr);
454 data->res.count = data->args.count;
455 memset(&data->verf, 0, sizeof(data->verf));
456
457 /*
458 * Reuse data->task; data->args should not have changed
459 * since the original request was sent.
460 */
461 rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
462 &nfs_write_direct_ops, data);
463 NFS_PROTO(inode)->write_setup(data, FLUSH_STABLE);
464
465 data->task.tk_priority = RPC_PRIORITY_NORMAL;
466 data->task.tk_cookie = (unsigned long) inode;
467
468 /*
469 * We're called via an RPC callback, so BKL is already held.
470 */
471 rpc_execute(&data->task);
472
473 dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
474 data->task.tk_pid,
475 inode->i_sb->s_id,
476 (long long)NFS_FILEID(inode),
477 data->args.count,
478 (unsigned long long)data->args.offset);
479 }
444 480
445 nfs_direct_write_schedule(dreq, FLUSH_STABLE); 481 if (put_dreq(dreq))
482 nfs_direct_write_complete(dreq, inode);
446} 483}
447 484
448static void nfs_direct_commit_result(struct rpc_task *task, void *calldata) 485static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
@@ -612,8 +649,6 @@ static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
612 } 649 }
613 } 650 }
614 } 651 }
615 /* In case we have to resend */
616 data->args.stable = NFS_FILE_SYNC;
617 652
618 spin_unlock(&dreq->lock); 653 spin_unlock(&dreq->lock);
619} 654}