author    Chuck Lever <cel@netapp.com>  2006-06-20 12:55:19 -0400
committer Trond Myklebust <Trond.Myklebust@netapp.com>  2006-06-24 13:11:38 -0400
commit    b1c5921c5b715c207d7fe77cd7aaafbb322f09f5 (patch)
tree      c8a56c56740efa728b7d3b935b6cb09330a4817f /fs
parent    816724e65c72a90a44fbad0ef0b59b186c85fa90 (diff)
NFS: Separate functions for counting outstanding NFS direct I/Os
Factor out the logic that increments and decrements the outstanding I/O count. This will be a commonly used bit of code in upcoming patches.

Also make this an atomic_t again, since it will very often be manipulated outside the dreq->lock spin lock.

Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
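For illustration, the reference-counting pattern these helpers implement can be sketched in user space as follows. This is only a minimal sketch: demo_req, get_req and put_req are hypothetical names, and C11 atomics stand in for the kernel's atomic_t and atomic_dec_and_test(). Each scheduled I/O takes a reference, each completion drops one, and only the caller that drops the count to zero runs the completion step, so the counter needs no spin lock:

#include <stdatomic.h>
#include <stdio.h>

struct demo_req {
	atomic_int io_count;		/* i/os we're waiting for */
};

static inline void get_req(struct demo_req *req)
{
	atomic_fetch_add(&req->io_count, 1);
}

static inline int put_req(struct demo_req *req)
{
	/* non-zero only for the caller that drops the count to zero */
	return atomic_fetch_sub(&req->io_count, 1) == 1;
}

int main(void)
{
	struct demo_req req = { .io_count = 0 };
	int i;

	for (i = 0; i < 4; i++)		/* schedule four "I/Os" */
		get_req(&req);

	for (i = 0; i < 4; i++)		/* complete them */
		if (put_req(&req))
			printf("last put: run the completion step here\n");

	return 0;
}

As in the patch, the decrement-and-test semantics guarantee that exactly one completion path observes the count reaching zero.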
Diffstat (limited to 'fs')
-rw-r--r--  fs/nfs/direct.c  39
1 file changed, 20 insertions(+), 19 deletions(-)
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 402005c35ab3..d78c61a41ec3 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -80,8 +80,8 @@ struct nfs_direct_req {
 	unsigned int		npages;		/* count of pages */
 
 	/* completion state */
+	atomic_t		io_count;	/* i/os we're waiting for */
 	spinlock_t		lock;		/* protect completion state */
-	int			outstanding;	/* i/os we're waiting for */
 	ssize_t			count,		/* bytes actually processed */
 				error;		/* any reported error */
 	struct completion	completion;	/* wait for i/o completion */
@@ -97,6 +97,16 @@ struct nfs_direct_req {
 static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync);
 static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
 
+static inline void get_dreq(struct nfs_direct_req *dreq)
+{
+	atomic_inc(&dreq->io_count);
+}
+
+static inline int put_dreq(struct nfs_direct_req *dreq)
+{
+	return atomic_dec_and_test(&dreq->io_count);
+}
+
 /**
  * nfs_direct_IO - NFS address space operation for direct I/O
  * @rw: direction (read or write)
@@ -180,7 +190,7 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 	dreq->iocb = NULL;
 	dreq->ctx = NULL;
 	spin_lock_init(&dreq->lock);
-	dreq->outstanding = 0;
+	atomic_set(&dreq->io_count, 0);
 	dreq->count = 0;
 	dreq->error = 0;
 	dreq->flags = 0;
@@ -278,7 +288,7 @@ static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
 		list_add(&data->pages, list);
 
 		data->req = (struct nfs_page *) dreq;
-		dreq->outstanding++;
+		get_dreq(dreq);
 		if (nbytes <= rsize)
 			break;
 		nbytes -= rsize;
@@ -302,13 +312,10 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 	else
 		dreq->error = task->tk_status;
 
-	if (--dreq->outstanding) {
-		spin_unlock(&dreq->lock);
-		return;
-	}
-
 	spin_unlock(&dreq->lock);
-	nfs_direct_complete(dreq);
+
+	if (put_dreq(dreq))
+		nfs_direct_complete(dreq);
 }
 
 static const struct rpc_call_ops nfs_read_direct_ops = {
@@ -432,7 +439,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 
 	list_splice_init(&dreq->rewrite_list, &dreq->list);
 	list_for_each(pos, &dreq->list)
-		dreq->outstanding++;
+		get_dreq(dreq);
 	dreq->count = 0;
 
 	nfs_direct_write_schedule(dreq, FLUSH_STABLE);
@@ -564,7 +571,7 @@ static struct nfs_direct_req *nfs_direct_write_alloc(size_t nbytes, size_t wsize
 		list_add(&data->pages, list);
 
 		data->req = (struct nfs_page *) dreq;
-		dreq->outstanding++;
+		get_dreq(dreq);
 		if (nbytes <= wsize)
 			break;
 		nbytes -= wsize;
@@ -620,14 +627,8 @@ static void nfs_direct_write_release(void *calldata)
 	struct nfs_write_data *data = calldata;
 	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
 
-	spin_lock(&dreq->lock);
-	if (--dreq->outstanding) {
-		spin_unlock(&dreq->lock);
-		return;
-	}
-	spin_unlock(&dreq->lock);
-
-	nfs_direct_write_complete(dreq, data->inode);
+	if (put_dreq(dreq))
+		nfs_direct_write_complete(dreq, data->inode);
 }
 
 static const struct rpc_call_ops nfs_write_direct_ops = {