diff options
author | Trond Myklebust <Trond.Myklebust@netapp.com> | 2013-04-08 21:38:12 -0400 |
---|---|---|
committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2013-04-08 22:12:33 -0400 |
commit | 577b42327d707fbe7166aad6902c2eeee6a65015 (patch) | |
tree | 3c2c925ede3f9cd13511e0b9913974200b99719e /fs/nfs/pagelist.c | |
parent | bc7a05ca5156915a5aada26d64ee035fdd5e5d25 (diff) |
NFS: Add functionality to allow waiting on all outstanding reads to complete
This will later allow NFS locking code to wait for readahead to complete
before releasing byte range locks.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'fs/nfs/pagelist.c')
-rw-r--r-- | fs/nfs/pagelist.c | 51 |
1 file changed, 51 insertions, 0 deletions
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 7f0933086b36..29cfb7ade121 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c | |||
@@ -84,6 +84,55 @@ nfs_page_free(struct nfs_page *p) | |||
84 | kmem_cache_free(nfs_page_cachep, p); | 84 | kmem_cache_free(nfs_page_cachep, p); |
85 | } | 85 | } |
86 | 86 | ||
/*
 * nfs_iocounter_inc - record the start of an I/O against @c
 *
 * Bumps c->io_count; each increment must be balanced by a matching
 * nfs_iocounter_dec() so that nfs_iocounter_wait() can complete.
 */
static void
nfs_iocounter_inc(struct nfs_io_counter *c)
{
	atomic_inc(&c->io_count);
}
92 | |||
/*
 * nfs_iocounter_dec - record the completion of an I/O counted in @c
 *
 * When the last outstanding I/O completes, clear NFS_IO_INPROGRESS and
 * wake anyone sleeping in __nfs_iocounter_wait().  The barrier after
 * clear_bit() orders the flag update before wake_up_bit()'s waitqueue
 * check, pairing with the barriers on the waiter side.
 */
static void
nfs_iocounter_dec(struct nfs_io_counter *c)
{
	if (atomic_dec_and_test(&c->io_count)) {
		clear_bit(NFS_IO_INPROGRESS, &c->flags);
		smp_mb__after_clear_bit();
		wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
	}
}
102 | |||
103 | static int | ||
104 | __nfs_iocounter_wait(struct nfs_io_counter *c) | ||
105 | { | ||
106 | wait_queue_head_t *wq = bit_waitqueue(&c->flags, NFS_IO_INPROGRESS); | ||
107 | DEFINE_WAIT_BIT(q, &c->flags, NFS_IO_INPROGRESS); | ||
108 | int ret = 0; | ||
109 | |||
110 | do { | ||
111 | prepare_to_wait(wq, &q.wait, TASK_KILLABLE); | ||
112 | set_bit(NFS_IO_INPROGRESS, &c->flags); | ||
113 | if (atomic_read(&c->io_count) == 0) | ||
114 | break; | ||
115 | ret = nfs_wait_bit_killable(&c->flags); | ||
116 | } while (atomic_read(&c->io_count) != 0); | ||
117 | finish_wait(wq, &q.wait); | ||
118 | return ret; | ||
119 | } | ||
120 | |||
121 | /** | ||
122 | * nfs_iocounter_wait - wait for i/o to complete | ||
123 | * @c: nfs_io_counter to use | ||
124 | * | ||
125 | * returns -ERESTARTSYS if interrupted by a fatal signal. | ||
126 | * Otherwise returns 0 once the io_count hits 0. | ||
127 | */ | ||
128 | int | ||
129 | nfs_iocounter_wait(struct nfs_io_counter *c) | ||
130 | { | ||
131 | if (atomic_read(&c->io_count) == 0) | ||
132 | return 0; | ||
133 | return __nfs_iocounter_wait(c); | ||
134 | } | ||
135 | |||
87 | /** | 136 | /** |
88 | * nfs_create_request - Create an NFS read/write request. | 137 | * nfs_create_request - Create an NFS read/write request. |
89 | * @ctx: open context to use | 138 | * @ctx: open context to use |
@@ -118,6 +167,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode, | |||
118 | return ERR_CAST(l_ctx); | 167 | return ERR_CAST(l_ctx); |
119 | } | 168 | } |
120 | req->wb_lock_context = l_ctx; | 169 | req->wb_lock_context = l_ctx; |
170 | nfs_iocounter_inc(&l_ctx->io_count); | ||
121 | 171 | ||
122 | /* Initialize the request struct. Initially, we assume a | 172 | /* Initialize the request struct. Initially, we assume a |
123 | * long write-back delay. This will be adjusted in | 173 | * long write-back delay. This will be adjusted in |
@@ -177,6 +227,7 @@ static void nfs_clear_request(struct nfs_page *req) | |||
177 | req->wb_page = NULL; | 227 | req->wb_page = NULL; |
178 | } | 228 | } |
179 | if (l_ctx != NULL) { | 229 | if (l_ctx != NULL) { |
230 | nfs_iocounter_dec(&l_ctx->io_count); | ||
180 | nfs_put_lock_context(l_ctx); | 231 | nfs_put_lock_context(l_ctx); |
181 | req->wb_lock_context = NULL; | 232 | req->wb_lock_context = NULL; |
182 | } | 233 | } |