Diffstat (limited to 'fs/nfs/pagelist.c')
-rw-r--r--  fs/nfs/pagelist.c | 142
1 file changed, 108 insertions(+), 34 deletions(-)
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 4f1ba723848d..d53857b148e2 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -107,11 +107,38 @@ void nfs_unlock_request(struct nfs_page *req)
         smp_mb__before_clear_bit();
         clear_bit(PG_BUSY, &req->wb_flags);
         smp_mb__after_clear_bit();
-        wake_up_all(&req->wb_context->waitq);
+        wake_up_bit(&req->wb_flags, PG_BUSY);
         nfs_release_request(req);
 }
 
 /**
+ * nfs_set_page_writeback_locked - Lock a request for writeback
+ * @req:
+ */
+int nfs_set_page_writeback_locked(struct nfs_page *req)
+{
+        struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
+
+        if (!nfs_lock_request(req))
+                return 0;
+        radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
+        return 1;
+}
+
+/**
+ * nfs_clear_page_writeback - Unlock request and wake up sleepers
+ */
+void nfs_clear_page_writeback(struct nfs_page *req)
+{
+        struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
+
+        spin_lock(&nfsi->req_lock);
+        radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
+        spin_unlock(&nfsi->req_lock);
+        nfs_unlock_request(req);
+}
+
+/**
  * nfs_clear_request - Free up all resources allocated to the request
  * @req:
  *
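For context on the pair of helpers added in this hunk: nfs_set_page_writeback_locked() takes the PG_BUSY lock via nfs_lock_request() and tags the request NFS_PAGE_TAG_WRITEBACK in the inode's radix tree, while nfs_clear_page_writeback() clears the tag under req_lock and then unlocks the request, waking any waiters. Below is a minimal illustrative sketch of how a writeback path might bracket a single request with the two helpers; it is not part of this patch, and the function name nfs_example_start_writeback() is invented.

/*
 * Illustrative sketch only (not from this patch): pairing the two new
 * helpers around one request. Only the helper calls mirror the patch.
 */
static int nfs_example_start_writeback(struct nfs_page *req)
{
        /* Takes PG_BUSY and sets NFS_PAGE_TAG_WRITEBACK in the radix tree */
        if (!nfs_set_page_writeback_locked(req))
                return -EBUSY;          /* request already locked by someone else */

        /* ... issue the WRITE RPC for req here ... */

        /* Clears the tag under req_lock, drops PG_BUSY, wakes nfs_wait_on_request() */
        nfs_clear_page_writeback(req);
        return 0;
}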
@@ -150,34 +177,15 @@ nfs_release_request(struct nfs_page *req)
         nfs_page_free(req);
 }
 
-/**
- * nfs_list_add_request - Insert a request into a sorted list
- * @req: request
- * @head: head of list into which to insert the request.
- *
- * Note that the wb_list is sorted by page index in order to facilitate
- * coalescing of requests.
- * We use an insertion sort that is optimized for the case of appended
- * writes.
- */
-void
-nfs_list_add_request(struct nfs_page *req, struct list_head *head)
+static int nfs_wait_bit_interruptible(void *word)
 {
-        struct list_head *pos;
+        int ret = 0;
 
-#ifdef NFS_PARANOIA
-        if (!list_empty(&req->wb_list)) {
-                printk(KERN_ERR "NFS: Add to list failed!\n");
-                BUG();
-        }
-#endif
-        list_for_each_prev(pos, head) {
-                struct nfs_page *p = nfs_list_entry(pos);
-                if (p->wb_index < req->wb_index)
-                        break;
-        }
-        list_add(&req->wb_list, pos);
-        req->wb_list_head = head;
+        if (signal_pending(current))
+                ret = -ERESTARTSYS;
+        else
+                schedule();
+        return ret;
 }
 
 /**
@@ -190,12 +198,22 @@ nfs_list_add_request(struct nfs_page *req, struct list_head *head)
 int
 nfs_wait_on_request(struct nfs_page *req)
 {
-        struct inode *inode = req->wb_context->dentry->d_inode;
-        struct rpc_clnt *clnt = NFS_CLIENT(inode);
-
-        if (!NFS_WBACK_BUSY(req))
-                return 0;
-        return nfs_wait_event(clnt, req->wb_context->waitq, !NFS_WBACK_BUSY(req));
+        struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->dentry->d_inode);
+        sigset_t oldmask;
+        int ret = 0;
+
+        if (!test_bit(PG_BUSY, &req->wb_flags))
+                goto out;
+        /*
+         * Note: the call to rpc_clnt_sigmask() suffices to ensure that we
+         *       are not interrupted if intr flag is not set
+         */
+        rpc_clnt_sigmask(clnt, &oldmask);
+        ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
+                        nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);
+        rpc_clnt_sigunmask(clnt, &oldmask);
+out:
+        return ret;
 }
 
 /**
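The rewritten nfs_wait_on_request() above sleeps directly on the PG_BUSY bit with out_of_line_wait_on_bit(), and nfs_unlock_request() (first hunk) wakes sleepers with wake_up_bit() instead of a per-context wait queue. The following is a minimal, self-contained sketch of that bit-wait hand-shake, assuming kernel context; struct example_item, the EXAMPLE_BUSY bit and the function names are invented, and only the wait/wake calls mirror the patch.

/* Invented example structure carrying a flag word with a busy bit. */
struct example_item {
        unsigned long flags;
};
#define EXAMPLE_BUSY    0

/* Action callback: same shape as nfs_wait_bit_interruptible() above. */
static int example_wait_action(void *word)
{
        if (signal_pending(current))
                return -ERESTARTSYS;    /* abort the wait on a signal */
        schedule();
        return 0;
}

static int example_lock(struct example_item *item)
{
        /* Returns 1 if we took the busy bit, 0 if it was already set. */
        return !test_and_set_bit(EXAMPLE_BUSY, &item->flags);
}

static int example_wait(struct example_item *item)
{
        /* Sleeps until EXAMPLE_BUSY is cleared, or a signal arrives. */
        return out_of_line_wait_on_bit(&item->flags, EXAMPLE_BUSY,
                        example_wait_action, TASK_INTERRUPTIBLE);
}

static void example_unlock(struct example_item *item)
{
        smp_mb__before_clear_bit();
        clear_bit(EXAMPLE_BUSY, &item->flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&item->flags, EXAMPLE_BUSY);        /* wakes example_wait() */
}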
@@ -243,6 +261,62 @@ nfs_coalesce_requests(struct list_head *head, struct list_head *dst,
         return npages;
 }
 
+#define NFS_SCAN_MAXENTRIES 16
+/**
+ * nfs_scan_lock_dirty - Scan the radix tree for dirty requests
+ * @nfsi: NFS inode
+ * @dst: Destination list
+ * @idx_start: lower bound of page->index to scan
+ * @npages: idx_start + npages sets the upper bound to scan.
+ *
+ * Moves elements from one of the inode request lists.
+ * If the number of requests is set to 0, the entire address_space
+ * starting at index idx_start, is scanned.
+ * The requests are *not* checked to ensure that they form a contiguous set.
+ * You must be holding the inode's req_lock when calling this function
+ */
+int
+nfs_scan_lock_dirty(struct nfs_inode *nfsi, struct list_head *dst,
+                unsigned long idx_start, unsigned int npages)
+{
+        struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
+        struct nfs_page *req;
+        unsigned long idx_end;
+        int found, i;
+        int res;
+
+        res = 0;
+        if (npages == 0)
+                idx_end = ~0;
+        else
+                idx_end = idx_start + npages - 1;
+
+        for (;;) {
+                found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree,
+                                (void **)&pgvec[0], idx_start, NFS_SCAN_MAXENTRIES,
+                                NFS_PAGE_TAG_DIRTY);
+                if (found <= 0)
+                        break;
+                for (i = 0; i < found; i++) {
+                        req = pgvec[i];
+                        if (req->wb_index > idx_end)
+                                goto out;
+
+                        idx_start = req->wb_index + 1;
+
+                        if (nfs_set_page_writeback_locked(req)) {
+                                radix_tree_tag_clear(&nfsi->nfs_page_tree,
+                                                req->wb_index, NFS_PAGE_TAG_DIRTY);
+                                nfs_list_remove_request(req);
+                                nfs_list_add_request(req, dst);
+                                res++;
+                        }
+                }
+        }
+out:
+        return res;
+}
+
 /**
  * nfs_scan_list - Scan a list for matching requests
  * @head: One of the NFS inode request lists
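The kerneldoc for nfs_scan_lock_dirty() above requires the caller to hold the inode's req_lock, presumably because the radix tree and its tags are manipulated under that lock elsewhere in this patch. A minimal sketch of a conforming caller follows, assuming kernel context; the function name nfs_example_collect_dirty() is invented and not part of this patch.

/*
 * Illustrative sketch only: take req_lock around the scan, as the
 * kerneldoc for nfs_scan_lock_dirty() requires.
 */
static int nfs_example_collect_dirty(struct inode *inode, struct list_head *dst,
                unsigned long idx_start, unsigned int npages)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        int res;

        spin_lock(&nfsi->req_lock);
        res = nfs_scan_lock_dirty(nfsi, dst, idx_start, npages);
        spin_unlock(&nfsi->req_lock);
        return res;     /* number of dirty requests moved onto dst */
}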
@@ -280,7 +354,7 @@ nfs_scan_list(struct list_head *head, struct list_head *dst,
                 if (req->wb_index > idx_end)
                         break;
 
-                if (!nfs_lock_request(req))
+                if (!nfs_set_page_writeback_locked(req))
                         continue;
                 nfs_list_remove_request(req);
                 nfs_list_add_request(req, dst);