path: root/fs/nfs/write.c
author		Trond Myklebust <Trond.Myklebust@netapp.com>	2007-07-22 17:09:05 -0400
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2007-10-09 17:15:11 -0400
commit		9cccef95052c7169040c3577e17d4f6fa230cc28 (patch)
tree		56d0cfc610272f67bde429565d3b23b83d2df6af /fs/nfs/write.c
parent		94387fb1aa16ee853d00f959373132a181b0196b (diff)
NFS: Clean up write code...
The addition of nfs_page_mkwrite means that we should no longer need to create requests inside nfs_writepage().

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
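For orientation, the write-out path this leaves behind looks roughly as follows. This is only a sketch pieced together from the context lines of the hunks below; the code the diff does not show (the pageio descriptor setup between the hunks) is marked as elided, not reconstructed:

static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	struct nfs_pageio_descriptor mypgio, *pgio;
	struct inode *inode = page->mapping->host;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	/* ... pgio setup elided; not visible in this diff ... */

	nfs_pageio_cond_complete(pgio, page->index);

	/* nfs_page_mkwrite has already created the nfs_page request for a
	 * dirtied page, so writepage only has to flush it; the old
	 * nfs_find_open_context()/nfs_writepage_setup() fallback is gone. */
	err = nfs_page_async_flush(pgio, page);

	if (!wbc->for_writepages)
		nfs_pageio_complete(pgio);
	return err;
}

Note that nfs_page_async_flush() itself now returns 0 both when it queues the request and when no request exists, so an error code is its only non-zero return value.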
Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r--	fs/nfs/write.c	68
1 file changed, 4 insertions(+), 64 deletions(-)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 0d7a77cc394b..0c346d79fb34 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -243,10 +243,7 @@ static void nfs_end_page_writeback(struct page *page)
 
 /*
  * Find an associated nfs write request, and prepare to flush it out
- * Returns 1 if there was no write request, or if the request was
- * already tagged by nfs_set_page_dirty.Returns 0 if the request
- * was not tagged.
- * May also return an error if the user signalled nfs_wait_on_request().
+ * May return an error if the user signalled nfs_wait_on_request().
  */
 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 		struct page *page)
@@ -261,7 +258,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 		req = nfs_page_find_request_locked(page);
 		if (req == NULL) {
 			spin_unlock(&inode->i_lock);
-			return 1;
+			return 0;
 		}
 		if (nfs_lock_request_dontget(req))
 			break;
@@ -282,7 +279,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 		spin_unlock(&inode->i_lock);
 		nfs_unlock_request(req);
 		nfs_pageio_complete(pgio);
-		return 1;
+		return 0;
 	}
 	if (nfs_set_page_writeback(page) != 0) {
 		spin_unlock(&inode->i_lock);
@@ -290,10 +287,9 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 	}
 	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
 			NFS_PAGE_TAG_LOCKED);
-	ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
 	spin_unlock(&inode->i_lock);
 	nfs_pageio_add_request(pgio, req);
-	return ret;
+	return 0;
 }
 
 /*
@@ -302,9 +298,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
 {
 	struct nfs_pageio_descriptor mypgio, *pgio;
-	struct nfs_open_context *ctx;
 	struct inode *inode = page->mapping->host;
-	unsigned offset;
 	int err;
 
 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
@@ -320,28 +314,7 @@ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc
 	nfs_pageio_cond_complete(pgio, page->index);
 
 	err = nfs_page_async_flush(pgio, page);
-	if (err <= 0)
-		goto out;
-	err = 0;
-	offset = nfs_page_length(page);
-	if (!offset)
-		goto out;
 
-	nfs_pageio_cond_complete(pgio, page->index);
-
-	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
-	if (ctx == NULL) {
-		err = -EBADF;
-		goto out;
-	}
-	err = nfs_writepage_setup(ctx, page, 0, offset);
-	put_nfs_open_context(ctx);
-	if (err != 0)
-		goto out;
-	err = nfs_page_async_flush(pgio, page);
-	if (err > 0)
-		err = 0;
-out:
 	if (!wbc->for_writepages)
 		nfs_pageio_complete(pgio);
 	return err;
@@ -395,8 +368,6 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
 	}
 	SetPagePrivate(req->wb_page);
 	set_page_private(req->wb_page, (unsigned long)req);
-	if (PageDirty(req->wb_page))
-		set_bit(PG_NEED_FLUSH, &req->wb_flags);
 	nfsi->npages++;
 	kref_get(&req->wb_kref);
 	return 0;
@@ -416,8 +387,6 @@ static void nfs_inode_remove_request(struct nfs_page *req)
 	set_page_private(req->wb_page, 0);
 	ClearPagePrivate(req->wb_page);
 	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
-	if (test_and_clear_bit(PG_NEED_FLUSH, &req->wb_flags))
-		__set_page_dirty_nobuffers(req->wb_page);
 	nfsi->npages--;
 	if (!nfsi->npages) {
 		spin_unlock(&inode->i_lock);
@@ -1477,35 +1446,6 @@ int nfs_wb_page(struct inode *inode, struct page* page)
 	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
 }
 
-int nfs_set_page_dirty(struct page *page)
-{
-	struct address_space *mapping = page->mapping;
-	struct inode *inode;
-	struct nfs_page *req;
-	int ret;
-
-	if (!mapping)
-		goto out_raced;
-	inode = mapping->host;
-	if (!inode)
-		goto out_raced;
-	spin_lock(&inode->i_lock);
-	req = nfs_page_find_request_locked(page);
-	if (req != NULL) {
-		/* Mark any existing write requests for flushing */
-		ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags);
-		spin_unlock(&inode->i_lock);
-		nfs_release_request(req);
-		return ret;
-	}
-	ret = __set_page_dirty_nobuffers(page);
-	spin_unlock(&inode->i_lock);
-	return ret;
-out_raced:
-	return !TestSetPageDirty(page);
-}
-
-
 int __init nfs_init_writepagecache(void)
 {
 	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",