Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r-- | fs/nfs/write.c | 199
1 file changed, 76 insertions(+), 123 deletions(-)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 0d7a77cc394b..e2bb66c34406 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -110,6 +110,13 @@ void nfs_writedata_release(void *wdata)
 	nfs_writedata_free(wdata);
 }
 
+static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
+{
+	ctx->error = error;
+	smp_wmb();
+	set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
+}
+
 static struct nfs_page *nfs_page_find_request_locked(struct page *page)
 {
 	struct nfs_page *req = NULL;
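The helper added above publishes write errors in two ordered steps: it stores the errno in ctx->error, then sets NFS_CONTEXT_ERROR_WRITE only after an smp_wmb(), so any thread that observes the flag can also observe the error value. A minimal sketch of the matching reader side follows; it assumes the usual smp_wmb()/smp_rmb() pairing, and the function name nfs_context_error_pending is illustrative, not something this commit adds:

#include <linux/bitops.h>
#include <linux/nfs_fs.h>

/* Illustrative reader; pairs with smp_wmb() in nfs_context_set_write_error(). */
static int nfs_context_error_pending(struct nfs_open_context *ctx)
{
	if (!test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags))
		return 0;
	/* The flag is visible, so the store to ctx->error that preceded
	 * the smp_wmb() on the writer side is visible too. */
	smp_rmb();
	return ctx->error;
}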
@@ -243,10 +250,7 @@ static void nfs_end_page_writeback(struct page *page)
 
 /*
  * Find an associated nfs write request, and prepare to flush it out
- * Returns 1 if there was no write request, or if the request was
- * already tagged by nfs_set_page_dirty.Returns 0 if the request
- * was not tagged.
- * May also return an error if the user signalled nfs_wait_on_request().
+ * May return an error if the user signalled nfs_wait_on_request().
  */
 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 		struct page *page)
@@ -261,7 +265,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 		req = nfs_page_find_request_locked(page);
 		if (req == NULL) {
 			spin_unlock(&inode->i_lock);
-			return 1;
+			return 0;
 		}
 		if (nfs_lock_request_dontget(req))
 			break;
@@ -282,7 +286,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 		spin_unlock(&inode->i_lock);
 		nfs_unlock_request(req);
 		nfs_pageio_complete(pgio);
-		return 1;
+		return 0;
 	}
 	if (nfs_set_page_writeback(page) != 0) {
 		spin_unlock(&inode->i_lock);
@@ -290,70 +294,56 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 	}
 	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
 			NFS_PAGE_TAG_LOCKED);
-	ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
 	spin_unlock(&inode->i_lock);
 	nfs_pageio_add_request(pgio, req);
-	return ret;
+	return 0;
 }
 
-/*
- * Write an mmapped page to the server.
- */
-static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
+static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
 {
-	struct nfs_pageio_descriptor mypgio, *pgio;
-	struct nfs_open_context *ctx;
 	struct inode *inode = page->mapping->host;
-	unsigned offset;
-	int err;
 
 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
 	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
 
-	if (wbc->for_writepages)
-		pgio = wbc->fs_private;
-	else {
-		nfs_pageio_init_write(&mypgio, inode, wb_priority(wbc));
-		pgio = &mypgio;
-	}
-
 	nfs_pageio_cond_complete(pgio, page->index);
+	return nfs_page_async_flush(pgio, page);
+}
 
-	err = nfs_page_async_flush(pgio, page);
-	if (err <= 0)
-		goto out;
-	err = 0;
-	offset = nfs_page_length(page);
-	if (!offset)
-		goto out;
-
-	nfs_pageio_cond_complete(pgio, page->index);
+/*
+ * Write an mmapped page to the server.
+ */
+static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
+{
+	struct nfs_pageio_descriptor pgio;
+	int err;
 
-	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
-	if (ctx == NULL) {
-		err = -EBADF;
-		goto out;
-	}
-	err = nfs_writepage_setup(ctx, page, 0, offset);
-	put_nfs_open_context(ctx);
-	if (err != 0)
-		goto out;
-	err = nfs_page_async_flush(pgio, page);
-	if (err > 0)
-		err = 0;
-out:
-	if (!wbc->for_writepages)
-		nfs_pageio_complete(pgio);
-	return err;
+	nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
+	err = nfs_do_writepage(page, wbc, &pgio);
+	nfs_pageio_complete(&pgio);
+	if (err < 0)
+		return err;
+	if (pgio.pg_error < 0)
+		return pgio.pg_error;
+	return 0;
 }
 
 int nfs_writepage(struct page *page, struct writeback_control *wbc)
 {
-	int err;
+	int ret;
+
+	ret = nfs_writepage_locked(page, wbc);
+	unlock_page(page);
+	return ret;
+}
+
+static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
+{
+	int ret;
 
-	err = nfs_writepage_locked(page, wbc);
+	ret = nfs_do_writepage(page, wbc, data);
 	unlock_page(page);
-	return err;
+	return ret;
 }
 
 int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
@@ -365,12 +355,11 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
 
 	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
-	wbc->fs_private = &pgio;
-	err = generic_writepages(mapping, wbc);
+	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
 	nfs_pageio_complete(&pgio);
-	if (err)
+	if (err < 0)
 		return err;
-	if (pgio.pg_error)
+	if (pgio.pg_error < 0)
 		return pgio.pg_error;
 	return 0;
 }
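With the hunk above, nfs_writepages() stops smuggling its nfs_pageio_descriptor through wbc->fs_private and instead hands it to write_cache_pages() as the opaque data cookie; write_cache_pages() walks the mapping's dirty pages, locks each one and calls the supplied callback with (page, wbc, data). A minimal sketch of that callback pattern outside NFS follows; the names my_ctx, my_writepage_cb, my_writepages and my_write_one_page are illustrative only, and my_write_one_page stands in for whatever per-filesystem routine actually issues the I/O:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

struct my_ctx {
	int nr_written;		/* example per-call state carried via @data */
};

/* Provided elsewhere in this hypothetical filesystem; starts I/O on one page. */
static int my_write_one_page(struct page *page, struct writeback_control *wbc);

static int my_writepage_cb(struct page *page, struct writeback_control *wbc,
			   void *data)
{
	struct my_ctx *ctx = data;
	int ret;

	/* The page arrives locked; the callback is responsible for
	 * starting the write and for unlocking the page. */
	ret = my_write_one_page(page, wbc);
	if (ret == 0)
		ctx->nr_written++;
	unlock_page(page);
	return ret;
}

static int my_writepages(struct address_space *mapping,
			 struct writeback_control *wbc)
{
	struct my_ctx ctx = { .nr_written = 0 };

	return write_cache_pages(mapping, wbc, my_writepage_cb, &ctx);
}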
@@ -389,14 +378,11 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
 		return error;
 	if (!nfsi->npages) {
 		igrab(inode);
-		nfs_begin_data_update(inode);
 		if (nfs_have_delegation(inode, FMODE_WRITE))
 			nfsi->change_attr++;
 	}
 	SetPagePrivate(req->wb_page);
 	set_page_private(req->wb_page, (unsigned long)req);
-	if (PageDirty(req->wb_page))
-		set_bit(PG_NEED_FLUSH, &req->wb_flags);
 	nfsi->npages++;
 	kref_get(&req->wb_kref);
 	return 0;
@@ -416,12 +402,9 @@ static void nfs_inode_remove_request(struct nfs_page *req)
 	set_page_private(req->wb_page, 0);
 	ClearPagePrivate(req->wb_page);
 	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
-	if (test_and_clear_bit(PG_NEED_FLUSH, &req->wb_flags))
-		__set_page_dirty_nobuffers(req->wb_page);
 	nfsi->npages--;
 	if (!nfsi->npages) {
 		spin_unlock(&inode->i_lock);
-		nfs_end_data_update(inode);
 		iput(inode);
 	} else
 		spin_unlock(&inode->i_lock);
@@ -682,7 +665,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 
 int nfs_flush_incompatible(struct file *file, struct page *page)
 {
-	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
+	struct nfs_open_context *ctx = nfs_file_open_context(file);
 	struct nfs_page *req;
 	int do_flush, status;
 	/*
@@ -716,7 +699,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
 int nfs_updatepage(struct file *file, struct page *page,
 		unsigned int offset, unsigned int count)
 {
-	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
+	struct nfs_open_context *ctx = nfs_file_open_context(file);
 	struct inode *inode = page->mapping->host;
 	int status = 0;
 
@@ -967,7 +950,7 @@ static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
 
 	if (task->tk_status < 0) {
 		nfs_set_pageerror(page);
-		req->wb_context->error = task->tk_status;
+		nfs_context_set_write_error(req->wb_context, task->tk_status);
 		dprintk(", error = %d\n", task->tk_status);
 		goto out;
 	}
@@ -1030,7 +1013,7 @@ static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
 
 	if (task->tk_status < 0) {
 		nfs_set_pageerror(page);
-		req->wb_context->error = task->tk_status;
+		nfs_context_set_write_error(req->wb_context, task->tk_status);
 		dprintk(", error = %d\n", task->tk_status);
 		goto remove_request;
 	}
@@ -1244,7 +1227,7 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
 			req->wb_bytes,
 			(long long)req_offset(req));
 		if (task->tk_status < 0) {
-			req->wb_context->error = task->tk_status;
+			nfs_context_set_write_error(req->wb_context, task->tk_status);
 			nfs_inode_remove_request(req);
 			dprintk(", error = %d\n", task->tk_status);
 			goto next;
@@ -1347,53 +1330,52 @@ long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_contr
 	return ret;
 }
 
-/*
- * flush the inode to disk.
- */
-int nfs_wb_all(struct inode *inode)
+static int __nfs_write_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
 {
-	struct address_space *mapping = inode->i_mapping;
-	struct writeback_control wbc = {
-		.bdi = mapping->backing_dev_info,
-		.sync_mode = WB_SYNC_ALL,
-		.nr_to_write = LONG_MAX,
-		.for_writepages = 1,
-		.range_cyclic = 1,
-	};
 	int ret;
 
-	ret = nfs_writepages(mapping, &wbc);
+	ret = nfs_writepages(mapping, wbc);
 	if (ret < 0)
 		goto out;
-	ret = nfs_sync_mapping_wait(mapping, &wbc, 0);
-	if (ret >= 0)
-		return 0;
+	ret = nfs_sync_mapping_wait(mapping, wbc, how);
+	if (ret < 0)
+		goto out;
+	return 0;
 out:
 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 	return ret;
 }
 
-int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, loff_t range_end, int how)
+/* Two pass sync: first using WB_SYNC_NONE, then WB_SYNC_ALL */
+static int nfs_write_mapping(struct address_space *mapping, int how)
 {
 	struct writeback_control wbc = {
 		.bdi = mapping->backing_dev_info,
-		.sync_mode = WB_SYNC_ALL,
+		.sync_mode = WB_SYNC_NONE,
 		.nr_to_write = LONG_MAX,
-		.range_start = range_start,
-		.range_end = range_end,
 		.for_writepages = 1,
+		.range_cyclic = 1,
 	};
 	int ret;
 
-	ret = nfs_writepages(mapping, &wbc);
+	ret = __nfs_write_mapping(mapping, &wbc, how);
 	if (ret < 0)
-		goto out;
-	ret = nfs_sync_mapping_wait(mapping, &wbc, how);
-	if (ret >= 0)
-		return 0;
-out:
-	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
-	return ret;
+		return ret;
+	wbc.sync_mode = WB_SYNC_ALL;
+	return __nfs_write_mapping(mapping, &wbc, how);
+}
+
+/*
+ * flush the inode to disk.
+ */
+int nfs_wb_all(struct inode *inode)
+{
+	return nfs_write_mapping(inode->i_mapping, 0);
+}
+
+int nfs_wb_nocommit(struct inode *inode)
+{
+	return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT);
 }
 
 int nfs_wb_page_cancel(struct inode *inode, struct page *page)
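The restructuring above funnels both flush entry points through one helper: __nfs_write_mapping() starts writeback with nfs_writepages() and then waits via nfs_sync_mapping_wait(), while nfs_write_mapping() calls it twice, first with WB_SYNC_NONE to push out as much as possible without blocking, then with WB_SYNC_ALL to guarantee completion. nfs_wb_all() passes how=0 and nfs_wb_nocommit() passes FLUSH_NOCOMMIT, which, as the name suggests, skips the COMMIT phase of the wait. A hedged usage sketch follows; example_flush_before_op is illustrative and not part of this commit:

#include <linux/fs.h>
#include <linux/nfs_fs.h>

/* Illustrative caller: make sure no dirty or in-flight writes remain
 * before an operation that needs stable file data on the server. */
static int example_flush_before_op(struct inode *inode, int need_commit)
{
	int error;

	if (need_commit)
		error = nfs_wb_all(inode);	/* write back, wait, COMMIT */
	else
		error = nfs_wb_nocommit(inode);	/* write back and wait only */
	if (error < 0)
		return error;
	/* ...safe to proceed with the stable-data operation... */
	return 0;
}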
@@ -1477,35 +1459,6 @@ int nfs_wb_page(struct inode *inode, struct page* page)
 	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
 }
 
-int nfs_set_page_dirty(struct page *page)
-{
-	struct address_space *mapping = page->mapping;
-	struct inode *inode;
-	struct nfs_page *req;
-	int ret;
-
-	if (!mapping)
-		goto out_raced;
-	inode = mapping->host;
-	if (!inode)
-		goto out_raced;
-	spin_lock(&inode->i_lock);
-	req = nfs_page_find_request_locked(page);
-	if (req != NULL) {
-		/* Mark any existing write requests for flushing */
-		ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags);
-		spin_unlock(&inode->i_lock);
-		nfs_release_request(req);
-		return ret;
-	}
-	ret = __set_page_dirty_nobuffers(page);
-	spin_unlock(&inode->i_lock);
-	return ret;
-out_raced:
-	return !TestSetPageDirty(page);
-}
-
-
 int __init nfs_init_writepagecache(void)
 {
 	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",