author	Steven Whitehouse <swhiteho@redhat.com>	2011-09-07 10:12:51 -0400
committer	Steven Whitehouse <swhiteho@redhat.com>	2011-10-21 07:39:44 -0400
commit	13d921e37174e3d1042deeb303537c1d935da553 (patch)
tree	c3d208bd4e8eaec8e0f07586f6b21f3c7cd5832b /fs/gfs2
parent	ccad4e147acf2a59b463f5df3cee8b43b144ce82 (diff)
GFS2: Clean up ->page_mkwrite
This patch brings gfs2's ->page_mkwrite up to date with respect to the expectations set by the VM. Also added is a check to wait if the fs is frozen, before we attempt to get a glock. This will only work on the node which initiates the freeze, but that's OK since the transaction lock will still provide the expected barrier on other nodes.

The major change here is that we now return a locked page, except when we don't return a page at all (the error cases). This removes the race which required rechecking the page after it was returned.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Cc: Nick Piggin <npiggin@kernel.dk>
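(For context: the expectation the message refers to is that a ->page_mkwrite handler either returns with the faulted page locked, signalled via VM_FAULT_LOCKED, or returns a fault code without a page at all. Below is a minimal sketch of that general pattern using only generic VFS/mm helpers of this era; it is not the GFS2 code from the diff that follows, and fs_prepare_blocks() is a hypothetical stand-in for the filesystem's own allocation step.)

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>

/* Hypothetical stand-in for the filesystem's own block allocation work
 * (in GFS2 this is the unstuff + gfs2_allocate_page_backing() path).
 */
static int fs_prepare_blocks(struct page *page)
{
	return 0;	/* assume the page is already backed by blocks */
}

static int example_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	loff_t size;
	int ret = 0;

	/* Block new writers while the fs is frozen (racy, so re-validate
	 * after taking the page lock). */
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	lock_page(page);

	/* The page may have been truncated or invalidated while unlocked */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
		ret = -EAGAIN;	/* have the VM retry the fault */
		goto out_unlock;
	}

	/* Reject faults beyond the current end of file */
	size = i_size_read(inode);
	if (size == 0 || page->index > ((size - 1) >> PAGE_CACHE_SHIFT)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = fs_prepare_blocks(page);
	if (ret)
		goto out_unlock;

	/* Success: hand the page back locked and dirty */
	set_page_dirty(page);
	wait_on_page_writeback(page);
	return block_page_mkwrite_return(0);	/* VM_FAULT_LOCKED */

out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}

Handing the page back locked is what removes the race mentioned above: the caller no longer has to recheck whether the page was truncated or invalidated between the handler returning and the PTE being made writable.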
Diffstat (limited to 'fs/gfs2')
-rw-r--r--	fs/gfs2/file.c	64
1 file changed, 46 insertions, 18 deletions
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 3b65f67bb38e..aa3a4ddb834e 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -366,8 +366,15 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	unsigned int data_blocks, ind_blocks, rblocks;
 	struct gfs2_holder gh;
 	struct gfs2_alloc *al;
+	loff_t size;
 	int ret;
 
+	/* Wait if fs is frozen. This is racy so we check again later on
+	 * and retry if the fs has been frozen after the page lock has
+	 * been acquired
+	 */
+	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
+
 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
 	ret = gfs2_glock_nq(&gh);
 	if (ret)
@@ -376,8 +383,15 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
 	set_bit(GIF_SW_PAGED, &ip->i_flags);
 
-	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE))
+	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
+		lock_page(page);
+		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
+			ret = -EAGAIN;
+			unlock_page(page);
+		}
 		goto out_unlock;
+	}
+
 	ret = -ENOMEM;
 	al = gfs2_alloc_get(ip);
 	if (al == NULL)
@@ -405,21 +419,29 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	lock_page(page);
 	ret = -EINVAL;
-	last_index = ip->i_inode.i_size >> PAGE_CACHE_SHIFT;
-	if (page->index > last_index)
-		goto out_unlock_page;
+	size = i_size_read(inode);
+	last_index = (size - 1) >> PAGE_CACHE_SHIFT;
+	/* Check page index against inode size */
+	if (size == 0 || (page->index > last_index))
+		goto out_trans_end;
+
+	ret = -EAGAIN;
+	/* If truncated, we must retry the operation, we may have raced
+	 * with the glock demotion code.
+	 */
+	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
+		goto out_trans_end;
+
+	/* Unstuff, if required, and allocate backing blocks for page */
 	ret = 0;
-	if (!PageUptodate(page) || page->mapping != ip->i_inode.i_mapping)
-		goto out_unlock_page;
-	if (gfs2_is_stuffed(ip)) {
+	if (gfs2_is_stuffed(ip))
 		ret = gfs2_unstuff_dinode(ip, page);
-		if (ret)
-			goto out_unlock_page;
-	}
-	ret = gfs2_allocate_page_backing(page);
+	if (ret == 0)
+		ret = gfs2_allocate_page_backing(page);
 
-out_unlock_page:
-	unlock_page(page);
+out_trans_end:
+	if (ret)
+		unlock_page(page);
 	gfs2_trans_end(sdp);
 out_trans_fail:
 	gfs2_inplace_release(ip);
@@ -431,11 +453,17 @@ out_unlock:
 	gfs2_glock_dq(&gh);
 out:
 	gfs2_holder_uninit(&gh);
-	if (ret == -ENOMEM)
-		ret = VM_FAULT_OOM;
-	else if (ret)
-		ret = VM_FAULT_SIGBUS;
-	return ret;
+	if (ret == 0) {
+		set_page_dirty(page);
+		/* This check must be post dropping of transaction lock */
+		if (inode->i_sb->s_frozen == SB_UNFROZEN) {
+			wait_on_page_writeback(page);
+		} else {
+			ret = -EAGAIN;
+			unlock_page(page);
+		}
+	}
+	return block_page_mkwrite_return(ret);
 }
 
 static const struct vm_operations_struct gfs2_vm_ops = {
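(The final hunk routes every exit through block_page_mkwrite_return() from <linux/buffer_head.h>, which converts the errno-style result into the VM fault codes the fault handler expects. The sketch below shows the rough shape of that mapping as this patch relies on it; the exact set of cases is an assumption based on kernels of this era, not a copy of the helper.)

#include <linux/errno.h>
#include <linux/mm.h>

/* Rough sketch of the errno -> VM_FAULT_* translation performed by
 * block_page_mkwrite_return(); the cases shown are assumed, not copied.
 */
static inline int example_mkwrite_return(int err)
{
	if (err == 0)
		return VM_FAULT_LOCKED;	/* page handed back locked and dirty */
	if (err == -EAGAIN)
		return VM_FAULT_NOPAGE;	/* have the VM retry the fault */
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;		/* -EINVAL, -EIO, -ENOSPC, ... */
}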