author		Dave Chinner <david@fromorbit.com>	2009-04-06 12:47:27 -0400
committer	Christoph Hellwig <hch@brick.lst.de>	2009-04-06 12:47:27 -0400
commit		e43afd72d2455defd63a3f94f22fa09b586e58ed
tree		a883a748374d5f63bf030dcf445723a901f88357	/fs/xfs/linux-2.6/xfs_sync.c
parent		5825294edd3364cbba6514f70d88debec4f6cec7
xfs: block callers of xfs_flush_inodes() correctly
xfs_flush_inodes() currently uses a magic timeout to wait for some inodes to be flushed before returning. This isn't really reliable, but it used to be the best that could be done because of the deadlock potential of waiting for the entire flush. Now that the inode flush is safe to execute while we hold page and inode locks, we can wait for all the inodes to flush synchronously. Convert the wait mechanism to a completion to do this efficiently. This should remove all remaining spurious ENOSPC errors from the delayed allocation reservation path.

This is extracted almost line for line from a larger patch from Mikulas Patocka.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
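For background, the kernel completion primitive the patch switches to works roughly as follows. This is a minimal sketch of the generic waiter/worker handshake; the example_* names are illustrative and are not code from this patch:

	#include <linux/completion.h>

	/* Worker side: signal the waiter once the work item is finished. */
	static void example_worker(struct completion *done)
	{
		/* ... do the actual work, e.g. flush inodes ... */
		if (done)
			complete(done);		/* wake up the waiting caller */
	}

	/* Caller side: hand the completion to the worker and block on it. */
	static void example_caller(void)
	{
		DECLARE_COMPLETION_ONSTACK(done);	/* on-stack is safe: we wait before returning */

		example_worker(&done);			/* normally queued to another thread */
		wait_for_completion(&done);		/* sleep until complete(&done) has run */
	}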
Diffstat (limited to 'fs/xfs/linux-2.6/xfs_sync.c')
-rw-r--r--	fs/xfs/linux-2.6/xfs_sync.c	12
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 73cf8dc19738..f7ba76633c29 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -404,7 +404,8 @@ STATIC void
 xfs_syncd_queue_work(
 	struct xfs_mount *mp,
 	void		*data,
-	void		(*syncer)(struct xfs_mount *, void *))
+	void		(*syncer)(struct xfs_mount *, void *),
+	struct completion *completion)
 {
 	struct xfs_sync_work *work;
 
@@ -413,6 +414,7 @@ xfs_syncd_queue_work(
 	work->w_syncer = syncer;
 	work->w_data = data;
 	work->w_mount = mp;
+	work->w_completion = completion;
 	spin_lock(&mp->m_sync_lock);
 	list_add_tail(&work->w_list, &mp->m_sync_list);
 	spin_unlock(&mp->m_sync_lock);
@@ -441,10 +443,11 @@ xfs_flush_inodes(
 	xfs_inode_t	*ip)
 {
 	struct inode	*inode = VFS_I(ip);
+	DECLARE_COMPLETION_ONSTACK(completion);
 
 	igrab(inode);
-	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work);
-	delay(msecs_to_jiffies(500));
+	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
+	wait_for_completion(&completion);
 	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
 }
 
@@ -514,6 +517,8 @@ xfssyncd(
 		list_del(&work->w_list);
 		if (work == &mp->m_sync_work)
 			continue;
+		if (work->w_completion)
+			complete(work->w_completion);
 		kmem_free(work);
 	}
 }
@@ -527,6 +532,7 @@ xfs_syncd_init(
 {
 	mp->m_sync_work.w_syncer = xfs_sync_worker;
 	mp->m_sync_work.w_mount = mp;
+	mp->m_sync_work.w_completion = NULL;
 	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
 	if (IS_ERR(mp->m_sync_task))
 		return -PTR_ERR(mp->m_sync_task);
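Two details in the hunks above are worth noting. The completion lives on the caller's stack (DECLARE_COMPLETION_ONSTACK), which is safe because xfs_flush_inodes() does not return until xfssyncd has signalled it via complete(). And the persistent periodic work item m_sync_work has its w_completion initialised to NULL in xfs_syncd_init(), so the work->w_completion check in xfssyncd only signals one-off queued work items that actually have a waiter.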