Diffstat (limited to 'fs/xfs/linux-2.6/xfs_super.c')
 fs/xfs/linux-2.6/xfs_super.c | 175 ++++++++++++++++++++++++-------
 1 file changed, 137 insertions(+), 38 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 77414db10dc2..71345a370d9f 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -877,12 +877,11 @@ xfsaild(
 {
 	struct xfs_ail	*ailp = data;
 	xfs_lsn_t	last_pushed_lsn = 0;
-	long		tout = 0;
+	long		tout = 0;	/* milliseconds */
 
 	while (!kthread_should_stop()) {
-		if (tout)
-			schedule_timeout_interruptible(msecs_to_jiffies(tout));
-		tout = 1000;
+		schedule_timeout_interruptible(tout ?
+				msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
 
 		/* swsusp */
 		try_to_freeze();
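
The hunk above turns xfsaild's fixed one-second poll into an idle wait: a zero tout now means "sleep until woken" via MAX_SCHEDULE_TIMEOUT rather than rearming a 1000 ms timeout. A minimal sketch of the idiom, assuming a kthread context; next_work_ms and do_pending_work() are hypothetical names, not XFS code:

	/*
	 * Sketch only: bounded sleep while work is pending, indefinite
	 * (wakeup-driven) sleep otherwise.
	 */
	while (!kthread_should_stop()) {
		schedule_timeout_interruptible(next_work_ms ?
				msecs_to_jiffies(next_work_ms) :
				MAX_SCHEDULE_TIMEOUT);
		next_work_ms = do_pending_work();	/* hypothetical helper */
	}
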
@@ -1022,59 +1021,108 @@ xfs_fs_dirty_inode(
 	XFS_I(inode)->i_update_core = 1;
 }
 
-/*
- * Attempt to flush the inode, this will actually fail
- * if the inode is pinned, but we dirty the inode again
- * at the point when it is unpinned after a log write,
- * since this is when the inode itself becomes flushable.
- */
+STATIC int
+xfs_log_inode(
+	struct xfs_inode	*ip)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_trans	*tp;
+	int			error;
+
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
+
+	if (error) {
+		xfs_trans_cancel(tp, 0);
+		/* we need to return with the lock hold shared */
+		xfs_ilock(ip, XFS_ILOCK_SHARED);
+		return error;
+	}
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+	/*
+	 * Note - it's possible that we might have pushed ourselves out of the
+	 * way during trans_reserve which would flush the inode.  But there's
+	 * no guarantee that the inode buffer has actually gone out yet (it's
+	 * delwri).  Plus the buffer could be pinned anyway if it's part of
+	 * an inode in another recent transaction.  So we play it safe and
+	 * fire off the transaction anyway.
+	 */
+	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+	xfs_trans_ihold(tp, ip);
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+	xfs_trans_set_sync(tp);
+	error = xfs_trans_commit(tp, 0);
+	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
+
+	return error;
+}
+
 STATIC int
 xfs_fs_write_inode(
 	struct inode		*inode,
-	int			sync)
+	struct writeback_control *wbc)
 {
 	struct xfs_inode	*ip = XFS_I(inode);
 	struct xfs_mount	*mp = ip->i_mount;
-	int			error = 0;
+	int			error = EAGAIN;
 
 	xfs_itrace_entry(ip);
 
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return XFS_ERROR(EIO);
 
-	if (sync) {
-		error = xfs_wait_on_pages(ip, 0, -1);
-		if (error)
+	if (wbc->sync_mode == WB_SYNC_ALL) {
+		/*
+		 * Make sure the inode has hit stable storage. By using the
+		 * log and the fsync transactions we reduce the IOs we have
+		 * to do here from two (log and inode) to just the log.
+		 *
+		 * Note: We still need to do a delwri write of the inode after
+		 * this to flush it to the backing buffer so that bulkstat
+		 * works properly if this is the first time the inode has been
+		 * written.  Because we hold the ilock atomically over the
+		 * transaction commit and the inode flush we are guaranteed
+		 * that the inode is not pinned when it returns. If the flush
+		 * lock is already held, then the inode has already been
+		 * flushed once and we don't need to flush it again.  Hence
+		 * the code will only flush the inode if it isn't already
+		 * being flushed.
+		 */
+		xfs_ilock(ip, XFS_ILOCK_SHARED);
+		if (ip->i_update_core) {
+			error = xfs_log_inode(ip);
+			if (error)
+				goto out_unlock;
+		}
+	} else {
+		/*
+		 * We make this non-blocking if the inode is contended, return
+		 * EAGAIN to indicate to the caller that they did not succeed.
+		 * This prevents the flush path from blocking on inodes inside
+		 * another operation right now, they get caught later by xfs_sync.
+		 */
+		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
 			goto out;
 	}
 
-	/*
-	 * Bypass inodes which have already been cleaned by
-	 * the inode flush clustering code inside xfs_iflush
-	 */
-	if (xfs_inode_clean(ip))
-		goto out;
-
-	/*
-	 * We make this non-blocking if the inode is contended, return
-	 * EAGAIN to indicate to the caller that they did not succeed.
-	 * This prevents the flush path from blocking on inodes inside
-	 * another operation right now, they get caught later by xfs_sync.
-	 */
-	if (sync) {
-		xfs_ilock(ip, XFS_ILOCK_SHARED);
-		xfs_iflock(ip);
-
-		error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
-	} else {
-		error = EAGAIN;
-		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
-			goto out;
-		if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
-			goto out_unlock;
+	if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
+		goto out_unlock;
 
-		error = xfs_iflush(ip, XFS_IFLUSH_ASYNC_NOBLOCK);
+	/*
+	 * Now we have the flush lock and the inode is not pinned, we can check
+	 * if the inode is really clean as we know that there are no pending
+	 * transaction completions, it is not waiting on the delayed write
+	 * queue and there is no IO in progress.
+	 */
+	if (xfs_inode_clean(ip)) {
+		xfs_ifunlock(ip);
+		error = 0;
+		goto out_unlock;
 	}
+	error = xfs_iflush(ip, 0);
 
  out_unlock:
 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
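
The signature change tracks the VFS: ->write_inode() now receives a struct writeback_control instead of an int sync flag, and wbc->sync_mode selects between the two paths above. A hedged caller-side sketch; in practice the call comes from the generic writeback code, and inode is assumed to be a struct inode * already in scope:

	#include <linux/fs.h>
	#include <linux/writeback.h>

	/*
	 * WB_SYNC_ALL takes the synchronous logging path in the hunk above;
	 * WB_SYNC_NONE takes the trylock path and may return EAGAIN.
	 */
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
	};
	int error = inode->i_sb->s_op->write_inode(inode, &wbc);
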
@@ -1257,6 +1305,29 @@ xfs_fs_statfs(
 	return 0;
 }
 
+STATIC void
+xfs_save_resvblks(struct xfs_mount *mp)
+{
+	__uint64_t resblks = 0;
+
+	mp->m_resblks_save = mp->m_resblks;
+	xfs_reserve_blocks(mp, &resblks, NULL);
+}
+
+STATIC void
+xfs_restore_resvblks(struct xfs_mount *mp)
+{
+	__uint64_t resblks;
+
+	if (mp->m_resblks_save) {
+		resblks = mp->m_resblks_save;
+		mp->m_resblks_save = 0;
+	} else
+		resblks = xfs_default_resblks(mp);
+
+	xfs_reserve_blocks(mp, &resblks, NULL);
+}
+
 STATIC int
 xfs_fs_remount(
 	struct super_block	*sb,
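
The two helpers are symmetric: xfs_save_resvblks() stashes the live pool size in m_resblks_save and empties the pool, while xfs_restore_resvblks() refills it from the stash, falling back to xfs_default_resblks() when nothing was saved. Assuming xfs_reserve_blocks() resizes the pool to the value passed via resblks (the third argument returns state and is unused here), the intended round trip is:

	xfs_save_resvblks(mp);		/* pool -> 0, old size stashed     */
	/* ... superblock written with an accurate free-block count ... */
	xfs_restore_resvblks(mp);	/* pool -> stashed size or default */
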
@@ -1336,11 +1407,27 @@ xfs_fs_remount(
 			}
 			mp->m_update_flags = 0;
 		}
+
+		/*
+		 * Fill out the reserve pool if it is empty. Use the stashed
+		 * value if it is non-zero, otherwise go with the default.
+		 */
+		xfs_restore_resvblks(mp);
 	}
 
 	/* rw -> ro */
 	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
+		/*
+		 * After we have synced the data but before we sync the
+		 * metadata, we need to free up the reserve block pool so that
+		 * the used block count in the superblock on disk is correct at
+		 * the end of the remount. Stash the current reserve pool size
+		 * so that if we get remounted rw, we can return it to the same
+		 * size.
+		 */
+
 		xfs_quiesce_data(mp);
+		xfs_save_resvblks(mp);
 		xfs_quiesce_attr(mp);
 		mp->m_flags |= XFS_MOUNT_RDONLY;
 	}
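
Net effect of the remount hunk, in outline (control flow only, error handling elided): the pool is released after the data sync but before the metadata quiesce, so the free-block count in the final on-disk superblock is honest, and a later rw remount gives the pool back.

	/* rw -> ro */
	xfs_quiesce_data(mp);		/* flush data                       */
	xfs_save_resvblks(mp);		/* release pool before final write  */
	xfs_quiesce_attr(mp);		/* flush metadata, write superblock */

	/* later, ro -> rw */
	xfs_restore_resvblks(mp);	/* refill pool to the stashed size  */
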
@@ -1359,11 +1446,22 @@ xfs_fs_freeze(
 {
 	struct xfs_mount	*mp = XFS_M(sb);
 
+	xfs_save_resvblks(mp);
 	xfs_quiesce_attr(mp);
 	return -xfs_fs_log_dummy(mp);
 }
 
 STATIC int
+xfs_fs_unfreeze(
+	struct super_block	*sb)
+{
+	struct xfs_mount	*mp = XFS_M(sb);
+
+	xfs_restore_resvblks(mp);
+	return 0;
+}
+
+STATIC int
 xfs_fs_show_options(
 	struct seq_file		*m,
 	struct vfsmount		*mnt)
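
With .unfreeze_fs wired up below, freeze/thaw gets the same reserve-pool treatment as remount. A hedged userspace sketch that exercises the pair through the generic FIFREEZE/FITHAW ioctls; the mount point path is hypothetical:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/fs.h>

	int main(void)
	{
		int fd = open("/mnt/xfs", O_RDONLY);	/* hypothetical mount point */

		/* FIFREEZE reaches xfs_fs_freeze(): save pool, quiesce. */
		if (fd < 0 || ioctl(fd, FIFREEZE, 0) < 0) {
			perror("freeze");
			return 1;
		}
		/* ... snapshot or back up the frozen filesystem here ... */

		/* FITHAW reaches xfs_fs_unfreeze(): restore the pool. */
		if (ioctl(fd, FITHAW, 0) < 0) {
			perror("thaw");
			return 1;
		}
		close(fd);
		return 0;
	}
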
@@ -1585,6 +1683,7 @@ static const struct super_operations xfs_super_operations = {
 	.put_super		= xfs_fs_put_super,
 	.sync_fs		= xfs_fs_sync_fs,
 	.freeze_fs		= xfs_fs_freeze,
+	.unfreeze_fs		= xfs_fs_unfreeze,
 	.statfs			= xfs_fs_statfs,
 	.remount_fs		= xfs_fs_remount,
 	.show_options		= xfs_fs_show_options,