Diffstat (limited to 'fs/xfs/linux-2.6/xfs_super.c')
 -rw-r--r--   fs/xfs/linux-2.6/xfs_super.c   354
 1 files changed, 190 insertions, 164 deletions
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 18a4b8e11df2..29f1edca76de 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -15,6 +15,7 @@
  * along with this program; if not, write the Free Software Foundation,
  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  */
+
 #include "xfs.h"
 #include "xfs_bit.h"
 #include "xfs_log.h"
@@ -52,14 +53,15 @@
 #include "xfs_trans_priv.h"
 #include "xfs_filestream.h"
 #include "xfs_da_btree.h"
-#include "xfs_dir2_trace.h"
 #include "xfs_extfree_item.h"
 #include "xfs_mru_cache.h"
 #include "xfs_inode_item.h"
 #include "xfs_sync.h"
+#include "xfs_trace.h"
 
 #include <linux/namei.h>
 #include <linux/init.h>
+#include <linux/slab.h>
 #include <linux/mount.h>
 #include <linux/mempool.h>
 #include <linux/writeback.h>
@@ -876,12 +878,11 @@ xfsaild(
 {
         struct xfs_ail *ailp = data;
         xfs_lsn_t last_pushed_lsn = 0;
-        long tout = 0;
+        long tout = 0; /* milliseconds */
 
         while (!kthread_should_stop()) {
-                if (tout)
-                        schedule_timeout_interruptible(msecs_to_jiffies(tout));
-                tout = 1000;
+                schedule_timeout_interruptible(tout ?
+                                msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
 
                 /* swsusp */
                 try_to_freeze();
@@ -930,13 +931,37 @@ xfs_fs_alloc_inode(
  */
 STATIC void
 xfs_fs_destroy_inode(
         struct inode *inode)
 {
-        xfs_inode_t *ip = XFS_I(inode);
+        struct xfs_inode *ip = XFS_I(inode);
+
+        xfs_itrace_entry(ip);
 
         XFS_STATS_INC(vn_reclaim);
-        if (xfs_reclaim(ip))
-                panic("%s: cannot reclaim 0x%p\n", __func__, inode);
+
+        /* bad inode, get out here ASAP */
+        if (is_bad_inode(inode))
+                goto out_reclaim;
+
+        xfs_ioend_wait(ip);
+
+        ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
+
+        /*
+         * We should never get here with one of the reclaim flags already set.
+         */
+        ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
+        ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
+
+        /*
+         * We always use background reclaim here because even if the
+         * inode is clean, it still may be under IO and hence we have
+         * to take the flush lock. The background reclaim path handles
+         * this more efficiently than we can here, so simply let background
+         * reclaim tear down all inodes.
+         */
+out_reclaim:
+        xfs_inode_set_reclaim_tag(ip);
 }
 
 /*
@@ -973,7 +998,6 @@ xfs_fs_inode_init_once(
 
         mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
                      "xfsino", ip->i_ino);
-        mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
 }
 
 /*
@@ -998,59 +1022,108 @@ xfs_fs_dirty_inode(
         XFS_I(inode)->i_update_core = 1;
 }
 
-/*
- * Attempt to flush the inode, this will actually fail
- * if the inode is pinned, but we dirty the inode again
- * at the point when it is unpinned after a log write,
- * since this is when the inode itself becomes flushable.
- */
+STATIC int
+xfs_log_inode(
+        struct xfs_inode *ip)
+{
+        struct xfs_mount *mp = ip->i_mount;
+        struct xfs_trans *tp;
+        int error;
+
+        xfs_iunlock(ip, XFS_ILOCK_SHARED);
+        tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+        error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
+
+        if (error) {
+                xfs_trans_cancel(tp, 0);
+                /* we need to return with the lock hold shared */
+                xfs_ilock(ip, XFS_ILOCK_SHARED);
+                return error;
+        }
+
+        xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+        /*
+         * Note - it's possible that we might have pushed ourselves out of the
+         * way during trans_reserve which would flush the inode. But there's
+         * no guarantee that the inode buffer has actually gone out yet (it's
+         * delwri). Plus the buffer could be pinned anyway if it's part of
+         * an inode in another recent transaction. So we play it safe and
+         * fire off the transaction anyway.
+         */
+        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+        xfs_trans_ihold(tp, ip);
+        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+        xfs_trans_set_sync(tp);
+        error = xfs_trans_commit(tp, 0);
+        xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
+
+        return error;
+}
+
 STATIC int
 xfs_fs_write_inode(
         struct inode *inode,
-        int sync)
+        struct writeback_control *wbc)
 {
         struct xfs_inode *ip = XFS_I(inode);
         struct xfs_mount *mp = ip->i_mount;
-        int error = 0;
+        int error = EAGAIN;
 
         xfs_itrace_entry(ip);
 
         if (XFS_FORCED_SHUTDOWN(mp))
                 return XFS_ERROR(EIO);
 
-        if (sync) {
-                error = xfs_wait_on_pages(ip, 0, -1);
-                if (error)
+        if (wbc->sync_mode == WB_SYNC_ALL) {
+                /*
+                 * Make sure the inode has hit stable storage. By using the
+                 * log and the fsync transactions we reduce the IOs we have
+                 * to do here from two (log and inode) to just the log.
+                 *
+                 * Note: We still need to do a delwri write of the inode after
+                 * this to flush it to the backing buffer so that bulkstat
+                 * works properly if this is the first time the inode has been
+                 * written. Because we hold the ilock atomically over the
+                 * transaction commit and the inode flush we are guaranteed
+                 * that the inode is not pinned when it returns. If the flush
+                 * lock is already held, then the inode has already been
+                 * flushed once and we don't need to flush it again. Hence
+                 * the code will only flush the inode if it isn't already
+                 * being flushed.
+                 */
+                xfs_ilock(ip, XFS_ILOCK_SHARED);
+                if (ip->i_update_core) {
+                        error = xfs_log_inode(ip);
+                        if (error)
+                                goto out_unlock;
+                }
+        } else {
+                /*
+                 * We make this non-blocking if the inode is contended, return
+                 * EAGAIN to indicate to the caller that they did not succeed.
+                 * This prevents the flush path from blocking on inodes inside
+                 * another operation right now, they get caught later by xfs_sync.
+                 */
+                if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
                         goto out;
         }
 
-        /*
-         * Bypass inodes which have already been cleaned by
-         * the inode flush clustering code inside xfs_iflush
-         */
-        if (xfs_inode_clean(ip))
-                goto out;
+        if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
+                goto out_unlock;
 
         /*
-         * We make this non-blocking if the inode is contended, return
-         * EAGAIN to indicate to the caller that they did not succeed.
-         * This prevents the flush path from blocking on inodes inside
-         * another operation right now, they get caught later by xfs_sync.
+         * Now we have the flush lock and the inode is not pinned, we can check
+         * if the inode is really clean as we know that there are no pending
+         * transaction completions, it is not waiting on the delayed write
+         * queue and there is no IO in progress.
          */
-        if (sync) {
-                xfs_ilock(ip, XFS_ILOCK_SHARED);
-                xfs_iflock(ip);
-
-                error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
-        } else {
-                error = EAGAIN;
-                if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
-                        goto out;
-                if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
-                        goto out_unlock;
-
-                error = xfs_iflush(ip, XFS_IFLUSH_ASYNC_NOBLOCK);
+        if (xfs_inode_clean(ip)) {
+                xfs_ifunlock(ip);
+                error = 0;
+                goto out_unlock;
         }
+        error = xfs_iflush(ip, 0);
 
 out_unlock:
         xfs_iunlock(ip, XFS_ILOCK_SHARED);
@@ -1075,6 +1148,20 @@ xfs_fs_clear_inode(
         XFS_STATS_INC(vn_remove);
         XFS_STATS_DEC(vn_active);
 
+        /*
+         * The iolock is used by the file system to coordinate reads,
+         * writes, and block truncates. Up to this point the lock
+         * protected concurrent accesses by users of the inode. But
+         * from here forward we're doing some final processing of the
+         * inode because we're done with it, and although we reuse the
+         * iolock for protection it is really a distinct lock class
+         * (in the lockdep sense) from before. To keep lockdep happy
+         * (and basically indicate what we are doing), we explicitly
+         * re-init the iolock here.
+         */
+        ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
+        mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
+
         xfs_inactive(ip);
 }
 
@@ -1092,8 +1179,6 @@ xfs_fs_put_super(
         struct super_block *sb)
 {
         struct xfs_mount *mp = XFS_M(sb);
-        struct xfs_inode *rip = mp->m_rootip;
-        int error, unmount_event_flags = 0;
 
         xfs_syncd_stop(mp);
 
@@ -1109,20 +1194,7 @@ xfs_fs_put_super(
                 xfs_sync_attr(mp, 0);
         }
 
-#ifdef HAVE_DMAPI
-        if (mp->m_flags & XFS_MOUNT_DMAPI) {
-                unmount_event_flags =
-                        (mp->m_dmevmask & (1 << DM_EVENT_UNMOUNT)) ?
-                                0 : DM_FLAGS_UNWANTED;
-                /*
-                 * Ignore error from dmapi here, first unmount is not allowed
-                 * to fail anyway, and second we wouldn't want to fail a
-                 * unmount because of dmapi.
-                 */
-                XFS_SEND_PREUNMOUNT(mp, rip, DM_RIGHT_NULL, rip, DM_RIGHT_NULL,
-                                NULL, NULL, 0, 0, unmount_event_flags);
-        }
-#endif
+        XFS_SEND_PREUNMOUNT(mp);
 
         /*
          * Blow away any referenced inode in the filestreams cache.
@@ -1133,13 +1205,11 @@
 
         XFS_bflush(mp->m_ddev_targp);
 
-        if (mp->m_flags & XFS_MOUNT_DMAPI) {
-                XFS_SEND_UNMOUNT(mp, rip, DM_RIGHT_NULL, 0, 0,
-                                unmount_event_flags);
-        }
+        XFS_SEND_UNMOUNT(mp);
 
         xfs_unmountfs(mp);
         xfs_freesb(mp);
+        xfs_inode_shrinker_unregister(mp);
         xfs_icsb_destroy_counters(mp);
         xfs_close_devices(mp);
         xfs_dmops_put(mp);
@@ -1237,6 +1307,29 @@ xfs_fs_statfs(
         return 0;
 }
 
+STATIC void
+xfs_save_resvblks(struct xfs_mount *mp)
+{
+        __uint64_t resblks = 0;
+
+        mp->m_resblks_save = mp->m_resblks;
+        xfs_reserve_blocks(mp, &resblks, NULL);
+}
+
+STATIC void
+xfs_restore_resvblks(struct xfs_mount *mp)
+{
+        __uint64_t resblks;
+
+        if (mp->m_resblks_save) {
+                resblks = mp->m_resblks_save;
+                mp->m_resblks_save = 0;
+        } else
+                resblks = xfs_default_resblks(mp);
+
+        xfs_reserve_blocks(mp, &resblks, NULL);
+}
+
 STATIC int
 xfs_fs_remount(
         struct super_block *sb,
@@ -1316,11 +1409,27 @@ xfs_fs_remount(
                         }
                         mp->m_update_flags = 0;
                 }
+
+                /*
+                 * Fill out the reserve pool if it is empty. Use the stashed
+                 * value if it is non-zero, otherwise go with the default.
+                 */
+                xfs_restore_resvblks(mp);
         }
 
         /* rw -> ro */
         if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
+                /*
+                 * After we have synced the data but before we sync the
+                 * metadata, we need to free up the reserve block pool so that
+                 * the used block count in the superblock on disk is correct at
+                 * the end of the remount. Stash the current reserve pool size
+                 * so that if we get remounted rw, we can return it to the same
+                 * size.
+                 */
+
                 xfs_quiesce_data(mp);
+                xfs_save_resvblks(mp);
                 xfs_quiesce_attr(mp);
                 mp->m_flags |= XFS_MOUNT_RDONLY;
         }
@@ -1339,11 +1448,22 @@ xfs_fs_freeze(
 {
         struct xfs_mount *mp = XFS_M(sb);
 
+        xfs_save_resvblks(mp);
         xfs_quiesce_attr(mp);
         return -xfs_fs_log_dummy(mp);
 }
 
 STATIC int
+xfs_fs_unfreeze(
+        struct super_block *sb)
+{
+        struct xfs_mount *mp = XFS_M(sb);
+
+        xfs_restore_resvblks(mp);
+        return 0;
+}
+
+STATIC int
 xfs_fs_show_options(
         struct seq_file *m,
         struct vfsmount *mnt)
@@ -1503,9 +1623,9 @@ xfs_fs_fill_super(
         if (error)
                 goto fail_vnrele;
 
-        kfree(mtpt);
+        xfs_inode_shrinker_register(mp);
 
-        xfs_itrace_exit(XFS_I(sb->s_root->d_inode));
+        kfree(mtpt);
         return 0;
 
  out_filestream_unmount:
@@ -1567,6 +1687,7 @@ static const struct super_operations xfs_super_operations = {
         .put_super = xfs_fs_put_super,
         .sync_fs = xfs_fs_sync_fs,
         .freeze_fs = xfs_fs_freeze,
+        .unfreeze_fs = xfs_fs_unfreeze,
         .statfs = xfs_fs_statfs,
         .remount_fs = xfs_fs_remount,
         .show_options = xfs_fs_show_options,
@@ -1581,94 +1702,6 @@ static struct file_system_type xfs_fs_type = {
 };
 
 STATIC int __init
-xfs_alloc_trace_bufs(void)
-{
-#ifdef XFS_ALLOC_TRACE
-        xfs_alloc_trace_buf = ktrace_alloc(XFS_ALLOC_TRACE_SIZE, KM_MAYFAIL);
-        if (!xfs_alloc_trace_buf)
-                goto out;
-#endif
-#ifdef XFS_BMAP_TRACE
-        xfs_bmap_trace_buf = ktrace_alloc(XFS_BMAP_TRACE_SIZE, KM_MAYFAIL);
-        if (!xfs_bmap_trace_buf)
-                goto out_free_alloc_trace;
-#endif
-#ifdef XFS_BTREE_TRACE
-        xfs_allocbt_trace_buf = ktrace_alloc(XFS_ALLOCBT_TRACE_SIZE,
-                                             KM_MAYFAIL);
-        if (!xfs_allocbt_trace_buf)
-                goto out_free_bmap_trace;
-
-        xfs_inobt_trace_buf = ktrace_alloc(XFS_INOBT_TRACE_SIZE, KM_MAYFAIL);
-        if (!xfs_inobt_trace_buf)
-                goto out_free_allocbt_trace;
-
-        xfs_bmbt_trace_buf = ktrace_alloc(XFS_BMBT_TRACE_SIZE, KM_MAYFAIL);
-        if (!xfs_bmbt_trace_buf)
-                goto out_free_inobt_trace;
-#endif
-#ifdef XFS_ATTR_TRACE
-        xfs_attr_trace_buf = ktrace_alloc(XFS_ATTR_TRACE_SIZE, KM_MAYFAIL);
-        if (!xfs_attr_trace_buf)
-                goto out_free_bmbt_trace;
-#endif
-#ifdef XFS_DIR2_TRACE
-        xfs_dir2_trace_buf = ktrace_alloc(XFS_DIR2_GTRACE_SIZE, KM_MAYFAIL);
-        if (!xfs_dir2_trace_buf)
-                goto out_free_attr_trace;
-#endif
-
-        return 0;
-
-#ifdef XFS_DIR2_TRACE
- out_free_attr_trace:
-#endif
-#ifdef XFS_ATTR_TRACE
-        ktrace_free(xfs_attr_trace_buf);
- out_free_bmbt_trace:
-#endif
-#ifdef XFS_BTREE_TRACE
-        ktrace_free(xfs_bmbt_trace_buf);
- out_free_inobt_trace:
-        ktrace_free(xfs_inobt_trace_buf);
- out_free_allocbt_trace:
-        ktrace_free(xfs_allocbt_trace_buf);
- out_free_bmap_trace:
-#endif
-#ifdef XFS_BMAP_TRACE
-        ktrace_free(xfs_bmap_trace_buf);
- out_free_alloc_trace:
-#endif
-#ifdef XFS_ALLOC_TRACE
-        ktrace_free(xfs_alloc_trace_buf);
- out:
-#endif
-        return -ENOMEM;
-}
-
-STATIC void
-xfs_free_trace_bufs(void)
-{
-#ifdef XFS_DIR2_TRACE
-        ktrace_free(xfs_dir2_trace_buf);
-#endif
-#ifdef XFS_ATTR_TRACE
-        ktrace_free(xfs_attr_trace_buf);
-#endif
-#ifdef XFS_BTREE_TRACE
-        ktrace_free(xfs_bmbt_trace_buf);
-        ktrace_free(xfs_inobt_trace_buf);
-        ktrace_free(xfs_allocbt_trace_buf);
-#endif
-#ifdef XFS_BMAP_TRACE
-        ktrace_free(xfs_bmap_trace_buf);
-#endif
-#ifdef XFS_ALLOC_TRACE
-        ktrace_free(xfs_alloc_trace_buf);
-#endif
-}
-
-STATIC int __init
 xfs_init_zones(void)
 {
 
@@ -1809,7 +1842,6 @@ init_xfs_fs(void)
         printk(KERN_INFO XFS_VERSION_STRING " with "
                          XFS_BUILD_OPTIONS " enabled\n");
 
-        ktrace_init(64);
         xfs_ioend_init();
         xfs_dir_startup();
 
@@ -1817,13 +1849,9 @@ init_xfs_fs(void)
         if (error)
                 goto out;
 
-        error = xfs_alloc_trace_bufs();
-        if (error)
-                goto out_destroy_zones;
-
         error = xfs_mru_cache_init();
         if (error)
-                goto out_free_trace_buffers;
+                goto out_destroy_zones;
 
         error = xfs_filestream_init();
         if (error)
@@ -1842,6 +1870,7 @@ init_xfs_fs(void)
                 goto out_cleanup_procfs;
 
         vfs_initquota();
+        xfs_inode_shrinker_init();
 
         error = register_filesystem(&xfs_fs_type);
         if (error)
@@ -1858,8 +1887,6 @@ init_xfs_fs(void)
         xfs_filestream_uninit();
  out_mru_cache_uninit:
         xfs_mru_cache_uninit();
- out_free_trace_buffers:
-        xfs_free_trace_bufs();
  out_destroy_zones:
         xfs_destroy_zones();
  out:
@@ -1871,14 +1898,13 @@ exit_xfs_fs(void)
 {
         vfs_exitquota();
         unregister_filesystem(&xfs_fs_type);
+        xfs_inode_shrinker_destroy();
         xfs_sysctl_unregister();
         xfs_cleanup_procfs();
         xfs_buf_terminate();
         xfs_filestream_uninit();
         xfs_mru_cache_uninit();
-        xfs_free_trace_bufs();
         xfs_destroy_zones();
-        ktrace_uninit();
 }
 
 module_init(init_xfs_fs);