Diffstat (limited to 'fs/xfs/linux-2.6/xfs_super.c')
 fs/xfs/linux-2.6/xfs_super.c | 213 +++++++++++++++----------------------------
 1 file changed, 83 insertions(+), 130 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 29f1edca76de..a4e07974955b 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -25,14 +25,11 @@
 #include "xfs_ag.h"
 #include "xfs_dir2.h"
 #include "xfs_alloc.h"
-#include "xfs_dmapi.h"
 #include "xfs_quota.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
@@ -43,7 +40,6 @@
 #include "xfs_error.h"
 #include "xfs_itable.h"
 #include "xfs_fsops.h"
-#include "xfs_rw.h"
 #include "xfs_attr.h"
 #include "xfs_buf_item.h"
 #include "xfs_utils.h"
@@ -94,7 +90,6 @@ mempool_t *xfs_ioend_pool;
 #define MNTOPT_BARRIER	"barrier"	/* use writer barriers for log write and
 					 * unwritten extent conversion */
 #define MNTOPT_NOBARRIER "nobarrier"	/* .. disable */
-#define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */
 #define MNTOPT_64BITINODE "inode64"	/* inodes can be allocated anywhere */
 #define MNTOPT_IKEEP	"ikeep"		/* do not free empty inode clusters */
 #define MNTOPT_NOIKEEP	"noikeep"	/* free empty inode clusters */
@@ -116,9 +111,8 @@ mempool_t *xfs_ioend_pool;
 #define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */
 #define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */
 #define MNTOPT_QUOTANOENF "qnoenforce"	/* same as uqnoenforce */
-#define MNTOPT_DMAPI	"dmapi"		/* DMI enabled (DMAPI / XDSM) */
-#define MNTOPT_XDSM	"xdsm"		/* DMI enabled (DMAPI / XDSM) */
-#define MNTOPT_DMI	"dmi"		/* DMI enabled (DMAPI / XDSM) */
+#define MNTOPT_DELAYLOG   "delaylog"	/* Delayed logging enabled */
+#define MNTOPT_NODELAYLOG "nodelaylog"	/* Delayed logging disabled */
 
 /*
  * Table driven mount option parser.
@@ -170,15 +164,13 @@ suffix_strtoul(char *s, char **endp, unsigned int base)
 STATIC int
 xfs_parseargs(
 	struct xfs_mount	*mp,
-	char			*options,
-	char			**mtpt)
+	char			*options)
 {
 	struct super_block	*sb = mp->m_super;
 	char			*this_char, *value, *eov;
 	int			dsunit = 0;
 	int			dswidth = 0;
 	int			iosize = 0;
-	int			dmapi_implies_ikeep = 1;
 	__uint8_t		iosizelog = 0;
 
 	/*
@@ -241,15 +233,10 @@ xfs_parseargs(
 			if (!mp->m_logname)
 				return ENOMEM;
 		} else if (!strcmp(this_char, MNTOPT_MTPT)) {
-			if (!value || !*value) {
-				cmn_err(CE_WARN,
-					"XFS: %s option requires an argument",
-					this_char);
-				return EINVAL;
-			}
-			*mtpt = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
-			if (!*mtpt)
-				return ENOMEM;
+			cmn_err(CE_WARN,
+				"XFS: %s option not allowed on this system",
+				this_char);
+			return EINVAL;
 		} else if (!strcmp(this_char, MNTOPT_RTDEV)) {
 			if (!value || !*value) {
 				cmn_err(CE_WARN,
@@ -286,8 +273,6 @@ xfs_parseargs(
 			mp->m_flags &= ~XFS_MOUNT_GRPID;
 		} else if (!strcmp(this_char, MNTOPT_WSYNC)) {
 			mp->m_flags |= XFS_MOUNT_WSYNC;
-		} else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) {
-			mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC;
 		} else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
 			mp->m_flags |= XFS_MOUNT_NORECOVERY;
 		} else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
@@ -327,7 +312,6 @@ xfs_parseargs(
 		} else if (!strcmp(this_char, MNTOPT_IKEEP)) {
 			mp->m_flags |= XFS_MOUNT_IKEEP;
 		} else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
-			dmapi_implies_ikeep = 0;
 			mp->m_flags &= ~XFS_MOUNT_IKEEP;
 		} else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
 			mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
@@ -368,19 +352,22 @@ xfs_parseargs(
 		} else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
 			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
 			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
-		} else if (!strcmp(this_char, MNTOPT_DMAPI)) {
-			mp->m_flags |= XFS_MOUNT_DMAPI;
-		} else if (!strcmp(this_char, MNTOPT_XDSM)) {
-			mp->m_flags |= XFS_MOUNT_DMAPI;
-		} else if (!strcmp(this_char, MNTOPT_DMI)) {
-			mp->m_flags |= XFS_MOUNT_DMAPI;
+		} else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
+			mp->m_flags |= XFS_MOUNT_DELAYLOG;
+			cmn_err(CE_WARN,
+				"Enabling EXPERIMENTAL delayed logging feature "
+				"- use at your own risk.\n");
+		} else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
+			mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
 		} else if (!strcmp(this_char, "ihashsize")) {
 			cmn_err(CE_WARN,
 	"XFS: ihashsize no longer used, option is deprecated.");
 		} else if (!strcmp(this_char, "osyncisdsync")) {
-			/* no-op, this is now the default */
 			cmn_err(CE_WARN,
-	"XFS: osyncisdsync is now the default, option is deprecated.");
+	"XFS: osyncisdsync has no effect, option is deprecated.");
+		} else if (!strcmp(this_char, "osyncisosync")) {
+			cmn_err(CE_WARN,
+	"XFS: osyncisosync has no effect, option is deprecated.");
 		} else if (!strcmp(this_char, "irixsgid")) {
 			cmn_err(CE_WARN,
 	"XFS: irixsgid is now a sysctl(2) variable, option is deprecated.");
@@ -421,12 +408,6 @@ xfs_parseargs(
 		return EINVAL;
 	}
 
-	if ((mp->m_flags & XFS_MOUNT_DMAPI) && (!*mtpt || *mtpt[0] == '\0')) {
-		printk("XFS: %s option needs the mount point option as well\n",
-			MNTOPT_DMAPI);
-		return EINVAL;
-	}
-
 	if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
 		cmn_err(CE_WARN,
 	"XFS: sunit and swidth must be specified together");
@@ -440,18 +421,6 @@ xfs_parseargs(
 		return EINVAL;
 	}
 
-	/*
-	 * Applications using DMI filesystems often expect the
-	 * inode generation number to be monotonically increasing.
-	 * If we delete inode chunks we break this assumption, so
-	 * keep unused inode chunks on disk for DMI filesystems
-	 * until we come up with a better solution.
-	 * Note that if "ikeep" or "noikeep" mount options are
-	 * supplied, then they are honored.
-	 */
-	if ((mp->m_flags & XFS_MOUNT_DMAPI) && dmapi_implies_ikeep)
-		mp->m_flags |= XFS_MOUNT_IKEEP;
-
 done:
 	if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) {
 		/*
@@ -530,11 +499,10 @@ xfs_showargs(
 		{ XFS_MOUNT_SWALLOC,		"," MNTOPT_SWALLOC },
 		{ XFS_MOUNT_NOUUID,		"," MNTOPT_NOUUID },
 		{ XFS_MOUNT_NORECOVERY,		"," MNTOPT_NORECOVERY },
-		{ XFS_MOUNT_OSYNCISOSYNC,	"," MNTOPT_OSYNCISOSYNC },
 		{ XFS_MOUNT_ATTR2,		"," MNTOPT_ATTR2 },
 		{ XFS_MOUNT_FILESTREAMS,	"," MNTOPT_FILESTREAM },
-		{ XFS_MOUNT_DMAPI,		"," MNTOPT_DMAPI },
 		{ XFS_MOUNT_GRPID,		"," MNTOPT_GRPID },
+		{ XFS_MOUNT_DELAYLOG,		"," MNTOPT_DELAYLOG },
 		{ 0, NULL }
 	};
 	static struct proc_xfs_info xfs_info_unset[] = {
@@ -725,7 +693,8 @@ void
 xfs_blkdev_issue_flush(
 	xfs_buftarg_t		*buftarg)
 {
-	blkdev_issue_flush(buftarg->bt_bdev, NULL);
+	blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL,
+			BLKDEV_IFL_WAIT);
 }
 
 STATIC void
@@ -789,18 +758,18 @@ xfs_open_devices(
 	 * Setup xfs_mount buffer target pointers
 	 */
 	error = ENOMEM;
-	mp->m_ddev_targp = xfs_alloc_buftarg(ddev, 0);
+	mp->m_ddev_targp = xfs_alloc_buftarg(ddev, 0, mp->m_fsname);
 	if (!mp->m_ddev_targp)
 		goto out_close_rtdev;
 
 	if (rtdev) {
-		mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev, 1);
+		mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev, 1, mp->m_fsname);
 		if (!mp->m_rtdev_targp)
 			goto out_free_ddev_targ;
 	}
 
 	if (logdev && logdev != ddev) {
-		mp->m_logdev_targp = xfs_alloc_buftarg(logdev, 1);
+		mp->m_logdev_targp = xfs_alloc_buftarg(logdev, 1, mp->m_fsname);
 		if (!mp->m_logdev_targp)
 			goto out_free_rtdev_targ;
 	} else {
@@ -902,7 +871,8 @@ xfsaild_start(
 	struct xfs_ail	*ailp)
 {
 	ailp->xa_target = 0;
-	ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild");
+	ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
+				ailp->xa_mount->m_fsname);
 	if (IS_ERR(ailp->xa_task))
 		return -PTR_ERR(ailp->xa_task);
 	return 0;
@@ -935,7 +905,7 @@ xfs_fs_destroy_inode(
 {
 	struct xfs_inode	*ip = XFS_I(inode);
 
-	xfs_itrace_entry(ip);
+	trace_xfs_destroy_inode(ip);
 
 	XFS_STATS_INC(vn_reclaim);
 
@@ -1051,10 +1021,8 @@ xfs_log_inode(
 	 * an inode in another recent transaction. So we play it safe and
 	 * fire off the transaction anyway.
 	 */
-	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-	xfs_trans_ihold(tp, ip);
+	xfs_trans_ijoin(tp, ip);
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-	xfs_trans_set_sync(tp);
 	error = xfs_trans_commit(tp, 0);
 	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 
@@ -1070,28 +1038,20 @@ xfs_fs_write_inode(
 	struct xfs_mount	*mp = ip->i_mount;
 	int			error = EAGAIN;
 
-	xfs_itrace_entry(ip);
+	trace_xfs_write_inode(ip);
 
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return XFS_ERROR(EIO);
 
 	if (wbc->sync_mode == WB_SYNC_ALL) {
 		/*
-		 * Make sure the inode has hit stable storage. By using the
-		 * log and the fsync transactions we reduce the IOs we have
-		 * to do here from two (log and inode) to just the log.
-		 *
-		 * Note: We still need to do a delwri write of the inode after
-		 * this to flush it to the backing buffer so that bulkstat
-		 * works properly if this is the first time the inode has been
-		 * written. Because we hold the ilock atomically over the
-		 * transaction commit and the inode flush we are guaranteed
-		 * that the inode is not pinned when it returns. If the flush
-		 * lock is already held, then the inode has already been
-		 * flushed once and we don't need to flush it again. Hence
-		 * the code will only flush the inode if it isn't already
-		 * being flushed.
+		 * Make sure the inode has made it into the log. Instead
+		 * of forcing it all the way to stable storage using a
+		 * synchronous transaction we let the log force inside the
+		 * ->sync_fs call do that for us, which reduces the number
+		 * of synchronous log forces dramatically.
 		 */
+		xfs_ioend_wait(ip);
 		xfs_ilock(ip, XFS_ILOCK_SHARED);
 		if (ip->i_update_core) {
 			error = xfs_log_inode(ip);
@@ -1103,27 +1063,29 @@ xfs_fs_write_inode(
 		 * We make this non-blocking if the inode is contended, return
 		 * EAGAIN to indicate to the caller that they did not succeed.
 		 * This prevents the flush path from blocking on inodes inside
-		 * another operation right now, they get caught later by xfs_sync.
+		 * another operation right now, they get caught later by
+		 * xfs_sync.
 		 */
 		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
 			goto out;
-	}
 
-	if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
-		goto out_unlock;
+		if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
+			goto out_unlock;
 
-	/*
-	 * Now we have the flush lock and the inode is not pinned, we can check
-	 * if the inode is really clean as we know that there are no pending
-	 * transaction completions, it is not waiting on the delayed write
-	 * queue and there is no IO in progress.
-	 */
-	if (xfs_inode_clean(ip)) {
-		xfs_ifunlock(ip);
-		error = 0;
-		goto out_unlock;
+		/*
+		 * Now we have the flush lock and the inode is not pinned, we
+		 * can check if the inode is really clean as we know that
+		 * there are no pending transaction completions, it is not
+		 * waiting on the delayed write queue and there is no IO in
+		 * progress.
+		 */
+		if (xfs_inode_clean(ip)) {
+			xfs_ifunlock(ip);
+			error = 0;
+			goto out_unlock;
+		}
+		error = xfs_iflush(ip, 0);
 	}
-	error = xfs_iflush(ip, 0);
 
  out_unlock:
 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
@@ -1138,12 +1100,15 @@ xfs_fs_write_inode(
 }
 
 STATIC void
-xfs_fs_clear_inode(
+xfs_fs_evict_inode(
 	struct inode		*inode)
 {
 	xfs_inode_t		*ip = XFS_I(inode);
 
-	xfs_itrace_entry(ip);
+	trace_xfs_evict_inode(ip);
+
+	truncate_inode_pages(&inode->i_data, 0);
+	end_writeback(inode);
 	XFS_STATS_INC(vn_rele);
 	XFS_STATS_INC(vn_remove);
 	XFS_STATS_DEC(vn_active);
@@ -1180,22 +1145,13 @@ xfs_fs_put_super(
 {
 	struct xfs_mount	*mp = XFS_M(sb);
 
+	/*
+	 * Unregister the memory shrinker before we tear down the mount
+	 * structure so we don't have memory reclaim racing with us here.
+	 */
+	xfs_inode_shrinker_unregister(mp);
 	xfs_syncd_stop(mp);
 
-	if (!(sb->s_flags & MS_RDONLY)) {
-		/*
-		 * XXX(hch): this should be SYNC_WAIT.
-		 *
-		 * Or more likely not needed at all because the VFS is already
-		 * calling ->sync_fs after shutting down all filestem
-		 * operations and just before calling ->put_super.
-		 */
-		xfs_sync_data(mp, 0);
-		xfs_sync_attr(mp, 0);
-	}
-
-	XFS_SEND_PREUNMOUNT(mp);
-
 	/*
 	 * Blow away any referenced inode in the filestreams cache.
 	 * This can and will cause log traffic as inodes go inactive
@@ -1205,14 +1161,10 @@ xfs_fs_put_super(
 
 	XFS_bflush(mp->m_ddev_targp);
 
-	XFS_SEND_UNMOUNT(mp);
-
 	xfs_unmountfs(mp);
 	xfs_freesb(mp);
-	xfs_inode_shrinker_unregister(mp);
 	xfs_icsb_destroy_counters(mp);
 	xfs_close_devices(mp);
-	xfs_dmops_put(mp);
 	xfs_free_fsname(mp);
 	kfree(mp);
 }
@@ -1274,6 +1226,7 @@ xfs_fs_statfs(
 	struct xfs_inode	*ip = XFS_I(dentry->d_inode);
 	__uint64_t		fakeinos, id;
 	xfs_extlen_t		lsize;
+	__int64_t		ffree;
 
 	statp->f_type = XFS_SB_MAGIC;
 	statp->f_namelen = MAXNAMELEN - 1;
@@ -1297,7 +1250,11 @@ xfs_fs_statfs(
 	statp->f_files = min_t(typeof(statp->f_files),
 				statp->f_files,
 				mp->m_maxicount);
-	statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
+
+	/* make sure statp->f_ffree does not underflow */
+	ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
+	statp->f_ffree = max_t(__int64_t, ffree, 0);
+
 	spin_unlock(&mp->m_sb_lock);
 
 	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
@@ -1450,7 +1407,7 @@ xfs_fs_freeze(
 
 	xfs_save_resvblks(mp);
 	xfs_quiesce_attr(mp);
-	return -xfs_fs_log_dummy(mp);
+	return -xfs_fs_log_dummy(mp, SYNC_WAIT);
 }
 
 STATIC int
@@ -1530,7 +1487,6 @@ xfs_fs_fill_super(
 	struct inode		*root;
 	struct xfs_mount	*mp = NULL;
 	int			flags = 0, error = ENOMEM;
-	char			*mtpt = NULL;
 
 	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
 	if (!mp)
@@ -1546,7 +1502,7 @@ xfs_fs_fill_super(
 	mp->m_super = sb;
 	sb->s_fs_info = mp;
 
-	error = xfs_parseargs(mp, (char *)data, &mtpt);
+	error = xfs_parseargs(mp, (char *)data);
 	if (error)
 		goto out_free_fsname;
 
@@ -1558,16 +1514,12 @@ xfs_fs_fill_super(
 #endif
 	sb->s_op = &xfs_super_operations;
 
-	error = xfs_dmops_get(mp);
-	if (error)
-		goto out_free_fsname;
-
 	if (silent)
 		flags |= XFS_MFSI_QUIET;
 
 	error = xfs_open_devices(mp);
 	if (error)
-		goto out_put_dmops;
+		goto out_free_fsname;
 
 	if (xfs_icsb_init_counters(mp))
 		mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
@@ -1595,8 +1547,6 @@ xfs_fs_fill_super(
 	if (error)
 		goto out_filestream_unmount;
 
-	XFS_SEND_MOUNT(mp, DM_RIGHT_NULL, mtpt, mp->m_fsname);
-
 	sb->s_magic = XFS_SB_MAGIC;
 	sb->s_blocksize = mp->m_sb.sb_blocksize;
 	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
@@ -1625,7 +1575,6 @@ xfs_fs_fill_super(
 
 	xfs_inode_shrinker_register(mp);
 
-	kfree(mtpt);
 	return 0;
 
  out_filestream_unmount:
@@ -1635,11 +1584,8 @@ xfs_fs_fill_super(
  out_destroy_counters:
 	xfs_icsb_destroy_counters(mp);
 	xfs_close_devices(mp);
- out_put_dmops:
-	xfs_dmops_put(mp);
  out_free_fsname:
 	xfs_free_fsname(mp);
-	kfree(mtpt);
 	kfree(mp);
  out:
 	return -error;
@@ -1683,7 +1629,7 @@ static const struct super_operations xfs_super_operations = {
 	.destroy_inode		= xfs_fs_destroy_inode,
 	.dirty_inode		= xfs_fs_dirty_inode,
 	.write_inode		= xfs_fs_write_inode,
-	.clear_inode		= xfs_fs_clear_inode,
+	.evict_inode		= xfs_fs_evict_inode,
 	.put_super		= xfs_fs_put_super,
 	.sync_fs		= xfs_fs_sync_fs,
 	.freeze_fs		= xfs_fs_freeze,
@@ -1746,16 +1692,22 @@ xfs_init_zones(void)
 	if (!xfs_trans_zone)
 		goto out_destroy_ifork_zone;
 
+	xfs_log_item_desc_zone =
+		kmem_zone_init(sizeof(struct xfs_log_item_desc),
+			       "xfs_log_item_desc");
+	if (!xfs_log_item_desc_zone)
+		goto out_destroy_trans_zone;
+
 	/*
 	 * The size of the zone allocated buf log item is the maximum
 	 * size possible under XFS. This wastes a little bit of memory,
 	 * but it is much faster.
 	 */
 	xfs_buf_item_zone = kmem_zone_init((sizeof(xfs_buf_log_item_t) +
-				(((XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK) /
+				(((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) /
 				  NBWORD) * sizeof(int))), "xfs_buf_item");
 	if (!xfs_buf_item_zone)
-		goto out_destroy_trans_zone;
+		goto out_destroy_log_item_desc_zone;
 
 	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
 			((XFS_EFD_MAX_FAST_EXTENTS - 1) *
@@ -1792,6 +1744,8 @@ xfs_init_zones(void)
 	kmem_zone_destroy(xfs_efd_zone);
  out_destroy_buf_item_zone:
 	kmem_zone_destroy(xfs_buf_item_zone);
+ out_destroy_log_item_desc_zone:
+	kmem_zone_destroy(xfs_log_item_desc_zone);
  out_destroy_trans_zone:
 	kmem_zone_destroy(xfs_trans_zone);
  out_destroy_ifork_zone:
@@ -1822,6 +1776,7 @@ xfs_destroy_zones(void)
 	kmem_zone_destroy(xfs_efi_zone);
 	kmem_zone_destroy(xfs_efd_zone);
 	kmem_zone_destroy(xfs_buf_item_zone);
+	kmem_zone_destroy(xfs_log_item_desc_zone);
 	kmem_zone_destroy(xfs_trans_zone);
 	kmem_zone_destroy(xfs_ifork_zone);
 	kmem_zone_destroy(xfs_dabuf_zone);
@@ -1870,7 +1825,6 @@ init_xfs_fs(void)
 		goto out_cleanup_procfs;
 
 	vfs_initquota();
-	xfs_inode_shrinker_init();
 
 	error = register_filesystem(&xfs_fs_type);
 	if (error)
@@ -1898,7 +1852,6 @@ exit_xfs_fs(void)
 {
 	vfs_exitquota();
 	unregister_filesystem(&xfs_fs_type);
-	xfs_inode_shrinker_destroy();
 	xfs_sysctl_unregister();
 	xfs_cleanup_procfs();
 	xfs_buf_terminate();