aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@infradead.org>2011-12-06 16:58:12 -0500
committerBen Myers <bpm@sgi.com>2011-12-12 17:41:44 -0500
commit34625c661b01dab193c7e8a0151a63553e97cfdf (patch)
tree75bbf616053e35ab63847a717068f8fae51f8d12
parentf2fba558d3c80dcd10bbadbb8f05c78dc2860b95 (diff)
xfs: remove xfs_qm_sync
Now that we can't have any dirty dquots around that aren't in the AIL, we can get rid of the explicit dquot syncing from xfssyncd and xfs_fs_sync_fs and instead rely on AIL pushing to write out any quota updates. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Dave Chinner <dchinner@redhat.com> Signed-off-by: Ben Myers <bpm@sgi.com>
-rw-r--r--fs/xfs/xfs_qm.c94
-rw-r--r--fs/xfs/xfs_qm.h6
-rw-r--r--fs/xfs/xfs_quota.h5
-rw-r--r--fs/xfs/xfs_super.c11
-rw-r--r--fs/xfs/xfs_sync.c6
5 files changed, 3 insertions, 119 deletions
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index c704ea0115d..9bf32558f5f 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -879,100 +879,6 @@ xfs_qm_dqdetach(
879 } 879 }
880} 880}
881 881
882int
883xfs_qm_sync(
884 struct xfs_mount *mp,
885 int flags)
886{
887 struct xfs_quotainfo *q = mp->m_quotainfo;
888 int recl, restarts;
889 struct xfs_dquot *dqp;
890 int error;
891
892 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
893 return 0;
894
895 restarts = 0;
896
897 again:
898 mutex_lock(&q->qi_dqlist_lock);
899 /*
900 * dqpurge_all() also takes the mplist lock and iterate thru all dquots
901 * in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared
902 * when we have the mplist lock, we know that dquots will be consistent
903 * as long as we have it locked.
904 */
905 if (!XFS_IS_QUOTA_ON(mp)) {
906 mutex_unlock(&q->qi_dqlist_lock);
907 return 0;
908 }
909 ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
910 list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
911 /*
912 * If this is vfs_sync calling, then skip the dquots that
913 * don't 'seem' to be dirty. ie. don't acquire dqlock.
914 * This is very similar to what xfs_sync does with inodes.
915 */
916 if (flags & SYNC_TRYLOCK) {
917 if (!XFS_DQ_IS_DIRTY(dqp))
918 continue;
919 if (!xfs_qm_dqlock_nowait(dqp))
920 continue;
921 } else {
922 xfs_dqlock(dqp);
923 }
924
925 /*
926 * Now, find out for sure if this dquot is dirty or not.
927 */
928 if (! XFS_DQ_IS_DIRTY(dqp)) {
929 xfs_dqunlock(dqp);
930 continue;
931 }
932
933 /* XXX a sentinel would be better */
934 recl = q->qi_dqreclaims;
935 if (!xfs_dqflock_nowait(dqp)) {
936 if (flags & SYNC_TRYLOCK) {
937 xfs_dqunlock(dqp);
938 continue;
939 }
940 /*
941 * If we can't grab the flush lock then if the caller
942 * really wanted us to give this our best shot, so
943 * see if we can give a push to the buffer before we wait
944 * on the flush lock. At this point, we know that
945 * even though the dquot is being flushed,
946 * it has (new) dirty data.
947 */
948 xfs_qm_dqflock_pushbuf_wait(dqp);
949 }
950 /*
951 * Let go of the mplist lock. We don't want to hold it
952 * across a disk write
953 */
954 mutex_unlock(&q->qi_dqlist_lock);
955 error = xfs_qm_dqflush(dqp, flags);
956 xfs_dqunlock(dqp);
957 if (error && XFS_FORCED_SHUTDOWN(mp))
958 return 0; /* Need to prevent umount failure */
959 else if (error)
960 return error;
961
962 mutex_lock(&q->qi_dqlist_lock);
963 if (recl != q->qi_dqreclaims) {
964 if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS)
965 break;
966
967 mutex_unlock(&q->qi_dqlist_lock);
968 goto again;
969 }
970 }
971
972 mutex_unlock(&q->qi_dqlist_lock);
973 return 0;
974}
975
976/* 882/*
977 * The hash chains and the mplist use the same xfs_dqhash structure as 883 * The hash chains and the mplist use the same xfs_dqhash structure as
978 * their list head, but we can take the mplist qh_lock and one of the 884 * their list head, but we can take the mplist qh_lock and one of the
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index 43b9abe1052..9b4f3adefbc 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -33,12 +33,6 @@ extern kmem_zone_t *qm_dqzone;
33extern kmem_zone_t *qm_dqtrxzone; 33extern kmem_zone_t *qm_dqtrxzone;
34 34
35/* 35/*
36 * Used in xfs_qm_sync called by xfs_sync to count the max times that it can
37 * iterate over the mountpt's dquot list in one call.
38 */
39#define XFS_QM_SYNC_MAX_RESTARTS 7
40
41/*
42 * Ditto, for xfs_qm_dqreclaim_one. 36 * Ditto, for xfs_qm_dqreclaim_one.
43 */ 37 */
44#define XFS_QM_RECLAIM_MAX_RESTARTS 4 38#define XFS_QM_RECLAIM_MAX_RESTARTS 4
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index a595f29567f..707ba33e319 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -326,7 +326,6 @@ extern int xfs_qm_dqattach_locked(struct xfs_inode *, uint);
326extern void xfs_qm_dqdetach(struct xfs_inode *); 326extern void xfs_qm_dqdetach(struct xfs_inode *);
327extern void xfs_qm_dqrele(struct xfs_dquot *); 327extern void xfs_qm_dqrele(struct xfs_dquot *);
328extern void xfs_qm_statvfs(struct xfs_inode *, struct kstatfs *); 328extern void xfs_qm_statvfs(struct xfs_inode *, struct kstatfs *);
329extern int xfs_qm_sync(struct xfs_mount *, int);
330extern int xfs_qm_newmount(struct xfs_mount *, uint *, uint *); 329extern int xfs_qm_newmount(struct xfs_mount *, uint *, uint *);
331extern void xfs_qm_mount_quotas(struct xfs_mount *); 330extern void xfs_qm_mount_quotas(struct xfs_mount *);
332extern void xfs_qm_unmount(struct xfs_mount *); 331extern void xfs_qm_unmount(struct xfs_mount *);
@@ -366,10 +365,6 @@ static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp,
366#define xfs_qm_dqdetach(ip) 365#define xfs_qm_dqdetach(ip)
367#define xfs_qm_dqrele(d) 366#define xfs_qm_dqrele(d)
368#define xfs_qm_statvfs(ip, s) 367#define xfs_qm_statvfs(ip, s)
369static inline int xfs_qm_sync(struct xfs_mount *mp, int flags)
370{
371 return 0;
372}
373#define xfs_qm_newmount(mp, a, b) (0) 368#define xfs_qm_newmount(mp, a, b) (0)
374#define xfs_qm_mount_quotas(mp) 369#define xfs_qm_mount_quotas(mp)
375#define xfs_qm_unmount(mp) 370#define xfs_qm_unmount(mp)
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 0e76348d958..88cd0c89316 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1025,17 +1025,10 @@ xfs_fs_sync_fs(
1025 int error; 1025 int error;
1026 1026
1027 /* 1027 /*
1028 * Not much we can do for the first async pass. Writing out the 1028 * Doing anything during the async pass would be counterproductive.
1029 * superblock would be counter-productive as we are going to redirty
1030 * when writing out other data and metadata (and writing out a single
1031 * block is quite fast anyway).
1032 *
1033 * Try to asynchronously kick off quota syncing at least.
1034 */ 1029 */
1035 if (!wait) { 1030 if (!wait)
1036 xfs_qm_sync(mp, SYNC_TRYLOCK);
1037 return 0; 1031 return 0;
1038 }
1039 1032
1040 error = xfs_quiesce_data(mp); 1033 error = xfs_quiesce_data(mp);
1041 if (error) 1034 if (error)
diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c
index be5c51d8f75..5b9ec37a3e0 100644
--- a/fs/xfs/xfs_sync.c
+++ b/fs/xfs/xfs_sync.c
@@ -359,10 +359,7 @@ xfs_quiesce_data(
359{ 359{
360 int error, error2 = 0; 360 int error, error2 = 0;
361 361
362 xfs_qm_sync(mp, SYNC_TRYLOCK); 362 /* force out the log */
363 xfs_qm_sync(mp, SYNC_WAIT);
364
365 /* force out the newly dirtied log buffers */
366 xfs_log_force(mp, XFS_LOG_SYNC); 363 xfs_log_force(mp, XFS_LOG_SYNC);
367 364
368 /* write superblock and hoover up shutdown errors */ 365 /* write superblock and hoover up shutdown errors */
@@ -470,7 +467,6 @@ xfs_sync_worker(
470 error = xfs_fs_log_dummy(mp); 467 error = xfs_fs_log_dummy(mp);
471 else 468 else
472 xfs_log_force(mp, 0); 469 xfs_log_force(mp, 0);
473 error = xfs_qm_sync(mp, SYNC_TRYLOCK);
474 470
475 /* start pushing all the metadata that is currently dirty */ 471 /* start pushing all the metadata that is currently dirty */
476 xfs_ail_push_all(mp->m_ail); 472 xfs_ail_push_all(mp->m_ail);