aboutsummaryrefslogtreecommitdiffstats
path: root/fs/xfs/xfs_mount.h
diff options
context:
space:
mode:
authorDave Chinner <dchinner@redhat.com>2011-04-07 22:45:07 -0400
committerDave Chinner <david@fromorbit.com>2011-04-07 22:45:07 -0400
commit89e4cb550a492cfca038a555fcc1bdac58822ec3 (patch)
treeab688a1849d6361c92b9f60ae0586045908010da /fs/xfs/xfs_mount.h
parentc6d09b666de11eb272326a6eb6cd3246da571014 (diff)
xfs: convert ENOSPC inode flushing to use new syncd workqueue
One of the problems with the current inode flush at ENOSPC is that we queue a flush per ENOSPC event, regardless of how many are already queued. This can result in hundreds of queued flushes, most of which simply burn CPU scanning and do no real work. This simply slows down allocation at ENOSPC. We really only need one active flush at a time, and we can easily implement that via the new xfs_syncd_wq. All we need to do is queue a flush if one is not already active, then block waiting for the currently active flush to complete. The result is that we only ever have a single ENOSPC inode flush active at a time and this greatly reduces the overhead of ENOSPC processing. On my 2p test machine, this results in tests exercising ENOSPC conditions running significantly faster - 042 halves execution time, 083 drops from 60s to 5s, etc - while not introducing test regressions. This allows us to remove the old xfssyncd threads and infrastructure as they are no longer used. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Alex Elder <aelder@sgi.com>
Diffstat (limited to 'fs/xfs/xfs_mount.h')
-rw-r--r--fs/xfs/xfs_mount.h4
1 files changed, 1 insertions, 3 deletions
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 2c11e62be888..a0ad90e95299 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -204,9 +204,7 @@ typedef struct xfs_mount {
 #endif
 	struct xfs_mru_cache	*m_filestream;	/* per-mount filestream data */
 	struct delayed_work	m_sync_work;	/* background sync work */
-	struct task_struct	*m_sync_task;	/* generalised sync thread */
-	struct list_head	m_sync_list;	/* sync thread work item list */
-	spinlock_t		m_sync_lock;	/* work item list lock */
+	struct work_struct	m_flush_work;	/* background inode flush */
 	__int64_t		m_update_flags;	/* sb flags we need to update
 						   on the next remount,rw */
 	struct shrinker		m_inode_shrink;	/* inode reclaim shrinker */