author	Dave Chinner <dchinner@redhat.com>	2010-09-27 22:28:19 -0400
committer	Alex Elder <aelder@sgi.com>	2010-10-18 16:07:53 -0400
commit	78ae5256768b91f25ce7a4eb9f56d563e302cc10 (patch)
tree	73b03ac578c2cb572fdef6d9be5788eee98076b1 /fs/xfs
parent	e13de955ca67b0bd1cec9a2f9352a3053065bf7f (diff)
xfs: implement batched inode lookups for AG walking
With the reclaim code separated from the generic walking code, it is
simple to implement batched lookups for the generic walk code. Separate
out the inode validation from the execute operations and modify the tree
lookups to get a batch of inodes at a time. Reclaim operations will be
optimised separately.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Alex Elder <aelder@sgi.com>
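For illustration only (not part of the commit), the two-phase batched walk the patch introduces can be sketched as a self-contained userspace program. Every name below is a hypothetical stand-in: a plain objects[] array and gang_lookup() replace the per-AG radix tree, a pthread mutex replaces pag->pag_ici_lock, and grab()/execute() stand in for xfs_inode_ag_walk_grab() and the iterator's execute callback.

/*
 * Minimal sketch of the batched walk pattern (userspace, not XFS code).
 * Phase one grabs a whole batch while the lock is held; phase two runs
 * the callback on the survivors after the lock has been dropped.
 */
#include <pthread.h>
#include <stdio.h>

#define LOOKUP_BATCH	32		/* mirrors XFS_LOOKUP_BATCH */
#define NR_OBJECTS	100

struct obj {
	unsigned long	id;		/* stands in for the inode number */
	int		busy;		/* stands in for "cannot be grabbed" */
};

static struct obj objects[NR_OBJECTS];
static pthread_mutex_t index_lock = PTHREAD_MUTEX_INITIALIZER;

/* Toy gang lookup: copy up to 'max' objects with id >= first into batch[]. */
static int gang_lookup(struct obj **batch, unsigned long first, int max)
{
	int nr = 0;

	for (unsigned long i = first; i < NR_OBJECTS && nr < max; i++)
		batch[nr++] = &objects[i];
	return nr;
}

static int grab(struct obj *o)
{
	return o->busy;			/* non-zero means "skip this one" */
}

static int walk(int (*execute)(struct obj *o))
{
	struct obj *batch[LOOKUP_BATCH];
	unsigned long first_index = 0;
	int last_error = 0;
	int nr_found;
	int i;

	do {
		pthread_mutex_lock(&index_lock);
		nr_found = gang_lookup(batch, first_index, LOOKUP_BATCH);
		if (!nr_found) {
			pthread_mutex_unlock(&index_lock);
			break;
		}

		/* Phase one: grab the whole batch while the lock is held. */
		for (i = 0; i < nr_found; i++) {
			struct obj *o = batch[i];

			if (grab(o))
				batch[i] = NULL;	/* skipped below */
			/* advance the cursor past every object we saw */
			first_index = o->id + 1;
		}
		pthread_mutex_unlock(&index_lock);

		/* Phase two: run the callback on the survivors, unlocked. */
		for (i = 0; i < nr_found; i++) {
			int error;

			if (!batch[i])
				continue;
			error = execute(batch[i]);
			if (error)
				last_error = error;
		}
	} while (nr_found);

	return last_error;
}

static int print_one(struct obj *o)
{
	printf("visited object %lu\n", o->id);
	return 0;
}

int main(void)
{
	for (unsigned long i = 0; i < NR_OBJECTS; i++)
		objects[i].id = i;
	objects[7].busy = 1;		/* one object the walk must skip */
	return walk(print_one);
}

The shape is the point: one lock round-trip fetches up to a batch of objects, un-grabbable slots are NULLed out under the lock, and the per-object work runs only after the lock is released.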
Diffstat (limited to 'fs/xfs')
-rw-r--r--	fs/xfs/linux-2.6/xfs_sync.c	66
-rw-r--r--	fs/xfs/linux-2.6/xfs_sync.h	2
2 files changed, 45 insertions, 23 deletions
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 37fc2c0a4d2..0ed3d0ae3c2 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -39,6 +39,14 @@
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 
+/*
+ * The inode lookup is done in batches to keep the amount of lock traffic and
+ * radix tree lookups to a minimum. The batch size is a trade off between
+ * lookup reduction and stack usage. This is in the reclaim path, so we can't
+ * be too greedy.
+ */
+#define XFS_LOOKUP_BATCH	32
+
 STATIC int
 xfs_inode_ag_walk_grab(
 	struct xfs_inode	*ip)
@@ -66,7 +74,6 @@ xfs_inode_ag_walk_grab(
 	return 0;
 }
 
-
 STATIC int
 xfs_inode_ag_walk(
 	struct xfs_mount	*mp,
@@ -79,54 +86,69 @@ xfs_inode_ag_walk(
 	int			last_error = 0;
 	int			skipped;
 	int			done;
+	int			nr_found;
 
 restart:
 	done = 0;
 	skipped = 0;
 	first_index = 0;
+	nr_found = 0;
 	do {
+		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
 		int		error = 0;
-		int		nr_found;
-		xfs_inode_t	*ip;
+		int		i;
 
 		read_lock(&pag->pag_ici_lock);
 		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
-				(void **)&ip, first_index, 1);
+					(void **)batch, first_index,
+					XFS_LOOKUP_BATCH);
 		if (!nr_found) {
 			read_unlock(&pag->pag_ici_lock);
 			break;
 		}
 
 		/*
-		 * Update the index for the next lookup. Catch overflows
-		 * into the next AG range which can occur if we have inodes
-		 * in the last block of the AG and we are currently
-		 * pointing to the last inode.
+		 * Grab the inodes before we drop the lock. if we found
+		 * nothing, nr == 0 and the loop will be skipped.
 		 */
-		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
-		if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
-			done = 1;
+		for (i = 0; i < nr_found; i++) {
+			struct xfs_inode *ip = batch[i];
 
-		if (xfs_inode_ag_walk_grab(ip)) {
-			read_unlock(&pag->pag_ici_lock);
-			continue;
+			if (done || xfs_inode_ag_walk_grab(ip))
+				batch[i] = NULL;
+
+			/*
+			 * Update the index for the next lookup. Catch overflows
+			 * into the next AG range which can occur if we have inodes
+			 * in the last block of the AG and we are currently
+			 * pointing to the last inode.
+			 */
+			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
+			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
+				done = 1;
 		}
+
+		/* unlock now we've grabbed the inodes. */
 		read_unlock(&pag->pag_ici_lock);
 
-		error = execute(ip, pag, flags);
-		IRELE(ip);
-		if (error == EAGAIN) {
-			skipped++;
-			continue;
+		for (i = 0; i < nr_found; i++) {
+			if (!batch[i])
+				continue;
+			error = execute(batch[i], pag, flags);
+			IRELE(batch[i]);
+			if (error == EAGAIN) {
+				skipped++;
+				continue;
+			}
+			if (error && last_error != EFSCORRUPTED)
+				last_error = error;
 		}
-		if (error)
-			last_error = error;
 
 		/* bail out if the filesystem is corrupted. */
 		if (error == EFSCORRUPTED)
 			break;
 
-	} while (!done);
+	} while (nr_found && !done);
 
 	if (skipped) {
 		delay(1);
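As a rough illustration of the stack trade-off mentioned in the new XFS_LOOKUP_BATCH comment (assuming 64-bit pointers, which is an assumption, not something the patch states): the on-stack batch array costs 32 pointers of 8 bytes each, i.e. 256 bytes per walk.

/* Hypothetical check of the batch array's stack footprint; assumes a
 * 64-bit build where sizeof(void *) == 8.  Not part of the patch. */
#include <stdio.h>

int main(void)
{
	const unsigned int batch = 32;	/* XFS_LOOKUP_BATCH */

	/* 32 * 8 = 256 bytes on a 64-bit build */
	printf("batch array: %zu bytes\n", batch * sizeof(void *));
	return 0;
}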
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index e8a352896d2..32ba6628290 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -47,7 +47,7 @@ void __xfs_inode_set_reclaim_tag(struct xfs_perag *pag, struct xfs_inode *ip);
 void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag,
 				struct xfs_inode *ip);
 
-int xfs_sync_inode_valid(struct xfs_inode *ip, struct xfs_perag *pag);
+int xfs_sync_inode_grab(struct xfs_inode *ip);
 int xfs_inode_ag_iterator(struct xfs_mount *mp,
 	int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
 	int flags);