about summary refs log tree commit diff stats
path: root/fs/jffs2/gc.c
diff options
context:
space:
mode:
authorDavid Woodhouse <David.Woodhouse@intel.com>2016-02-01 07:00:25 -0500
committerDavid Woodhouse <David.Woodhouse@intel.com>2016-02-29 17:29:10 -0500
commit5817b9dc9cc1225feedd9e1282707165fc64c384 (patch)
tree309d8459630ce24498e02b78091aa3e3594c8678 /fs/jffs2/gc.c
parentd267aefc54a28efc5bda7f009598dc83b5f98734 (diff)
jffs2: Improve post-mount CRC scan efficiency
We need to finish doing the CRC checks before we can allow writes to happen, and we currently process the inodes in order. This means a call to jffs2_get_ino_cache() for each possible inode# up to c->highest_ino. There may be a lot of lookups which fail, if the inode# space is used sparsely. And the inode# space is *often* used sparsely, if a file system contains a lot of stuff that was put there in the original image, followed by lots of creation and deletion of new files. Instead of processing them numerically with a lookup each time, just walk the hash buckets instead. [fix locking typo reported by Dan Carpenter] Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'fs/jffs2/gc.c')
-rw-r--r-- fs/jffs2/gc.c | 64
1 file changed, 42 insertions(+), 22 deletions(-)
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 5a2dec2b064c..61c55470b1ab 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -134,37 +134,59 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
134 if (mutex_lock_interruptible(&c->alloc_sem)) 134 if (mutex_lock_interruptible(&c->alloc_sem))
135 return -EINTR; 135 return -EINTR;
136 136
137
137 for (;;) { 138 for (;;) {
139 /* We can't start doing GC until we've finished checking
140 the node CRCs etc. */
141 int bucket, want_ino;
142
138 spin_lock(&c->erase_completion_lock); 143 spin_lock(&c->erase_completion_lock);
139 if (!c->unchecked_size) 144 if (!c->unchecked_size)
140 break; 145 break;
141
142 /* We can't start doing GC yet. We haven't finished checking
143 the node CRCs etc. Do it now. */
144
145 /* checked_ino is protected by the alloc_sem */
146 if (c->checked_ino > c->highest_ino && xattr) {
147 pr_crit("Checked all inodes but still 0x%x bytes of unchecked space?\n",
148 c->unchecked_size);
149 jffs2_dbg_dump_block_lists_nolock(c);
150 spin_unlock(&c->erase_completion_lock);
151 mutex_unlock(&c->alloc_sem);
152 return -ENOSPC;
153 }
154
155 spin_unlock(&c->erase_completion_lock); 146 spin_unlock(&c->erase_completion_lock);
156 147
157 if (!xattr) 148 if (!xattr)
158 xattr = jffs2_verify_xattr(c); 149 xattr = jffs2_verify_xattr(c);
159 150
160 spin_lock(&c->inocache_lock); 151 spin_lock(&c->inocache_lock);
152 /* Instead of doing the inodes in numeric order, doing a lookup
153 * in the hash for each possible number, just walk the hash
154 * buckets of *existing* inodes. This means that we process
155 * them out-of-order, but it can be a lot faster if there's
156 * a sparse inode# space. Which there often is. */
157 want_ino = c->check_ino;
158 for (bucket = c->check_ino % c->inocache_hashsize ; bucket < c->inocache_hashsize; bucket++) {
159 for (ic = c->inocache_list[bucket]; ic; ic = ic->next) {
160 if (ic->ino < want_ino)
161 continue;
162
163 if (ic->state != INO_STATE_CHECKEDABSENT &&
164 ic->state != INO_STATE_PRESENT)
165 goto got_next; /* with inocache_lock held */
166
167 jffs2_dbg(1, "Skipping ino #%u already checked\n",
168 ic->ino);
169 }
170 want_ino = 0;
171 }
161 172
162 ic = jffs2_get_ino_cache(c, c->checked_ino++); 173 /* Point c->check_ino past the end of the last bucket. */
174 c->check_ino = ((c->highest_ino + c->inocache_hashsize + 1) &
175 ~c->inocache_hashsize) - 1;
163 176
164 if (!ic) { 177 spin_unlock(&c->inocache_lock);
165 spin_unlock(&c->inocache_lock); 178
166 continue; 179 pr_crit("Checked all inodes but still 0x%x bytes of unchecked space?\n",
167 } 180 c->unchecked_size);
181 jffs2_dbg_dump_block_lists_nolock(c);
182 mutex_unlock(&c->alloc_sem);
183 return -ENOSPC;
184
185 got_next:
186 /* For next time round the loop, we want c->checked_ino to indicate
187 * the *next* one we want to check. And since we're walking the
188 * buckets rather than doing it sequentially, it's: */
189 c->check_ino = ic->ino + c->inocache_hashsize;
168 190
169 if (!ic->pino_nlink) { 191 if (!ic->pino_nlink) {
170 jffs2_dbg(1, "Skipping check of ino #%d with nlink/pino zero\n", 192 jffs2_dbg(1, "Skipping check of ino #%d with nlink/pino zero\n",
@@ -176,8 +198,6 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
176 switch(ic->state) { 198 switch(ic->state) {
177 case INO_STATE_CHECKEDABSENT: 199 case INO_STATE_CHECKEDABSENT:
178 case INO_STATE_PRESENT: 200 case INO_STATE_PRESENT:
179 jffs2_dbg(1, "Skipping ino #%u already checked\n",
180 ic->ino);
181 spin_unlock(&c->inocache_lock); 201 spin_unlock(&c->inocache_lock);
182 continue; 202 continue;
183 203
@@ -196,7 +216,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
196 ic->ino); 216 ic->ino);
197 /* We need to come back again for the _same_ inode. We've 217 /* We need to come back again for the _same_ inode. We've
198 made no progress in this case, but that should be OK */ 218 made no progress in this case, but that should be OK */
199 c->checked_ino--; 219 c->check_ino = ic->ino;
200 220
201 mutex_unlock(&c->alloc_sem); 221 mutex_unlock(&c->alloc_sem);
202 sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); 222 sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);