diff options
Diffstat (limited to 'fs/jffs2/nodemgmt.c')
-rw-r--r-- | fs/jffs2/nodemgmt.c | 471 |
1 files changed, 198 insertions, 273 deletions
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c index c1d8b5ed9ab9..49127a1f0458 100644 --- a/fs/jffs2/nodemgmt.c +++ b/fs/jffs2/nodemgmt.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: nodemgmt.c,v 1.122 2005/05/06 09:30:27 dedekind Exp $ | 10 | * $Id: nodemgmt.c,v 1.127 2005/09/20 15:49:12 dedekind Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/compiler.h> | 17 | #include <linux/compiler.h> |
18 | #include <linux/sched.h> /* For cond_resched() */ | 18 | #include <linux/sched.h> /* For cond_resched() */ |
19 | #include "nodelist.h" | 19 | #include "nodelist.h" |
20 | #include "debug.h" | ||
20 | 21 | ||
21 | /** | 22 | /** |
22 | * jffs2_reserve_space - request physical space to write nodes to flash | 23 | * jffs2_reserve_space - request physical space to write nodes to flash |
@@ -38,9 +39,11 @@ | |||
38 | * for the requested allocation. | 39 | * for the requested allocation. |
39 | */ | 40 | */ |
40 | 41 | ||
41 | static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len); | 42 | static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, |
43 | uint32_t *ofs, uint32_t *len, uint32_t sumsize); | ||
42 | 44 | ||
43 | int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio) | 45 | int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, |
46 | uint32_t *len, int prio, uint32_t sumsize) | ||
44 | { | 47 | { |
45 | int ret = -EAGAIN; | 48 | int ret = -EAGAIN; |
46 | int blocksneeded = c->resv_blocks_write; | 49 | int blocksneeded = c->resv_blocks_write; |
@@ -85,12 +88,12 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs | |||
85 | up(&c->alloc_sem); | 88 | up(&c->alloc_sem); |
86 | return -ENOSPC; | 89 | return -ENOSPC; |
87 | } | 90 | } |
88 | 91 | ||
89 | /* Calc possibly available space. Possibly available means that we | 92 | /* Calc possibly available space. Possibly available means that we |
90 | * don't know, if unchecked size contains obsoleted nodes, which could give us some | 93 | * don't know, if unchecked size contains obsoleted nodes, which could give us some |
91 | * more usable space. This will affect the sum only once, as gc first finishes checking | 94 | * more usable space. This will affect the sum only once, as gc first finishes checking |
92 | * of nodes. | 95 | * of nodes. |
93 | * Return -ENOSPC, if the maximum possibly available space is less or equal than | 96 | * Return -ENOSPC, if the maximum possibly available space is less or equal than |
94 | * blocksneeded * sector_size. | 97 | * blocksneeded * sector_size. |
95 | * This blocks endless gc looping on a filesystem, which is nearly full, even if | 98 | * This blocks endless gc looping on a filesystem, which is nearly full, even if |
96 | * the check above passes. | 99 | * the check above passes. |
@@ -115,7 +118,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs | |||
115 | c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size, | 118 | c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size, |
116 | c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size)); | 119 | c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size)); |
117 | spin_unlock(&c->erase_completion_lock); | 120 | spin_unlock(&c->erase_completion_lock); |
118 | 121 | ||
119 | ret = jffs2_garbage_collect_pass(c); | 122 | ret = jffs2_garbage_collect_pass(c); |
120 | if (ret) | 123 | if (ret) |
121 | return ret; | 124 | return ret; |
@@ -129,7 +132,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs | |||
129 | spin_lock(&c->erase_completion_lock); | 132 | spin_lock(&c->erase_completion_lock); |
130 | } | 133 | } |
131 | 134 | ||
132 | ret = jffs2_do_reserve_space(c, minsize, ofs, len); | 135 | ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize); |
133 | if (ret) { | 136 | if (ret) { |
134 | D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret)); | 137 | D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret)); |
135 | } | 138 | } |
@@ -140,7 +143,8 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs | |||
140 | return ret; | 143 | return ret; |
141 | } | 144 | } |
142 | 145 | ||
143 | int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len) | 146 | int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, |
147 | uint32_t *len, uint32_t sumsize) | ||
144 | { | 148 | { |
145 | int ret = -EAGAIN; | 149 | int ret = -EAGAIN; |
146 | minsize = PAD(minsize); | 150 | minsize = PAD(minsize); |
@@ -149,7 +153,7 @@ int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t * | |||
149 | 153 | ||
150 | spin_lock(&c->erase_completion_lock); | 154 | spin_lock(&c->erase_completion_lock); |
151 | while(ret == -EAGAIN) { | 155 | while(ret == -EAGAIN) { |
152 | ret = jffs2_do_reserve_space(c, minsize, ofs, len); | 156 | ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize); |
153 | if (ret) { | 157 | if (ret) { |
154 | D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret)); | 158 | D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret)); |
155 | } | 159 | } |
@@ -158,105 +162,185 @@ int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t * | |||
158 | return ret; | 162 | return ret; |
159 | } | 163 | } |
160 | 164 | ||
161 | /* Called with alloc sem _and_ erase_completion_lock */ | 165 | |
162 | static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len) | 166 | /* Classify nextblock (clean, dirty or verydirty) and force to select another one */
167 | |||
168 | static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | ||
163 | { | 169 | { |
164 | struct jffs2_eraseblock *jeb = c->nextblock; | 170 | |
165 | 171 | /* Check, if we have a dirty block now, or if it was dirty already */ | |
166 | restart: | 172 | if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) { |
167 | if (jeb && minsize > jeb->free_size) { | 173 | c->dirty_size += jeb->wasted_size; |
168 | /* Skip the end of this block and file it as having some dirty space */ | 174 | c->wasted_size -= jeb->wasted_size; |
169 | /* If there's a pending write to it, flush now */ | 175 | jeb->dirty_size += jeb->wasted_size; |
170 | if (jffs2_wbuf_dirty(c)) { | 176 | jeb->wasted_size = 0; |
177 | if (VERYDIRTY(c, jeb->dirty_size)) { | ||
178 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | ||
179 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | ||
180 | list_add_tail(&jeb->list, &c->very_dirty_list); | ||
181 | } else { | ||
182 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | ||
183 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | ||
184 | list_add_tail(&jeb->list, &c->dirty_list); | ||
185 | } | ||
186 | } else { | ||
187 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | ||
188 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | ||
189 | list_add_tail(&jeb->list, &c->clean_list); | ||
190 | } | ||
191 | c->nextblock = NULL; | ||
192 | |||
193 | } | ||
194 | |||
195 | /* Select a new jeb for nextblock */ | ||
196 | |||
197 | static int jffs2_find_nextblock(struct jffs2_sb_info *c) | ||
198 | { | ||
199 | struct list_head *next; | ||
200 | |||
201 | /* Take the next block off the 'free' list */ | ||
202 | |||
203 | if (list_empty(&c->free_list)) { | ||
204 | |||
205 | if (!c->nr_erasing_blocks && | ||
206 | !list_empty(&c->erasable_list)) { | ||
207 | struct jffs2_eraseblock *ejeb; | ||
208 | |||
209 | ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list); | ||
210 | list_del(&ejeb->list); | ||
211 | list_add_tail(&ejeb->list, &c->erase_pending_list); | ||
212 | c->nr_erasing_blocks++; | ||
213 | jffs2_erase_pending_trigger(c); | ||
214 | D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n", | ||
215 | ejeb->offset)); | ||
216 | } | ||
217 | |||
218 | if (!c->nr_erasing_blocks && | ||
219 | !list_empty(&c->erasable_pending_wbuf_list)) { | ||
220 | D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n")); | ||
221 | /* c->nextblock is NULL, no update to c->nextblock allowed */ | ||
171 | spin_unlock(&c->erase_completion_lock); | 222 | spin_unlock(&c->erase_completion_lock); |
172 | D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n")); | ||
173 | jffs2_flush_wbuf_pad(c); | 223 | jffs2_flush_wbuf_pad(c); |
174 | spin_lock(&c->erase_completion_lock); | 224 | spin_lock(&c->erase_completion_lock); |
175 | jeb = c->nextblock; | 225 | /* Have another go. It'll be on the erasable_list now */ |
176 | goto restart; | 226 | return -EAGAIN; |
177 | } | 227 | } |
178 | c->wasted_size += jeb->free_size; | 228 | |
179 | c->free_size -= jeb->free_size; | 229 | if (!c->nr_erasing_blocks) { |
180 | jeb->wasted_size += jeb->free_size; | 230 | /* Ouch. We're in GC, or we wouldn't have got here. |
181 | jeb->free_size = 0; | 231 | And there's no space left. At all. */ |
182 | 232 | printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n", | |
183 | /* Check, if we have a dirty block now, or if it was dirty already */ | 233 | c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no", |
184 | if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) { | 234 | list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no"); |
185 | c->dirty_size += jeb->wasted_size; | 235 | return -ENOSPC; |
186 | c->wasted_size -= jeb->wasted_size; | ||
187 | jeb->dirty_size += jeb->wasted_size; | ||
188 | jeb->wasted_size = 0; | ||
189 | if (VERYDIRTY(c, jeb->dirty_size)) { | ||
190 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | ||
191 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | ||
192 | list_add_tail(&jeb->list, &c->very_dirty_list); | ||
193 | } else { | ||
194 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | ||
195 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | ||
196 | list_add_tail(&jeb->list, &c->dirty_list); | ||
197 | } | ||
198 | } else { | ||
199 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | ||
200 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | ||
201 | list_add_tail(&jeb->list, &c->clean_list); | ||
202 | } | 236 | } |
203 | c->nextblock = jeb = NULL; | 237 | |
238 | spin_unlock(&c->erase_completion_lock); | ||
239 | /* Don't wait for it; just erase one right now */ | ||
240 | jffs2_erase_pending_blocks(c, 1); | ||
241 | spin_lock(&c->erase_completion_lock); | ||
242 | |||
243 | /* An erase may have failed, decreasing the | ||
244 | amount of free space available. So we must | ||
245 | restart from the beginning */ | ||
246 | return -EAGAIN; | ||
204 | } | 247 | } |
205 | |||
206 | if (!jeb) { | ||
207 | struct list_head *next; | ||
208 | /* Take the next block off the 'free' list */ | ||
209 | 248 | ||
210 | if (list_empty(&c->free_list)) { | 249 | next = c->free_list.next; |
250 | list_del(next); | ||
251 | c->nextblock = list_entry(next, struct jffs2_eraseblock, list); | ||
252 | c->nr_free_blocks--; | ||
211 | 253 | ||
212 | if (!c->nr_erasing_blocks && | 254 | jffs2_sum_reset_collected(c->summary); /* reset collected summary */ |
213 | !list_empty(&c->erasable_list)) { | ||
214 | struct jffs2_eraseblock *ejeb; | ||
215 | 255 | ||
216 | ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list); | 256 | D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset)); |
217 | list_del(&ejeb->list); | 257 | |
218 | list_add_tail(&ejeb->list, &c->erase_pending_list); | 258 | return 0; |
219 | c->nr_erasing_blocks++; | 259 | } |
220 | jffs2_erase_pending_trigger(c); | 260 | |
221 | D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n", | 261 | /* Called with alloc sem _and_ erase_completion_lock */ |
222 | ejeb->offset)); | 262 | static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, uint32_t sumsize) |
263 | { | ||
264 | struct jffs2_eraseblock *jeb = c->nextblock; | ||
265 | uint32_t reserved_size; /* for summary information at the end of the jeb */ | ||
266 | int ret; | ||
267 | |||
268 | restart: | ||
269 | reserved_size = 0; | ||
270 | |||
271 | if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) { | ||
272 | /* NOSUM_SIZE means not to generate summary */ | ||
273 | |||
274 | if (jeb) { | ||
275 | reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE); | ||
276 | dbg_summary("minsize=%d , jeb->free=%d ," | ||
277 | "summary->size=%d , sumsize=%d\n", | ||
278 | minsize, jeb->free_size, | ||
279 | c->summary->sum_size, sumsize); | ||
280 | } | ||
281 | |||
282 | /* Is there enough space for writing out the current node, or we have to | ||
283 | write out summary information now, close this jeb and select new nextblock? */ | ||
284 | if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize + | ||
285 | JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) { | ||
286 | |||
287 | /* Has summary been disabled for this jeb? */ | ||
288 | if (jffs2_sum_is_disabled(c->summary)) { | ||
289 | sumsize = JFFS2_SUMMARY_NOSUM_SIZE; | ||
290 | goto restart; | ||
223 | } | 291 | } |
224 | 292 | ||
225 | if (!c->nr_erasing_blocks && | 293 | /* Writing out the collected summary information */ |
226 | !list_empty(&c->erasable_pending_wbuf_list)) { | 294 | dbg_summary("generating summary for 0x%08x.\n", jeb->offset); |
227 | D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n")); | 295 | ret = jffs2_sum_write_sumnode(c); |
228 | /* c->nextblock is NULL, no update to c->nextblock allowed */ | 296 | |
297 | if (ret) | ||
298 | return ret; | ||
299 | |||
300 | if (jffs2_sum_is_disabled(c->summary)) { | ||
301 | /* jffs2_sum_write_sumnode() couldn't write out the summary information | ||
302 | disabling summary for this jeb and freeing the collected information | ||
303 | */ | ||
304 | sumsize = JFFS2_SUMMARY_NOSUM_SIZE; | ||
305 | goto restart; | ||
306 | } | ||
307 | |||
308 | jffs2_close_nextblock(c, jeb); | ||
309 | jeb = NULL; | ||
310 | /* keep always valid value in reserved_size */ | ||
311 | reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE); | ||
312 | } | ||
313 | } else { | ||
314 | if (jeb && minsize > jeb->free_size) { | ||
315 | /* Skip the end of this block and file it as having some dirty space */ | ||
316 | /* If there's a pending write to it, flush now */ | ||
317 | |||
318 | if (jffs2_wbuf_dirty(c)) { | ||
229 | spin_unlock(&c->erase_completion_lock); | 319 | spin_unlock(&c->erase_completion_lock); |
320 | D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n")); | ||
230 | jffs2_flush_wbuf_pad(c); | 321 | jffs2_flush_wbuf_pad(c); |
231 | spin_lock(&c->erase_completion_lock); | 322 | spin_lock(&c->erase_completion_lock); |
232 | /* Have another go. It'll be on the erasable_list now */ | 323 | jeb = c->nextblock; |
233 | return -EAGAIN; | 324 | goto restart; |
234 | } | 325 | } |
235 | 326 | ||
236 | if (!c->nr_erasing_blocks) { | 327 | c->wasted_size += jeb->free_size; |
237 | /* Ouch. We're in GC, or we wouldn't have got here. | 328 | c->free_size -= jeb->free_size; |
238 | And there's no space left. At all. */ | 329 | jeb->wasted_size += jeb->free_size; |
239 | printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n", | 330 | jeb->free_size = 0; |
240 | c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no", | ||
241 | list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no"); | ||
242 | return -ENOSPC; | ||
243 | } | ||
244 | |||
245 | spin_unlock(&c->erase_completion_lock); | ||
246 | /* Don't wait for it; just erase one right now */ | ||
247 | jffs2_erase_pending_blocks(c, 1); | ||
248 | spin_lock(&c->erase_completion_lock); | ||
249 | 331 | ||
250 | /* An erase may have failed, decreasing the | 332 | jffs2_close_nextblock(c, jeb); |
251 | amount of free space available. So we must | 333 | jeb = NULL; |
252 | restart from the beginning */ | ||
253 | return -EAGAIN; | ||
254 | } | 334 | } |
335 | } | ||
336 | |||
337 | if (!jeb) { | ||
255 | 338 | ||
256 | next = c->free_list.next; | 339 | ret = jffs2_find_nextblock(c); |
257 | list_del(next); | 340 | if (ret) |
258 | c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list); | 341 | return ret; |
259 | c->nr_free_blocks--; | 342 | |
343 | jeb = c->nextblock; | ||
260 | 344 | ||
261 | if (jeb->free_size != c->sector_size - c->cleanmarker_size) { | 345 | if (jeb->free_size != c->sector_size - c->cleanmarker_size) { |
262 | printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size); | 346 | printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size); |
@@ -266,13 +350,13 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, ui | |||
266 | /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has | 350 | /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has |
267 | enough space */ | 351 | enough space */ |
268 | *ofs = jeb->offset + (c->sector_size - jeb->free_size); | 352 | *ofs = jeb->offset + (c->sector_size - jeb->free_size); |
269 | *len = jeb->free_size; | 353 | *len = jeb->free_size - reserved_size; |
270 | 354 | ||
271 | if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size && | 355 | if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size && |
272 | !jeb->first_node->next_in_ino) { | 356 | !jeb->first_node->next_in_ino) { |
273 | /* Only node in it beforehand was a CLEANMARKER node (we think). | 357 | /* Only node in it beforehand was a CLEANMARKER node (we think). |
274 | So mark it obsolete now that there's going to be another node | 358 | So mark it obsolete now that there's going to be another node |
275 | in the block. This will reduce used_size to zero but We've | 359 | in the block. This will reduce used_size to zero but We've |
276 | already set c->nextblock so that jffs2_mark_node_obsolete() | 360 | already set c->nextblock so that jffs2_mark_node_obsolete() |
277 | won't try to refile it to the dirty_list. | 361 | won't try to refile it to the dirty_list. |
278 | */ | 362 | */ |
@@ -292,12 +376,12 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, ui | |||
292 | * @len: length of this physical node | 376 | * @len: length of this physical node |
293 | * @dirty: dirty flag for new node | 377 | * @dirty: dirty flag for new node |
294 | * | 378 | * |
295 | * Should only be used to report nodes for which space has been allocated | 379 | * Should only be used to report nodes for which space has been allocated |
296 | * by jffs2_reserve_space. | 380 | * by jffs2_reserve_space. |
297 | * | 381 | * |
298 | * Must be called with the alloc_sem held. | 382 | * Must be called with the alloc_sem held. |
299 | */ | 383 | */ |
300 | 384 | ||
301 | int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new) | 385 | int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new) |
302 | { | 386 | { |
303 | struct jffs2_eraseblock *jeb; | 387 | struct jffs2_eraseblock *jeb; |
@@ -349,8 +433,8 @@ int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_r | |||
349 | list_add_tail(&jeb->list, &c->clean_list); | 433 | list_add_tail(&jeb->list, &c->clean_list); |
350 | c->nextblock = NULL; | 434 | c->nextblock = NULL; |
351 | } | 435 | } |
352 | ACCT_SANITY_CHECK(c,jeb); | 436 | jffs2_dbg_acct_sanity_check_nolock(c,jeb); |
353 | D1(ACCT_PARANOIA_CHECK(jeb)); | 437 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); |
354 | 438 | ||
355 | spin_unlock(&c->erase_completion_lock); | 439 | spin_unlock(&c->erase_completion_lock); |
356 | 440 | ||
@@ -404,8 +488,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
404 | 488 | ||
405 | if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) && | 489 | if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) && |
406 | !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) { | 490 | !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) { |
407 | /* Hm. This may confuse static lock analysis. If any of the above | 491 | /* Hm. This may confuse static lock analysis. If any of the above |
408 | three conditions is false, we're going to return from this | 492 | three conditions is false, we're going to return from this |
409 | function without actually obliterating any nodes or freeing | 493 | function without actually obliterating any nodes or freeing |
410 | any jffs2_raw_node_refs. So we don't need to stop erases from | 494 | any jffs2_raw_node_refs. So we don't need to stop erases from |
411 | happening, or protect against people holding an obsolete | 495 | happening, or protect against people holding an obsolete |
@@ -430,7 +514,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
430 | ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size); | 514 | ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size); |
431 | BUG(); | 515 | BUG(); |
432 | }) | 516 | }) |
433 | D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref))); | 517 | D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), ref_totlen(c, jeb, ref))); |
434 | jeb->used_size -= ref_totlen(c, jeb, ref); | 518 | jeb->used_size -= ref_totlen(c, jeb, ref); |
435 | c->used_size -= ref_totlen(c, jeb, ref); | 519 | c->used_size -= ref_totlen(c, jeb, ref); |
436 | } | 520 | } |
@@ -462,18 +546,17 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
462 | D1(printk(KERN_DEBUG "Wasting\n")); | 546 | D1(printk(KERN_DEBUG "Wasting\n")); |
463 | addedsize = 0; | 547 | addedsize = 0; |
464 | jeb->wasted_size += ref_totlen(c, jeb, ref); | 548 | jeb->wasted_size += ref_totlen(c, jeb, ref); |
465 | c->wasted_size += ref_totlen(c, jeb, ref); | 549 | c->wasted_size += ref_totlen(c, jeb, ref); |
466 | } | 550 | } |
467 | ref->flash_offset = ref_offset(ref) | REF_OBSOLETE; | 551 | ref->flash_offset = ref_offset(ref) | REF_OBSOLETE; |
468 | |||
469 | ACCT_SANITY_CHECK(c, jeb); | ||
470 | 552 | ||
471 | D1(ACCT_PARANOIA_CHECK(jeb)); | 553 | jffs2_dbg_acct_sanity_check_nolock(c, jeb); |
554 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); | ||
472 | 555 | ||
473 | if (c->flags & JFFS2_SB_FLAG_SCANNING) { | 556 | if (c->flags & JFFS2_SB_FLAG_SCANNING) { |
474 | /* Flash scanning is in progress. Don't muck about with the block | 557 | /* Flash scanning is in progress. Don't muck about with the block |
475 | lists because they're not ready yet, and don't actually | 558 | lists because they're not ready yet, and don't actually |
476 | obliterate nodes that look obsolete. If they weren't | 559 | obliterate nodes that look obsolete. If they weren't |
477 | marked obsolete on the flash at the time they _became_ | 560 | marked obsolete on the flash at the time they _became_ |
478 | obsolete, there was probably a reason for that. */ | 561 | obsolete, there was probably a reason for that. */ |
479 | spin_unlock(&c->erase_completion_lock); | 562 | spin_unlock(&c->erase_completion_lock); |
@@ -507,7 +590,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
507 | immediately reused, and we spread the load a bit. */ | 590 | immediately reused, and we spread the load a bit. */ |
508 | D1(printk(KERN_DEBUG "...and adding to erasable_list\n")); | 591 | D1(printk(KERN_DEBUG "...and adding to erasable_list\n")); |
509 | list_add_tail(&jeb->list, &c->erasable_list); | 592 | list_add_tail(&jeb->list, &c->erasable_list); |
510 | } | 593 | } |
511 | } | 594 | } |
512 | D1(printk(KERN_DEBUG "Done OK\n")); | 595 | D1(printk(KERN_DEBUG "Done OK\n")); |
513 | } else if (jeb == c->gcblock) { | 596 | } else if (jeb == c->gcblock) { |
@@ -525,8 +608,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
525 | list_add_tail(&jeb->list, &c->very_dirty_list); | 608 | list_add_tail(&jeb->list, &c->very_dirty_list); |
526 | } else { | 609 | } else { |
527 | D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n", | 610 | D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n", |
528 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | 611 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); |
529 | } | 612 | } |
530 | 613 | ||
531 | spin_unlock(&c->erase_completion_lock); | 614 | spin_unlock(&c->erase_completion_lock); |
532 | 615 | ||
@@ -573,11 +656,11 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
573 | 656 | ||
574 | /* Nodes which have been marked obsolete no longer need to be | 657 | /* Nodes which have been marked obsolete no longer need to be |
575 | associated with any inode. Remove them from the per-inode list. | 658 | associated with any inode. Remove them from the per-inode list. |
576 | 659 | ||
577 | Note we can't do this for NAND at the moment because we need | 660 | Note we can't do this for NAND at the moment because we need |
578 | obsolete dirent nodes to stay on the lists, because of the | 661 | obsolete dirent nodes to stay on the lists, because of the |
579 | horridness in jffs2_garbage_collect_deletion_dirent(). Also | 662 | horridness in jffs2_garbage_collect_deletion_dirent(). Also |
580 | because we delete the inocache, and on NAND we need that to | 663 | because we delete the inocache, and on NAND we need that to |
581 | stay around until all the nodes are actually erased, in order | 664 | stay around until all the nodes are actually erased, in order |
582 | to stop us from giving the same inode number to another newly | 665 | to stop us from giving the same inode number to another newly |
583 | created inode. */ | 666 | created inode. */ |
@@ -606,7 +689,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
606 | if (ref->next_phys && ref_obsolete(ref->next_phys) && | 689 | if (ref->next_phys && ref_obsolete(ref->next_phys) && |
607 | !ref->next_phys->next_in_ino) { | 690 | !ref->next_phys->next_in_ino) { |
608 | struct jffs2_raw_node_ref *n = ref->next_phys; | 691 | struct jffs2_raw_node_ref *n = ref->next_phys; |
609 | 692 | ||
610 | spin_lock(&c->erase_completion_lock); | 693 | spin_lock(&c->erase_completion_lock); |
611 | 694 | ||
612 | ref->__totlen += n->__totlen; | 695 | ref->__totlen += n->__totlen; |
@@ -620,7 +703,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
620 | 703 | ||
621 | jffs2_free_raw_node_ref(n); | 704 | jffs2_free_raw_node_ref(n); |
622 | } | 705 | } |
623 | 706 | ||
624 | /* Also merge with the previous node in the list, if there is one | 707 | /* Also merge with the previous node in the list, if there is one |
625 | and that one is obsolete */ | 708 | and that one is obsolete */ |
626 | if (ref != jeb->first_node ) { | 709 | if (ref != jeb->first_node ) { |
@@ -630,7 +713,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
630 | 713 | ||
631 | while (p->next_phys != ref) | 714 | while (p->next_phys != ref) |
632 | p = p->next_phys; | 715 | p = p->next_phys; |
633 | 716 | ||
634 | if (ref_obsolete(p) && !ref->next_in_ino) { | 717 | if (ref_obsolete(p) && !ref->next_in_ino) { |
635 | p->__totlen += ref->__totlen; | 718 | p->__totlen += ref->__totlen; |
636 | if (jeb->last_node == ref) { | 719 | if (jeb->last_node == ref) { |
@@ -649,164 +732,6 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
649 | up(&c->erase_free_sem); | 732 | up(&c->erase_free_sem); |
650 | } | 733 | } |
651 | 734 | ||
652 | #if CONFIG_JFFS2_FS_DEBUG >= 2 | ||
653 | void jffs2_dump_block_lists(struct jffs2_sb_info *c) | ||
654 | { | ||
655 | |||
656 | |||
657 | printk(KERN_DEBUG "jffs2_dump_block_lists:\n"); | ||
658 | printk(KERN_DEBUG "flash_size: %08x\n", c->flash_size); | ||
659 | printk(KERN_DEBUG "used_size: %08x\n", c->used_size); | ||
660 | printk(KERN_DEBUG "dirty_size: %08x\n", c->dirty_size); | ||
661 | printk(KERN_DEBUG "wasted_size: %08x\n", c->wasted_size); | ||
662 | printk(KERN_DEBUG "unchecked_size: %08x\n", c->unchecked_size); | ||
663 | printk(KERN_DEBUG "free_size: %08x\n", c->free_size); | ||
664 | printk(KERN_DEBUG "erasing_size: %08x\n", c->erasing_size); | ||
665 | printk(KERN_DEBUG "bad_size: %08x\n", c->bad_size); | ||
666 | printk(KERN_DEBUG "sector_size: %08x\n", c->sector_size); | ||
667 | printk(KERN_DEBUG "jffs2_reserved_blocks size: %08x\n",c->sector_size * c->resv_blocks_write); | ||
668 | |||
669 | if (c->nextblock) { | ||
670 | printk(KERN_DEBUG "nextblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
671 | c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->unchecked_size, c->nextblock->free_size); | ||
672 | } else { | ||
673 | printk(KERN_DEBUG "nextblock: NULL\n"); | ||
674 | } | ||
675 | if (c->gcblock) { | ||
676 | printk(KERN_DEBUG "gcblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
677 | c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size); | ||
678 | } else { | ||
679 | printk(KERN_DEBUG "gcblock: NULL\n"); | ||
680 | } | ||
681 | if (list_empty(&c->clean_list)) { | ||
682 | printk(KERN_DEBUG "clean_list: empty\n"); | ||
683 | } else { | ||
684 | struct list_head *this; | ||
685 | int numblocks = 0; | ||
686 | uint32_t dirty = 0; | ||
687 | |||
688 | list_for_each(this, &c->clean_list) { | ||
689 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
690 | numblocks ++; | ||
691 | dirty += jeb->wasted_size; | ||
692 | printk(KERN_DEBUG "clean_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
693 | } | ||
694 | printk (KERN_DEBUG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", numblocks, dirty, dirty / numblocks); | ||
695 | } | ||
696 | if (list_empty(&c->very_dirty_list)) { | ||
697 | printk(KERN_DEBUG "very_dirty_list: empty\n"); | ||
698 | } else { | ||
699 | struct list_head *this; | ||
700 | int numblocks = 0; | ||
701 | uint32_t dirty = 0; | ||
702 | |||
703 | list_for_each(this, &c->very_dirty_list) { | ||
704 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
705 | numblocks ++; | ||
706 | dirty += jeb->dirty_size; | ||
707 | printk(KERN_DEBUG "very_dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
708 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
709 | } | ||
710 | printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n", | ||
711 | numblocks, dirty, dirty / numblocks); | ||
712 | } | ||
713 | if (list_empty(&c->dirty_list)) { | ||
714 | printk(KERN_DEBUG "dirty_list: empty\n"); | ||
715 | } else { | ||
716 | struct list_head *this; | ||
717 | int numblocks = 0; | ||
718 | uint32_t dirty = 0; | ||
719 | |||
720 | list_for_each(this, &c->dirty_list) { | ||
721 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
722 | numblocks ++; | ||
723 | dirty += jeb->dirty_size; | ||
724 | printk(KERN_DEBUG "dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
725 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
726 | } | ||
727 | printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n", | ||
728 | numblocks, dirty, dirty / numblocks); | ||
729 | } | ||
730 | if (list_empty(&c->erasable_list)) { | ||
731 | printk(KERN_DEBUG "erasable_list: empty\n"); | ||
732 | } else { | ||
733 | struct list_head *this; | ||
734 | |||
735 | list_for_each(this, &c->erasable_list) { | ||
736 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
737 | printk(KERN_DEBUG "erasable_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
738 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
739 | } | ||
740 | } | ||
741 | if (list_empty(&c->erasing_list)) { | ||
742 | printk(KERN_DEBUG "erasing_list: empty\n"); | ||
743 | } else { | ||
744 | struct list_head *this; | ||
745 | |||
746 | list_for_each(this, &c->erasing_list) { | ||
747 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
748 | printk(KERN_DEBUG "erasing_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
749 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
750 | } | ||
751 | } | ||
752 | if (list_empty(&c->erase_pending_list)) { | ||
753 | printk(KERN_DEBUG "erase_pending_list: empty\n"); | ||
754 | } else { | ||
755 | struct list_head *this; | ||
756 | |||
757 | list_for_each(this, &c->erase_pending_list) { | ||
758 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
759 | printk(KERN_DEBUG "erase_pending_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
760 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
761 | } | ||
762 | } | ||
763 | if (list_empty(&c->erasable_pending_wbuf_list)) { | ||
764 | printk(KERN_DEBUG "erasable_pending_wbuf_list: empty\n"); | ||
765 | } else { | ||
766 | struct list_head *this; | ||
767 | |||
768 | list_for_each(this, &c->erasable_pending_wbuf_list) { | ||
769 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
770 | printk(KERN_DEBUG "erasable_pending_wbuf_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
771 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
772 | } | ||
773 | } | ||
774 | if (list_empty(&c->free_list)) { | ||
775 | printk(KERN_DEBUG "free_list: empty\n"); | ||
776 | } else { | ||
777 | struct list_head *this; | ||
778 | |||
779 | list_for_each(this, &c->free_list) { | ||
780 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
781 | printk(KERN_DEBUG "free_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
782 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
783 | } | ||
784 | } | ||
785 | if (list_empty(&c->bad_list)) { | ||
786 | printk(KERN_DEBUG "bad_list: empty\n"); | ||
787 | } else { | ||
788 | struct list_head *this; | ||
789 | |||
790 | list_for_each(this, &c->bad_list) { | ||
791 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
792 | printk(KERN_DEBUG "bad_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
793 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
794 | } | ||
795 | } | ||
796 | if (list_empty(&c->bad_used_list)) { | ||
797 | printk(KERN_DEBUG "bad_used_list: empty\n"); | ||
798 | } else { | ||
799 | struct list_head *this; | ||
800 | |||
801 | list_for_each(this, &c->bad_used_list) { | ||
802 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | ||
803 | printk(KERN_DEBUG "bad_used_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", | ||
804 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); | ||
805 | } | ||
806 | } | ||
807 | } | ||
808 | #endif /* CONFIG_JFFS2_FS_DEBUG */ | ||
809 | |||
810 | int jffs2_thread_should_wake(struct jffs2_sb_info *c) | 735 | int jffs2_thread_should_wake(struct jffs2_sb_info *c) |
811 | { | 736 | { |
812 | int ret = 0; | 737 | int ret = 0; |
@@ -828,11 +753,11 @@ int jffs2_thread_should_wake(struct jffs2_sb_info *c) | |||
828 | */ | 753 | */ |
829 | dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size; | 754 | dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size; |
830 | 755 | ||
831 | if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger && | 756 | if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger && |
832 | (dirty > c->nospc_dirty_size)) | 757 | (dirty > c->nospc_dirty_size)) |
833 | ret = 1; | 758 | ret = 1; |
834 | 759 | ||
835 | D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n", | 760 | D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n", |
836 | c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no")); | 761 | c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no")); |
837 | 762 | ||
838 | return ret; | 763 | return ret; |