Diffstat (limited to 'fs/jffs2')
-rw-r--r--  fs/jffs2/background.c | 18
-rw-r--r--  fs/jffs2/readinode.c  | 42
2 files changed, 44 insertions, 16 deletions
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 3cceef4ad2b7..e9580104b6ba 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -95,13 +95,17 @@ static int jffs2_garbage_collect_thread(void *_c)
 		spin_unlock(&c->erase_completion_lock);
 
 
-		/* This thread is purely an optimisation. But if it runs when
-		   other things could be running, it actually makes things a
-		   lot worse. Use yield() and put it at the back of the runqueue
-		   every time. Especially during boot, pulling an inode in
-		   with read_inode() is much preferable to having the GC thread
-		   get there first. */
-		yield();
+		/* Problem - immediately after bootup, the GCD spends a lot
+		 * of time in places like jffs2_kill_fragtree(); so much so
+		 * that userspace processes (like gdm and X) are starved
+		 * despite plenty of cond_resched()s and renicing. Yield()
+		 * doesn't help, either (presumably because userspace and GCD
+		 * are generally competing for a higher latency resource -
+		 * disk).
+		 * This forces the GCD to slow the hell down. Pulling an
+		 * inode in with read_inode() is much preferable to having
+		 * the GC thread get there first. */
+		schedule_timeout_interruptible(msecs_to_jiffies(50));
 
 		/* Put_super will send a SIGKILL and then wait on the sem.
 		 */
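For context, the hunk above replaces an unconditional yield() in the garbage-collector loop with a fixed 50 ms sleep. The following is a minimal, illustrative sketch of that throttling pattern in a generic kthread; the thread function and its work step are hypothetical placeholders, not the actual jffs2_garbage_collect_thread().

#include <linux/kthread.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

/* Hypothetical worker loop illustrating the throttle used above. */
static int throttled_worker_thread(void *data)
{
	while (!kthread_should_stop()) {
		/* ... perform one pass of background work here ... */

		/*
		 * Instead of yield(), sleep for a fixed interval so a
		 * busy background thread cannot starve runnable
		 * userspace tasks.  schedule_timeout_interruptible()
		 * sets TASK_INTERRUPTIBLE and returns early if a
		 * signal is delivered.
		 */
		schedule_timeout_interruptible(msecs_to_jiffies(50));
	}
	return 0;
}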
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 6ca08ad887c0..1fc1e92356ee 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -220,7 +220,7 @@ static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
 				    struct jffs2_tmp_dnode_info *tn)
 {
 	uint32_t fn_end = tn->fn->ofs + tn->fn->size;
-	struct jffs2_tmp_dnode_info *this;
+	struct jffs2_tmp_dnode_info *this, *ptn;
 
 	dbg_readinode("insert fragment %#04x-%#04x, ver %u at %08x\n", tn->fn->ofs, fn_end, tn->version, ref_offset(tn->fn->raw));
 
@@ -251,11 +251,18 @@ static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
 	if (this) {
 		/* If the node is coincident with another at a lower address,
 		   back up until the other node is found. It may be relevant */
-		while (this->overlapped)
-			this = tn_prev(this);
-
-		/* First node should never be marked overlapped */
-		BUG_ON(!this);
+		while (this->overlapped) {
+			ptn = tn_prev(this);
+			if (!ptn) {
+				/*
+				 * We killed a node which set the overlapped
+				 * flags during the scan. Fix it up.
+				 */
+				this->overlapped = 0;
+				break;
+			}
+			this = ptn;
+		}
 		dbg_readinode("'this' found %#04x-%#04x (%s)\n", this->fn->ofs, this->fn->ofs + this->fn->size, this->fn ? "data" : "hole");
 	}
 
@@ -360,7 +367,17 @@ static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
 		}
 		if (!this->overlapped)
 			break;
-		this = tn_prev(this);
+
+		ptn = tn_prev(this);
+		if (!ptn) {
+			/*
+			 * We killed a node which set the overlapped
+			 * flags during the scan. Fix it up.
+			 */
+			this->overlapped = 0;
+			break;
+		}
+		this = ptn;
 	}
 }
 
@@ -456,8 +473,15 @@ static int jffs2_build_inode_fragtree(struct jffs2_sb_info *c,
 		eat_last(&rii->tn_root, &last->rb);
 		ver_insert(&ver_root, last);
 
-		if (unlikely(last->overlapped))
-			continue;
+		if (unlikely(last->overlapped)) {
+			if (pen)
+				continue;
+			/*
+			 * We killed a node which set the overlapped
+			 * flags during the scan. Fix it up.
+			 */
+			last->overlapped = 0;
+		}
 
 		/* Now we have a bunch of nodes in reverse version
 		   order, in the tree at ver_root. Most of the time,
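The two jffs2_add_tn_to_tree() hunks above add the same guard in two places: when walking backwards over nodes marked overlapped, a NULL return from tn_prev() now clears the stale flag instead of tripping BUG_ON(). Purely as an illustration, that pattern could be factored into a small helper like the hypothetical one below; the patch itself keeps the logic open-coded and this sketch assumes the jffs2 readinode.c context (struct jffs2_tmp_dnode_info, tn_prev()).

/*
 * Hypothetical helper (not part of the patch): back up from 'this'
 * while it is marked overlapped.  If we run off the front of the
 * list, the overlapped flag was left behind by a node killed during
 * the scan, so clear it rather than treating it as fatal.
 */
static struct jffs2_tmp_dnode_info *
tn_backup_overlapped(struct jffs2_tmp_dnode_info *this)
{
	struct jffs2_tmp_dnode_info *ptn;

	while (this->overlapped) {
		ptn = tn_prev(this);
		if (!ptn) {
			/* Stale flag from a node killed during the scan */
			this->overlapped = 0;
			break;
		}
		this = ptn;
	}
	return this;
}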