path: root/fs/jffs2/nodemgmt.c
author	David Woodhouse <dwmw2@infradead.org>	2006-05-23 21:04:45 -0400
committer	David Woodhouse <dwmw2@infradead.org>	2006-05-23 21:04:45 -0400
commit	2f785402f39b96a077b6e62bf26164bfb8e0c980 (patch)
tree	3f3a38b484ef2dabda1599d4d8f08b121bd03a76 /fs/jffs2/nodemgmt.c
parent	4cbb9b80e171107c6c34116283fe38e5a396c68b (diff)
[JFFS2] Reduce visibility of raw_node_ref to upper layers of JFFS2 code.
As the first step towards eliminating the ref->next_phys member and saving memory by using an _array_ of struct jffs2_raw_node_ref per eraseblock, stop the write functions from allocating their own refs; have them just _reserve_ the appropriate number instead. Then jffs2_link_node_ref() can just fill them in.

Use a linked list of pre-allocated refs in the superblock, for now. Once we switch to an array, it'll just be a case of extending that array.

Signed-off-by: David Woodhouse <dwmw2@infradead.org>
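The pre-allocation step this commit introduces can be pictured with a short sketch. This is not the code added to malloc.c by this series; the free-list head c->refs and the kmalloc-based allocation are assumptions used only to illustrate the idea of a superblock-level list of spare refs that write paths top up before taking any locks, so that jffs2_link_node_ref() can later fill one in without being able to fail.

	#include <linux/slab.h>		/* kmalloc -- sketch assumes a kernel build context */
	#include "nodelist.h"		/* struct jffs2_sb_info, struct jffs2_raw_node_ref */

	/* Illustrative sketch only, not the commit's actual implementation. */
	static int prealloc_raw_node_refs_sketch(struct jffs2_sb_info *c, int nr)
	{
		struct jffs2_raw_node_ref *ref = c->refs;	/* assumed free-list head */

		/* Count how many spare refs are already chained on the free list. */
		while (nr && ref) {
			ref = ref->next_in_ino;
			nr--;
		}

		/* Allocate the shortfall now, while sleeping is still allowed; the
		 * write path then consumes these refs under the spinlock later. */
		while (nr--) {
			ref = kmalloc(sizeof(*ref), GFP_KERNEL);
			if (!ref)
				return -ENOMEM;
			ref->next_in_ino = c->refs;
			c->refs = ref;
		}
		return 0;
	}

In the hunks below, jffs2_reserve_space() and jffs2_reserve_space_gc() only need one such call, jffs2_prealloc_raw_node_refs(c, 1), once the space reservation itself has succeeded.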
Diffstat (limited to 'fs/jffs2/nodemgmt.c')
-rw-r--r--	fs/jffs2/nodemgmt.c	39
1 file changed, 22 insertions, 17 deletions
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index 8feb8749bc75..01bf2773fe4d 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -137,6 +137,8 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
 		}
 	}
 	spin_unlock(&c->erase_completion_lock);
+	if (!ret)
+		ret = jffs2_prealloc_raw_node_refs(c, 1);
 	if (ret)
 		up(&c->alloc_sem);
 	return ret;
@@ -158,6 +160,9 @@ int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
 		}
 	}
 	spin_unlock(&c->erase_completion_lock);
+	if (!ret)
+		ret = jffs2_prealloc_raw_node_refs(c, 1);
+
 	return ret;
 }
 
@@ -381,30 +386,30 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
  * Must be called with the alloc_sem held.
  */
 
-int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new,
-				uint32_t len, struct jffs2_inode_cache *ic)
+struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
+						       uint32_t ofs, uint32_t len,
+						       struct jffs2_inode_cache *ic)
 {
 	struct jffs2_eraseblock *jeb;
+	struct jffs2_raw_node_ref *new;
 
-	jeb = &c->blocks[new->flash_offset / c->sector_size];
-#ifdef TEST_TOTLEN
-	new->__totlen = len;
-#endif
+	jeb = &c->blocks[ofs / c->sector_size];
 
-	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
+	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n",
+		  ofs & ~3, ofs & 3, len));
 #if 1
-	/* we could get some obsolete nodes after nextblock was refiled
-	   in wbuf.c */
-	if ((c->nextblock || !ref_obsolete(new))
-	    &&(jeb != c->nextblock || ref_offset(new) != jeb->offset + (c->sector_size - jeb->free_size))) {
+	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
+	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
+	   even after refiling c->nextblock */
+	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
+	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
 		printk(KERN_WARNING "argh. node added in wrong place\n");
-		jffs2_free_raw_node_ref(new);
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 	}
 #endif
 	spin_lock(&c->erase_completion_lock);
 
-	jffs2_link_node_ref(c, jeb, new, len, ic);
+	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);
 
 	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
 		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
@@ -425,7 +430,7 @@ int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_r
 
 	spin_unlock(&c->erase_completion_lock);
 
-	return 0;
+	return new;
 }
 
 
@@ -697,7 +702,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
 		}
 		spin_unlock(&c->erase_completion_lock);
 
-		jffs2_free_raw_node_ref(n);
+		__jffs2_free_raw_node_ref(n);
 	}
 
 	/* Also merge with the previous node in the list, if there is one
@@ -722,7 +727,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
 			jeb->gc_node=p;
 		}
 		p->next_phys = ref->next_phys;
-		jffs2_free_raw_node_ref(ref);
+		__jffs2_free_raw_node_ref(ref);
 	}
 	spin_unlock(&c->erase_completion_lock);
 }
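With the new signature, callers no longer allocate a ref and pass it in; they pass the flash offset (with the ref flags in its low two bits, as the ofs & 3 checks above show) and get the linked ref, or an ERR_PTR-encoded error, back. A hypothetical caller, shown only to illustrate the convention; flash_ofs, retlen and f stand in for the surrounding write-path context:

	struct jffs2_raw_node_ref *ref;

	/* Hypothetical write-path caller of the new interface. */
	ref = jffs2_add_physical_node_ref(c, flash_ofs | REF_NORMAL,
					  PAD(retlen), f->inocache);
	if (IS_ERR(ref))
		return PTR_ERR(ref);	/* e.g. -EINVAL: node landed in the wrong place */

Returning the ref via ERR_PTR() also removes the need for the old error path's jffs2_free_raw_node_ref(new) call, since the caller no longer owns an allocation at that point.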