path: root/fs/jffs2/readinode.c
author    Artem B. Bityutskiy <dedekind@infradead.org>    2005-07-27 10:46:14 -0400
committer Thomas Gleixner <tglx@mtd.linutronix.de>    2005-11-06 11:50:45 -0500
commit    f97117d15361b3a6aeaf9e347a287ef3f54b58f9 (patch)
tree      614ff5f6fa693e1e475430eff9687e40b96b8555 /fs/jffs2/readinode.c
parent    f538c96ba2a3fdf7744ecf9fdffac14b1ec4be32 (diff)
[JFFS2] Move scattered functions into related files
Move functions to read inodes into readinode.c.
Move functions to handle fragtree and dentry lists into nodelist.[ch].

Signed-off-by: Artem B. Bityutskiy <dedekind@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'fs/jffs2/readinode.c')
-rw-r--r--   fs/jffs2/readinode.c   863
1 file changed, 557 insertions(+), 306 deletions(-)
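
For context, a minimal sketch of the declarations that end up shared through fs/jffs2/nodelist.h once the fragtree and dirent-list helpers leave readinode.c. The names and signatures are inferred from the calls visible in the diff below, not copied from the actual header, so treat this purely as an illustration of the reorganization:

/* Sketch only -- assumed declarations; the real nodelist.h may differ. */
struct jffs2_sb_info;
struct jffs2_inode_info;
struct jffs2_full_dnode;
struct jffs2_full_dirent;

/* Fragtree handling, now provided by nodelist.c rather than readinode.c */
int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c,
                                  struct jffs2_inode_info *f,
                                  struct jffs2_full_dnode *fn);

/* Dirent list handling, also provided by nodelist.c; readinode.c keeps
 * calling it from read_direntry() below. */
void jffs2_add_fd_to_list(struct jffs2_sb_info *c,
                          struct jffs2_full_dirent *new,
                          struct jffs2_full_dirent **list);
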
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 339ba46320fa..85a285b2a309 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -7,7 +7,7 @@
7 * 7 *
8 * For licensing information, see the file 'LICENCE' in this directory. 8 * For licensing information, see the file 'LICENCE' in this directory.
9 * 9 *
10 * $Id: readinode.c,v 1.130 2005/07/24 15:29:56 dedekind Exp $ 10 * $Id: readinode.c,v 1.131 2005/07/27 14:46:11 dedekind Exp $
11 * 11 *
12 */ 12 */
13 13
@@ -20,376 +20,537 @@
20#include <linux/compiler.h> 20#include <linux/compiler.h>
21#include "nodelist.h" 21#include "nodelist.h"
22 22
23static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *list, struct jffs2_node_frag *newfrag); 23void jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size)
24
25static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this)
26{ 24{
27 if (this->node) { 25 struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size);
28 this->node->frags--; 26
29 if (!this->node->frags) { 27 D1(printk(KERN_DEBUG "Truncating fraglist to 0x%08x bytes\n", size));
30 /* The node has no valid frags left. It's totally obsoleted */ 28
31 D2(printk(KERN_DEBUG "Marking old node @0x%08x (0x%04x-0x%04x) obsolete\n", 29 /* We know frag->ofs <= size. That's what lookup does for us */
32 ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size)); 30 if (frag && frag->ofs != size) {
33 jffs2_mark_node_obsolete(c, this->node->raw); 31 if (frag->ofs+frag->size >= size) {
34 jffs2_free_full_dnode(this->node); 32 D1(printk(KERN_DEBUG "Truncating frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size));
35 } else { 33 frag->size = size - frag->ofs;
36 D2(printk(KERN_DEBUG "Marking old node @0x%08x (0x%04x-0x%04x) REF_NORMAL. frags is %d\n",
37 ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size,
38 this->node->frags));
39 mark_ref_normal(this->node->raw);
40 } 34 }
41 35 frag = frag_next(frag);
36 }
37 while (frag && frag->ofs >= size) {
38 struct jffs2_node_frag *next = frag_next(frag);
39
40 D1(printk(KERN_DEBUG "Removing frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size));
41 frag_erase(frag, list);
42 jffs2_obsolete_node_frag(c, frag);
43 frag = next;
42 } 44 }
43 jffs2_free_node_frag(this);
44} 45}
45 46
46/* Given an inode, probably with existing list of fragments, add the new node 47/*
47 * to the fragment list. 48 * Put a new tmp_dnode_info into the temporaty RB-tree, keeping the list in
49 * order of increasing version.
48 */ 50 */
49int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn) 51static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list)
50{ 52{
51 int ret; 53 struct rb_node **p = &list->rb_node;
52 struct jffs2_node_frag *newfrag; 54 struct rb_node * parent = NULL;
55 struct jffs2_tmp_dnode_info *this;
56
57 while (*p) {
58 parent = *p;
59 this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
60
61 /* There may actually be a collision here, but it doesn't
62 actually matter. As long as the two nodes with the same
63 version are together, it's all fine. */
64 if (tn->version < this->version)
65 p = &(*p)->rb_left;
66 else
67 p = &(*p)->rb_right;
68 }
69
70 rb_link_node(&tn->rb, parent, p);
71 rb_insert_color(&tn->rb, list);
72}
53 73
54 D1(printk(KERN_DEBUG "jffs2_add_full_dnode_to_inode(ino #%u, f %p, fn %p)\n", f->inocache->ino, f, fn)); 74static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
75{
76 struct rb_node *this;
77 struct jffs2_tmp_dnode_info *tn;
78
79 this = list->rb_node;
80
81 /* Now at bottom of tree */
82 while (this) {
83 if (this->rb_left)
84 this = this->rb_left;
85 else if (this->rb_right)
86 this = this->rb_right;
87 else {
88 tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb);
89 jffs2_free_full_dnode(tn->fn);
90 jffs2_free_tmp_dnode_info(tn);
91
92 this = this->rb_parent;
93 if (!this)
94 break;
95
96 if (this->rb_left == &tn->rb)
97 this->rb_left = NULL;
98 else if (this->rb_right == &tn->rb)
99 this->rb_right = NULL;
100 else BUG();
101 }
102 }
103 list->rb_node = NULL;
104}
55 105
56 if (unlikely(!fn->size)) 106static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
57 return 0; 107{
108 struct jffs2_full_dirent *next;
58 109
59 newfrag = jffs2_alloc_node_frag(); 110 while (fd) {
60 if (unlikely(!newfrag)) 111 next = fd->next;
61 return -ENOMEM; 112 jffs2_free_full_dirent(fd);
113 fd = next;
114 }
115}
62 116
63 D2(printk(KERN_DEBUG "adding node %04x-%04x @0x%08x on flash, newfrag *%p\n", 117/* Returns first valid node after 'ref'. May return 'ref' */
64 fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag)); 118static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
65 119{
66 newfrag->ofs = fn->ofs; 120 while (ref && ref->next_in_ino) {
67 newfrag->size = fn->size; 121 if (!ref_obsolete(ref))
68 newfrag->node = fn; 122 return ref;
69 newfrag->node->frags = 1; 123 D1(printk(KERN_DEBUG "node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref)));
124 ref = ref->next_in_ino;
125 }
126 return NULL;
127}
70 128
71 ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag); 129/*
72 if (ret) 130 * Helper function for jffs2_get_inode_nodes().
 73 return ret; 131 * It is called every time a directory entry node is found.
132 *
 133 * Returns: 0 on success;
134 * 1 if the node should be marked obsolete;
135 * negative error code on failure.
136 */
137static inline int
138read_direntry(struct jffs2_sb_info *c,
139 struct jffs2_raw_node_ref *ref,
140 struct jffs2_raw_dirent *rd,
141 uint32_t read,
142 struct jffs2_full_dirent **fdp,
143 int32_t *latest_mctime,
144 uint32_t *mctime_ver)
145{
146 struct jffs2_full_dirent *fd;
147
148 /* The direntry nodes are checked during the flash scanning */
149 BUG_ON(ref_flags(ref) == REF_UNCHECKED);
150 /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
151 BUG_ON(ref_obsolete(ref));
152
153 /* Sanity check */
154 if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) {
155 printk(KERN_ERR "Error! Illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
156 ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
157 return 1;
158 }
159
160 fd = jffs2_alloc_full_dirent(rd->nsize + 1);
161 if (unlikely(!fd))
162 return -ENOMEM;
74 163
75 /* If we now share a page with other nodes, mark either previous 164 fd->raw = ref;
76 or next node REF_NORMAL, as appropriate. */ 165 fd->version = je32_to_cpu(rd->version);
77 if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) { 166 fd->ino = je32_to_cpu(rd->ino);
78 struct jffs2_node_frag *prev = frag_prev(newfrag); 167 fd->type = rd->type;
79 168
80 mark_ref_normal(fn->raw); 169 /* Pick out the mctime of the latest dirent */
81 /* If we don't start at zero there's _always_ a previous */ 170 if(fd->version > *mctime_ver) {
82 if (prev->node) 171 *mctime_ver = fd->version;
83 mark_ref_normal(prev->node->raw); 172 *latest_mctime = je32_to_cpu(rd->mctime);
84 } 173 }
85 174
86 if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) { 175 /*
87 struct jffs2_node_frag *next = frag_next(newfrag); 176 * Copy as much of the name as possible from the raw
177 * dirent we've already read from the flash.
178 */
179 if (read > sizeof(*rd))
180 memcpy(&fd->name[0], &rd->name[0],
181 min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) ));
88 182
89 if (next) { 183 /* Do we need to copy any more of the name directly from the flash? */
90 mark_ref_normal(fn->raw); 184 if (rd->nsize + sizeof(*rd) > read) {
91 if (next->node) 185 /* FIXME: point() */
92 mark_ref_normal(next->node->raw); 186 int err;
187 int already = read - sizeof(*rd);
188
189 err = jffs2_flash_read(c, (ref_offset(ref)) + read,
190 rd->nsize - already, &read, &fd->name[already]);
191 if (unlikely(read != rd->nsize - already) && likely(!err))
192 return -EIO;
193
194 if (unlikely(err)) {
195 printk(KERN_WARNING "Read remainder of name: error %d\n", err);
196 jffs2_free_full_dirent(fd);
197 return -EIO;
93 } 198 }
94 } 199 }
95 jffs2_dbg_fragtree_paranoia_check_nolock(f); 200
96 jffs2_dbg_dump_fragtree_nolock(f); 201 fd->nhash = full_name_hash(fd->name, rd->nsize);
202 fd->next = NULL;
203 fd->name[rd->nsize] = '\0';
204
205 /*
206 * Wheee. We now have a complete jffs2_full_dirent structure, with
207 * the name in it and everything. Link it into the list
208 */
209 D1(printk(KERN_DEBUG "Adding fd \"%s\", ino #%u\n", fd->name, fd->ino));
210
211 jffs2_add_fd_to_list(c, fd, fdp);
212
97 return 0; 213 return 0;
98} 214}
99 215
100/* Doesn't set inode->i_size */ 216/*
101static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *list, struct jffs2_node_frag *newfrag) 217 * Helper function for jffs2_get_inode_nodes().
218 * It is called every time an inode node is found.
219 *
 220 * Returns: 0 on success;
221 * 1 if the node should be marked obsolete;
222 * negative error code on failure.
223 */
224static inline int
225read_dnode(struct jffs2_sb_info *c,
226 struct jffs2_raw_node_ref *ref,
227 struct jffs2_raw_inode *rd,
228 uint32_t read,
229 struct rb_root *tnp,
230 int32_t *latest_mctime,
231 uint32_t *mctime_ver)
102{ 232{
103 struct jffs2_node_frag *this; 233 struct jffs2_eraseblock *jeb;
104 uint32_t lastend; 234 struct jffs2_tmp_dnode_info *tn;
105 235
106 /* Skip all the nodes which are completed before this one starts */ 236 /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
107 this = jffs2_lookup_node_frag(list, newfrag->node->ofs); 237 BUG_ON(ref_obsolete(ref));
108 238
109 if (this) { 239 /* If we've never checked the CRCs on this node, check them now */
110 D2(printk(KERN_DEBUG "j_a_f_d_t_f: Lookup gave frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", 240 if (ref_flags(ref) == REF_UNCHECKED) {
111 this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this)); 241 uint32_t crc, len;
112 lastend = this->ofs + this->size; 242
113 } else { 243 crc = crc32(0, rd, sizeof(*rd) - 8);
114 D2(printk(KERN_DEBUG "j_a_f_d_t_f: Lookup gave no frag\n")); 244 if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
115 lastend = 0; 245 printk(KERN_WARNING "Header CRC failed on node at %#08x: read %#08x, calculated %#08x\n",
116 } 246 ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
117 247 return 1;
118 /* See if we ran off the end of the list */ 248 }
119 if (lastend <= newfrag->ofs) { 249
120 /* We did */ 250 /* Sanity checks */
121 251 if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) ||
122 /* Check if 'this' node was on the same page as the new node. 252 unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) {
123 If so, both 'this' and the new node get marked REF_NORMAL so 253 printk(KERN_WARNING "Inode corrupted at %#08x, totlen %d, #ino %d, version %d, "
124 the GC can take a look. 254 "isize %d, csize %d, dsize %d \n",
125 */ 255 ref_offset(ref), je32_to_cpu(rd->totlen), je32_to_cpu(rd->ino),
126 if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) { 256 je32_to_cpu(rd->version), je32_to_cpu(rd->isize),
127 if (this->node) 257 je32_to_cpu(rd->csize), je32_to_cpu(rd->dsize));
128 mark_ref_normal(this->node->raw); 258 return 1;
129 mark_ref_normal(newfrag->node->raw);
130 } 259 }
131 260
132 if (lastend < newfrag->node->ofs) { 261 if (rd->compr != JFFS2_COMPR_ZERO && je32_to_cpu(rd->csize)) {
133 /* ... and we need to put a hole in before the new node */ 262 unsigned char *buf = NULL;
134 struct jffs2_node_frag *holefrag = jffs2_alloc_node_frag(); 263 uint32_t pointed = 0;
135 if (!holefrag) { 264 int err;
136 jffs2_free_node_frag(newfrag); 265#ifndef __ECOS
137 return -ENOMEM; 266 if (c->mtd->point) {
267 err = c->mtd->point (c->mtd, ref_offset(ref) + sizeof(*rd), je32_to_cpu(rd->csize),
268 &read, &buf);
269 if (unlikely(read < je32_to_cpu(rd->csize)) && likely(!err)) {
270 D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", read));
271 c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(*rd),
272 je32_to_cpu(rd->csize));
273 } else if (unlikely(err)){
274 D1(printk(KERN_DEBUG "MTD point failed %d\n", err));
275 } else
276 pointed = 1; /* succefully pointed to device */
138 } 277 }
139 holefrag->ofs = lastend; 278#endif
140 holefrag->size = newfrag->node->ofs - lastend; 279 if(!pointed){
141 holefrag->node = NULL; 280 buf = kmalloc(je32_to_cpu(rd->csize), GFP_KERNEL);
142 if (this) { 281 if (!buf)
143 /* By definition, the 'this' node has no right-hand child, 282 return -ENOMEM;
144 because there are no frags with offset greater than it. 283
145 So that's where we want to put the hole */ 284 err = jffs2_flash_read(c, ref_offset(ref) + sizeof(*rd), je32_to_cpu(rd->csize),
146 D2(printk(KERN_DEBUG "Adding hole frag (%p) on right of node at (%p)\n", holefrag, this)); 285 &read, buf);
147 rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right); 286 if (unlikely(read != je32_to_cpu(rd->csize)) && likely(!err))
148 } else { 287 err = -EIO;
149 D2(printk(KERN_DEBUG "Adding hole frag (%p) at root of tree\n", holefrag)); 288 if (err) {
150 rb_link_node(&holefrag->rb, NULL, &list->rb_node); 289 kfree(buf);
290 return err;
291 }
151 } 292 }
152 rb_insert_color(&holefrag->rb, list); 293 crc = crc32(0, buf, je32_to_cpu(rd->csize));
153 this = holefrag; 294 if(!pointed)
154 } 295 kfree(buf);
155 if (this) { 296#ifndef __ECOS
156 /* By definition, the 'this' node has no right-hand child, 297 else
157 because there are no frags with offset greater than it. 298 c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(*rd), je32_to_cpu(rd->csize));
158 So that's where we want to put the hole */ 299#endif
159 D2(printk(KERN_DEBUG "Adding new frag (%p) on right of node at (%p)\n", newfrag, this)); 300
160 rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right); 301 if (crc != je32_to_cpu(rd->data_crc)) {
161 } else { 302 printk(KERN_NOTICE "Data CRC failed on node at %#08x: read %#08x, calculated %#08x\n",
162 D2(printk(KERN_DEBUG "Adding new frag (%p) at root of tree\n", newfrag)); 303 ref_offset(ref), je32_to_cpu(rd->data_crc), crc);
163 rb_link_node(&newfrag->rb, NULL, &list->rb_node); 304 return 1;
164 }
165 rb_insert_color(&newfrag->rb, list);
166 return 0;
167 }
168
169 D2(printk(KERN_DEBUG "j_a_f_d_t_f: dealing with frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n",
170 this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this));
171
172 /* OK. 'this' is pointing at the first frag that newfrag->ofs at least partially obsoletes,
173 * - i.e. newfrag->ofs < this->ofs+this->size && newfrag->ofs >= this->ofs
174 */
175 if (newfrag->ofs > this->ofs) {
176 /* This node isn't completely obsoleted. The start of it remains valid */
177
178 /* Mark the new node and the partially covered node REF_NORMAL -- let
179 the GC take a look at them */
180 mark_ref_normal(newfrag->node->raw);
181 if (this->node)
182 mark_ref_normal(this->node->raw);
183
184 if (this->ofs + this->size > newfrag->ofs + newfrag->size) {
185 /* The new node splits 'this' frag into two */
186 struct jffs2_node_frag *newfrag2 = jffs2_alloc_node_frag();
187 if (!newfrag2) {
188 jffs2_free_node_frag(newfrag);
189 return -ENOMEM;
190 } 305 }
191 D2(printk(KERN_DEBUG "split old frag 0x%04x-0x%04x -->", this->ofs, this->ofs+this->size);
192 if (this->node)
193 printk("phys 0x%08x\n", ref_offset(this->node->raw));
194 else
195 printk("hole\n");
196 )
197
198 /* New second frag pointing to this's node */
199 newfrag2->ofs = newfrag->ofs + newfrag->size;
200 newfrag2->size = (this->ofs+this->size) - newfrag2->ofs;
201 newfrag2->node = this->node;
202 if (this->node)
203 this->node->frags++;
204
205 /* Adjust size of original 'this' */
206 this->size = newfrag->ofs - this->ofs;
207
208 /* Now, we know there's no node with offset
209 greater than this->ofs but smaller than
210 newfrag2->ofs or newfrag->ofs, for obvious
211 reasons. So we can do a tree insert from
212 'this' to insert newfrag, and a tree insert
213 from newfrag to insert newfrag2. */
214 jffs2_fragtree_insert(newfrag, this);
215 rb_insert_color(&newfrag->rb, list);
216 306
217 jffs2_fragtree_insert(newfrag2, newfrag);
218 rb_insert_color(&newfrag2->rb, list);
219
220 return 0;
221 } 307 }
222 /* New node just reduces 'this' frag in size, doesn't split it */
223 this->size = newfrag->ofs - this->ofs;
224 308
225 /* Again, we know it lives down here in the tree */ 309 /* Mark the node as having been checked and fix the accounting accordingly */
226 jffs2_fragtree_insert(newfrag, this); 310 jeb = &c->blocks[ref->flash_offset / c->sector_size];
227 rb_insert_color(&newfrag->rb, list); 311 len = ref_totlen(c, jeb, ref);
228 } else { 312
229 /* New frag starts at the same point as 'this' used to. Replace 313 spin_lock(&c->erase_completion_lock);
230 it in the tree without doing a delete and insertion */ 314 jeb->used_size += len;
231 D2(printk(KERN_DEBUG "Inserting newfrag (*%p),%d-%d in before 'this' (*%p),%d-%d\n", 315 jeb->unchecked_size -= len;
232 newfrag, newfrag->ofs, newfrag->ofs+newfrag->size, 316 c->used_size += len;
233 this, this->ofs, this->ofs+this->size)); 317 c->unchecked_size -= len;
234 318
235 rb_replace_node(&this->rb, &newfrag->rb, list); 319 /* If node covers at least a whole page, or if it starts at the
236 320 beginning of a page and runs to the end of the file, or if
237 if (newfrag->ofs + newfrag->size >= this->ofs+this->size) { 321 it's a hole node, mark it REF_PRISTINE, else REF_NORMAL.
238 D2(printk(KERN_DEBUG "Obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size));
239 jffs2_obsolete_node_frag(c, this);
240 } else {
241 this->ofs += newfrag->size;
242 this->size -= newfrag->size;
243 322
244 jffs2_fragtree_insert(this, newfrag); 323 If it's actually overlapped, it'll get made NORMAL (or OBSOLETE)
245 rb_insert_color(&this->rb, list); 324 when the overlapping node(s) get added to the tree anyway.
246 return 0; 325 */
326 if ((je32_to_cpu(rd->dsize) >= PAGE_CACHE_SIZE) ||
327 ( ((je32_to_cpu(rd->offset) & (PAGE_CACHE_SIZE-1))==0) &&
328 (je32_to_cpu(rd->dsize) + je32_to_cpu(rd->offset) == je32_to_cpu(rd->isize)))) {
329 D1(printk(KERN_DEBUG "Marking node at %#08x REF_PRISTINE\n", ref_offset(ref)));
330 ref->flash_offset = ref_offset(ref) | REF_PRISTINE;
331 } else {
332 D1(printk(KERN_DEBUG "Marking node at %#08x REF_NORMAL\n", ref_offset(ref)));
333 ref->flash_offset = ref_offset(ref) | REF_NORMAL;
247 } 334 }
335 spin_unlock(&c->erase_completion_lock);
248 } 336 }
249 /* OK, now we have newfrag added in the correct place in the tree, but
250 frag_next(newfrag) may be a fragment which is overlapped by it
251 */
252 while ((this = frag_next(newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) {
253 /* 'this' frag is obsoleted completely. */
254 D2(printk(KERN_DEBUG "Obsoleting node frag %p (%x-%x) and removing from tree\n", this, this->ofs, this->ofs+this->size));
255 rb_erase(&this->rb, list);
256 jffs2_obsolete_node_frag(c, this);
257 }
258 /* Now we're pointing at the first frag which isn't totally obsoleted by
259 the new frag */
260 337
261 if (!this || newfrag->ofs + newfrag->size == this->ofs) { 338 tn = jffs2_alloc_tmp_dnode_info();
262 return 0; 339 if (!tn) {
340 D1(printk(KERN_DEBUG "alloc tn failed\n"));
341 return -ENOMEM;
263 } 342 }
264 /* Still some overlap but we don't need to move it in the tree */
265 this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size);
266 this->ofs = newfrag->ofs + newfrag->size;
267 343
268 /* And mark them REF_NORMAL so the GC takes a look at them */ 344 tn->fn = jffs2_alloc_full_dnode();
269 if (this->node) 345 if (!tn->fn) {
270 mark_ref_normal(this->node->raw); 346 D1(printk(KERN_DEBUG "alloc fn failed\n"));
271 mark_ref_normal(newfrag->node->raw); 347 jffs2_free_tmp_dnode_info(tn);
348 return -ENOMEM;
349 }
350
351 tn->version = je32_to_cpu(rd->version);
352 tn->fn->ofs = je32_to_cpu(rd->offset);
353 tn->fn->raw = ref;
354
355 /* There was a bug where we wrote hole nodes out with
356 csize/dsize swapped. Deal with it */
357 if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && je32_to_cpu(rd->csize))
358 tn->fn->size = je32_to_cpu(rd->csize);
359 else // normal case...
360 tn->fn->size = je32_to_cpu(rd->dsize);
361
362 D1(printk(KERN_DEBUG "dnode @%08x: ver %u, offset %#04x, dsize %#04x\n",
363 ref_offset(ref), je32_to_cpu(rd->version),
364 je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize)));
365
366 jffs2_add_tn_to_tree(tn, tnp);
272 367
273 return 0; 368 return 0;
274} 369}
275 370
276void jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size) 371/*
372 * Helper function for jffs2_get_inode_nodes().
373 * It is called every time an unknown node is found.
374 *
 375 * Returns: 0 on success;
376 * 1 if the node should be marked obsolete;
377 * negative error code on failure.
378 */
379static inline int
380read_unknown(struct jffs2_sb_info *c,
381 struct jffs2_raw_node_ref *ref,
382 struct jffs2_unknown_node *un,
383 uint32_t read)
277{ 384{
278 struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size); 385 /* We don't mark unknown nodes as REF_UNCHECKED */
386 BUG_ON(ref_flags(ref) == REF_UNCHECKED);
387
388 un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));
279 389
280 D1(printk(KERN_DEBUG "Truncating fraglist to 0x%08x bytes\n", size)); 390 if (crc32(0, un, sizeof(struct jffs2_unknown_node) - 4) != je32_to_cpu(un->hdr_crc)) {
281 391
282 /* We know frag->ofs <= size. That's what lookup does for us */ 392 /* Hmmm. This should have been caught at scan time. */
283 if (frag && frag->ofs != size) { 393 printk(KERN_WARNING "Warning! Node header CRC failed at %#08x. "
284 if (frag->ofs+frag->size >= size) { 394 "But it must have been OK earlier.\n", ref_offset(ref));
285 D1(printk(KERN_DEBUG "Truncating frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size)); 395 D1(printk(KERN_DEBUG "Node was: { %#04x, %#04x, %#08x, %#08x }\n",
286 frag->size = size - frag->ofs; 396 je16_to_cpu(un->magic), je16_to_cpu(un->nodetype),
397 je32_to_cpu(un->totlen), je32_to_cpu(un->hdr_crc)));
398 return 1;
399 } else {
400 switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {
401
402 case JFFS2_FEATURE_INCOMPAT:
403 printk(KERN_NOTICE "Unknown INCOMPAT nodetype %#04X at %#08x\n",
404 je16_to_cpu(un->nodetype), ref_offset(ref));
405 /* EEP */
406 BUG();
407 break;
408
409 case JFFS2_FEATURE_ROCOMPAT:
410 printk(KERN_NOTICE "Unknown ROCOMPAT nodetype %#04X at %#08x\n",
411 je16_to_cpu(un->nodetype), ref_offset(ref));
412 BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
413 break;
414
415 case JFFS2_FEATURE_RWCOMPAT_COPY:
416 printk(KERN_NOTICE "Unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
417 je16_to_cpu(un->nodetype), ref_offset(ref));
418 break;
419
420 case JFFS2_FEATURE_RWCOMPAT_DELETE:
421 printk(KERN_NOTICE "Unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
422 je16_to_cpu(un->nodetype), ref_offset(ref));
423 return 1;
287 } 424 }
288 frag = frag_next(frag);
289 } 425 }
290 while (frag && frag->ofs >= size) {
291 struct jffs2_node_frag *next = frag_next(frag);
292 426
293 D1(printk(KERN_DEBUG "Removing frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size)); 427 return 0;
294 frag_erase(frag, list);
295 jffs2_obsolete_node_frag(c, frag);
296 frag = next;
297 }
298} 428}
299 429
300/* Scan the list of all nodes present for this ino, build map of versions, etc. */ 430/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
301 431 with this ino, returning the former in order of version */
302static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
303 struct jffs2_inode_info *f,
304 struct jffs2_raw_inode *latest_node);
305 432
306int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, 433static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
307 uint32_t ino, struct jffs2_raw_inode *latest_node) 434 struct rb_root *tnp, struct jffs2_full_dirent **fdp,
435 uint32_t *highest_version, uint32_t *latest_mctime,
436 uint32_t *mctime_ver)
308{ 437{
309 D2(printk(KERN_DEBUG "jffs2_do_read_inode(): getting inocache\n")); 438 struct jffs2_raw_node_ref *ref, *valid_ref;
439 struct rb_root ret_tn = RB_ROOT;
440 struct jffs2_full_dirent *ret_fd = NULL;
441 union jffs2_node_union node;
442 size_t retlen;
443 int err;
310 444
311 retry_inocache: 445 *mctime_ver = 0;
312 spin_lock(&c->inocache_lock); 446
313 f->inocache = jffs2_get_ino_cache(c, ino); 447 D1(printk(KERN_DEBUG "jffs2_get_inode_nodes(): ino #%u\n", f->inocache->ino));
314 448
315 D2(printk(KERN_DEBUG "jffs2_do_read_inode(): Got inocache at %p\n", f->inocache)); 449 spin_lock(&c->erase_completion_lock);
316 450
317 if (f->inocache) { 451 valid_ref = jffs2_first_valid_node(f->inocache->nodes);
318 /* Check its state. We may need to wait before we can use it */ 452
319 switch(f->inocache->state) { 453 if (!valid_ref && (f->inocache->ino != 1))
320 case INO_STATE_UNCHECKED: 454 printk(KERN_WARNING "Eep. No valid nodes for ino #%u\n", f->inocache->ino);
321 case INO_STATE_CHECKEDABSENT: 455
322 f->inocache->state = INO_STATE_READING; 456 while (valid_ref) {
323 break; 457 /* We can hold a pointer to a non-obsolete node without the spinlock,
458 but _obsolete_ nodes may disappear at any time, if the block
459 they're in gets erased. So if we mark 'ref' obsolete while we're
460 not holding the lock, it can go away immediately. For that reason,
461 we find the next valid node first, before processing 'ref'.
462 */
463 ref = valid_ref;
464 valid_ref = jffs2_first_valid_node(ref->next_in_ino);
465 spin_unlock(&c->erase_completion_lock);
466
467 cond_resched();
468
469 /* FIXME: point() */
470 err = jffs2_flash_read(c, (ref_offset(ref)),
471 min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node)),
472 &retlen, (void *)&node);
473 if (err) {
474 printk(KERN_WARNING "error %d reading node at 0x%08x in get_inode_nodes()\n", err, ref_offset(ref));
475 goto free_out;
476 }
324 477
325 case INO_STATE_CHECKING: 478 switch (je16_to_cpu(node.u.nodetype)) {
326 case INO_STATE_GC: 479
327 /* If it's in either of these states, we need 480 case JFFS2_NODETYPE_DIRENT:
328 to wait for whoever's got it to finish and 481 D1(printk(KERN_DEBUG "Node at %08x (%d) is a dirent node\n", ref_offset(ref), ref_flags(ref)));
329 put it back. */ 482
330 D1(printk(KERN_DEBUG "jffs2_get_ino_cache_read waiting for ino #%u in state %d\n", 483 if (retlen < sizeof(node.d)) {
331 ino, f->inocache->state)); 484 printk(KERN_WARNING "Warning! Short read dirent at %#08x\n", ref_offset(ref));
332 sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); 485 err = -EIO;
333 goto retry_inocache; 486 goto free_out;
487 }
488
489 err = read_direntry(c, ref, &node.d, retlen, &ret_fd, latest_mctime, mctime_ver);
490 if (err == 1) {
491 jffs2_mark_node_obsolete(c, ref);
492 break;
493 } else if (unlikely(err))
494 goto free_out;
495
496 if (je32_to_cpu(node.d.version) > *highest_version)
497 *highest_version = je32_to_cpu(node.d.version);
334 498
335 case INO_STATE_READING:
336 case INO_STATE_PRESENT:
337 /* Eep. This should never happen. It can
338 happen if Linux calls read_inode() again
339 before clear_inode() has finished though. */
340 printk(KERN_WARNING "Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
341 /* Fail. That's probably better than allowing it to succeed */
342 f->inocache = NULL;
343 break; 499 break;
344 500
345 default: 501 case JFFS2_NODETYPE_INODE:
346 BUG(); 502 D1(printk(KERN_DEBUG "Node at %08x (%d) is a data node\n", ref_offset(ref), ref_flags(ref)));
347 } 503
348 } 504 if (retlen < sizeof(node.i)) {
349 spin_unlock(&c->inocache_lock); 505 printk(KERN_WARNING "Warning! Short read dnode at %#08x\n", ref_offset(ref));
506 err = -EIO;
507 goto free_out;
508 }
350 509
351 if (!f->inocache && ino == 1) { 510 err = read_dnode(c, ref, &node.i, retlen, &ret_tn, latest_mctime, mctime_ver);
352 /* Special case - no root inode on medium */ 511 if (err == 1) {
353 f->inocache = jffs2_alloc_inode_cache(); 512 jffs2_mark_node_obsolete(c, ref);
354 if (!f->inocache) { 513 break;
355 printk(KERN_CRIT "jffs2_do_read_inode(): Cannot allocate inocache for root inode\n"); 514 } else if (unlikely(err))
356 return -ENOMEM; 515 goto free_out;
357 }
358 D1(printk(KERN_DEBUG "jffs2_do_read_inode(): Creating inocache for root inode\n"));
359 memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
360 f->inocache->ino = f->inocache->nlink = 1;
361 f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
362 f->inocache->state = INO_STATE_READING;
363 jffs2_add_ino_cache(c, f->inocache);
364 }
365 if (!f->inocache) {
366 printk(KERN_WARNING "jffs2_do_read_inode() on nonexistent ino %u\n", ino);
367 return -ENOENT;
368 }
369 516
370 return jffs2_do_read_inode_internal(c, f, latest_node); 517 if (je32_to_cpu(node.i.version) > *highest_version)
371} 518 *highest_version = je32_to_cpu(node.i.version);
519
520 D1(printk(KERN_DEBUG "version %d, highest_version now %d\n",
521 je32_to_cpu(node.i.version), *highest_version));
372 522
373int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) 523 break;
374{
375 struct jffs2_raw_inode n;
376 struct jffs2_inode_info *f = kmalloc(sizeof(*f), GFP_KERNEL);
377 int ret;
378 524
379 if (!f) 525 default:
380 return -ENOMEM; 526 /* Check we've managed to read at least the common node header */
527 if (retlen < sizeof(struct jffs2_unknown_node)) {
528 printk(KERN_WARNING "Warning! Short read unknown node at %#08x\n",
529 ref_offset(ref));
530 return -EIO;
531 }
381 532
382 memset(f, 0, sizeof(*f)); 533 err = read_unknown(c, ref, &node.u, retlen);
383 init_MUTEX_LOCKED(&f->sem); 534 if (err == 1) {
384 f->inocache = ic; 535 jffs2_mark_node_obsolete(c, ref);
536 break;
537 } else if (unlikely(err))
538 goto free_out;
539
540 }
541 spin_lock(&c->erase_completion_lock);
385 542
386 ret = jffs2_do_read_inode_internal(c, f, &n);
387 if (!ret) {
388 up(&f->sem);
389 jffs2_do_clear_inode(c, f);
390 } 543 }
391 kfree (f); 544 spin_unlock(&c->erase_completion_lock);
392 return ret; 545 *tnp = ret_tn;
546 *fdp = ret_fd;
547
548 return 0;
549
550 free_out:
551 jffs2_free_tmp_dnode_info_list(&ret_tn);
552 jffs2_free_full_dirent_list(ret_fd);
553 return err;
393} 554}
394 555
395static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, 556static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
@@ -618,6 +779,96 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
618 return 0; 779 return 0;
619} 780}
620 781
782/* Scan the list of all nodes present for this ino, build map of versions, etc. */
783int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
784 uint32_t ino, struct jffs2_raw_inode *latest_node)
785{
786 D2(printk(KERN_DEBUG "jffs2_do_read_inode(): getting inocache\n"));
787
788 retry_inocache:
789 spin_lock(&c->inocache_lock);
790 f->inocache = jffs2_get_ino_cache(c, ino);
791
792 D2(printk(KERN_DEBUG "jffs2_do_read_inode(): Got inocache at %p\n", f->inocache));
793
794 if (f->inocache) {
795 /* Check its state. We may need to wait before we can use it */
796 switch(f->inocache->state) {
797 case INO_STATE_UNCHECKED:
798 case INO_STATE_CHECKEDABSENT:
799 f->inocache->state = INO_STATE_READING;
800 break;
801
802 case INO_STATE_CHECKING:
803 case INO_STATE_GC:
804 /* If it's in either of these states, we need
805 to wait for whoever's got it to finish and
806 put it back. */
807 D1(printk(KERN_DEBUG "jffs2_get_ino_cache_read waiting for ino #%u in state %d\n",
808 ino, f->inocache->state));
809 sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
810 goto retry_inocache;
811
812 case INO_STATE_READING:
813 case INO_STATE_PRESENT:
814 /* Eep. This should never happen. It can
815 happen if Linux calls read_inode() again
816 before clear_inode() has finished though. */
817 printk(KERN_WARNING "Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
818 /* Fail. That's probably better than allowing it to succeed */
819 f->inocache = NULL;
820 break;
821
822 default:
823 BUG();
824 }
825 }
826 spin_unlock(&c->inocache_lock);
827
828 if (!f->inocache && ino == 1) {
829 /* Special case - no root inode on medium */
830 f->inocache = jffs2_alloc_inode_cache();
831 if (!f->inocache) {
832 printk(KERN_CRIT "jffs2_do_read_inode(): Cannot allocate inocache for root inode\n");
833 return -ENOMEM;
834 }
835 D1(printk(KERN_DEBUG "jffs2_do_read_inode(): Creating inocache for root inode\n"));
836 memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
837 f->inocache->ino = f->inocache->nlink = 1;
838 f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
839 f->inocache->state = INO_STATE_READING;
840 jffs2_add_ino_cache(c, f->inocache);
841 }
842 if (!f->inocache) {
843 printk(KERN_WARNING "jffs2_do_read_inode() on nonexistent ino %u\n", ino);
844 return -ENOENT;
845 }
846
847 return jffs2_do_read_inode_internal(c, f, latest_node);
848}
849
850int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
851{
852 struct jffs2_raw_inode n;
853 struct jffs2_inode_info *f = kmalloc(sizeof(*f), GFP_KERNEL);
854 int ret;
855
856 if (!f)
857 return -ENOMEM;
858
859 memset(f, 0, sizeof(*f));
860 init_MUTEX_LOCKED(&f->sem);
861 f->inocache = ic;
862
863 ret = jffs2_do_read_inode_internal(c, f, &n);
864 if (!ret) {
865 up(&f->sem);
866 jffs2_do_clear_inode(c, f);
867 }
868 kfree (f);
869 return ret;
870}
871
621void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f) 872void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
622{ 873{
623 struct jffs2_full_dirent *fd, *fds; 874 struct jffs2_full_dirent *fd, *fds;