-rw-r--r--  fs/jffs2/nodelist.c  | 637
-rw-r--r--  fs/jffs2/nodelist.h  |   9
-rw-r--r--  fs/jffs2/readinode.c | 475
3 files changed, 868 insertions, 253 deletions
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 0cf5e6f11989..390ce06ab1a7 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: nodelist.c,v 1.103 2005/07/31 08:20:44 dedekind Exp $ | 10 | * $Id: nodelist.c,v 1.104 2005/08/01 12:05:19 dedekind Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -59,7 +59,7 @@ void jffs2_truncate_fragtree(struct jffs2_sb_info *c, struct rb_root *list, uint | |||
59 | 59 | ||
60 | /* We know frag->ofs <= size. That's what lookup does for us */ | 60 | /* We know frag->ofs <= size. That's what lookup does for us */ |
61 | if (frag && frag->ofs != size) { | 61 | if (frag && frag->ofs != size) { |
62 | if (frag->ofs+frag->size >= size) { | 62 | if (frag->ofs+frag->size > size) { |
63 | JFFS2_DBG_FRAGTREE2("truncating frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size); | 63 | JFFS2_DBG_FRAGTREE2("truncating frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size); |
64 | frag->size = size - frag->ofs; | 64 | frag->size = size - frag->ofs; |
65 | } | 65 | } |
@@ -73,6 +73,20 @@ void jffs2_truncate_fragtree(struct jffs2_sb_info *c, struct rb_root *list, uint | |||
73 | jffs2_obsolete_node_frag(c, frag); | 73 | jffs2_obsolete_node_frag(c, frag); |
74 | frag = next; | 74 | frag = next; |
75 | } | 75 | } |
76 | |||
77 | if (size == 0) | ||
78 | return; | ||
79 | |||
80 | /* | ||
81 | * If the last fragment starts at the RAM page boundary, it is | ||
82 | * REF_PRISTINE irrespective of its size. | ||
83 | */ | ||
84 | frag = frag_last(list); | ||
85 | if ((frag->ofs & (PAGE_CACHE_SIZE - 1)) == 0) { | ||
86 | JFFS2_DBG_FRAGTREE2("marking the last fragment 0x%08x-0x%08x REF_PRISTINE.\n", | ||
87 | frag->ofs, frag->ofs + frag->size); | ||
88 | frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE; | ||
89 | } | ||
76 | } | 90 | } |
77 | 91 | ||
78 | void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this) | 92 | void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this) |
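For illustration only (not part of the patch): the comparison change above (">=" to ">") simply skips the truncate branch for a fragment that already ends exactly at the new size, where recomputing its length would be a no-op, and the new tail handling reduces to an alignment test on the last fragment's offset. A standalone sketch of that test, assuming the page size is a power of two:

#include <stdint.h>

/* Sketch: does the surviving last fragment keep REF_PRISTINE after a
 * truncate?  'ofs' is the fragment's file offset; 'page_size' stands in
 * for PAGE_CACHE_SIZE and is assumed to be a power of two. */
static int last_frag_stays_pristine(uint32_t ofs, uint32_t page_size)
{
	return (ofs & (page_size - 1)) == 0;
}

/* Example: truncating to 0x1200 with a 0x1000 page size leaves a last
 * fragment starting at 0x1000; the test succeeds, so it keeps
 * REF_PRISTINE regardless of its (shortened) length. */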
@@ -120,14 +134,82 @@ static void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_ | |||
120 | rb_link_node(&newfrag->rb, &base->rb, link); | 134 | rb_link_node(&newfrag->rb, &base->rb, link); |
121 | } | 135 | } |
122 | 136 | ||
137 | /* | ||
138 | * Allocates and initializes a new fragment. | ||
139 | */ | ||
140 | static inline struct jffs2_node_frag * new_fragment(struct jffs2_full_dnode *fn, uint32_t ofs, uint32_t size) | ||
141 | { | ||
142 | struct jffs2_node_frag *newfrag; | ||
143 | |||
144 | newfrag = jffs2_alloc_node_frag(); | ||
145 | if (likely(newfrag)) { | ||
146 | newfrag->ofs = ofs; | ||
147 | newfrag->size = size; | ||
148 | newfrag->node = fn; | ||
149 | } else { | ||
150 | JFFS2_ERROR("cannot allocate a jffs2_node_frag object\n"); | ||
151 | } | ||
152 | |||
153 | return newfrag; | ||
154 | } | ||
155 | |||
156 | /* | ||
157 | * Called when no overlapping fragment exists. Inserts a hole before the new | ||
158 | * fragment and inserts the new fragment into the fragtree. | ||
159 | */ | ||
160 | static int no_overlapping_node(struct jffs2_sb_info *c, struct rb_root *root, | ||
161 | struct jffs2_node_frag *newfrag, | ||
162 | struct jffs2_node_frag *this, uint32_t lastend) | ||
163 | { | ||
164 | if (lastend < newfrag->node->ofs) { | ||
165 | /* put a hole in before the new fragment */ | ||
166 | struct jffs2_node_frag *holefrag; | ||
167 | |||
168 | holefrag= new_fragment(NULL, lastend, newfrag->node->ofs - lastend); | ||
169 | if (unlikely(!holefrag)) { | ||
170 | jffs2_free_node_frag(newfrag); | ||
171 | return -ENOMEM; | ||
172 | } | ||
173 | |||
174 | if (this) { | ||
175 | /* By definition, the 'this' node has no right-hand child, | ||
176 | because there are no frags with offset greater than it. | ||
177 | So that's where we want to put the hole */ | ||
178 | JFFS2_DBG_FRAGTREE2("add hole frag %u-%u on the right of the new frag.\n", | ||
179 | holefrag->ofs, holefrag->ofs + holefrag->size); | ||
180 | rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right); | ||
181 | } else { | ||
182 | JFFS2_DBG_FRAGTREE2("Add hole frag %u-%u to the root of the tree.\n", | ||
183 | holefrag->ofs, holefrag->ofs + holefrag->size); | ||
184 | rb_link_node(&holefrag->rb, NULL, &root->rb_node); | ||
185 | } | ||
186 | rb_insert_color(&holefrag->rb, root); | ||
187 | this = holefrag; | ||
188 | } | ||
189 | |||
190 | if (this) { | ||
191 | /* By definition, the 'this' node has no right-hand child, | ||
192 | because there are no frags with offset greater than it. | ||
193 | So that's where we want to put new fragment */ | ||
194 | JFFS2_DBG_FRAGTREE2("add the new node at the right\n"); | ||
195 | rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right); | ||
196 | } else { | ||
197 | JFFS2_DBG_FRAGTREE2("insert the new node at the root of the tree\n"); | ||
198 | rb_link_node(&newfrag->rb, NULL, &root->rb_node); | ||
199 | } | ||
200 | rb_insert_color(&newfrag->rb, root); | ||
201 | |||
202 | return 0; | ||
203 | } | ||
204 | |||
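A hypothetical walk-through of no_overlapping_node() (illustration only, all values invented):

/*
 * Suppose the fragtree currently ends at lastend = 0x400 and the incoming
 * node covers 0x600-0x800.  The helper then:
 *
 *   1. creates a hole fragment  { ofs = 0x400, size = 0x200, node = NULL }
 *      and links it as the right-hand child of the rightmost fragment
 *      (or at the root if the tree was empty);
 *   2. links the data fragment  { ofs = 0x600, size = 0x200, node = fn }
 *      as the right-hand child of that hole.
 *
 * If instead lastend == 0x600 (no gap), step 1 is skipped and the data
 * fragment is appended directly.
 */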
123 | /* Doesn't set inode->i_size */ | 205 | /* Doesn't set inode->i_size */ |
124 | static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *list, struct jffs2_node_frag *newfrag) | 206 | static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *root, struct jffs2_node_frag *newfrag) |
125 | { | 207 | { |
126 | struct jffs2_node_frag *this; | 208 | struct jffs2_node_frag *this; |
127 | uint32_t lastend; | 209 | uint32_t lastend; |
128 | 210 | ||
129 | /* Skip all the nodes which are completed before this one starts */ | 211 | /* Skip all the nodes which are completed before this one starts */ |
130 | this = jffs2_lookup_node_frag(list, newfrag->node->ofs); | 212 | this = jffs2_lookup_node_frag(root, newfrag->node->ofs); |
131 | 213 | ||
132 | if (this) { | 214 | if (this) { |
133 | JFFS2_DBG_FRAGTREE2("lookup gave frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", | 215 | JFFS2_DBG_FRAGTREE2("lookup gave frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", |
@@ -138,7 +220,7 @@ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *l | |||
138 | lastend = 0; | 220 | lastend = 0; |
139 | } | 221 | } |
140 | 222 | ||
141 | /* See if we ran off the end of the list */ | 223 | /* See if we ran off the end of the fragtree */ |
142 | if (lastend <= newfrag->ofs) { | 224 | if (lastend <= newfrag->ofs) { |
143 | /* We did */ | 225 | /* We did */ |
144 | 226 | ||
@@ -152,45 +234,16 @@ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *l | |||
152 | mark_ref_normal(newfrag->node->raw); | 234 | mark_ref_normal(newfrag->node->raw); |
153 | } | 235 | } |
154 | 236 | ||
155 | if (lastend < newfrag->node->ofs) { | 237 | return no_overlapping_node(c, root, newfrag, this, lastend); |
156 | /* ... and we need to put a hole in before the new node */ | ||
157 | struct jffs2_node_frag *holefrag = jffs2_alloc_node_frag(); | ||
158 | if (!holefrag) { | ||
159 | jffs2_free_node_frag(newfrag); | ||
160 | return -ENOMEM; | ||
161 | } | ||
162 | holefrag->ofs = lastend; | ||
163 | holefrag->size = newfrag->node->ofs - lastend; | ||
164 | holefrag->node = NULL; | ||
165 | if (this) { | ||
166 | /* By definition, the 'this' node has no right-hand child, | ||
167 | because there are no frags with offset greater than it. | ||
168 | So that's where we want to put the hole */ | ||
169 | JFFS2_DBG_FRAGTREE2("adding hole frag (%p) on right of node at (%p)\n", holefrag, this); | ||
170 | rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right); | ||
171 | } else { | ||
172 | JFFS2_DBG_FRAGTREE2("adding hole frag (%p) at root of tree\n", holefrag); | ||
173 | rb_link_node(&holefrag->rb, NULL, &list->rb_node); | ||
174 | } | ||
175 | rb_insert_color(&holefrag->rb, list); | ||
176 | this = holefrag; | ||
177 | } | ||
178 | if (this) { | ||
179 | /* By definition, the 'this' node has no right-hand child, | ||
180 | because there are no frags with offset greater than it. | ||
181 | So that's where we want to put new fragment */ | ||
182 | JFFS2_DBG_FRAGTREE2("adding new frag (%p) on right of node at (%p)\n", newfrag, this); | ||
183 | rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right); | ||
184 | } else { | ||
185 | JFFS2_DBG_FRAGTREE2("adding new frag (%p) at root of tree\n", newfrag); | ||
186 | rb_link_node(&newfrag->rb, NULL, &list->rb_node); | ||
187 | } | ||
188 | rb_insert_color(&newfrag->rb, list); | ||
189 | return 0; | ||
190 | } | 238 | } |
191 | 239 | ||
192 | JFFS2_DBG_FRAGTREE2("dealing with frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", | 240 | if (this->node) |
193 | this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this); | 241 | JFFS2_DBG_FRAGTREE2("dealing with frag %u-%u, phys %#08x(%d).\n", |
242 | this->ofs, this->ofs + this->size, | ||
243 | ref_offset(this->node->raw), ref_flags(this->node->raw)); | ||
244 | else | ||
245 | JFFS2_DBG_FRAGTREE2("dealing with hole frag %u-%u.\n", | ||
246 | this->ofs, this->ofs + this->size); | ||
194 | 247 | ||
195 | /* OK. 'this' is pointing at the first frag that newfrag->ofs at least partially obsoletes, | 248 | /* OK. 'this' is pointing at the first frag that newfrag->ofs at least partially obsoletes, |
196 | * - i.e. newfrag->ofs < this->ofs+this->size && newfrag->ofs >= this->ofs | 249 | * - i.e. newfrag->ofs < this->ofs+this->size && newfrag->ofs >= this->ofs |
@@ -206,11 +259,8 @@ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *l | |||
206 | 259 | ||
207 | if (this->ofs + this->size > newfrag->ofs + newfrag->size) { | 260 | if (this->ofs + this->size > newfrag->ofs + newfrag->size) { |
208 | /* The new node splits 'this' frag into two */ | 261 | /* The new node splits 'this' frag into two */ |
209 | struct jffs2_node_frag *newfrag2 = jffs2_alloc_node_frag(); | 262 | struct jffs2_node_frag *newfrag2; |
210 | if (!newfrag2) { | 263 | |
211 | jffs2_free_node_frag(newfrag); | ||
212 | return -ENOMEM; | ||
213 | } | ||
214 | if (this->node) | 264 | if (this->node) |
215 | JFFS2_DBG_FRAGTREE2("split old frag 0x%04x-0x%04x, phys 0x%08x\n", | 265 | JFFS2_DBG_FRAGTREE2("split old frag 0x%04x-0x%04x, phys 0x%08x\n", |
216 | this->ofs, this->ofs+this->size, ref_offset(this->node->raw)); | 266 | this->ofs, this->ofs+this->size, ref_offset(this->node->raw)); |
@@ -219,9 +269,10 @@ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *l | |||
219 | this->ofs, this->ofs+this->size, ref_offset(this->node->raw)); | 269 | this->ofs, this->ofs+this->size, ref_offset(this->node->raw)); |
220 | 270 | ||
221 | /* New second frag pointing to this's node */ | 271 | /* New second frag pointing to this's node */ |
222 | newfrag2->ofs = newfrag->ofs + newfrag->size; | 272 | newfrag2 = new_fragment(this->node, newfrag->ofs + newfrag->size, |
223 | newfrag2->size = (this->ofs+this->size) - newfrag2->ofs; | 273 | this->ofs + this->size - newfrag->ofs - newfrag->size); |
224 | newfrag2->node = this->node; | 274 | if (unlikely(!newfrag2)) |
275 | return -ENOMEM; | ||
225 | if (this->node) | 276 | if (this->node) |
226 | this->node->frags++; | 277 | this->node->frags++; |
227 | 278 | ||
@@ -235,10 +286,10 @@ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *l | |||
235 | 'this' to insert newfrag, and a tree insert | 286 | 'this' to insert newfrag, and a tree insert |
236 | from newfrag to insert newfrag2. */ | 287 | from newfrag to insert newfrag2. */ |
237 | jffs2_fragtree_insert(newfrag, this); | 288 | jffs2_fragtree_insert(newfrag, this); |
238 | rb_insert_color(&newfrag->rb, list); | 289 | rb_insert_color(&newfrag->rb, root); |
239 | 290 | ||
240 | jffs2_fragtree_insert(newfrag2, newfrag); | 291 | jffs2_fragtree_insert(newfrag2, newfrag); |
241 | rb_insert_color(&newfrag2->rb, list); | 292 | rb_insert_color(&newfrag2->rb, root); |
242 | 293 | ||
243 | return 0; | 294 | return 0; |
244 | } | 295 | } |
@@ -247,14 +298,14 @@ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *l | |||
247 | 298 | ||
248 | /* Again, we know it lives down here in the tree */ | 299 | /* Again, we know it lives down here in the tree */ |
249 | jffs2_fragtree_insert(newfrag, this); | 300 | jffs2_fragtree_insert(newfrag, this); |
250 | rb_insert_color(&newfrag->rb, list); | 301 | rb_insert_color(&newfrag->rb, root); |
251 | } else { | 302 | } else { |
252 | /* New frag starts at the same point as 'this' used to. Replace | 303 | /* New frag starts at the same point as 'this' used to. Replace |
253 | it in the tree without doing a delete and insertion */ | 304 | it in the tree without doing a delete and insertion */ |
254 | JFFS2_DBG_FRAGTREE2("inserting newfrag (*%p),%d-%d in before 'this' (*%p),%d-%d\n", | 305 | JFFS2_DBG_FRAGTREE2("inserting newfrag (*%p),%d-%d in before 'this' (*%p),%d-%d\n", |
255 | newfrag, newfrag->ofs, newfrag->ofs+newfrag->size, this, this->ofs, this->ofs+this->size); | 306 | newfrag, newfrag->ofs, newfrag->ofs+newfrag->size, this, this->ofs, this->ofs+this->size); |
256 | 307 | ||
257 | rb_replace_node(&this->rb, &newfrag->rb, list); | 308 | rb_replace_node(&this->rb, &newfrag->rb, root); |
258 | 309 | ||
259 | if (newfrag->ofs + newfrag->size >= this->ofs+this->size) { | 310 | if (newfrag->ofs + newfrag->size >= this->ofs+this->size) { |
260 | JFFS2_DBG_FRAGTREE2("obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size); | 311 | JFFS2_DBG_FRAGTREE2("obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size); |
@@ -264,7 +315,7 @@ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *l | |||
264 | this->size -= newfrag->size; | 315 | this->size -= newfrag->size; |
265 | 316 | ||
266 | jffs2_fragtree_insert(this, newfrag); | 317 | jffs2_fragtree_insert(this, newfrag); |
267 | rb_insert_color(&this->rb, list); | 318 | rb_insert_color(&this->rb, root); |
268 | return 0; | 319 | return 0; |
269 | } | 320 | } |
270 | } | 321 | } |
@@ -275,15 +326,15 @@ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *l | |||
275 | /* 'this' frag is obsoleted completely. */ | 326 | /* 'this' frag is obsoleted completely. */ |
276 | JFFS2_DBG_FRAGTREE2("obsoleting node frag %p (%x-%x) and removing from tree\n", | 327 | JFFS2_DBG_FRAGTREE2("obsoleting node frag %p (%x-%x) and removing from tree\n", |
277 | this, this->ofs, this->ofs+this->size); | 328 | this, this->ofs, this->ofs+this->size); |
278 | rb_erase(&this->rb, list); | 329 | rb_erase(&this->rb, root); |
279 | jffs2_obsolete_node_frag(c, this); | 330 | jffs2_obsolete_node_frag(c, this); |
280 | } | 331 | } |
281 | /* Now we're pointing at the first frag which isn't totally obsoleted by | 332 | /* Now we're pointing at the first frag which isn't totally obsoleted by |
282 | the new frag */ | 333 | the new frag */ |
283 | 334 | ||
284 | if (!this || newfrag->ofs + newfrag->size == this->ofs) { | 335 | if (!this || newfrag->ofs + newfrag->size == this->ofs) |
285 | return 0; | 336 | return 0; |
286 | } | 337 | |
287 | /* Still some overlap but we don't need to move it in the tree */ | 338 | /* Still some overlap but we don't need to move it in the tree */ |
288 | this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size); | 339 | this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size); |
289 | this->ofs = newfrag->ofs + newfrag->size; | 340 | this->ofs = newfrag->ofs + newfrag->size; |
@@ -296,8 +347,9 @@ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *l | |||
296 | return 0; | 347 | return 0; |
297 | } | 348 | } |
298 | 349 | ||
299 | /* Given an inode, probably with existing list of fragments, add the new node | 350 | /* |
300 | * to the fragment list. | 351 | * Given an inode, probably with existing tree of fragments, add the new node |
352 | * to the fragment tree. | ||
301 | */ | 353 | */ |
302 | int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn) | 354 | int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn) |
303 | { | 355 | { |
@@ -307,18 +359,14 @@ int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_in | |||
307 | if (unlikely(!fn->size)) | 359 | if (unlikely(!fn->size)) |
308 | return 0; | 360 | return 0; |
309 | 361 | ||
310 | newfrag = jffs2_alloc_node_frag(); | 362 | newfrag = new_fragment(fn, fn->ofs, fn->size); |
311 | if (unlikely(!newfrag)) | 363 | if (unlikely(!newfrag)) |
312 | return -ENOMEM; | 364 | return -ENOMEM; |
365 | newfrag->node->frags = 1; | ||
313 | 366 | ||
314 | JFFS2_DBG_FRAGTREE("adding node %#04x-%#04x @0x%08x on flash, newfrag *%p\n", | 367 | JFFS2_DBG_FRAGTREE("adding node %#04x-%#04x @0x%08x on flash, newfrag *%p\n", |
315 | fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag); | 368 | fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag); |
316 | 369 | ||
317 | newfrag->ofs = fn->ofs; | ||
318 | newfrag->size = fn->size; | ||
319 | newfrag->node = fn; | ||
320 | newfrag->node->frags = 1; | ||
321 | |||
322 | ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag); | 370 | ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag); |
323 | if (unlikely(ret)) | 371 | if (unlikely(ret)) |
324 | return ret; | 372 | return ret; |
@@ -344,10 +392,465 @@ int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_in | |||
344 | } | 392 | } |
345 | } | 393 | } |
346 | jffs2_dbg_fragtree_paranoia_check_nolock(f); | 394 | jffs2_dbg_fragtree_paranoia_check_nolock(f); |
347 | jffs2_dbg_dump_fragtree_nolock(f); | 395 | |
396 | return 0; | ||
397 | } | ||
398 | |||
399 | /* | ||
400 | * Check the data CRC of the node. | ||
401 | * | ||
402 | * Returns: 0 if the data CRC is correct; | ||
403 | * 1 - if incorrect; | ||
404 | * error code if an error occurred. | ||
405 | */ | ||
406 | static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn) | ||
407 | { | ||
408 | struct jffs2_raw_node_ref *ref = tn->fn->raw; | ||
409 | int err = 0, pointed = 0; | ||
410 | struct jffs2_eraseblock *jeb; | ||
411 | unsigned char *buffer; | ||
412 | uint32_t crc, ofs, retlen, len; | ||
413 | |||
414 | BUG_ON(tn->csize == 0); | ||
415 | |||
416 | /* Calculate how many bytes were already checked */ | ||
417 | ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode); | ||
418 | len = ofs - (ofs & (PAGE_CACHE_SIZE - 1)); | ||
419 | len = c->wbuf_pagesize - len; | ||
420 | |||
421 | if (len >= tn->csize) { | ||
422 | JFFS2_DBG_READINODE("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n", | ||
423 | ref_offset(ref), tn->csize, ofs); | ||
424 | goto adj_acc; | ||
425 | } | ||
426 | |||
427 | ofs += len; | ||
428 | len = tn->csize - len; | ||
429 | |||
430 | JFFS2_DBG_READINODE("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n", | ||
431 | ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len); | ||
432 | |||
433 | #ifndef __ECOS | ||
434 | /* TODO: instead, encapsulate the point() stuff in jffs2_flash_read(), | ||
435 | * adding a jffs2_flash_read_end() interface. */ | ||
436 | if (c->mtd->point) { | ||
437 | err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer); | ||
438 | if (!err && retlen < tn->csize) { | ||
439 | JFFS2_WARNING("MTD point returned len too short: %u instead of %u.\n", retlen, tn->csize); | ||
440 | c->mtd->unpoint(c->mtd, buffer, ofs, len); | ||
441 | } else if (err) | ||
442 | JFFS2_WARNING("MTD point failed: error code %d.\n", err); | ||
443 | else | ||
444 | pointed = 1; /* successfully pointed to device */ | ||
445 | } | ||
446 | #endif | ||
447 | |||
448 | if (!pointed) { | ||
449 | buffer = kmalloc(len, GFP_KERNEL); | ||
450 | if (unlikely(!buffer)) | ||
451 | return -ENOMEM; | ||
452 | |||
453 | /* TODO: this is very frequent pattern, make it a separate | ||
454 | * routine */ | ||
455 | err = jffs2_flash_read(c, ofs, len, &retlen, buffer); | ||
456 | if (err) { | ||
457 | JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ofs, err); | ||
458 | goto free_out; | ||
459 | } | ||
460 | |||
461 | if (retlen != len) { | ||
462 | JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ofs, retlen, len); | ||
463 | err = -EIO; | ||
464 | goto free_out; | ||
465 | } | ||
466 | } | ||
467 | |||
468 | /* Continue calculating CRC */ | ||
469 | crc = crc32(tn->partial_crc, buffer, len); | ||
470 | if(!pointed) | ||
471 | kfree(buffer); | ||
472 | #ifndef __ECOS | ||
473 | else | ||
474 | c->mtd->unpoint(c->mtd, buffer, ofs, len); | ||
475 | #endif | ||
476 | |||
477 | if (crc != tn->data_crc) { | ||
478 | JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n", | ||
479 | ofs, tn->data_crc, crc); | ||
480 | return 1; | ||
481 | } | ||
482 | |||
483 | adj_acc: | ||
484 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | ||
485 | len = ref_totlen(c, jeb, ref); | ||
486 | |||
487 | /* | ||
488 | * Mark the node as having been checked and fix the | ||
489 | * accounting accordingly. | ||
490 | */ | ||
491 | spin_lock(&c->erase_completion_lock); | ||
492 | jeb->used_size += len; | ||
493 | jeb->unchecked_size -= len; | ||
494 | c->used_size += len; | ||
495 | c->unchecked_size -= len; | ||
496 | spin_unlock(&c->erase_completion_lock); | ||
497 | |||
348 | return 0; | 498 | return 0; |
499 | |||
500 | free_out: | ||
501 | if(!pointed) | ||
502 | kfree(buffer); | ||
503 | #ifndef __ECOS | ||
504 | else | ||
505 | c->mtd->unpoint(c->mtd, buffer, ofs, len); | ||
506 | #endif | ||
507 | return err; | ||
349 | } | 508 | } |
350 | 509 | ||
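A standalone sketch of the "bytes already checked" arithmetic used above (illustration only; the I/O granularity and offsets are invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t io_unit  = 512;     /* read granularity, one write-buffer page */
	uint32_t data_ofs = 0x1084;  /* where the node's payload starts on flash */
	uint32_t csize    = 700;     /* payload length stored in the node */

	/* Payload bytes that were read (and folded into partial_crc) together
	 * with the header, because they sit in the same I/O unit. */
	uint32_t already = io_unit - (data_ofs & (io_unit - 1));

	if (already >= csize)
		printf("whole payload already checked\n");
	else
		printf("still need to CRC %u bytes starting at 0x%x\n",
		       csize - already, data_ofs + already);
	return 0;
}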
510 | /* | ||
511 | * Helper function for jffs2_add_older_frag_to_fragtree(). | ||
512 | * | ||
513 | * Checks the node if we are in the checking stage. | ||
514 | */ | ||
515 | static inline int check_node(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_tmp_dnode_info *tn) | ||
516 | { | ||
517 | int ret; | ||
518 | |||
519 | BUG_ON(ref_obsolete(tn->fn->raw)); | ||
520 | |||
521 | /* We only check the data CRC of unchecked nodes */ | ||
522 | if (ref_flags(tn->fn->raw) != REF_UNCHECKED) | ||
523 | return 0; | ||
524 | |||
525 | JFFS2_DBG_FRAGTREE2("check node %u-%u, phys offs %#08x.\n", | ||
526 | tn->fn->ofs, tn->fn->ofs + tn->fn->size, | ||
527 | ref_offset(tn->fn->raw)); | ||
528 | |||
529 | ret = check_node_data(c, tn); | ||
530 | if (unlikely(ret < 0)) { | ||
531 | JFFS2_ERROR("check_node_data() returned error: %d.\n", | ||
532 | ret); | ||
533 | } else if (unlikely(ret > 0)) { | ||
534 | JFFS2_DBG_FRAGTREE2("CRC error, mark it obsolete.\n"); | ||
535 | jffs2_mark_node_obsolete(c, tn->fn->raw); | ||
536 | } | ||
537 | |||
538 | return ret; | ||
539 | } | ||
540 | |||
541 | /* | ||
542 | * Helper function for jffs2_add_older_frag_to_fragtree(). | ||
543 | * | ||
544 | * Called when the new fragment that is being inserted | ||
545 | * splits a hole fragment. | ||
546 | */ | ||
547 | static int split_hole(struct jffs2_sb_info *c, struct rb_root *root, | ||
548 | struct jffs2_node_frag *newfrag, struct jffs2_node_frag *hole) | ||
549 | { | ||
550 | JFFS2_DBG_FRAGTREE2("fragment %#04x-%#04x splits the hole %#04x-%#04x\n", | ||
551 | newfrag->ofs, newfrag->ofs + newfrag->size, hole->ofs, hole->ofs + hole->size); | ||
552 | |||
553 | if (hole->ofs == newfrag->ofs) { | ||
554 | /* | ||
555 | * Well, the new fragment actually starts at the same offset as | ||
556 | * the hole. | ||
557 | */ | ||
558 | if (hole->ofs + hole->size > newfrag->ofs + newfrag->size) { | ||
559 | /* | ||
560 | * We replace the overlapped left part of the hole by | ||
561 | * the new node. | ||
562 | */ | ||
563 | |||
564 | JFFS2_DBG_FRAGTREE2("insert fragment %#04x-%#04x and cut the left part of the hole\n", | ||
565 | newfrag->ofs, newfrag->ofs + newfrag->size); | ||
566 | rb_replace_node(&hole->rb, &newfrag->rb, root); | ||
567 | |||
568 | hole->ofs += newfrag->size; | ||
569 | hole->size -= newfrag->size; | ||
570 | |||
571 | /* | ||
572 | * We know that 'hole' should be the right hand | ||
573 | * fragment. | ||
574 | */ | ||
575 | jffs2_fragtree_insert(hole, newfrag); | ||
576 | rb_insert_color(&hole->rb, root); | ||
577 | } else { | ||
578 | /* | ||
579 | * Ah, the new fragment is of the same size as the hole. | ||
580 | * Replace the hole with it. | ||
581 | */ | ||
582 | JFFS2_DBG_FRAGTREE2("insert fragment %#04x-%#04x and overwrite hole\n", | ||
583 | newfrag->ofs, newfrag->ofs + newfrag->size); | ||
584 | rb_replace_node(&hole->rb, &newfrag->rb, root); | ||
585 | jffs2_free_node_frag(hole); | ||
586 | } | ||
587 | } else { | ||
588 | /* The new fragment leaves some hole space at the left */ | ||
589 | |||
590 | struct jffs2_node_frag * newfrag2 = NULL; | ||
591 | |||
592 | if (hole->ofs + hole->size > newfrag->ofs + newfrag->size) { | ||
593 | /* The new frag also leaves some space at the right */ | ||
594 | newfrag2 = new_fragment(NULL, newfrag->ofs + | ||
595 | newfrag->size, hole->ofs + hole->size | ||
596 | - newfrag->ofs - newfrag->size); | ||
597 | if (unlikely(!newfrag2)) { | ||
598 | jffs2_free_node_frag(newfrag); | ||
599 | return -ENOMEM; | ||
600 | } | ||
601 | } | ||
602 | |||
603 | hole->size = newfrag->ofs - hole->ofs; | ||
604 | JFFS2_DBG_FRAGTREE2("leave the hole %#04x-%#04x at the left and insert fragment %#04x-%#04x\n", | ||
605 | hole->ofs, hole->ofs + hole->size, newfrag->ofs, newfrag->ofs + newfrag->size); | ||
606 | |||
607 | jffs2_fragtree_insert(newfrag, hole); | ||
608 | rb_insert_color(&newfrag->rb, root); | ||
609 | |||
610 | if (newfrag2) { | ||
611 | JFFS2_DBG_FRAGTREE2("left the hole %#04x-%#04x at the right\n", | ||
612 | newfrag2->ofs, newfrag2->ofs + newfrag2->size); | ||
613 | jffs2_fragtree_insert(newfrag2, newfrag); | ||
614 | rb_insert_color(&newfrag2->rb, root); | ||
615 | } | ||
616 | } | ||
617 | |||
618 | return 0; | ||
619 | } | ||
620 | |||
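A hypothetical walk-through of split_hole() (illustration only, all values invented):

/*
 * Hole fragment 0x0000-0x1000 (node == NULL); an older node covering
 * 0x0400-0x0800 arrives:
 *
 *   - the hole is shortened to 0x0000-0x0400;
 *   - the data fragment 0x0400-0x0800 is inserted to its right;
 *   - a second hole fragment 0x0800-0x1000 is created for the remainder.
 *
 * Had the new fragment started at 0x0000, the hole would instead have been
 * replaced in place and merely shrunk (or freed outright, if the sizes
 * matched exactly).
 */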
621 | /* | ||
622 | * This function is used when we build inode. It expects the nodes are passed | ||
623 | * in the decreasing version order. The whole point of this is to improve the | ||
624 | * inodes checking on NAND: we check the nodes' data CRC only when they are not | ||
625 | * obsoleted. Previously, add_frag_to_fragtree() function was used and | ||
626 | * nodes were passed to it in the increasing version order and CRCs of all | ||
627 | * nodes were checked. | ||
628 | * | ||
629 | * Note: tn->fn->size shouldn't be zero. | ||
630 | * | ||
631 | * Returns 0 if the node was inserted | ||
632 | * 1 if it wasn't inserted (since it is obsolete) | ||
633 | * < 0 if an error occurred | ||
634 | */ | ||
635 | int jffs2_add_older_frag_to_fragtree(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
636 | struct jffs2_tmp_dnode_info *tn) | ||
637 | { | ||
638 | struct jffs2_node_frag *this, *newfrag; | ||
639 | uint32_t lastend; | ||
640 | struct jffs2_full_dnode *fn = tn->fn; | ||
641 | struct rb_root *root = &f->fragtree; | ||
642 | uint32_t fn_size = fn->size, fn_ofs = fn->ofs; | ||
643 | int err, checked = 0; | ||
644 | int ref_flag; | ||
645 | |||
646 | JFFS2_DBG_FRAGTREE("insert fragment %#04x-%#04x\n", fn_ofs, fn_ofs + fn_size); | ||
647 | |||
648 | /* Skip all the nodes which are completed before this one starts */ | ||
649 | this = jffs2_lookup_node_frag(root, fn_ofs); | ||
650 | if (this) | ||
651 | JFFS2_DBG_FRAGTREE2("'this' found %#04x-%#04x (%s)\n", this->ofs, this->ofs + this->size, this->node ? "data" : "hole"); | ||
652 | |||
653 | if (this) | ||
654 | lastend = this->ofs + this->size; | ||
655 | else | ||
656 | lastend = 0; | ||
657 | |||
658 | /* Detect the preliminary type of node */ | ||
659 | if (fn->size >= PAGE_CACHE_SIZE) | ||
660 | ref_flag = REF_PRISTINE; | ||
661 | else | ||
662 | ref_flag = REF_NORMAL; | ||
663 | |||
664 | /* See if we ran off the end of the root */ | ||
665 | if (lastend <= fn_ofs) { | ||
666 | /* We did */ | ||
667 | |||
668 | /* | ||
669 | * We are going to insert the new node into the | ||
670 | * fragment tree, so check it. | ||
671 | */ | ||
672 | err = check_node(c, f, tn); | ||
673 | if (err != 0) | ||
674 | return err; | ||
675 | |||
676 | fn->frags = 1; | ||
677 | |||
678 | newfrag = new_fragment(fn, fn_ofs, fn_size); | ||
679 | if (unlikely(!newfrag)) | ||
680 | return -ENOMEM; | ||
681 | |||
682 | err = no_overlapping_node(c, root, newfrag, this, lastend); | ||
683 | if (unlikely(err != 0)) { | ||
684 | jffs2_free_node_frag(newfrag); | ||
685 | return err; | ||
686 | } | ||
687 | |||
688 | goto out_ok; | ||
689 | } | ||
690 | |||
691 | fn->frags = 0; | ||
692 | |||
693 | while (1) { | ||
694 | /* | ||
695 | * Here we have: | ||
696 | * fn_ofs < this->ofs + this->size && fn_ofs >= this->ofs. | ||
697 | * | ||
698 | * Remember, 'this' has higher version, any non-hole node | ||
699 | * which is already in the fragtree is newer than the newly | ||
700 | * inserted. | ||
701 | */ | ||
702 | if (!this->node) { | ||
703 | /* | ||
704 | * 'this' is the hole fragment, so at least the | ||
705 | * beginning of the new fragment is valid. | ||
706 | */ | ||
707 | |||
708 | /* | ||
709 | * We are going to insert the new node into the | ||
710 | * fragment tree, so check it. | ||
711 | */ | ||
712 | if (!checked) { | ||
713 | err = check_node(c, f, tn); | ||
714 | if (unlikely(err != 0)) | ||
715 | return err; | ||
716 | checked = 1; | ||
717 | } | ||
718 | |||
719 | if (this->ofs + this->size >= fn_ofs + fn_size) { | ||
720 | /* We split the hole on two parts */ | ||
721 | |||
722 | fn->frags += 1; | ||
723 | newfrag = new_fragment(fn, fn_ofs, fn_size); | ||
724 | if (unlikely(!newfrag)) | ||
725 | return -ENOMEM; | ||
726 | |||
727 | err = split_hole(c, root, newfrag, this); | ||
728 | if (unlikely(err)) | ||
729 | return err; | ||
730 | goto out_ok; | ||
731 | } | ||
732 | |||
733 | /* | ||
734 | * The beginning of the new fragment is valid since it | ||
735 | * overlaps the hole node. | ||
736 | */ | ||
737 | |||
738 | ref_flag = REF_NORMAL; | ||
739 | |||
740 | fn->frags += 1; | ||
741 | newfrag = new_fragment(fn, fn_ofs, | ||
742 | this->ofs + this->size - fn_ofs); | ||
743 | if (unlikely(!newfrag)) | ||
744 | return -ENOMEM; | ||
745 | |||
746 | if (fn_ofs == this->ofs) { | ||
747 | /* | ||
748 | * The new node starts at the same offset as | ||
749 | * the hole and supersedes the hole. | ||
750 | */ | ||
751 | JFFS2_DBG_FRAGTREE2("add the new fragment instead of hole %#04x-%#04x, refcnt %d\n", | ||
752 | fn_ofs, fn_ofs + this->ofs + this->size - fn_ofs, fn->frags); | ||
753 | |||
754 | rb_replace_node(&this->rb, &newfrag->rb, root); | ||
755 | jffs2_free_node_frag(this); | ||
756 | } else { | ||
757 | /* | ||
758 | * The hole becomes shorter as its right part | ||
759 | * is superseded by the new fragment. | ||
760 | */ | ||
761 | JFFS2_DBG_FRAGTREE2("reduce size of hole %#04x-%#04x to %#04x-%#04x\n", | ||
762 | this->ofs, this->ofs + this->size, this->ofs, this->ofs + this->size - newfrag->size); | ||
763 | |||
764 | JFFS2_DBG_FRAGTREE2("add new fragment %#04x-%#04x, refcnt %d\n", fn_ofs, | ||
765 | fn_ofs + this->ofs + this->size - fn_ofs, fn->frags); | ||
766 | |||
767 | this->size -= newfrag->size; | ||
768 | jffs2_fragtree_insert(newfrag, this); | ||
769 | rb_insert_color(&newfrag->rb, root); | ||
770 | } | ||
771 | |||
772 | fn_ofs += newfrag->size; | ||
773 | fn_size -= newfrag->size; | ||
774 | this = rb_entry(rb_next(&newfrag->rb), | ||
775 | struct jffs2_node_frag, rb); | ||
776 | |||
777 | JFFS2_DBG_FRAGTREE2("switch to the next 'this' fragment: %#04x-%#04x %s\n", | ||
778 | this->ofs, this->ofs + this->size, this->node ? "(data)" : "(hole)"); | ||
779 | } | ||
780 | |||
781 | /* | ||
782 | * 'This' node is not the hole so it obsoletes the new fragment | ||
783 | * either fully or partially. | ||
784 | */ | ||
785 | if (this->ofs + this->size >= fn_ofs + fn_size) { | ||
786 | /* The new node is obsolete, drop it */ | ||
787 | if (fn->frags == 0) { | ||
788 | JFFS2_DBG_FRAGTREE2("%#04x-%#04x is obsolete, mark it obsolete\n", fn_ofs, fn_ofs + fn_size); | ||
789 | ref_flag = REF_OBSOLETE; | ||
790 | } | ||
791 | goto out_ok; | ||
792 | } else { | ||
793 | struct jffs2_node_frag *new_this; | ||
794 | |||
795 | /* 'This' node obsoletes the beginning of the new node */ | ||
796 | JFFS2_DBG_FRAGTREE2("the beginning %#04x-%#04x is obsolete\n", fn_ofs, this->ofs + this->size); | ||
797 | |||
798 | ref_flag = REF_NORMAL; | ||
799 | |||
800 | fn_size -= this->ofs + this->size - fn_ofs; | ||
801 | fn_ofs = this->ofs + this->size; | ||
802 | JFFS2_DBG_FRAGTREE2("now considering %#04x-%#04x\n", fn_ofs, fn_ofs + fn_size); | ||
803 | |||
804 | new_this = rb_entry(rb_next(&this->rb), struct jffs2_node_frag, rb); | ||
805 | if (!new_this) { | ||
806 | /* | ||
807 | * There is no next fragment. Add the rest of | ||
808 | * the new node as the right-hand child. | ||
809 | */ | ||
810 | if (!checked) { | ||
811 | err = check_node(c, f, tn); | ||
812 | if (unlikely(err != 0)) | ||
813 | return err; | ||
814 | checked = 1; | ||
815 | } | ||
816 | |||
817 | fn->frags += 1; | ||
818 | newfrag = new_fragment(fn, fn_ofs, fn_size); | ||
819 | if (unlikely(!newfrag)) | ||
820 | return -ENOMEM; | ||
821 | |||
822 | JFFS2_DBG_FRAGTREE2("there are no more fragments, insert %#04x-%#04x\n", | ||
823 | newfrag->ofs, newfrag->ofs + newfrag->size); | ||
824 | rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right); | ||
825 | rb_insert_color(&newfrag->rb, root); | ||
826 | goto out_ok; | ||
827 | } else { | ||
828 | this = new_this; | ||
829 | JFFS2_DBG_FRAGTREE2("switch to the next 'this' fragment: %#04x-%#04x %s\n", | ||
830 | this->ofs, this->ofs + this->size, this->node ? "(data)" : "(hole)"); | ||
831 | } | ||
832 | } | ||
833 | } | ||
834 | |||
835 | out_ok: | ||
836 | BUG_ON(fn->size < PAGE_CACHE_SIZE && ref_flag == REF_PRISTINE); | ||
837 | |||
838 | if (ref_flag == REF_OBSOLETE) { | ||
839 | JFFS2_DBG_FRAGTREE2("the node is obsolete now\n"); | ||
840 | /* jffs2_mark_node_obsolete() will adjust space accounting */ | ||
841 | jffs2_mark_node_obsolete(c, fn->raw); | ||
842 | return 1; | ||
843 | } | ||
844 | |||
845 | JFFS2_DBG_FRAGTREE2("the node is \"%s\" now\n", ref_flag == REF_NORMAL ? "REF_NORMAL" : "REF_PRISTINE"); | ||
846 | |||
847 | /* Space accounting was adjusted at check_node_data() */ | ||
848 | spin_lock(&c->erase_completion_lock); | ||
849 | fn->raw->flash_offset = ref_offset(fn->raw) | ref_flag; | ||
850 | spin_unlock(&c->erase_completion_lock); | ||
851 | |||
852 | return 0; | ||
853 | } | ||
351 | 854 | ||
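A rough sketch of how a caller is expected to drive this function (illustration only; highest_version_first() and next_older() are hypothetical iteration helpers, not part of the patch):

/* Walk the temporary dnode set newest-first; only nodes that still
 * contribute at least one byte to the file get their payload CRC read. */
struct jffs2_tmp_dnode_info *tn;

for (tn = highest_version_first(&tn_root); tn; tn = next_older(tn)) {
	int ret = jffs2_add_older_frag_to_fragtree(c, f, tn);

	if (ret < 0)
		return ret;	/* I/O or allocation failure */
	if (ret == 1)
		continue;	/* fully shadowed by newer nodes: obsoleted, data never read */
	/* ret == 0: fragment(s) inserted; the data CRC was verified on the way in */
}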
352 | void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state) | 855 | void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state) |
353 | { | 856 | { |
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
index 53c12e4a337d..adee3c6eb448 100644
--- a/fs/jffs2/nodelist.h
+++ b/fs/jffs2/nodelist.h
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: nodelist.h,v 1.136 2005/07/31 08:20:44 dedekind Exp $ | 10 | * $Id: nodelist.h,v 1.137 2005/08/01 12:05:19 dedekind Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -61,6 +61,9 @@ | |||
61 | #error wibble | 61 | #error wibble |
62 | #endif | 62 | #endif |
63 | 63 | ||
64 | /* The minimal node header size */ | ||
65 | #define JFFS2_MIN_NODE_HEADER sizeof(struct jffs2_raw_dirent) | ||
66 | |||
64 | /* | 67 | /* |
65 | This is all we need to keep in-core for each raw node during normal | 68 | This is all we need to keep in-core for each raw node during normal |
66 | operation. As and when we do read_inode on a particular inode, we can | 69 | operation. As and when we do read_inode on a particular inode, we can |
@@ -148,6 +151,9 @@ struct jffs2_tmp_dnode_info | |||
148 | struct rb_node rb; | 151 | struct rb_node rb; |
149 | struct jffs2_full_dnode *fn; | 152 | struct jffs2_full_dnode *fn; |
150 | uint32_t version; | 153 | uint32_t version; |
154 | uint32_t data_crc; | ||
155 | uint32_t partial_crc; | ||
156 | uint32_t csize; | ||
151 | }; | 157 | }; |
152 | 158 | ||
153 | struct jffs2_full_dirent | 159 | struct jffs2_full_dirent |
@@ -311,6 +317,7 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root | |||
311 | void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this); | 317 | void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this); |
312 | int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn); | 318 | int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn); |
313 | void jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size); | 319 | void jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size); |
320 | int jffs2_add_older_frag_to_fragtree(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_tmp_dnode_info *tn); | ||
314 | 321 | ||
315 | /* nodemgmt.c */ | 322 | /* nodemgmt.c */ |
316 | int jffs2_thread_should_wake(struct jffs2_sb_info *c); | 323 | int jffs2_thread_should_wake(struct jffs2_sb_info *c); |
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index f3b12d7fe9ab..488787a823b6 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: readinode.c,v 1.134 2005/07/31 08:20:44 dedekind Exp $ | 10 | * $Id: readinode.c,v 1.135 2005/08/01 12:05:19 dedekind Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -21,8 +21,8 @@ | |||
21 | #include <linux/compiler.h> | 21 | #include <linux/compiler.h> |
22 | #include "nodelist.h" | 22 | #include "nodelist.h" |
23 | 23 | ||
24 | /* | 24 | /* |
25 | * Put a new tmp_dnode_info into the temporary RB-tree, keeping the list in | 25 | * Put a new tmp_dnode_info into the temporary RB-tree, keeping the list in |
26 | * order of increasing version. | 26 | * order of increasing version. |
27 | */ | 27 | */ |
28 | static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list) | 28 | static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list) |
@@ -38,11 +38,11 @@ static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root | |||
38 | /* There may actually be a collision here, but it doesn't | 38 | /* There may actually be a collision here, but it doesn't |
39 | actually matter. As long as the two nodes with the same | 39 | actually matter. As long as the two nodes with the same |
40 | version are together, it's all fine. */ | 40 | version are together, it's all fine. */ |
41 | if (tn->version < this->version) | 41 | if (tn->version > this->version) |
42 | p = &(*p)->rb_left; | 42 | p = &(*p)->rb_left; |
43 | else | 43 | else |
44 | p = &(*p)->rb_right; | 44 | p = &(*p)->rb_right; |
45 | } | 45 | } |
46 | 46 | ||
47 | rb_link_node(&tn->rb, parent, p); | 47 | rb_link_node(&tn->rb, parent, p); |
48 | rb_insert_color(&tn->rb, list); | 48 | rb_insert_color(&tn->rb, list); |
@@ -111,14 +111,9 @@ static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_r | |||
111 | * 1 if the node should be marked obsolete; | 111 | * 1 if the node should be marked obsolete; |
112 | * negative error code on failure. | 112 | * negative error code on failure. |
113 | */ | 113 | */ |
114 | static inline int | 114 | static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, |
115 | read_direntry(struct jffs2_sb_info *c, | 115 | struct jffs2_raw_dirent *rd, uint32_t read, struct jffs2_full_dirent **fdp, |
116 | struct jffs2_raw_node_ref *ref, | 116 | uint32_t *latest_mctime, uint32_t *mctime_ver) |
117 | struct jffs2_raw_dirent *rd, | ||
118 | uint32_t read, | ||
119 | struct jffs2_full_dirent **fdp, | ||
120 | int32_t *latest_mctime, | ||
121 | uint32_t *mctime_ver) | ||
122 | { | 117 | { |
123 | struct jffs2_full_dirent *fd; | 118 | struct jffs2_full_dirent *fd; |
124 | 119 | ||
@@ -196,30 +191,35 @@ read_direntry(struct jffs2_sb_info *c, | |||
196 | * 1 if the node should be marked obsolete; | 191 | * 1 if the node should be marked obsolete; |
197 | * negative error code on failure. | 192 | * negative error code on failure. |
198 | */ | 193 | */ |
199 | static inline int | 194 | static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, |
200 | read_dnode(struct jffs2_sb_info *c, | 195 | struct jffs2_raw_inode *rd, struct rb_root *tnp, int rdlen, |
201 | struct jffs2_raw_node_ref *ref, | 196 | uint32_t *latest_mctime, uint32_t *mctime_ver) |
202 | struct jffs2_raw_inode *rd, | ||
203 | uint32_t read, | ||
204 | struct rb_root *tnp, | ||
205 | int32_t *latest_mctime, | ||
206 | uint32_t *mctime_ver) | ||
207 | { | 197 | { |
208 | struct jffs2_eraseblock *jeb; | ||
209 | struct jffs2_tmp_dnode_info *tn; | 198 | struct jffs2_tmp_dnode_info *tn; |
199 | uint32_t len, csize; | ||
200 | int ret = 1; | ||
210 | 201 | ||
211 | /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ | 202 | /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ |
212 | BUG_ON(ref_obsolete(ref)); | 203 | BUG_ON(ref_obsolete(ref)); |
213 | 204 | ||
205 | tn = jffs2_alloc_tmp_dnode_info(); | ||
206 | if (!tn) { | ||
207 | JFFS2_ERROR("failed to allocate tn (%d bytes).\n", sizeof(*tn)); | ||
208 | return -ENOMEM; | ||
209 | } | ||
210 | |||
211 | tn->partial_crc = 0; | ||
212 | csize = je32_to_cpu(rd->csize); | ||
213 | |||
214 | /* If we've never checked the CRCs on this node, check them now */ | 214 | /* If we've never checked the CRCs on this node, check them now */ |
215 | if (ref_flags(ref) == REF_UNCHECKED) { | 215 | if (ref_flags(ref) == REF_UNCHECKED) { |
216 | uint32_t crc, len; | 216 | uint32_t crc; |
217 | 217 | ||
218 | crc = crc32(0, rd, sizeof(*rd) - 8); | 218 | crc = crc32(0, rd, sizeof(*rd) - 8); |
219 | if (unlikely(crc != je32_to_cpu(rd->node_crc))) { | 219 | if (unlikely(crc != je32_to_cpu(rd->node_crc))) { |
220 | JFFS2_NOTICE("header CRC failed on node at %#08x: read %#08x, calculated %#08x\n", | 220 | JFFS2_NOTICE("header CRC failed on node at %#08x: read %#08x, calculated %#08x\n", |
221 | ref_offset(ref), je32_to_cpu(rd->node_crc), crc); | 221 | ref_offset(ref), je32_to_cpu(rd->node_crc), crc); |
222 | return 1; | 222 | goto free_out; |
223 | } | 223 | } |
224 | 224 | ||
225 | /* Sanity checks */ | 225 | /* Sanity checks */ |
@@ -227,107 +227,102 @@ read_dnode(struct jffs2_sb_info *c, | |||
227 | unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) { | 227 | unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) { |
228 | JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref)); | 228 | JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref)); |
229 | jffs2_dbg_dump_node(c, ref_offset(ref)); | 229 | jffs2_dbg_dump_node(c, ref_offset(ref)); |
230 | return 1; | 230 | goto free_out; |
231 | } | 231 | } |
232 | 232 | ||
233 | if (rd->compr != JFFS2_COMPR_ZERO && je32_to_cpu(rd->csize)) { | 233 | if (jffs2_is_writebuffered(c) && csize != 0) { |
234 | unsigned char *buf = NULL; | 234 | /* At this point we are supposed to check the data CRC |
235 | uint32_t pointed = 0; | 235 | * of our unchecked node. But thus far, we do not |
236 | int err; | 236 | * know whether the node is valid or obsolete. To |
237 | #ifndef __ECOS | 237 | * figure this out, we need to walk all the nodes of |
238 | if (c->mtd->point) { | 238 | * the inode and build the inode fragtree. We don't |
239 | err = c->mtd->point (c->mtd, ref_offset(ref) + sizeof(*rd), je32_to_cpu(rd->csize), | 239 | * want to spend time checking data of nodes which may |
240 | &read, &buf); | 240 | * later be found to be obsolete. So we put off the full |
241 | if (unlikely(read < je32_to_cpu(rd->csize)) && likely(!err)) { | 241 | * data CRC checking until we have read all the inode |
242 | JFFS2_ERROR("MTD point returned len too short: 0x%zx\n", read); | 242 | * nodes and have started building the fragtree. |
243 | c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(*rd), | 243 | * |
244 | je32_to_cpu(rd->csize)); | 244 | * The fragtree is being built starting with nodes |
245 | } else if (unlikely(err)){ | 245 | * having the highest version number, so we'll be able |
246 | JFFS2_ERROR("MTD point failed %d\n", err); | 246 | * to detect whether a node is valid (i.e., it is not |
247 | } else | 247 | * overlapped by a node with higher version) or not. |
248 | pointed = 1; /* succefully pointed to device */ | 248 | * And we'll be able to check only those nodes, which |
249 | } | 249 | * are not obsolete. |
250 | #endif | 250 | * |
251 | if(!pointed){ | 251 | * Of course, this optimization only makes sense in case |
252 | buf = kmalloc(je32_to_cpu(rd->csize), GFP_KERNEL); | 252 | * of NAND flashes (or other flashes with |
253 | if (!buf) | 253 | * !jffs2_can_mark_obsolete()), since on NOR flashes |
254 | return -ENOMEM; | 254 | * nodes are marked obsolete physically. |
255 | 255 | * | |
256 | err = jffs2_flash_read(c, ref_offset(ref) + sizeof(*rd), je32_to_cpu(rd->csize), | 256 | * Since NAND flashes (or other flashes with |
257 | &read, buf); | 257 | * jffs2_is_writebuffered(c)) are anyway read by |
258 | if (unlikely(read != je32_to_cpu(rd->csize)) && likely(!err)) | 258 | * fractions of c->wbuf_pagesize, and we have just read |
259 | err = -EIO; | 259 | * the node header, it is likely that the starting part |
260 | if (err) { | 260 | * of the node data is also read when we read the |
261 | kfree(buf); | 261 | * header. So we don't mind to check the CRC of the |
262 | return err; | 262 | * starting part of the data of the node now, and check |
263 | } | 263 | * the second part later (in jffs2_check_node_data()). |
264 | } | 264 | * Of course, we will not need to re-read and re-check |
265 | crc = crc32(0, buf, je32_to_cpu(rd->csize)); | 265 | * the NAND page which we have just read. This is why we |
266 | if(!pointed) | 266 | * read the whole NAND page at jffs2_get_inode_nodes(), |
267 | kfree(buf); | 267 | * while we needed only the node header. |
268 | #ifndef __ECOS | 268 | */ |
269 | else | 269 | unsigned char *buf; |
270 | c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(*rd), je32_to_cpu(rd->csize)); | 270 | |
271 | #endif | 271 | /* 'buf' will point to the start of data */ |
272 | 272 | buf = (unsigned char *)rd + sizeof(*rd); | |
273 | if (crc != je32_to_cpu(rd->data_crc)) { | 273 | /* len will be the read data length */ |
274 | JFFS2_NOTICE("data CRC failed on node at %#08x: read %#08x, calculated %#08x\n", | 274 | len = min_t(uint32_t, rdlen - sizeof(*rd), csize); |
275 | ref_offset(ref), je32_to_cpu(rd->data_crc), crc); | ||
276 | return 1; | ||
277 | } | ||
278 | 275 | ||
279 | } | 276 | if (len) |
280 | 277 | tn->partial_crc = crc = crc32(0, buf, len); | |
281 | /* Mark the node as having been checked and fix the accounting accordingly */ | 278 | |
282 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | 279 | /* If we actually calculated the whole data CRC |
283 | len = ref_totlen(c, jeb, ref); | 280 | * and it is wrong, drop the node. */ |
284 | 281 | if (unlikely(tn->partial_crc | |
285 | spin_lock(&c->erase_completion_lock); | 282 | != je32_to_cpu(rd->data_crc)) && |
286 | jeb->used_size += len; | 283 | len == csize) |
287 | jeb->unchecked_size -= len; | 284 | goto free_out; |
288 | c->used_size += len; | ||
289 | c->unchecked_size -= len; | ||
290 | |||
291 | /* If node covers at least a whole page, or if it starts at the | ||
292 | beginning of a page and runs to the end of the file, or if | ||
293 | it's a hole node, mark it REF_PRISTINE, else REF_NORMAL. | ||
294 | 285 | ||
295 | If it's actually overlapped, it'll get made NORMAL (or OBSOLETE) | 286 | } else if (csize == 0) { |
296 | when the overlapping node(s) get added to the tree anyway. | 287 | /* |
297 | */ | 288 | * We checked the header CRC. If the node has no data, adjust |
298 | if ((je32_to_cpu(rd->dsize) >= PAGE_CACHE_SIZE) || | 289 | * the space accounting now. For other nodes this will be done |
299 | ( ((je32_to_cpu(rd->offset) & (PAGE_CACHE_SIZE-1))==0) && | 290 | * later either when the node is marked obsolete or when its |
300 | (je32_to_cpu(rd->dsize) + je32_to_cpu(rd->offset) == je32_to_cpu(rd->isize)))) { | 291 | * data is checked. |
301 | JFFS2_DBG_READINODE("marking node at %#08x REF_PRISTINE\n", ref_offset(ref)); | 292 | */ |
302 | ref->flash_offset = ref_offset(ref) | REF_PRISTINE; | 293 | struct jffs2_eraseblock *jeb; |
303 | } else { | 294 | |
304 | JFFS2_DBG_READINODE("marking node at %#08x REF_NORMAL\n", ref_offset(ref)); | 295 | JFFS2_DBG_READINODE("the node has no data.\n"); |
296 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | ||
297 | len = ref_totlen(c, jeb, ref); | ||
298 | |||
299 | spin_lock(&c->erase_completion_lock); | ||
300 | jeb->used_size += len; | ||
301 | jeb->unchecked_size -= len; | ||
302 | c->used_size += len; | ||
303 | c->unchecked_size -= len; | ||
305 | ref->flash_offset = ref_offset(ref) | REF_NORMAL; | 304 | ref->flash_offset = ref_offset(ref) | REF_NORMAL; |
305 | spin_unlock(&c->erase_completion_lock); | ||
306 | } | 306 | } |
307 | spin_unlock(&c->erase_completion_lock); | ||
308 | } | ||
309 | |||
310 | tn = jffs2_alloc_tmp_dnode_info(); | ||
311 | if (!tn) { | ||
312 | JFFS2_ERROR("alloc tn failed\n"); | ||
313 | return -ENOMEM; | ||
314 | } | 307 | } |
315 | 308 | ||
316 | tn->fn = jffs2_alloc_full_dnode(); | 309 | tn->fn = jffs2_alloc_full_dnode(); |
317 | if (!tn->fn) { | 310 | if (!tn->fn) { |
318 | JFFS2_ERROR("alloc fn failed\n"); | 311 | JFFS2_ERROR("alloc fn failed\n"); |
319 | jffs2_free_tmp_dnode_info(tn); | 312 | ret = -ENOMEM; |
320 | return -ENOMEM; | 313 | goto free_out; |
321 | } | 314 | } |
322 | 315 | ||
323 | tn->version = je32_to_cpu(rd->version); | 316 | tn->version = je32_to_cpu(rd->version); |
324 | tn->fn->ofs = je32_to_cpu(rd->offset); | 317 | tn->fn->ofs = je32_to_cpu(rd->offset); |
318 | tn->data_crc = je32_to_cpu(rd->data_crc); | ||
319 | tn->csize = csize; | ||
325 | tn->fn->raw = ref; | 320 | tn->fn->raw = ref; |
326 | 321 | ||
327 | /* There was a bug where we wrote hole nodes out with | 322 | /* There was a bug where we wrote hole nodes out with |
328 | csize/dsize swapped. Deal with it */ | 323 | csize/dsize swapped. Deal with it */ |
329 | if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && je32_to_cpu(rd->csize)) | 324 | if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize) |
330 | tn->fn->size = je32_to_cpu(rd->csize); | 325 | tn->fn->size = csize; |
331 | else // normal case... | 326 | else // normal case... |
332 | tn->fn->size = je32_to_cpu(rd->dsize); | 327 | tn->fn->size = je32_to_cpu(rd->dsize); |
333 | 328 | ||
@@ -337,6 +332,10 @@ read_dnode(struct jffs2_sb_info *c, | |||
337 | jffs2_add_tn_to_tree(tn, tnp); | 332 | jffs2_add_tn_to_tree(tn, tnp); |
338 | 333 | ||
339 | return 0; | 334 | return 0; |
335 | |||
336 | free_out: | ||
337 | jffs2_free_tmp_dnode_info(tn); | ||
338 | return ret; | ||
340 | } | 339 | } |
341 | 340 | ||
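For illustration only (not part of the patch): the split CRC check in read_dnode() and the deferred check in check_node_data() rely on CRC32 being continuable across reads, e.g. with the kernel's crc32(seed, buf, len):

/* The CRC of a whole buffer equals the CRC of its first part continued
 * over the second part, for any split point n. */
uint32_t whole = crc32(0, data, len);
uint32_t split = crc32(crc32(0, data, n), data + n, len - n);
/* whole == split -- so the payload bytes that arrived with the header are
 * folded into tn->partial_crc now, and the tail is folded in later, only
 * if the node turns out to still matter. */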
342 | /* | 341 | /* |
@@ -347,11 +346,7 @@ read_dnode(struct jffs2_sb_info *c, | |||
347 | * 1 if the node should be marked obsolete; | 346 | * 1 if the node should be marked obsolete; |
348 | * negative error code on failure. | 347 | * negative error code on failure. |
349 | */ | 348 | */ |
350 | static inline int | 349 | static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un) |
351 | read_unknown(struct jffs2_sb_info *c, | ||
352 | struct jffs2_raw_node_ref *ref, | ||
353 | struct jffs2_unknown_node *un, | ||
354 | uint32_t read) | ||
355 | { | 350 | { |
356 | /* We don't mark unknown nodes as REF_UNCHECKED */ | 351 | /* We don't mark unknown nodes as REF_UNCHECKED */ |
357 | BUG_ON(ref_flags(ref) == REF_UNCHECKED); | 352 | BUG_ON(ref_flags(ref) == REF_UNCHECKED); |
@@ -394,9 +389,62 @@ read_unknown(struct jffs2_sb_info *c, | |||
394 | return 0; | 389 | return 0; |
395 | } | 390 | } |
396 | 391 | ||
392 | /* | ||
393 | * Helper function for jffs2_get_inode_nodes(). | ||
394 | * The function detects whether more data should be read and reads it if yes. | ||
395 | * | ||
396 | * Returns: 0 on success; | ||
397 | * negative error code on failure. | ||
398 | */ | ||
399 | static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, | ||
400 | int right_size, int *rdlen, unsigned char *buf, unsigned char *bufstart) | ||
401 | { | ||
402 | int right_len, err, len; | ||
403 | size_t retlen; | ||
404 | uint32_t offs; | ||
405 | |||
406 | if (jffs2_is_writebuffered(c)) { | ||
407 | right_len = c->wbuf_pagesize - (bufstart - buf); | ||
408 | if (right_size + (int)(bufstart - buf) > c->wbuf_pagesize) | ||
409 | right_len += c->wbuf_pagesize; | ||
410 | } else | ||
411 | right_len = right_size; | ||
412 | |||
413 | if (*rdlen == right_len) | ||
414 | return 0; | ||
415 | |||
416 | /* We need to read more data */ | ||
417 | offs = ref_offset(ref) + *rdlen; | ||
418 | if (jffs2_is_writebuffered(c)) { | ||
419 | bufstart = buf + c->wbuf_pagesize; | ||
420 | len = c->wbuf_pagesize; | ||
421 | } else { | ||
422 | bufstart = buf + *rdlen; | ||
423 | len = right_size - *rdlen; | ||
424 | } | ||
425 | |||
426 | JFFS2_DBG_READINODE("read more %d bytes.", len); | ||
427 | |||
428 | err = jffs2_flash_read(c, offs, len, &retlen, bufstart); | ||
429 | if (err) { | ||
430 | JFFS2_ERROR("can not read %d bytes from 0x%08x, " | ||
431 | "error code: %d.\n", len, offs, err); | ||
432 | return err; | ||
433 | } | ||
434 | |||
435 | if (retlen < len) { | ||
436 | JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", | ||
437 | offs, retlen, len); | ||
438 | return -EIO; | ||
439 | } | ||
440 | |||
441 | *rdlen = right_len; | ||
442 | |||
443 | return 0; | ||
444 | } | ||
445 | |||
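A hypothetical worked example for read_more() on write-buffered flash (illustration only; all numbers invented):

/*
 * wbuf_pagesize = 512 and the node starts 460 bytes into a page:
 *
 *   - the initial read in jffs2_get_inode_nodes() fetched
 *     512 - 460 = 52 bytes (the minimal header still fits in that page);
 *   - the node turns out to be a data node, and sizeof(struct
 *     jffs2_raw_inode) does not fit in those 52 bytes, so
 *     right_size + 460 > 512 and right_len becomes 52 + 512 = 564;
 *   - since *rdlen (52) != right_len (564), one more full wbuf page is
 *     read at ref_offset(ref) + 52 into buf + 512, and *rdlen becomes 564.
 *
 * Without a write buffer the helper simply tops the buffer up to
 * right_size bytes.
 */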
397 | /* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated | 446 | /* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated |
398 | with this ino, returning the former in order of version */ | 447 | with this ino, returning the former in order of version */ |
399 | |||
400 | static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | 448 | static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f, |
401 | struct rb_root *tnp, struct jffs2_full_dirent **fdp, | 449 | struct rb_root *tnp, struct jffs2_full_dirent **fdp, |
402 | uint32_t *highest_version, uint32_t *latest_mctime, | 450 | uint32_t *highest_version, uint32_t *latest_mctime, |
@@ -405,22 +453,47 @@ static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_inf | |||
405 | struct jffs2_raw_node_ref *ref, *valid_ref; | 453 | struct jffs2_raw_node_ref *ref, *valid_ref; |
406 | struct rb_root ret_tn = RB_ROOT; | 454 | struct rb_root ret_tn = RB_ROOT; |
407 | struct jffs2_full_dirent *ret_fd = NULL; | 455 | struct jffs2_full_dirent *ret_fd = NULL; |
408 | union jffs2_node_union node; | 456 | unsigned char *buf = NULL; |
457 | union jffs2_node_union *node; | ||
409 | size_t retlen; | 458 | size_t retlen; |
410 | int err; | 459 | int len, err; |
411 | 460 | ||
412 | *mctime_ver = 0; | 461 | *mctime_ver = 0; |
413 | 462 | ||
414 | JFFS2_DBG_READINODE("ino #%u\n", f->inocache->ino); | 463 | JFFS2_DBG_READINODE("ino #%u\n", f->inocache->ino); |
415 | 464 | ||
416 | spin_lock(&c->erase_completion_lock); | 465 | if (jffs2_is_writebuffered(c)) { |
466 | /* | ||
467 | * If we have the write buffer, we assume the minimal I/O unit | ||
468 | * is c->wbuf_pagesize. We implement some optimizations in | ||
469 | * this case and need a temporary buffer of size = | ||
470 | * 2*c->wbuf_pagesize bytes (see comments in read_dnode()). | ||
471 | * Basically, we want to read not only the node header, but the | ||
472 | * whole wbuf (NAND page in case of NAND) or 2, if the node | ||
473 | * header overlaps the border between the 2 wbufs. | ||
474 | */ | ||
475 | len = 2*c->wbuf_pagesize; | ||
476 | } else { | ||
477 | /* | ||
478 | * When there is no write buffer, the size of the temporary | ||
479 | * buffer is the size of the largest node header. | ||
480 | */ | ||
481 | len = sizeof(union jffs2_node_union); | ||
482 | } | ||
417 | 483 | ||
484 | /* FIXME: in case of NOR and available ->point() this | ||
485 | * needs to be fixed. */ | ||
486 | buf = kmalloc(len, GFP_KERNEL); | ||
487 | if (!buf) | ||
488 | return -ENOMEM; | ||
489 | |||
490 | spin_lock(&c->erase_completion_lock); | ||
418 | valid_ref = jffs2_first_valid_node(f->inocache->nodes); | 491 | valid_ref = jffs2_first_valid_node(f->inocache->nodes); |
419 | 492 | if (!valid_ref && f->inocache->ino != 1) | |
420 | if (!valid_ref && (f->inocache->ino != 1)) | 493 | JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino); |
421 | JFFS2_WARNING("no valid nodes for ino #%u\n", f->inocache->ino); | ||
422 | |||
423 | while (valid_ref) { | 494 | while (valid_ref) { |
495 | unsigned char *bufstart; | ||
496 | |||
424 | /* We can hold a pointer to a non-obsolete node without the spinlock, | 497 | /* We can hold a pointer to a non-obsolete node without the spinlock, |
425 | but _obsolete_ nodes may disappear at any time, if the block | 498 | but _obsolete_ nodes may disappear at any time, if the block |
426 | they're in gets erased. So if we mark 'ref' obsolete while we're | 499 | they're in gets erased. So if we mark 'ref' obsolete while we're |
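The scratch-buffer sizing earlier in this hunk amounts to a one-line rule: two wbuf pages when a write buffer is in use (a header may straddle a page boundary), otherwise just the largest node header. As a sketch, assuming the JFFS2 internal headers are available and mirroring the int 'len' used in the function (no such helper exists in the patch):

/*
 * Sketch only: the size of the scratch buffer that
 * jffs2_get_inode_nodes() allocates with kmalloc().
 */
static int scratch_buf_len(struct jffs2_sb_info *c)
{
        if (jffs2_is_writebuffered(c))
                return 2 * c->wbuf_pagesize;    /* header may straddle a wbuf boundary */

        return sizeof(union jffs2_node_union); /* largest possible node header */
}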
@@ -433,70 +506,100 @@ static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_inf | |||
433 | 506 | ||
434 | cond_resched(); | 507 | cond_resched(); |
435 | 508 | ||
509 | /* | ||
510 | * At this point we don't know the type of the node we're going | ||
511 | * to read, so we do not know the size of its header. In order | ||
512 | * to minimize the amount of flash IO we assume the node has | ||
513 | * size = JFFS2_MIN_NODE_HEADER. | ||
514 | */ | ||
515 | if (jffs2_is_writebuffered(c)) { | ||
516 | /* | ||
517 | * We treat 'buf' as 2 adjacent wbufs. We want to | ||
518 | * adjust bufstart so that it points to the | ||
519 | * beginning of the node within this wbuf. | ||
520 | */ | ||
521 | bufstart = buf + (ref_offset(ref) % c->wbuf_pagesize); | ||
522 | /* We will read either one wbuf or 2 wbufs. */ | ||
523 | len = c->wbuf_pagesize - (bufstart - buf); | ||
524 | if (JFFS2_MIN_NODE_HEADER + | ||
525 | (int)(bufstart - buf) > c->wbuf_pagesize) { | ||
526 | /* The header spans the border of the | ||
527 | * first wbuf */ | ||
528 | len += c->wbuf_pagesize; | ||
529 | } | ||
530 | } else { | ||
531 | bufstart = buf; | ||
532 | len = JFFS2_MIN_NODE_HEADER; | ||
533 | } | ||
534 | |||
535 | JFFS2_DBG_READINODE("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref)); | ||
536 | |||
436 | /* FIXME: point() */ | 537 | /* FIXME: point() */ |
437 | err = jffs2_flash_read(c, (ref_offset(ref)), | 538 | err = jffs2_flash_read(c, ref_offset(ref), len, |
438 | min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node)), | 539 | &retlen, bufstart); |
439 | &retlen, (void *)&node); | ||
440 | if (err) { | 540 | if (err) { |
441 | JFFS2_ERROR("error %d reading node at 0x%08x in get_inode_nodes()\n", err, ref_offset(ref)); | 541 | JFFS2_ERROR("can not read %d bytes from 0x%08x, " "error code: %d.\n", len, ref_offset(ref), err); |
542 | goto free_out; | ||
543 | } | ||
544 | |||
545 | if (retlen < len) { | ||
546 | JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ref_offset(ref), retlen, len); | ||
547 | err = -EIO; | ||
442 | goto free_out; | 548 | goto free_out; |
443 | } | 549 | } |
550 | |||
551 | node = (union jffs2_node_union *)bufstart; | ||
444 | 552 | ||
445 | switch (je16_to_cpu(node.u.nodetype)) { | 553 | switch (je16_to_cpu(node->u.nodetype)) { |
446 | 554 | ||
447 | case JFFS2_NODETYPE_DIRENT: | 555 | case JFFS2_NODETYPE_DIRENT: |
448 | JFFS2_DBG_READINODE("node at %08x (%d) is a dirent node\n", ref_offset(ref), ref_flags(ref)); | ||
449 | |||
450 | if (retlen < sizeof(node.d)) { | ||
451 | JFFS2_ERROR("short read dirent at %#08x\n", ref_offset(ref)); | ||
452 | err = -EIO; | ||
453 | goto free_out; | ||
454 | } | ||
455 | 556 | ||
456 | err = read_direntry(c, ref, &node.d, retlen, &ret_fd, latest_mctime, mctime_ver); | 557 | if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent)) { |
558 | err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf, bufstart); | ||
559 | if (unlikely(err)) | ||
560 | goto free_out; | ||
561 | } | ||
562 | |||
563 | err = read_direntry(c, ref, &node->d, retlen, &ret_fd, latest_mctime, mctime_ver); | ||
457 | if (err == 1) { | 564 | if (err == 1) { |
458 | jffs2_mark_node_obsolete(c, ref); | 565 | jffs2_mark_node_obsolete(c, ref); |
459 | break; | 566 | break; |
460 | } else if (unlikely(err)) | 567 | } else if (unlikely(err)) |
461 | goto free_out; | 568 | goto free_out; |
462 | 569 | ||
463 | if (je32_to_cpu(node.d.version) > *highest_version) | 570 | if (je32_to_cpu(node->d.version) > *highest_version) |
464 | *highest_version = je32_to_cpu(node.d.version); | 571 | *highest_version = je32_to_cpu(node->d.version); |
465 | 572 | ||
466 | break; | 573 | break; |
467 | 574 | ||
468 | case JFFS2_NODETYPE_INODE: | 575 | case JFFS2_NODETYPE_INODE: |
469 | JFFS2_DBG_READINODE("node at %08x (%d) is a data node\n", ref_offset(ref), ref_flags(ref)); | ||
470 | 576 | ||
471 | if (retlen < sizeof(node.i)) { | 577 | if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode)) { |
472 | JFFS2_ERROR("short read dnode at %#08x\n", ref_offset(ref)); | 578 | err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf, bufstart); |
473 | err = -EIO; | 579 | if (unlikely(err)) |
474 | goto free_out; | 580 | goto free_out; |
475 | } | 581 | } |
476 | 582 | ||
477 | err = read_dnode(c, ref, &node.i, retlen, &ret_tn, latest_mctime, mctime_ver); | 583 | err = read_dnode(c, ref, &node->i, &ret_tn, len, latest_mctime, mctime_ver); |
478 | if (err == 1) { | 584 | if (err == 1) { |
479 | jffs2_mark_node_obsolete(c, ref); | 585 | jffs2_mark_node_obsolete(c, ref); |
480 | break; | 586 | break; |
481 | } else if (unlikely(err)) | 587 | } else if (unlikely(err)) |
482 | goto free_out; | 588 | goto free_out; |
483 | 589 | ||
484 | if (je32_to_cpu(node.i.version) > *highest_version) | 590 | if (je32_to_cpu(node->i.version) > *highest_version) |
485 | *highest_version = je32_to_cpu(node.i.version); | 591 | *highest_version = je32_to_cpu(node->i.version); |
486 | 592 | ||
487 | JFFS2_DBG_READINODE("version %d, highest_version now %d\n", | ||
488 | je32_to_cpu(node.i.version), *highest_version); | ||
489 | |||
490 | break; | 593 | break; |
491 | 594 | ||
492 | default: | 595 | default: |
493 | /* Check we've managed to read at least the common node header */ | 596 | if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node)) { |
494 | if (retlen < sizeof(struct jffs2_unknown_node)) { | 597 | err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf, bufstart); |
495 | JFFS2_ERROR("short read unknown node at %#08x\n", ref_offset(ref)); | 598 | if (unlikely(err)) |
496 | return -EIO; | 599 | goto free_out; |
497 | } | 600 | } |
498 | 601 | ||
499 | err = read_unknown(c, ref, &node.u, retlen); | 602 | err = read_unknown(c, ref, &node->u); |
500 | if (err == 1) { | 603 | if (err == 1) { |
501 | jffs2_mark_node_obsolete(c, ref); | 604 | jffs2_mark_node_obsolete(c, ref); |
502 | break; | 605 | break; |
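The loop above follows a speculative-read pattern: JFFS2_MIN_NODE_HEADER bytes (or the wbuf page containing them) are read before the node type is known, the common header is examined, and only then is the window widened with read_more() to the full header of whichever type was found. Condensed into a single hypothetical helper, as if it lived next to read_more() in readinode.c (the real loop additionally tracks bufstart inside the two-wbuf scratch buffer, skips read_more() when JFFS2_MIN_NODE_HEADER already covers the header, and handles obsolete nodes), the flow is:

/* Sketch only; not part of the patch. */
static int read_node_header(struct jffs2_sb_info *c,
                            struct jffs2_raw_node_ref *ref,
                            unsigned char *buf, unsigned char *bufstart,
                            int *len)
{
        union jffs2_node_union *node;
        size_t retlen;
        int err;

        /* Speculative read: only *len bytes, chosen before the node type
         * is known. */
        err = jffs2_flash_read(c, ref_offset(ref), *len, &retlen, bufstart);
        if (err)
                return err;
        if (retlen < *len)
                return -EIO;

        node = (union jffs2_node_union *)bufstart;

        /* Widen the window to the full header of the type we found. */
        switch (je16_to_cpu(node->u.nodetype)) {
        case JFFS2_NODETYPE_DIRENT:
                return read_more(c, ref, sizeof(struct jffs2_raw_dirent),
                                 len, buf, bufstart);
        case JFFS2_NODETYPE_INODE:
                return read_more(c, ref, sizeof(struct jffs2_raw_inode),
                                 len, buf, bufstart);
        default:
                return read_more(c, ref, sizeof(struct jffs2_unknown_node),
                                 len, buf, bufstart);
        }
}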
@@ -505,17 +608,21 @@ static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_inf | |||
505 | 608 | ||
506 | } | 609 | } |
507 | spin_lock(&c->erase_completion_lock); | 610 | spin_lock(&c->erase_completion_lock); |
508 | |||
509 | } | 611 | } |
612 | |||
510 | spin_unlock(&c->erase_completion_lock); | 613 | spin_unlock(&c->erase_completion_lock); |
511 | *tnp = ret_tn; | 614 | *tnp = ret_tn; |
512 | *fdp = ret_fd; | 615 | *fdp = ret_fd; |
616 | kfree(buf); | ||
513 | 617 | ||
618 | JFFS2_DBG_READINODE("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n", | ||
619 | f->inocache->ino, *highest_version, *latest_mctime, *mctime_ver); | ||
514 | return 0; | 620 | return 0; |
515 | 621 | ||
516 | free_out: | 622 | free_out: |
517 | jffs2_free_tmp_dnode_info_list(&ret_tn); | 623 | jffs2_free_tmp_dnode_info_list(&ret_tn); |
518 | jffs2_free_full_dirent_list(ret_fd); | 624 | jffs2_free_full_dirent_list(ret_fd); |
625 | kfree(buf); | ||
519 | return err; | 626 | return err; |
520 | } | 627 | } |
521 | 628 | ||
@@ -523,14 +630,13 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
523 | struct jffs2_inode_info *f, | 630 | struct jffs2_inode_info *f, |
524 | struct jffs2_raw_inode *latest_node) | 631 | struct jffs2_raw_inode *latest_node) |
525 | { | 632 | { |
526 | struct jffs2_tmp_dnode_info *tn = NULL; | 633 | struct jffs2_tmp_dnode_info *tn; |
527 | struct rb_root tn_list; | 634 | struct rb_root tn_list; |
528 | struct rb_node *rb, *repl_rb; | 635 | struct rb_node *rb, *repl_rb; |
529 | struct jffs2_full_dirent *fd_list; | 636 | struct jffs2_full_dirent *fd_list; |
530 | struct jffs2_full_dnode *fn = NULL; | 637 | struct jffs2_full_dnode *fn, *first_fn = NULL; |
531 | uint32_t crc; | 638 | uint32_t crc; |
532 | uint32_t latest_mctime, mctime_ver; | 639 | uint32_t latest_mctime, mctime_ver; |
533 | uint32_t mdata_ver = 0; | ||
534 | size_t retlen; | 640 | size_t retlen; |
535 | int ret; | 641 | int ret; |
536 | 642 | ||
@@ -550,42 +656,33 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
550 | rb = rb_first(&tn_list); | 656 | rb = rb_first(&tn_list); |
551 | 657 | ||
552 | while (rb) { | 658 | while (rb) { |
659 | cond_resched(); | ||
553 | tn = rb_entry(rb, struct jffs2_tmp_dnode_info, rb); | 660 | tn = rb_entry(rb, struct jffs2_tmp_dnode_info, rb); |
554 | fn = tn->fn; | 661 | fn = tn->fn; |
555 | 662 | ret = 1; | |
556 | if (f->metadata) { | 663 | JFFS2_DBG_READINODE("consider node ver %u, phys offset " |
557 | if (likely(tn->version >= mdata_ver)) { | 664 | "%#08x(%d), range %u-%u.\n", tn->version, |
558 | JFFS2_DBG_READINODE("obsoleting old metadata at 0x%08x\n", ref_offset(f->metadata->raw)); | 665 | ref_offset(fn->raw), ref_flags(fn->raw), |
559 | jffs2_mark_node_obsolete(c, f->metadata->raw); | 666 | fn->ofs, fn->ofs + fn->size); |
560 | jffs2_free_full_dnode(f->metadata); | ||
561 | f->metadata = NULL; | ||
562 | |||
563 | mdata_ver = 0; | ||
564 | } else { | ||
565 | /* This should never happen. */ | ||
566 | JFFS2_ERROR("Er. New metadata at 0x%08x with ver %d is actually older than previous ver %d at 0x%08x\n", | ||
567 | ref_offset(fn->raw), tn->version, mdata_ver, ref_offset(f->metadata->raw)); | ||
568 | jffs2_mark_node_obsolete(c, fn->raw); | ||
569 | jffs2_free_full_dnode(fn); | ||
570 | /* Fill in latest_node from the metadata, not this one we're about to free... */ | ||
571 | fn = f->metadata; | ||
572 | goto next_tn; | ||
573 | } | ||
574 | } | ||
575 | 667 | ||
576 | if (fn->size) { | 668 | if (fn->size) { |
577 | jffs2_add_full_dnode_to_inode(c, f, fn); | 669 | ret = jffs2_add_older_frag_to_fragtree(c, f, tn); |
578 | } else { | 670 | /* TODO: the error code isn't checked, check it */ |
579 | /* Zero-sized node at end of version list. Just a metadata update */ | 671 | jffs2_dbg_fragtree_paranoia_check_nolock(f); |
580 | JFFS2_DBG_READINODE("metadata @%08x: ver %d\n", ref_offset(fn->raw), tn->version); | 672 | BUG_ON(ret < 0); |
673 | if (!first_fn && ret == 0) | ||
674 | first_fn = fn; | ||
675 | } else if (!first_fn) { | ||
676 | first_fn = fn; | ||
581 | f->metadata = fn; | 677 | f->metadata = fn; |
582 | mdata_ver = tn->version; | 678 | ret = 0; /* Prevent freeing the metadata update node */ |
583 | } | 679 | } else |
584 | next_tn: | 680 | jffs2_mark_node_obsolete(c, fn->raw); |
681 | |||
585 | BUG_ON(rb->rb_left); | 682 | BUG_ON(rb->rb_left); |
586 | if (rb->rb_parent && rb->rb_parent->rb_left == rb) { | 683 | if (rb->rb_parent && rb->rb_parent->rb_left == rb) { |
587 | /* We were then left-hand child of our parent. We need | 684 | /* We were then left-hand child of our parent. We need |
588 | to move our own right-hand child into our place. */ | 685 | * to move our own right-hand child into our place. */ |
589 | repl_rb = rb->rb_right; | 686 | repl_rb = rb->rb_right; |
590 | if (repl_rb) | 687 | if (repl_rb) |
591 | repl_rb->rb_parent = rb->rb_parent; | 688 | repl_rb->rb_parent = rb->rb_parent; |
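The per-node decision made in this loop has three outcomes: a data-carrying node is merged into the fragment tree (and kept if any of it survived), the first zero-sized node encountered becomes f->metadata, and any later zero-sized node is simply obsoleted. Restated as a hypothetical helper, with the return value keeping the same meaning as 'ret' above (non-zero means the full_dnode gets freed once its tmp_dnode_info is released):

/*
 * Paraphrase of the decision above; no such helper exists in the patch,
 * and the fragtree paranoia check is omitted for brevity.
 */
static int handle_tmp_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
                            struct jffs2_tmp_dnode_info *tn,
                            struct jffs2_full_dnode **first_fn)
{
        struct jffs2_full_dnode *fn = tn->fn;
        int ret;

        if (fn->size) {
                /* Data node: merge it into the fragment tree.  A positive
                 * return means nothing of it remains in the tree, so the
                 * caller may free it. */
                ret = jffs2_add_older_frag_to_fragtree(c, f, tn);
                BUG_ON(ret < 0);
                if (!*first_fn && ret == 0)
                        *first_fn = fn;
                return ret;
        }

        if (!*first_fn) {
                /* First zero-sized node seen: a pure metadata update to keep. */
                *first_fn = fn;
                f->metadata = fn;
                return 0;
        }

        /* A later zero-sized node carries nothing we still need. */
        jffs2_mark_node_obsolete(c, fn->raw);
        return 1;
}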
@@ -595,7 +692,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
595 | rb = rb_next(rb); | 692 | rb = rb_next(rb); |
596 | 693 | ||
597 | /* Remove the spent tn from the tree; don't bother rebalancing | 694 | /* Remove the spent tn from the tree; don't bother rebalancing |
598 | but put our right-hand child in our own place. */ | 695 | * but put our right-hand child in our own place. */ |
599 | if (tn->rb.rb_parent) { | 696 | if (tn->rb.rb_parent) { |
600 | if (tn->rb.rb_parent->rb_left == &tn->rb) | 697 | if (tn->rb.rb_parent->rb_left == &tn->rb) |
601 | tn->rb.rb_parent->rb_left = repl_rb; | 698 | tn->rb.rb_parent->rb_left = repl_rb; |
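The tree surgery spread across these hunks relies on one property of rb_first(): the node it returns has no left child (hence the BUG_ON above), so it can be spliced out by promoting its right-hand child into its place, and because the tn tree is only walked once and then discarded, it does not matter that the red-black balance invariants are no longer maintained. A generic sketch of that splice, using the old rb_node layout with an explicit ->rb_parent pointer as in this kernel (the in-place code above differs slightly because it never needs to touch the root pointer again):

/* Sketch only: splice out the leftmost node of an rb tree without
 * rebalancing.  Safe only if the tree will not be searched afterwards. */
static void splice_out_leftmost(struct rb_root *root, struct rb_node *node)
{
        struct rb_node *repl = node->rb_right;

        BUG_ON(node->rb_left);          /* leftmost node has no left child */

        if (repl)
                repl->rb_parent = node->rb_parent;

        if (!node->rb_parent)
                root->rb_node = repl;   /* we were the root */
        else if (node->rb_parent->rb_left == node)
                node->rb_parent->rb_left = repl;
        else
                node->rb_parent->rb_right = repl;
}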
@@ -606,10 +703,18 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
606 | tn->rb.rb_right->rb_parent = NULL; | 703 | tn->rb.rb_right->rb_parent = NULL; |
607 | 704 | ||
608 | jffs2_free_tmp_dnode_info(tn); | 705 | jffs2_free_tmp_dnode_info(tn); |
706 | if (ret) { | ||
707 | JFFS2_DBG_READINODE("delete dnode %u-%u.\n", | ||
708 | fn->ofs, fn->ofs + fn->size); | ||
709 | jffs2_free_full_dnode(fn); | ||
710 | } | ||
609 | } | 711 | } |
610 | jffs2_dbg_fragtree_paranoia_check_nolock(f); | 712 | jffs2_dbg_fragtree_paranoia_check_nolock(f); |
611 | 713 | ||
612 | if (!fn) { | 714 | BUG_ON(first_fn && ref_obsolete(first_fn->raw)); |
715 | |||
716 | fn = first_fn; | ||
717 | if (unlikely(!first_fn)) { | ||
613 | /* No data nodes for this inode. */ | 718 | /* No data nodes for this inode. */ |
614 | if (f->inocache->ino != 1) { | 719 | if (f->inocache->ino != 1) { |
615 | JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino); | 720 | JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino); |