Diffstat (limited to 'fs/jffs2/nodelist.c')
-rw-r--r-- | fs/jffs2/nodelist.c | 1226 |
1 files changed, 773 insertions, 453 deletions
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c index 4991c348f6ec..c79eebb8ab32 100644 --- a/fs/jffs2/nodelist.c +++ b/fs/jffs2/nodelist.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * For licensing information, see the file 'LICENCE' in this directory. | 8 | * For licensing information, see the file 'LICENCE' in this directory. |
9 | * | 9 | * |
10 | * $Id: nodelist.c,v 1.98 2005/07/10 15:15:32 dedekind Exp $ | 10 | * $Id: nodelist.c,v 1.115 2005/11/07 11:14:40 gleixner Exp $ |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
@@ -24,469 +24,832 @@ | |||
24 | void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list) | 24 | void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list) |
25 | { | 25 | { |
26 | struct jffs2_full_dirent **prev = list; | 26 | struct jffs2_full_dirent **prev = list; |
27 | D1(printk(KERN_DEBUG "jffs2_add_fd_to_list( %p, %p (->%p))\n", new, list, *list)); | 27 | |
28 | dbg_dentlist("add dirent \"%s\", ino #%u\n", new->name, new->ino); | ||
28 | 29 | ||
29 | while ((*prev) && (*prev)->nhash <= new->nhash) { | 30 | while ((*prev) && (*prev)->nhash <= new->nhash) { |
30 | if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) { | 31 | if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) { |
31 | /* Duplicate. Free one */ | 32 | /* Duplicate. Free one */ |
32 | if (new->version < (*prev)->version) { | 33 | if (new->version < (*prev)->version) { |
33 | D1(printk(KERN_DEBUG "Eep! Marking new dirent node obsolete\n")); | 34 | dbg_dentlist("Eep! Marking new dirent node obsolete, old is \"%s\", ino #%u\n", |
34 | D1(printk(KERN_DEBUG "New dirent is \"%s\"->ino #%u. Old is \"%s\"->ino #%u\n", new->name, new->ino, (*prev)->name, (*prev)->ino)); | 35 | (*prev)->name, (*prev)->ino); |
35 | jffs2_mark_node_obsolete(c, new->raw); | 36 | jffs2_mark_node_obsolete(c, new->raw); |
36 | jffs2_free_full_dirent(new); | 37 | jffs2_free_full_dirent(new); |
37 | } else { | 38 | } else { |
38 | D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) obsolete\n", (*prev)->ino)); | 39 | dbg_dentlist("marking old dirent \"%s\", ino #%u obsolete\n", |
40 | (*prev)->name, (*prev)->ino); | ||
39 | new->next = (*prev)->next; | 41 | new->next = (*prev)->next; |
40 | jffs2_mark_node_obsolete(c, ((*prev)->raw)); | 42 | jffs2_mark_node_obsolete(c, ((*prev)->raw)); |
41 | jffs2_free_full_dirent(*prev); | 43 | jffs2_free_full_dirent(*prev); |
42 | *prev = new; | 44 | *prev = new; |
43 | } | 45 | } |
44 | goto out; | 46 | return; |
45 | } | 47 | } |
46 | prev = &((*prev)->next); | 48 | prev = &((*prev)->next); |
47 | } | 49 | } |
48 | new->next = *prev; | 50 | new->next = *prev; |
49 | *prev = new; | 51 | *prev = new; |
52 | } | ||
53 | |||
54 | void jffs2_truncate_fragtree(struct jffs2_sb_info *c, struct rb_root *list, uint32_t size) | ||
55 | { | ||
56 | struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size); | ||
57 | |||
58 | dbg_fragtree("truncating fragtree to 0x%08x bytes\n", size); | ||
59 | |||
60 | /* We know frag->ofs <= size. That's what lookup does for us */ | ||
61 | if (frag && frag->ofs != size) { | ||
62 | if (frag->ofs+frag->size > size) { | ||
63 | frag->size = size - frag->ofs; | ||
64 | } | ||
65 | frag = frag_next(frag); | ||
66 | } | ||
67 | while (frag && frag->ofs >= size) { | ||
68 | struct jffs2_node_frag *next = frag_next(frag); | ||
69 | |||
70 | frag_erase(frag, list); | ||
71 | jffs2_obsolete_node_frag(c, frag); | ||
72 | frag = next; | ||
73 | } | ||
50 | 74 | ||
51 | out: | 75 | if (size == 0) |
52 | D2(while(*list) { | 76 | return; |
53 | printk(KERN_DEBUG "Dirent \"%s\" (hash 0x%08x, ino #%u\n", (*list)->name, (*list)->nhash, (*list)->ino); | 77 | |
54 | list = &(*list)->next; | 78 | /* |
55 | }); | 79 | * If the last fragment starts at the RAM page boundary, it is |
80 | * REF_PRISTINE irrespective of its size. | ||
81 | */ | ||
82 | frag = frag_last(list); | ||
83 | if (frag->node && (frag->ofs & (PAGE_CACHE_SIZE - 1)) == 0) { | ||
84 | dbg_fragtree2("marking the last fragment 0x%08x-0x%08x REF_PRISTINE.\n", | ||
85 | frag->ofs, frag->ofs + frag->size); | ||
86 | frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE; | ||
87 | } | ||
56 | } | 88 | } |
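The new jffs2_truncate_fragtree() above trims the fragment that straddles the cut-off point and drops every fragment at or beyond it. A minimal userspace sketch of that truncation rule, using a plain sorted list and illustrative types rather than the kernel's red-black fragtree:

#include <stdint.h>
#include <stdlib.h>

struct frag {                       /* illustrative stand-in for jffs2_node_frag */
        struct frag *next;          /* kept sorted by ofs */
        uint32_t ofs, size;
};

/* Truncate the mapping to 'size' bytes: trim the fragment that straddles the
   cut point, free every fragment that lies entirely at or beyond it. */
static void truncate_frags(struct frag **list, uint32_t size)
{
        struct frag **pp = list;

        while (*pp) {
                struct frag *f = *pp;

                if (f->ofs >= size) {                 /* wholly beyond the cut */
                        *pp = f->next;
                        free(f);
                } else {
                        if (f->ofs + f->size > size)  /* straddles the cut */
                                f->size = size - f->ofs;
                        pp = &f->next;
                }
        }
}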
57 | 89 | ||
58 | /* | 90 | void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this) |
59 | * Put a new tmp_dnode_info into the temporaty RB-tree, keeping the list in | ||
60 | * order of increasing version. | ||
61 | */ | ||
62 | static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list) | ||
63 | { | 91 | { |
64 | struct rb_node **p = &list->rb_node; | 92 | if (this->node) { |
65 | struct rb_node * parent = NULL; | 93 | this->node->frags--; |
66 | struct jffs2_tmp_dnode_info *this; | 94 | if (!this->node->frags) { |
67 | 95 | /* The node has no valid frags left. It's totally obsoleted */ | |
68 | while (*p) { | 96 | dbg_fragtree2("marking old node @0x%08x (0x%04x-0x%04x) obsolete\n", |
69 | parent = *p; | 97 | ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size); |
70 | this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb); | 98 | jffs2_mark_node_obsolete(c, this->node->raw); |
71 | 99 | jffs2_free_full_dnode(this->node); | |
72 | /* There may actually be a collision here, but it doesn't | 100 | } else { |
73 | actually matter. As long as the two nodes with the same | 101 | dbg_fragtree2("marking old node @0x%08x (0x%04x-0x%04x) REF_NORMAL. frags is %d\n", |
74 | version are together, it's all fine. */ | 102 | ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size, this->node->frags); |
75 | if (tn->version < this->version) | 103 | mark_ref_normal(this->node->raw); |
76 | p = &(*p)->rb_left; | 104 | } |
77 | else | ||
78 | p = &(*p)->rb_right; | ||
79 | } | ||
80 | 105 | ||
81 | rb_link_node(&tn->rb, parent, p); | 106 | } |
82 | rb_insert_color(&tn->rb, list); | 107 | jffs2_free_node_frag(this); |
83 | } | 108 | } |
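jffs2_obsolete_node_frag() above is essentially reference counting: each fragment that still points at a data node holds one reference, and the node itself is only obsoleted when its last fragment disappears. A compressed sketch of that pattern with illustrative stand-in types:

#include <stdlib.h>

struct data_node { int frags; };               /* stand-in for jffs2_full_dnode */
struct fragment  { struct data_node *node; };  /* stand-in for jffs2_node_frag  */

/* Drop one fragment's reference on its backing node; the node itself only
   goes away when the last fragment referring to it is gone. */
static void drop_fragment(struct fragment *frag)
{
        if (frag->node && --frag->node->frags == 0)
                free(frag->node);
        free(frag);
}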
84 | 109 | ||
85 | static void jffs2_free_tmp_dnode_info_list(struct rb_root *list) | 110 | static void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base) |
86 | { | 111 | { |
87 | struct rb_node *this; | 112 | struct rb_node *parent = &base->rb; |
88 | struct jffs2_tmp_dnode_info *tn; | 113 | struct rb_node **link = &parent; |
89 | 114 | ||
90 | this = list->rb_node; | 115 | dbg_fragtree2("insert frag (0x%04x-0x%04x)\n", newfrag->ofs, newfrag->ofs + newfrag->size); |
91 | 116 | ||
92 | /* Now at bottom of tree */ | 117 | while (*link) { |
93 | while (this) { | 118 | parent = *link; |
94 | if (this->rb_left) | 119 | base = rb_entry(parent, struct jffs2_node_frag, rb); |
95 | this = this->rb_left; | 120 | |
96 | else if (this->rb_right) | 121 | if (newfrag->ofs > base->ofs) |
97 | this = this->rb_right; | 122 | link = &base->rb.rb_right; |
123 | else if (newfrag->ofs < base->ofs) | ||
124 | link = &base->rb.rb_left; | ||
98 | else { | 125 | else { |
99 | tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb); | 126 | JFFS2_ERROR("duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base); |
100 | jffs2_free_full_dnode(tn->fn); | 127 | BUG(); |
101 | jffs2_free_tmp_dnode_info(tn); | ||
102 | |||
103 | this = this->rb_parent; | ||
104 | if (!this) | ||
105 | break; | ||
106 | |||
107 | if (this->rb_left == &tn->rb) | ||
108 | this->rb_left = NULL; | ||
109 | else if (this->rb_right == &tn->rb) | ||
110 | this->rb_right = NULL; | ||
111 | else BUG(); | ||
112 | } | 128 | } |
113 | } | 129 | } |
114 | list->rb_node = NULL; | 130 | |
131 | rb_link_node(&newfrag->rb, &base->rb, link); | ||
115 | } | 132 | } |
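The relocated jffs2_fragtree_insert() above follows the usual Linux rbtree idiom: descend from a known base, remember the parent and the link slot, then rb_link_node()/rb_insert_color(). The same descent can be sketched in portable C on a plain (unbalanced) binary search tree keyed by offset; the struct and names below are illustrative only:

#include <stdint.h>
#include <stddef.h>
#include <assert.h>

struct tfrag {
        struct tfrag *left, *right;
        uint32_t ofs;
};

/* Descend to the correct NULL slot and link the new node there; a duplicate
   offset is treated as a bug, exactly as in the function above. */
static void tree_insert(struct tfrag **root, struct tfrag *newfrag)
{
        struct tfrag **link = root;

        while (*link) {
                struct tfrag *base = *link;

                if (newfrag->ofs > base->ofs)
                        link = &base->right;
                else if (newfrag->ofs < base->ofs)
                        link = &base->left;
                else
                        assert(0 && "duplicate frag offset");
        }
        newfrag->left = newfrag->right = NULL;
        *link = newfrag;   /* the kernel would rebalance (rb_insert_color) here */
}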
116 | 133 | ||
117 | static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd) | 134 | /* |
135 | * Allocates and initializes a new fragment. | ||
136 | */ | ||
137 | static inline struct jffs2_node_frag * new_fragment(struct jffs2_full_dnode *fn, uint32_t ofs, uint32_t size) | ||
118 | { | 138 | { |
119 | struct jffs2_full_dirent *next; | 139 | struct jffs2_node_frag *newfrag; |
120 | 140 | ||
121 | while (fd) { | 141 | newfrag = jffs2_alloc_node_frag(); |
122 | next = fd->next; | 142 | if (likely(newfrag)) { |
123 | jffs2_free_full_dirent(fd); | 143 | newfrag->ofs = ofs; |
124 | fd = next; | 144 | newfrag->size = size; |
145 | newfrag->node = fn; | ||
146 | } else { | ||
147 | JFFS2_ERROR("cannot allocate a jffs2_node_frag object\n"); | ||
125 | } | 148 | } |
149 | |||
150 | return newfrag; | ||
126 | } | 151 | } |
127 | 152 | ||
128 | /* Returns first valid node after 'ref'. May return 'ref' */ | 153 | /* |
129 | static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref) | 154 | * Called when no overlapping fragment exists. Inserts a hole before the new |
155 | * fragment and inserts the new fragment into the fragtree. | ||
156 | */ | ||
157 | static int no_overlapping_node(struct jffs2_sb_info *c, struct rb_root *root, | ||
158 | struct jffs2_node_frag *newfrag, | ||
159 | struct jffs2_node_frag *this, uint32_t lastend) | ||
130 | { | 160 | { |
131 | while (ref && ref->next_in_ino) { | 161 | if (lastend < newfrag->node->ofs) { |
132 | if (!ref_obsolete(ref)) | 162 | /* put a hole in before the new fragment */ |
133 | return ref; | 163 | struct jffs2_node_frag *holefrag; |
134 | D1(printk(KERN_DEBUG "node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref))); | 164 | |
135 | ref = ref->next_in_ino; | 165 | holefrag= new_fragment(NULL, lastend, newfrag->node->ofs - lastend); |
166 | if (unlikely(!holefrag)) { | ||
167 | jffs2_free_node_frag(newfrag); | ||
168 | return -ENOMEM; | ||
169 | } | ||
170 | |||
171 | if (this) { | ||
172 | /* By definition, the 'this' node has no right-hand child, | ||
173 | because there are no frags with offset greater than it. | ||
174 | So that's where we want to put the hole */ | ||
175 | dbg_fragtree2("add hole frag %#04x-%#04x on the right of the new frag.\n", | ||
176 | holefrag->ofs, holefrag->ofs + holefrag->size); | ||
177 | rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right); | ||
178 | } else { | ||
179 | dbg_fragtree2("Add hole frag %#04x-%#04x to the root of the tree.\n", | ||
180 | holefrag->ofs, holefrag->ofs + holefrag->size); | ||
181 | rb_link_node(&holefrag->rb, NULL, &root->rb_node); | ||
182 | } | ||
183 | rb_insert_color(&holefrag->rb, root); | ||
184 | this = holefrag; | ||
185 | } | ||
186 | |||
187 | if (this) { | ||
188 | /* By definition, the 'this' node has no right-hand child, | ||
189 | because there are no frags with offset greater than it. | ||
190 | So that's where we want to put new fragment */ | ||
191 | dbg_fragtree2("add the new node at the right\n"); | ||
192 | rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right); | ||
193 | } else { | ||
194 | dbg_fragtree2("insert the new node at the root of the tree\n"); | ||
195 | rb_link_node(&newfrag->rb, NULL, &root->rb_node); | ||
136 | } | 196 | } |
137 | return NULL; | 197 | rb_insert_color(&newfrag->rb, root); |
198 | |||
199 | return 0; | ||
138 | } | 200 | } |
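no_overlapping_node() covers the append-past-EOF case: if the previous data ends before the new fragment starts, a hole fragment is manufactured over the gap first. The bookkeeping, reduced to a userspace sketch on a sorted singly linked list (all types and names below are illustrative):

#include <stdint.h>
#include <stdlib.h>

struct sfrag {
        struct sfrag *next;
        uint32_t ofs, size;
        int is_hole;
};

/* 'tail' points at the terminating NULL of a list sorted by offset whose data
   currently ends at 'lastend'. Returns -1 on allocation failure. */
static int append_with_hole(struct sfrag **tail, uint32_t lastend,
                            uint32_t ofs, uint32_t size)
{
        if (lastend < ofs) {
                /* cover the gap with a hole fragment first */
                struct sfrag *hole = calloc(1, sizeof(*hole));

                if (!hole)
                        return -1;
                hole->ofs = lastend;
                hole->size = ofs - lastend;
                hole->is_hole = 1;
                *tail = hole;
                tail = &hole->next;
        }

        struct sfrag *newfrag = calloc(1, sizeof(*newfrag));

        if (!newfrag)
                return -1;
        newfrag->ofs = ofs;
        newfrag->size = size;
        *tail = newfrag;
        return 0;
}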
139 | 201 | ||
140 | /* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated | 202 | /* Doesn't set inode->i_size */ |
141 | with this ino, returning the former in order of version */ | 203 | static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *root, struct jffs2_node_frag *newfrag) |
204 | { | ||
205 | struct jffs2_node_frag *this; | ||
206 | uint32_t lastend; | ||
207 | |||
208 | /* Skip all the nodes which are completed before this one starts */ | ||
209 | this = jffs2_lookup_node_frag(root, newfrag->node->ofs); | ||
210 | |||
211 | if (this) { | ||
212 | dbg_fragtree2("lookup gave frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", | ||
213 | this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this); | ||
214 | lastend = this->ofs + this->size; | ||
215 | } else { | ||
216 | dbg_fragtree2("lookup gave no frag\n"); | ||
217 | lastend = 0; | ||
218 | } | ||
219 | |||
220 | /* See if we ran off the end of the fragtree */ | ||
221 | if (lastend <= newfrag->ofs) { | ||
222 | /* We did */ | ||
223 | |||
224 | /* Check if 'this' node was on the same page as the new node. | ||
225 | If so, both 'this' and the new node get marked REF_NORMAL so | ||
226 | the GC can take a look. | ||
227 | */ | ||
228 | if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) { | ||
229 | if (this->node) | ||
230 | mark_ref_normal(this->node->raw); | ||
231 | mark_ref_normal(newfrag->node->raw); | ||
232 | } | ||
233 | |||
234 | return no_overlapping_node(c, root, newfrag, this, lastend); | ||
235 | } | ||
142 | 236 | ||
143 | int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | 237 | if (this->node) |
144 | struct rb_root *tnp, struct jffs2_full_dirent **fdp, | 238 | dbg_fragtree2("dealing with frag %u-%u, phys %#08x(%d).\n", |
145 | uint32_t *highest_version, uint32_t *latest_mctime, | 239 | this->ofs, this->ofs + this->size, |
146 | uint32_t *mctime_ver) | 240 | ref_offset(this->node->raw), ref_flags(this->node->raw)); |
241 | else | ||
242 | dbg_fragtree2("dealing with hole frag %u-%u.\n", | ||
243 | this->ofs, this->ofs + this->size); | ||
244 | |||
245 | /* OK. 'this' is pointing at the first frag that newfrag->ofs at least partially obsoletes, | ||
246 | * - i.e. newfrag->ofs < this->ofs+this->size && newfrag->ofs >= this->ofs | ||
247 | */ | ||
248 | if (newfrag->ofs > this->ofs) { | ||
249 | /* This node isn't completely obsoleted. The start of it remains valid */ | ||
250 | |||
251 | /* Mark the new node and the partially covered node REF_NORMAL -- let | ||
252 | the GC take a look at them */ | ||
253 | mark_ref_normal(newfrag->node->raw); | ||
254 | if (this->node) | ||
255 | mark_ref_normal(this->node->raw); | ||
256 | |||
257 | if (this->ofs + this->size > newfrag->ofs + newfrag->size) { | ||
258 | /* The new node splits 'this' frag into two */ | ||
259 | struct jffs2_node_frag *newfrag2; | ||
260 | |||
261 | if (this->node) | ||
262 | dbg_fragtree2("split old frag 0x%04x-0x%04x, phys 0x%08x\n", | ||
263 | this->ofs, this->ofs+this->size, ref_offset(this->node->raw)); | ||
264 | else | ||
265 | dbg_fragtree2("split old hole frag 0x%04x-0x%04x\n", | ||
266 | this->ofs, this->ofs+this->size); | ||
267 | |||
268 | /* New second frag pointing to this's node */ | ||
269 | newfrag2 = new_fragment(this->node, newfrag->ofs + newfrag->size, | ||
270 | this->ofs + this->size - newfrag->ofs - newfrag->size); | ||
271 | if (unlikely(!newfrag2)) | ||
272 | return -ENOMEM; | ||
273 | if (this->node) | ||
274 | this->node->frags++; | ||
275 | |||
276 | /* Adjust size of original 'this' */ | ||
277 | this->size = newfrag->ofs - this->ofs; | ||
278 | |||
279 | /* Now, we know there's no node with offset | ||
280 | greater than this->ofs but smaller than | ||
281 | newfrag2->ofs or newfrag->ofs, for obvious | ||
282 | reasons. So we can do a tree insert from | ||
283 | 'this' to insert newfrag, and a tree insert | ||
284 | from newfrag to insert newfrag2. */ | ||
285 | jffs2_fragtree_insert(newfrag, this); | ||
286 | rb_insert_color(&newfrag->rb, root); | ||
287 | |||
288 | jffs2_fragtree_insert(newfrag2, newfrag); | ||
289 | rb_insert_color(&newfrag2->rb, root); | ||
290 | |||
291 | return 0; | ||
292 | } | ||
293 | /* New node just reduces 'this' frag in size, doesn't split it */ | ||
294 | this->size = newfrag->ofs - this->ofs; | ||
295 | |||
296 | /* Again, we know it lives down here in the tree */ | ||
297 | jffs2_fragtree_insert(newfrag, this); | ||
298 | rb_insert_color(&newfrag->rb, root); | ||
299 | } else { | ||
300 | /* New frag starts at the same point as 'this' used to. Replace | ||
301 | it in the tree without doing a delete and insertion */ | ||
302 | dbg_fragtree2("inserting newfrag (*%p),%d-%d in before 'this' (*%p),%d-%d\n", | ||
303 | newfrag, newfrag->ofs, newfrag->ofs+newfrag->size, this, this->ofs, this->ofs+this->size); | ||
304 | |||
305 | rb_replace_node(&this->rb, &newfrag->rb, root); | ||
306 | |||
307 | if (newfrag->ofs + newfrag->size >= this->ofs+this->size) { | ||
308 | dbg_fragtree2("obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size); | ||
309 | jffs2_obsolete_node_frag(c, this); | ||
310 | } else { | ||
311 | this->ofs += newfrag->size; | ||
312 | this->size -= newfrag->size; | ||
313 | |||
314 | jffs2_fragtree_insert(this, newfrag); | ||
315 | rb_insert_color(&this->rb, root); | ||
316 | return 0; | ||
317 | } | ||
318 | } | ||
319 | /* OK, now we have newfrag added in the correct place in the tree, but | ||
320 | frag_next(newfrag) may be a fragment which is overlapped by it | ||
321 | */ | ||
322 | while ((this = frag_next(newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) { | ||
323 | /* 'this' frag is obsoleted completely. */ | ||
324 | dbg_fragtree2("obsoleting node frag %p (%x-%x) and removing from tree\n", | ||
325 | this, this->ofs, this->ofs+this->size); | ||
326 | rb_erase(&this->rb, root); | ||
327 | jffs2_obsolete_node_frag(c, this); | ||
328 | } | ||
329 | /* Now we're pointing at the first frag which isn't totally obsoleted by | ||
330 | the new frag */ | ||
331 | |||
332 | if (!this || newfrag->ofs + newfrag->size == this->ofs) | ||
333 | return 0; | ||
334 | |||
335 | /* Still some overlap but we don't need to move it in the tree */ | ||
336 | this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size); | ||
337 | this->ofs = newfrag->ofs + newfrag->size; | ||
338 | |||
339 | /* And mark them REF_NORMAL so the GC takes a look at them */ | ||
340 | if (this->node) | ||
341 | mark_ref_normal(this->node->raw); | ||
342 | mark_ref_normal(newfrag->node->raw); | ||
343 | |||
344 | return 0; | ||
345 | } | ||
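The overlap handling in jffs2_add_frag_to_fragtree() boils down to four interval cases for the first fragment the new one touches. A small, self-contained classification of those cases (a sketch; the enum and function below are not part of JFFS2):

#include <stdint.h>

enum overlap { OLD_SPLIT, OLD_TAIL_CUT, OLD_HEAD_CUT, OLD_GONE };

/* Existing fragment [eofs, eofs+esize), new fragment [nofs, nofs+nsize),
   with the lookup guarantee eofs <= nofs < eofs+esize. */
static enum overlap classify(uint32_t eofs, uint32_t esize,
                             uint32_t nofs, uint32_t nsize)
{
        uint32_t eend = eofs + esize, nend = nofs + nsize;

        if (nofs > eofs && nend < eend)
                return OLD_SPLIT;     /* old survives on both sides of new  */
        if (nofs > eofs)
                return OLD_TAIL_CUT;  /* old keeps its head, loses its tail */
        if (nend < eend)
                return OLD_HEAD_CUT;  /* old keeps its tail, loses its head */
        return OLD_GONE;              /* new covers the old one completely  */
}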
346 | |||
347 | /* | ||
348 | * Given an inode, probably with an existing tree of fragments, add the new node | ||
349 | * to the fragment tree. | ||
350 | */ | ||
351 | int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn) | ||
147 | { | 352 | { |
148 | struct jffs2_raw_node_ref *ref, *valid_ref; | 353 | int ret; |
149 | struct jffs2_tmp_dnode_info *tn; | 354 | struct jffs2_node_frag *newfrag; |
150 | struct rb_root ret_tn = RB_ROOT; | ||
151 | struct jffs2_full_dirent *fd, *ret_fd = NULL; | ||
152 | union jffs2_node_union node; | ||
153 | size_t retlen; | ||
154 | int err; | ||
155 | |||
156 | *mctime_ver = 0; | ||
157 | |||
158 | D1(printk(KERN_DEBUG "jffs2_get_inode_nodes(): ino #%u\n", f->inocache->ino)); | ||
159 | 355 | ||
160 | spin_lock(&c->erase_completion_lock); | 356 | if (unlikely(!fn->size)) |
357 | return 0; | ||
161 | 358 | ||
162 | valid_ref = jffs2_first_valid_node(f->inocache->nodes); | 359 | newfrag = new_fragment(fn, fn->ofs, fn->size); |
360 | if (unlikely(!newfrag)) | ||
361 | return -ENOMEM; | ||
362 | newfrag->node->frags = 1; | ||
163 | 363 | ||
164 | if (!valid_ref && (f->inocache->ino != 1)) | 364 | dbg_fragtree("adding node %#04x-%#04x @0x%08x on flash, newfrag *%p\n", |
165 | printk(KERN_WARNING "Eep. No valid nodes for ino #%u\n", f->inocache->ino); | 365 | fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag); |
166 | 366 | ||
167 | while (valid_ref) { | 367 | ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag); |
168 | /* We can hold a pointer to a non-obsolete node without the spinlock, | 368 | if (unlikely(ret)) |
169 | but _obsolete_ nodes may disappear at any time, if the block | 369 | return ret; |
170 | they're in gets erased. So if we mark 'ref' obsolete while we're | ||
171 | not holding the lock, it can go away immediately. For that reason, | ||
172 | we find the next valid node first, before processing 'ref'. | ||
173 | */ | ||
174 | ref = valid_ref; | ||
175 | valid_ref = jffs2_first_valid_node(ref->next_in_ino); | ||
176 | spin_unlock(&c->erase_completion_lock); | ||
177 | 370 | ||
178 | cond_resched(); | 371 | /* If we now share a page with other nodes, mark either previous |
372 | or next node REF_NORMAL, as appropriate. */ | ||
373 | if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) { | ||
374 | struct jffs2_node_frag *prev = frag_prev(newfrag); | ||
375 | |||
376 | mark_ref_normal(fn->raw); | ||
377 | /* If we don't start at zero there's _always_ a previous */ | ||
378 | if (prev->node) | ||
379 | mark_ref_normal(prev->node->raw); | ||
380 | } | ||
381 | |||
382 | if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) { | ||
383 | struct jffs2_node_frag *next = frag_next(newfrag); | ||
384 | |||
385 | if (next) { | ||
386 | mark_ref_normal(fn->raw); | ||
387 | if (next->node) | ||
388 | mark_ref_normal(next->node->raw); | ||
389 | } | ||
390 | } | ||
391 | jffs2_dbg_fragtree_paranoia_check_nolock(f); | ||
392 | |||
393 | return 0; | ||
394 | } | ||
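The REF_PRISTINE/REF_NORMAL decisions above hinge on whether the new fragment shares a RAM page with a neighbour. The page arithmetic, as a sketch with a hard-coded 4096-byte page purely for illustration (the kernel uses PAGE_CACHE_SIZE/PAGE_CACHE_SHIFT):

#include <stdint.h>
#include <stdbool.h>

#define ILLUSTRATIVE_PAGE_SIZE 4096u    /* assumption for this sketch only */

/* True if the fragment does not start on a page boundary, in which case a
   neighbour may live in the same page. */
static bool starts_mid_page(uint32_t ofs)
{
        return (ofs & (ILLUSTRATIVE_PAGE_SIZE - 1)) != 0;
}

/* True if the byte just before 'end_of_prev' (exclusive) and the byte at
   'start_of_next' fall into the same page; end_of_prev == 0 means "no
   previous data at all". */
static bool shares_page(uint32_t end_of_prev, uint32_t start_of_next)
{
        return end_of_prev &&
               (end_of_prev - 1) / ILLUSTRATIVE_PAGE_SIZE ==
               start_of_next / ILLUSTRATIVE_PAGE_SIZE;
}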
395 | |||
396 | /* | ||
397 | * Check the data CRC of the node. | ||
398 | * | ||
399 | * Returns: 0 if the data CRC is correct; | ||
400 | * 1 - if incorrect; | ||
401 | * error code if an error occurred. | ||
402 | */ | ||
403 | static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn) | ||
404 | { | ||
405 | struct jffs2_raw_node_ref *ref = tn->fn->raw; | ||
406 | int err = 0, pointed = 0; | ||
407 | struct jffs2_eraseblock *jeb; | ||
408 | unsigned char *buffer; | ||
409 | uint32_t crc, ofs, retlen, len; | ||
410 | |||
411 | BUG_ON(tn->csize == 0); | ||
412 | |||
413 | if (!jffs2_is_writebuffered(c)) | ||
414 | goto adj_acc; | ||
415 | |||
416 | /* Calculate how many bytes were already checked */ | ||
417 | ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode); | ||
418 | len = ofs % c->wbuf_pagesize; | ||
419 | if (likely(len)) | ||
420 | len = c->wbuf_pagesize - len; | ||
421 | |||
422 | if (len >= tn->csize) { | ||
423 | dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n", | ||
424 | ref_offset(ref), tn->csize, ofs); | ||
425 | goto adj_acc; | ||
426 | } | ||
427 | |||
428 | ofs += len; | ||
429 | len = tn->csize - len; | ||
430 | |||
431 | dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n", | ||
432 | ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len); | ||
433 | |||
434 | #ifndef __ECOS | ||
435 | /* TODO: instead, encapsulate the point() stuff in jffs2_flash_read(), | ||
436 | * adding a jffs2_flash_read_end() interface. */ | ||
437 | if (c->mtd->point) { | ||
438 | err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer); | ||
439 | if (!err && retlen < tn->csize) { | ||
440 | JFFS2_WARNING("MTD point returned len too short: %u instead of %u.\n", retlen, tn->csize); | ||
441 | c->mtd->unpoint(c->mtd, buffer, ofs, len); | ||
442 | } else if (err) | ||
443 | JFFS2_WARNING("MTD point failed: error code %d.\n", err); | ||
444 | else | ||
445 | pointed = 1; /* successfully pointed to device */ | ||
446 | } | ||
447 | #endif | ||
448 | |||
449 | if (!pointed) { | ||
450 | buffer = kmalloc(len, GFP_KERNEL); | ||
451 | if (unlikely(!buffer)) | ||
452 | return -ENOMEM; | ||
179 | 453 | ||
180 | /* FIXME: point() */ | 454 | /* TODO: this is very frequent pattern, make it a separate |
181 | err = jffs2_flash_read(c, (ref_offset(ref)), | 455 | * routine */ |
182 | min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node)), | 456 | err = jffs2_flash_read(c, ofs, len, &retlen, buffer); |
183 | &retlen, (void *)&node); | ||
184 | if (err) { | 457 | if (err) { |
185 | printk(KERN_WARNING "error %d reading node at 0x%08x in get_inode_nodes()\n", err, ref_offset(ref)); | 458 | JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ofs, err); |
186 | goto free_out; | 459 | goto free_out; |
187 | } | 460 | } |
188 | |||
189 | 461 | ||
190 | /* Check we've managed to read at least the common node header */ | 462 | if (retlen != len) { |
191 | if (retlen < min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node.u))) { | 463 | JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ofs, retlen, len); |
192 | printk(KERN_WARNING "short read in get_inode_nodes()\n"); | ||
193 | err = -EIO; | 464 | err = -EIO; |
194 | goto free_out; | 465 | goto free_out; |
195 | } | 466 | } |
196 | 467 | } | |
197 | switch (je16_to_cpu(node.u.nodetype)) { | ||
198 | case JFFS2_NODETYPE_DIRENT: | ||
199 | D1(printk(KERN_DEBUG "Node at %08x (%d) is a dirent node\n", ref_offset(ref), ref_flags(ref))); | ||
200 | if (ref_flags(ref) == REF_UNCHECKED) { | ||
201 | printk(KERN_WARNING "BUG: Dirent node at 0x%08x never got checked? How?\n", ref_offset(ref)); | ||
202 | BUG(); | ||
203 | } | ||
204 | if (retlen < sizeof(node.d)) { | ||
205 | printk(KERN_WARNING "short read in get_inode_nodes()\n"); | ||
206 | err = -EIO; | ||
207 | goto free_out; | ||
208 | } | ||
209 | /* sanity check */ | ||
210 | if (PAD((node.d.nsize + sizeof (node.d))) != PAD(je32_to_cpu (node.d.totlen))) { | ||
211 | printk(KERN_NOTICE "jffs2_get_inode_nodes(): Illegal nsize in node at 0x%08x: nsize 0x%02x, totlen %04x\n", | ||
212 | ref_offset(ref), node.d.nsize, je32_to_cpu(node.d.totlen)); | ||
213 | jffs2_mark_node_obsolete(c, ref); | ||
214 | spin_lock(&c->erase_completion_lock); | ||
215 | continue; | ||
216 | } | ||
217 | if (je32_to_cpu(node.d.version) > *highest_version) | ||
218 | *highest_version = je32_to_cpu(node.d.version); | ||
219 | if (ref_obsolete(ref)) { | ||
220 | /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ | ||
221 | printk(KERN_ERR "Dirent node at 0x%08x became obsolete while we weren't looking\n", | ||
222 | ref_offset(ref)); | ||
223 | BUG(); | ||
224 | } | ||
225 | |||
226 | fd = jffs2_alloc_full_dirent(node.d.nsize+1); | ||
227 | if (!fd) { | ||
228 | err = -ENOMEM; | ||
229 | goto free_out; | ||
230 | } | ||
231 | fd->raw = ref; | ||
232 | fd->version = je32_to_cpu(node.d.version); | ||
233 | fd->ino = je32_to_cpu(node.d.ino); | ||
234 | fd->type = node.d.type; | ||
235 | |||
236 | /* Pick out the mctime of the latest dirent */ | ||
237 | if(fd->version > *mctime_ver) { | ||
238 | *mctime_ver = fd->version; | ||
239 | *latest_mctime = je32_to_cpu(node.d.mctime); | ||
240 | } | ||
241 | 468 | ||
242 | /* memcpy as much of the name as possible from the raw | 469 | /* Continue calculating CRC */ |
243 | dirent we've already read from the flash | 470 | crc = crc32(tn->partial_crc, buffer, len); |
244 | */ | 471 | if(!pointed) |
245 | if (retlen > sizeof(struct jffs2_raw_dirent)) | 472 | kfree(buffer); |
246 | memcpy(&fd->name[0], &node.d.name[0], min_t(uint32_t, node.d.nsize, (retlen-sizeof(struct jffs2_raw_dirent)))); | 473 | #ifndef __ECOS |
247 | 474 | else | |
248 | /* Do we need to copy any more of the name directly | 475 | c->mtd->unpoint(c->mtd, buffer, ofs, len); |
249 | from the flash? | 476 | #endif |
250 | */ | ||
251 | if (node.d.nsize + sizeof(struct jffs2_raw_dirent) > retlen) { | ||
252 | /* FIXME: point() */ | ||
253 | int already = retlen - sizeof(struct jffs2_raw_dirent); | ||
254 | |||
255 | err = jffs2_flash_read(c, (ref_offset(ref)) + retlen, | ||
256 | node.d.nsize - already, &retlen, &fd->name[already]); | ||
257 | if (!err && retlen != node.d.nsize - already) | ||
258 | err = -EIO; | ||
259 | |||
260 | if (err) { | ||
261 | printk(KERN_WARNING "Read remainder of name in jffs2_get_inode_nodes(): error %d\n", err); | ||
262 | jffs2_free_full_dirent(fd); | ||
263 | goto free_out; | ||
264 | } | ||
265 | } | ||
266 | fd->nhash = full_name_hash(fd->name, node.d.nsize); | ||
267 | fd->next = NULL; | ||
268 | fd->name[node.d.nsize] = '\0'; | ||
269 | /* Wheee. We now have a complete jffs2_full_dirent structure, with | ||
270 | the name in it and everything. Link it into the list | ||
271 | */ | ||
272 | D1(printk(KERN_DEBUG "Adding fd \"%s\", ino #%u\n", fd->name, fd->ino)); | ||
273 | jffs2_add_fd_to_list(c, fd, &ret_fd); | ||
274 | break; | ||
275 | |||
276 | case JFFS2_NODETYPE_INODE: | ||
277 | D1(printk(KERN_DEBUG "Node at %08x (%d) is a data node\n", ref_offset(ref), ref_flags(ref))); | ||
278 | if (retlen < sizeof(node.i)) { | ||
279 | printk(KERN_WARNING "read too short for dnode\n"); | ||
280 | err = -EIO; | ||
281 | goto free_out; | ||
282 | } | ||
283 | if (je32_to_cpu(node.i.version) > *highest_version) | ||
284 | *highest_version = je32_to_cpu(node.i.version); | ||
285 | D1(printk(KERN_DEBUG "version %d, highest_version now %d\n", je32_to_cpu(node.i.version), *highest_version)); | ||
286 | |||
287 | if (ref_obsolete(ref)) { | ||
288 | /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ | ||
289 | printk(KERN_ERR "Inode node at 0x%08x became obsolete while we weren't looking\n", | ||
290 | ref_offset(ref)); | ||
291 | BUG(); | ||
292 | } | ||
293 | 477 | ||
294 | /* If we've never checked the CRCs on this node, check them now. */ | 478 | if (crc != tn->data_crc) { |
295 | if (ref_flags(ref) == REF_UNCHECKED) { | 479 | JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n", |
296 | uint32_t crc, len; | 480 | ofs, tn->data_crc, crc); |
297 | struct jffs2_eraseblock *jeb; | 481 | return 1; |
298 | 482 | } | |
299 | crc = crc32(0, &node, sizeof(node.i)-8); | ||
300 | if (crc != je32_to_cpu(node.i.node_crc)) { | ||
301 | printk(KERN_NOTICE "jffs2_get_inode_nodes(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | ||
302 | ref_offset(ref), je32_to_cpu(node.i.node_crc), crc); | ||
303 | jffs2_mark_node_obsolete(c, ref); | ||
304 | spin_lock(&c->erase_completion_lock); | ||
305 | continue; | ||
306 | } | ||
307 | |||
308 | /* sanity checks */ | ||
309 | if ( je32_to_cpu(node.i.offset) > je32_to_cpu(node.i.isize) || | ||
310 | PAD(je32_to_cpu(node.i.csize) + sizeof (node.i)) != PAD(je32_to_cpu(node.i.totlen))) { | ||
311 | printk(KERN_NOTICE "jffs2_get_inode_nodes(): Inode corrupted at 0x%08x, totlen %d, #ino %d, version %d, isize %d, csize %d, dsize %d \n", | ||
312 | ref_offset(ref), je32_to_cpu(node.i.totlen), je32_to_cpu(node.i.ino), | ||
313 | je32_to_cpu(node.i.version), je32_to_cpu(node.i.isize), | ||
314 | je32_to_cpu(node.i.csize), je32_to_cpu(node.i.dsize)); | ||
315 | jffs2_mark_node_obsolete(c, ref); | ||
316 | spin_lock(&c->erase_completion_lock); | ||
317 | continue; | ||
318 | } | ||
319 | 483 | ||
320 | if (node.i.compr != JFFS2_COMPR_ZERO && je32_to_cpu(node.i.csize)) { | 484 | adj_acc: |
321 | unsigned char *buf=NULL; | 485 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; |
322 | uint32_t pointed = 0; | 486 | len = ref_totlen(c, jeb, ref); |
323 | #ifndef __ECOS | 487 | |
324 | if (c->mtd->point) { | 488 | /* |
325 | err = c->mtd->point (c->mtd, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize), | 489 | * Mark the node as having been checked and fix the |
326 | &retlen, &buf); | 490 | * accounting accordingly. |
327 | if (!err && retlen < je32_to_cpu(node.i.csize)) { | 491 | */ |
328 | D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", retlen)); | 492 | spin_lock(&c->erase_completion_lock); |
329 | c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize)); | 493 | jeb->used_size += len; |
330 | } else if (err){ | 494 | jeb->unchecked_size -= len; |
331 | D1(printk(KERN_DEBUG "MTD point failed %d\n", err)); | 495 | c->used_size += len; |
332 | } else | 496 | c->unchecked_size -= len; |
333 | pointed = 1; /* succefully pointed to device */ | 497 | spin_unlock(&c->erase_completion_lock); |
334 | } | 498 | |
335 | #endif | 499 | return 0; |
336 | if(!pointed){ | 500 | |
337 | buf = kmalloc(je32_to_cpu(node.i.csize), GFP_KERNEL); | 501 | free_out: |
338 | if (!buf) | 502 | if(!pointed) |
339 | return -ENOMEM; | 503 | kfree(buffer); |
340 | |||
341 | err = jffs2_flash_read(c, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize), | ||
342 | &retlen, buf); | ||
343 | if (!err && retlen != je32_to_cpu(node.i.csize)) | ||
344 | err = -EIO; | ||
345 | if (err) { | ||
346 | kfree(buf); | ||
347 | return err; | ||
348 | } | ||
349 | } | ||
350 | crc = crc32(0, buf, je32_to_cpu(node.i.csize)); | ||
351 | if(!pointed) | ||
352 | kfree(buf); | ||
353 | #ifndef __ECOS | 504 | #ifndef __ECOS |
354 | else | 505 | else |
355 | c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize)); | 506 | c->mtd->unpoint(c->mtd, buffer, ofs, len); |
356 | #endif | 507 | #endif |
508 | return err; | ||
509 | } | ||
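check_node_data() avoids re-reading bytes whose CRC was already accumulated when they went through the write buffer: it works out how much of the data shared a wbuf page with the header, then continues the CRC over only the remaining tail. A sketch of both steps; zlib's crc32() stands in here purely to show the continuation pattern (the kernel uses its own crc32() convention), and wbuf_pagesize is an assumed parameter:

#include <stdint.h>
#include <stddef.h>
#include <zlib.h>

/* Bytes of the node's data that shared a write-buffer page with the header
   and were therefore already summed at write time. */
static uint32_t already_checked(uint32_t data_start, uint32_t wbuf_pagesize)
{
        uint32_t into_page = data_start % wbuf_pagesize;

        return into_page ? wbuf_pagesize - into_page : 0;
}

/* Continue the CRC over only the unchecked tail and compare with the value
   stored in the node. */
static int data_crc_ok(uint32_t partial_crc, const unsigned char *tail,
                       size_t tail_len, uint32_t expected_crc)
{
        uint32_t crc = (uint32_t)crc32(partial_crc, tail, (uInt)tail_len);

        return crc == expected_crc;
}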
357 | 510 | ||
358 | if (crc != je32_to_cpu(node.i.data_crc)) { | 511 | /* |
359 | printk(KERN_NOTICE "jffs2_get_inode_nodes(): Data CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | 512 | * Helper function for jffs2_add_older_frag_to_fragtree(). |
360 | ref_offset(ref), je32_to_cpu(node.i.data_crc), crc); | 513 | * |
361 | jffs2_mark_node_obsolete(c, ref); | 514 | * Checks the node if we are in the checking stage. |
362 | spin_lock(&c->erase_completion_lock); | 515 | */ |
363 | continue; | 516 | static inline int check_node(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_tmp_dnode_info *tn) |
364 | } | 517 | { |
365 | 518 | int ret; | |
366 | } | ||
367 | 519 | ||
368 | /* Mark the node as having been checked and fix the accounting accordingly */ | 520 | BUG_ON(ref_obsolete(tn->fn->raw)); |
369 | spin_lock(&c->erase_completion_lock); | 521 | |
370 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | 522 | /* We only check the data CRC of unchecked nodes */ |
371 | len = ref_totlen(c, jeb, ref); | 523 | if (ref_flags(tn->fn->raw) != REF_UNCHECKED) |
372 | 524 | return 0; | |
373 | jeb->used_size += len; | 525 | |
374 | jeb->unchecked_size -= len; | 526 | dbg_fragtree2("check node %#04x-%#04x, phys offs %#08x.\n", |
375 | c->used_size += len; | 527 | tn->fn->ofs, tn->fn->ofs + tn->fn->size, ref_offset(tn->fn->raw)); |
376 | c->unchecked_size -= len; | 528 | |
377 | 529 | ret = check_node_data(c, tn); | |
378 | /* If node covers at least a whole page, or if it starts at the | 530 | if (unlikely(ret < 0)) { |
379 | beginning of a page and runs to the end of the file, or if | 531 | JFFS2_ERROR("check_node_data() returned error: %d.\n", |
380 | it's a hole node, mark it REF_PRISTINE, else REF_NORMAL. | 532 | ret); |
381 | 533 | } else if (unlikely(ret > 0)) { | |
382 | If it's actually overlapped, it'll get made NORMAL (or OBSOLETE) | 534 | dbg_fragtree2("CRC error, mark it obsolete.\n"); |
383 | when the overlapping node(s) get added to the tree anyway. | 535 | jffs2_mark_node_obsolete(c, tn->fn->raw); |
384 | */ | 536 | } |
385 | if ((je32_to_cpu(node.i.dsize) >= PAGE_CACHE_SIZE) || | 537 | |
386 | ( ((je32_to_cpu(node.i.offset)&(PAGE_CACHE_SIZE-1))==0) && | 538 | return ret; |
387 | (je32_to_cpu(node.i.dsize)+je32_to_cpu(node.i.offset) == je32_to_cpu(node.i.isize)))) { | 539 | } |
388 | D1(printk(KERN_DEBUG "Marking node at 0x%08x REF_PRISTINE\n", ref_offset(ref))); | 540 | |
389 | ref->flash_offset = ref_offset(ref) | REF_PRISTINE; | 541 | /* |
390 | } else { | 542 | * Helper function for jffs2_add_older_frag_to_fragtree(). |
391 | D1(printk(KERN_DEBUG "Marking node at 0x%08x REF_NORMAL\n", ref_offset(ref))); | 543 | * |
392 | ref->flash_offset = ref_offset(ref) | REF_NORMAL; | 544 | * Called when the new fragment that is being inserted |
393 | } | 545 | * splits a hole fragment. |
394 | spin_unlock(&c->erase_completion_lock); | 546 | */ |
547 | static int split_hole(struct jffs2_sb_info *c, struct rb_root *root, | ||
548 | struct jffs2_node_frag *newfrag, struct jffs2_node_frag *hole) | ||
549 | { | ||
550 | dbg_fragtree2("fragment %#04x-%#04x splits the hole %#04x-%#04x\n", | ||
551 | newfrag->ofs, newfrag->ofs + newfrag->size, hole->ofs, hole->ofs + hole->size); | ||
552 | |||
553 | if (hole->ofs == newfrag->ofs) { | ||
554 | /* | ||
555 | * Well, the new fragment actually starts at the same offset as | ||
556 | * the hole. | ||
557 | */ | ||
558 | if (hole->ofs + hole->size > newfrag->ofs + newfrag->size) { | ||
559 | /* | ||
560 | * We replace the overlapped left part of the hole by | ||
561 | * the new node. | ||
562 | */ | ||
563 | |||
564 | dbg_fragtree2("insert fragment %#04x-%#04x and cut the left part of the hole\n", | ||
565 | newfrag->ofs, newfrag->ofs + newfrag->size); | ||
566 | rb_replace_node(&hole->rb, &newfrag->rb, root); | ||
567 | |||
568 | hole->ofs += newfrag->size; | ||
569 | hole->size -= newfrag->size; | ||
570 | |||
571 | /* | ||
572 | * We know that 'hole' should be the right hand | ||
573 | * fragment. | ||
574 | */ | ||
575 | jffs2_fragtree_insert(hole, newfrag); | ||
576 | rb_insert_color(&hole->rb, root); | ||
577 | } else { | ||
578 | /* | ||
579 | * Ah, the new fragment is of the same size as the hole. | ||
580 | * Replace the hole with it. | ||
581 | */ | ||
582 | dbg_fragtree2("insert fragment %#04x-%#04x and overwrite hole\n", | ||
583 | newfrag->ofs, newfrag->ofs + newfrag->size); | ||
584 | rb_replace_node(&hole->rb, &newfrag->rb, root); | ||
585 | jffs2_free_node_frag(hole); | ||
586 | } | ||
587 | } else { | ||
588 | /* The new fragment leaves some hole space at the left */ | ||
589 | |||
590 | struct jffs2_node_frag * newfrag2 = NULL; | ||
591 | |||
592 | if (hole->ofs + hole->size > newfrag->ofs + newfrag->size) { | ||
593 | /* The new frag also leaves some space at the right */ | ||
594 | newfrag2 = new_fragment(NULL, newfrag->ofs + | ||
595 | newfrag->size, hole->ofs + hole->size | ||
596 | - newfrag->ofs - newfrag->size); | ||
597 | if (unlikely(!newfrag2)) { | ||
598 | jffs2_free_node_frag(newfrag); | ||
599 | return -ENOMEM; | ||
395 | } | 600 | } |
601 | } | ||
602 | |||
603 | hole->size = newfrag->ofs - hole->ofs; | ||
604 | dbg_fragtree2("left the hole %#04x-%#04x at the left and insert fragment %#04x-%#04x\n", | ||
605 | hole->ofs, hole->ofs + hole->size, newfrag->ofs, newfrag->ofs + newfrag->size); | ||
606 | |||
607 | jffs2_fragtree_insert(newfrag, hole); | ||
608 | rb_insert_color(&newfrag->rb, root); | ||
609 | |||
610 | if (newfrag2) { | ||
611 | dbg_fragtree2("left the hole %#04x-%#04x at the right\n", | ||
612 | newfrag2->ofs, newfrag2->ofs + newfrag2->size); | ||
613 | jffs2_fragtree_insert(newfrag2, newfrag); | ||
614 | rb_insert_color(&newfrag2->rb, root); | ||
615 | } | ||
616 | } | ||
617 | |||
618 | return 0; | ||
619 | } | ||
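split_hole() distinguishes whether any hole survives to the left and/or right of the new data. The interval arithmetic, isolated into a pure function (a sketch under the assumption that the new fragment lies entirely inside the hole; the struct below is illustrative):

#include <stdint.h>

struct remainder {
        uint32_t left_ofs, left_size;    /* hole left over before the new data */
        uint32_t right_ofs, right_size;  /* hole left over after the new data  */
};

/* Assumes [nofs, nofs+nsize) lies entirely inside the hole [hofs, hofs+hsize). */
static struct remainder split_hole_sketch(uint32_t hofs, uint32_t hsize,
                                          uint32_t nofs, uint32_t nsize)
{
        struct remainder r = { 0, 0, 0, 0 };

        if (nofs > hofs) {                       /* hole survives on the left  */
                r.left_ofs = hofs;
                r.left_size = nofs - hofs;
        }
        if (nofs + nsize < hofs + hsize) {       /* hole survives on the right */
                r.right_ofs = nofs + nsize;
                r.right_size = hofs + hsize - (nofs + nsize);
        }
        return r;                           /* all zero: hole fully replaced */
}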
620 | |||
621 | /* | ||
622 | * This function is used when we build an inode. It expects the nodes to be | ||
623 | * passed in decreasing version order. The whole point of this is to improve | ||
624 | * inode checking on NAND: we check a node's data CRC only when it is not | ||
625 | * obsoleted. Previously, the add_frag_to_fragtree() function was used and | ||
626 | * nodes were passed to it in increasing version order, and the CRCs of all | ||
627 | * nodes were checked. | ||
628 | * | ||
629 | * Note: tn->fn->size shouldn't be zero. | ||
630 | * | ||
631 | * Returns 0 if the node was inserted | ||
632 | * 1 if it wasn't inserted (since it is obsolete) | ||
633 | * < 0 if an error occurred | ||
634 | */ | ||
635 | int jffs2_add_older_frag_to_fragtree(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
636 | struct jffs2_tmp_dnode_info *tn) | ||
637 | { | ||
638 | struct jffs2_node_frag *this, *newfrag; | ||
639 | uint32_t lastend; | ||
640 | struct jffs2_full_dnode *fn = tn->fn; | ||
641 | struct rb_root *root = &f->fragtree; | ||
642 | uint32_t fn_size = fn->size, fn_ofs = fn->ofs; | ||
643 | int err, checked = 0; | ||
644 | int ref_flag; | ||
645 | |||
646 | dbg_fragtree("insert fragment %#04x-%#04x, ver %u\n", fn_ofs, fn_ofs + fn_size, tn->version); | ||
647 | |||
648 | /* Skip all the nodes which are completed before this one starts */ | ||
649 | this = jffs2_lookup_node_frag(root, fn_ofs); | ||
650 | if (this) | ||
651 | dbg_fragtree2("'this' found %#04x-%#04x (%s)\n", this->ofs, this->ofs + this->size, this->node ? "data" : "hole"); | ||
652 | |||
653 | if (this) | ||
654 | lastend = this->ofs + this->size; | ||
655 | else | ||
656 | lastend = 0; | ||
657 | |||
658 | /* Detect the preliminary type of node */ | ||
659 | if (fn->size >= PAGE_CACHE_SIZE) | ||
660 | ref_flag = REF_PRISTINE; | ||
661 | else | ||
662 | ref_flag = REF_NORMAL; | ||
663 | |||
664 | /* See if we ran off the end of the root */ | ||
665 | if (lastend <= fn_ofs) { | ||
666 | /* We did */ | ||
667 | |||
668 | /* | ||
669 | * We are going to insert the new node into the | ||
670 | * fragment tree, so check it. | ||
671 | */ | ||
672 | err = check_node(c, f, tn); | ||
673 | if (err != 0) | ||
674 | return err; | ||
675 | |||
676 | fn->frags = 1; | ||
677 | |||
678 | newfrag = new_fragment(fn, fn_ofs, fn_size); | ||
679 | if (unlikely(!newfrag)) | ||
680 | return -ENOMEM; | ||
681 | |||
682 | err = no_overlapping_node(c, root, newfrag, this, lastend); | ||
683 | if (unlikely(err != 0)) { | ||
684 | jffs2_free_node_frag(newfrag); | ||
685 | return err; | ||
686 | } | ||
687 | |||
688 | goto out_ok; | ||
689 | } | ||
396 | 690 | ||
397 | tn = jffs2_alloc_tmp_dnode_info(); | 691 | fn->frags = 0; |
398 | if (!tn) { | 692 | |
399 | D1(printk(KERN_DEBUG "alloc tn failed\n")); | 693 | while (1) { |
400 | err = -ENOMEM; | 694 | /* |
401 | goto free_out; | 695 | * Here we have: |
696 | * fn_ofs < this->ofs + this->size && fn_ofs >= this->ofs. | ||
697 | * | ||
698 | * Remember, 'this' has a higher version; any non-hole node | ||
699 | * which is already in the fragtree is newer than the newly | ||
700 | * inserted one. | ||
701 | */ | ||
702 | if (!this->node) { | ||
703 | /* | ||
704 | * 'this' is the hole fragment, so at least the | ||
705 | * beginning of the new fragment is valid. | ||
706 | */ | ||
707 | |||
708 | /* | ||
709 | * We are going to insert the new node into the | ||
710 | * fragment tree, so check it. | ||
711 | */ | ||
712 | if (!checked) { | ||
713 | err = check_node(c, f, tn); | ||
714 | if (unlikely(err != 0)) | ||
715 | return err; | ||
716 | checked = 1; | ||
402 | } | 717 | } |
403 | 718 | ||
404 | tn->fn = jffs2_alloc_full_dnode(); | 719 | if (this->ofs + this->size >= fn_ofs + fn_size) { |
405 | if (!tn->fn) { | 720 | /* We split the hole on two parts */ |
406 | D1(printk(KERN_DEBUG "alloc fn failed\n")); | 721 | |
407 | err = -ENOMEM; | 722 | fn->frags += 1; |
408 | jffs2_free_tmp_dnode_info(tn); | 723 | newfrag = new_fragment(fn, fn_ofs, fn_size); |
409 | goto free_out; | 724 | if (unlikely(!newfrag)) |
725 | return -ENOMEM; | ||
726 | |||
727 | err = split_hole(c, root, newfrag, this); | ||
728 | if (unlikely(err)) | ||
729 | return err; | ||
730 | goto out_ok; | ||
410 | } | 731 | } |
411 | tn->version = je32_to_cpu(node.i.version); | 732 | |
412 | tn->fn->ofs = je32_to_cpu(node.i.offset); | 733 | /* |
413 | /* There was a bug where we wrote hole nodes out with | 734 | * The beginning of the new fragment is valid since it |
414 | csize/dsize swapped. Deal with it */ | 735 | * overlaps the hole node. |
415 | if (node.i.compr == JFFS2_COMPR_ZERO && !je32_to_cpu(node.i.dsize) && je32_to_cpu(node.i.csize)) | 736 | */ |
416 | tn->fn->size = je32_to_cpu(node.i.csize); | 737 | |
417 | else // normal case... | 738 | ref_flag = REF_NORMAL; |
418 | tn->fn->size = je32_to_cpu(node.i.dsize); | 739 | |
419 | tn->fn->raw = ref; | 740 | fn->frags += 1; |
420 | D1(printk(KERN_DEBUG "dnode @%08x: ver %u, offset %04x, dsize %04x\n", | 741 | newfrag = new_fragment(fn, fn_ofs, |
421 | ref_offset(ref), je32_to_cpu(node.i.version), | 742 | this->ofs + this->size - fn_ofs); |
422 | je32_to_cpu(node.i.offset), je32_to_cpu(node.i.dsize))); | 743 | if (unlikely(!newfrag)) |
423 | jffs2_add_tn_to_tree(tn, &ret_tn); | 744 | return -ENOMEM; |
424 | break; | 745 | |
425 | 746 | if (fn_ofs == this->ofs) { | |
426 | default: | 747 | /* |
427 | if (ref_flags(ref) == REF_UNCHECKED) { | 748 | * The new node starts at the same offset as |
428 | struct jffs2_eraseblock *jeb; | 749 | * the hole and supersedes the hole. |
429 | uint32_t len; | 750 | */ |
430 | 751 | dbg_fragtree2("add the new fragment instead of hole %#04x-%#04x, refcnt %d\n", | |
431 | printk(KERN_ERR "Eep. Unknown node type %04x at %08x was marked REF_UNCHECKED\n", | 752 | fn_ofs, fn_ofs + this->ofs + this->size - fn_ofs, fn->frags); |
432 | je16_to_cpu(node.u.nodetype), ref_offset(ref)); | 753 | |
433 | 754 | rb_replace_node(&this->rb, &newfrag->rb, root); | |
434 | /* Mark the node as having been checked and fix the accounting accordingly */ | 755 | jffs2_free_node_frag(this); |
435 | spin_lock(&c->erase_completion_lock); | 756 | } else { |
436 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | 757 | /* |
437 | len = ref_totlen(c, jeb, ref); | 758 | * The hole becomes shorter as its right part |
438 | 759 | * is superseded by the new fragment. | |
439 | jeb->used_size += len; | 760 | */ |
440 | jeb->unchecked_size -= len; | 761 | dbg_fragtree2("reduce size of hole %#04x-%#04x to %#04x-%#04x\n", |
441 | c->used_size += len; | 762 | this->ofs, this->ofs + this->size, this->ofs, this->ofs + this->size - newfrag->size); |
442 | c->unchecked_size -= len; | 763 | |
443 | 764 | dbg_fragtree2("add new fragment %#04x-%#04x, refcnt %d\n", fn_ofs, | |
444 | mark_ref_normal(ref); | 765 | fn_ofs + this->ofs + this->size - fn_ofs, fn->frags); |
445 | spin_unlock(&c->erase_completion_lock); | 766 | |
767 | this->size -= newfrag->size; | ||
768 | jffs2_fragtree_insert(newfrag, this); | ||
769 | rb_insert_color(&newfrag->rb, root); | ||
446 | } | 770 | } |
447 | node.u.nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(node.u.nodetype)); | 771 | |
448 | if (crc32(0, &node, sizeof(struct jffs2_unknown_node)-4) != je32_to_cpu(node.u.hdr_crc)) { | 772 | fn_ofs += newfrag->size; |
449 | /* Hmmm. This should have been caught at scan time. */ | 773 | fn_size -= newfrag->size; |
450 | printk(KERN_ERR "Node header CRC failed at %08x. But it must have been OK earlier.\n", | 774 | this = rb_entry(rb_next(&newfrag->rb), |
451 | ref_offset(ref)); | 775 | struct jffs2_node_frag, rb); |
452 | printk(KERN_ERR "Node was: { %04x, %04x, %08x, %08x }\n", | 776 | |
453 | je16_to_cpu(node.u.magic), je16_to_cpu(node.u.nodetype), je32_to_cpu(node.u.totlen), | 777 | dbg_fragtree2("switch to the next 'this' fragment: %#04x-%#04x %s\n", |
454 | je32_to_cpu(node.u.hdr_crc)); | 778 | this->ofs, this->ofs + this->size, this->node ? "(data)" : "(hole)"); |
455 | jffs2_mark_node_obsolete(c, ref); | 779 | } |
456 | } else switch(je16_to_cpu(node.u.nodetype) & JFFS2_COMPAT_MASK) { | 780 | |
457 | case JFFS2_FEATURE_INCOMPAT: | 781 | /* |
458 | printk(KERN_NOTICE "Unknown INCOMPAT nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref)); | 782 | * 'This' node is not the hole so it obsoletes the new fragment |
459 | /* EEP */ | 783 | * either fully or partially. |
460 | BUG(); | 784 | */ |
461 | break; | 785 | if (this->ofs + this->size >= fn_ofs + fn_size) { |
462 | case JFFS2_FEATURE_ROCOMPAT: | 786 | /* The new node is obsolete, drop it */ |
463 | printk(KERN_NOTICE "Unknown ROCOMPAT nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref)); | 787 | if (fn->frags == 0) { |
464 | if (!(c->flags & JFFS2_SB_FLAG_RO)) | 788 | dbg_fragtree2("%#04x-%#04x is obsolete, mark it obsolete\n", fn_ofs, fn_ofs + fn_size); |
465 | BUG(); | 789 | ref_flag = REF_OBSOLETE; |
466 | break; | ||
467 | case JFFS2_FEATURE_RWCOMPAT_COPY: | ||
468 | printk(KERN_NOTICE "Unknown RWCOMPAT_COPY nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref)); | ||
469 | break; | ||
470 | case JFFS2_FEATURE_RWCOMPAT_DELETE: | ||
471 | printk(KERN_NOTICE "Unknown RWCOMPAT_DELETE nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref)); | ||
472 | jffs2_mark_node_obsolete(c, ref); | ||
473 | break; | ||
474 | } | 790 | } |
791 | goto out_ok; | ||
792 | } else { | ||
793 | struct jffs2_node_frag *new_this; | ||
794 | |||
795 | /* 'This' node obsoletes the beginning of the new node */ | ||
796 | dbg_fragtree2("the beginning %#04x-%#04x is obsolete\n", fn_ofs, this->ofs + this->size); | ||
797 | |||
798 | ref_flag = REF_NORMAL; | ||
799 | |||
800 | fn_size -= this->ofs + this->size - fn_ofs; | ||
801 | fn_ofs = this->ofs + this->size; | ||
802 | dbg_fragtree2("now considering %#04x-%#04x\n", fn_ofs, fn_ofs + fn_size); | ||
803 | |||
804 | new_this = rb_entry(rb_next(&this->rb), struct jffs2_node_frag, rb); | ||
805 | if (!new_this) { | ||
806 | /* | ||
807 | * There is no next fragment. Add the rest of | ||
808 | * the new node as the right-hand child. | ||
809 | */ | ||
810 | if (!checked) { | ||
811 | err = check_node(c, f, tn); | ||
812 | if (unlikely(err != 0)) | ||
813 | return err; | ||
814 | checked = 1; | ||
815 | } | ||
475 | 816 | ||
817 | fn->frags += 1; | ||
818 | newfrag = new_fragment(fn, fn_ofs, fn_size); | ||
819 | if (unlikely(!newfrag)) | ||
820 | return -ENOMEM; | ||
821 | |||
822 | dbg_fragtree2("there are no more fragments, insert %#04x-%#04x\n", | ||
823 | newfrag->ofs, newfrag->ofs + newfrag->size); | ||
824 | rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right); | ||
825 | rb_insert_color(&newfrag->rb, root); | ||
826 | goto out_ok; | ||
827 | } else { | ||
828 | this = new_this; | ||
829 | dbg_fragtree2("switch to the next 'this' fragment: %#04x-%#04x %s\n", | ||
830 | this->ofs, this->ofs + this->size, this->node ? "(data)" : "(hole)"); | ||
831 | } | ||
476 | } | 832 | } |
477 | spin_lock(&c->erase_completion_lock); | 833 | } |
834 | |||
835 | out_ok: | ||
836 | BUG_ON(fn->size < PAGE_CACHE_SIZE && ref_flag == REF_PRISTINE); | ||
478 | 837 | ||
838 | if (ref_flag == REF_OBSOLETE) { | ||
839 | dbg_fragtree2("the node is obsolete now\n"); | ||
840 | /* jffs2_mark_node_obsolete() will adjust space accounting */ | ||
841 | jffs2_mark_node_obsolete(c, fn->raw); | ||
842 | return 1; | ||
479 | } | 843 | } |
844 | |||
845 | dbg_fragtree2("the node is \"%s\" now\n", ref_flag == REF_NORMAL ? "REF_NORMAL" : "REF_PRISTINE"); | ||
846 | |||
847 | /* Space accounting was adjusted at check_node_data() */ | ||
848 | spin_lock(&c->erase_completion_lock); | ||
849 | fn->raw->flash_offset = ref_offset(fn->raw) | ref_flag; | ||
480 | spin_unlock(&c->erase_completion_lock); | 850 | spin_unlock(&c->erase_completion_lock); |
481 | *tnp = ret_tn; | ||
482 | *fdp = ret_fd; | ||
483 | 851 | ||
484 | return 0; | 852 | return 0; |
485 | |||
486 | free_out: | ||
487 | jffs2_free_tmp_dnode_info_list(&ret_tn); | ||
488 | jffs2_free_full_dirent_list(ret_fd); | ||
489 | return err; | ||
490 | } | 853 | } |
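The point of the decreasing-version insertion above is that a node which turns out to be fully shadowed by newer data never needs its data CRC checked at all. A userspace sketch of that "check lazily, only for survivors" strategy; every type and helper below is an illustrative stub, not JFFS2 code:

#include <stdint.h>
#include <stdbool.h>

struct pending_node {
        uint32_t version, ofs, size;
        bool crc_checked;
};

/* Stub: a real implementation would ask the fragment map whether any byte of
   [ofs, ofs+size) is still unclaimed by newer nodes. */
static bool would_contribute(uint32_t ofs, uint32_t size)
{
        (void)ofs; (void)size;
        return true;
}

/* Stub: a real implementation would read the data back and verify its CRC. */
static int verify_crc(struct pending_node *n)
{
        (void)n;
        return 0;
}

/* Nodes arrive newest-first; a node that no longer contributes anything is
   declared obsolete without ever touching the flash for a CRC check. */
static int insert_old_node(struct pending_node *n)
{
        if (!would_contribute(n->ofs, n->size))
                return 1;                     /* fully shadowed: obsolete */

        if (!n->crc_checked) {
                int err = verify_crc(n);      /* only now read the data   */

                if (err)
                        return err;
                n->crc_checked = true;
        }
        /* ...insert the surviving pieces into the fragment map here... */
        return 0;
}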
491 | 854 | ||
492 | void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state) | 855 | void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state) |
@@ -499,24 +862,21 @@ void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache | |||
499 | 862 | ||
500 | /* During mount, this needs no locking. During normal operation, its | 863 | /* During mount, this needs no locking. During normal operation, its |
501 | callers want to do other stuff while still holding the inocache_lock. | 864 | callers want to do other stuff while still holding the inocache_lock. |
502 | Rather than introducing special case get_ino_cache functions or | 865 | Rather than introducing special case get_ino_cache functions or |
503 | callbacks, we just let the caller do the locking itself. */ | 866 | callbacks, we just let the caller do the locking itself. */ |
504 | 867 | ||
505 | struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino) | 868 | struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino) |
506 | { | 869 | { |
507 | struct jffs2_inode_cache *ret; | 870 | struct jffs2_inode_cache *ret; |
508 | 871 | ||
509 | D2(printk(KERN_DEBUG "jffs2_get_ino_cache(): ino %u\n", ino)); | ||
510 | |||
511 | ret = c->inocache_list[ino % INOCACHE_HASHSIZE]; | 872 | ret = c->inocache_list[ino % INOCACHE_HASHSIZE]; |
512 | while (ret && ret->ino < ino) { | 873 | while (ret && ret->ino < ino) { |
513 | ret = ret->next; | 874 | ret = ret->next; |
514 | } | 875 | } |
515 | 876 | ||
516 | if (ret && ret->ino != ino) | 877 | if (ret && ret->ino != ino) |
517 | ret = NULL; | 878 | ret = NULL; |
518 | 879 | ||
519 | D2(printk(KERN_DEBUG "jffs2_get_ino_cache found %p for ino %u\n", ret, ino)); | ||
520 | return ret; | 880 | return ret; |
521 | } | 881 | } |
522 | 882 | ||
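jffs2_get_ino_cache() is a straightforward hash lookup over buckets kept sorted by inode number, so the scan can stop at the first entry that is not smaller than the target. The same pattern as a sketch (sizes and names are illustrative):

#include <stdint.h>
#include <stddef.h>

#define SKETCH_HASHSIZE 128

struct icache {                  /* stand-in for jffs2_inode_cache */
        struct icache *next;     /* bucket chain kept in ascending ino order */
        uint32_t ino;
};

static struct icache *lookup_ino(struct icache *table[SKETCH_HASHSIZE], uint32_t ino)
{
        struct icache *p = table[ino % SKETCH_HASHSIZE];

        while (p && p->ino < ino)
                p = p->next;

        return (p && p->ino == ino) ? p : NULL;
}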
@@ -528,7 +888,7 @@ void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new | |||
528 | if (!new->ino) | 888 | if (!new->ino) |
529 | new->ino = ++c->highest_ino; | 889 | new->ino = ++c->highest_ino; |
530 | 890 | ||
531 | D2(printk(KERN_DEBUG "jffs2_add_ino_cache: Add %p (ino #%u)\n", new, new->ino)); | 891 | dbg_inocache("add %p (ino #%u)\n", new, new->ino); |
532 | 892 | ||
533 | prev = &c->inocache_list[new->ino % INOCACHE_HASHSIZE]; | 893 | prev = &c->inocache_list[new->ino % INOCACHE_HASHSIZE]; |
534 | 894 | ||
@@ -544,11 +904,12 @@ void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new | |||
544 | void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old) | 904 | void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old) |
545 | { | 905 | { |
546 | struct jffs2_inode_cache **prev; | 906 | struct jffs2_inode_cache **prev; |
547 | D1(printk(KERN_DEBUG "jffs2_del_ino_cache: Del %p (ino #%u)\n", old, old->ino)); | 907 | |
908 | dbg_inocache("del %p (ino #%u)\n", old, old->ino); | ||
548 | spin_lock(&c->inocache_lock); | 909 | spin_lock(&c->inocache_lock); |
549 | 910 | ||
550 | prev = &c->inocache_list[old->ino % INOCACHE_HASHSIZE]; | 911 | prev = &c->inocache_list[old->ino % INOCACHE_HASHSIZE]; |
551 | 912 | ||
552 | while ((*prev) && (*prev)->ino < old->ino) { | 913 | while ((*prev) && (*prev)->ino < old->ino) { |
553 | prev = &(*prev)->next; | 914 | prev = &(*prev)->next; |
554 | } | 915 | } |
@@ -558,7 +919,7 @@ void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old) | |||
558 | 919 | ||
559 | /* Free it now unless it's in READING or CLEARING state, which | 920 | /* Free it now unless it's in READING or CLEARING state, which |
560 | are the transitions upon read_inode() and clear_inode(). The | 921 | are the transitions upon read_inode() and clear_inode(). The |
561 | rest of the time we know nobody else is looking at it, and | 922 | rest of the time we know nobody else is looking at it, and |
562 | if it's held by read_inode() or clear_inode() they'll free it | 923 | if it's held by read_inode() or clear_inode() they'll free it |
563 | for themselves. */ | 924 | for themselves. */ |
564 | if (old->state != INO_STATE_READING && old->state != INO_STATE_CLEARING) | 925 | if (old->state != INO_STATE_READING && old->state != INO_STATE_CLEARING) |
@@ -571,7 +932,7 @@ void jffs2_free_ino_caches(struct jffs2_sb_info *c) | |||
571 | { | 932 | { |
572 | int i; | 933 | int i; |
573 | struct jffs2_inode_cache *this, *next; | 934 | struct jffs2_inode_cache *this, *next; |
574 | 935 | ||
575 | for (i=0; i<INOCACHE_HASHSIZE; i++) { | 936 | for (i=0; i<INOCACHE_HASHSIZE; i++) { |
576 | this = c->inocache_list[i]; | 937 | this = c->inocache_list[i]; |
577 | while (this) { | 938 | while (this) { |
@@ -598,38 +959,30 @@ void jffs2_free_raw_node_refs(struct jffs2_sb_info *c) | |||
598 | c->blocks[i].first_node = c->blocks[i].last_node = NULL; | 959 | c->blocks[i].first_node = c->blocks[i].last_node = NULL; |
599 | } | 960 | } |
600 | } | 961 | } |
601 | 962 | ||
602 | struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset) | 963 | struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset) |
603 | { | 964 | { |
604 | /* The common case in lookup is that there will be a node | 965 | /* The common case in lookup is that there will be a node |
605 | which precisely matches. So we go looking for that first */ | 966 | which precisely matches. So we go looking for that first */ |
606 | struct rb_node *next; | 967 | struct rb_node *next; |
607 | struct jffs2_node_frag *prev = NULL; | 968 | struct jffs2_node_frag *prev = NULL; |
608 | struct jffs2_node_frag *frag = NULL; | 969 | struct jffs2_node_frag *frag = NULL; |
609 | 970 | ||
610 | D2(printk(KERN_DEBUG "jffs2_lookup_node_frag(%p, %d)\n", fragtree, offset)); | 971 | dbg_fragtree2("root %p, offset %d\n", fragtree, offset); |
611 | 972 | ||
612 | next = fragtree->rb_node; | 973 | next = fragtree->rb_node; |
613 | 974 | ||
614 | while(next) { | 975 | while(next) { |
615 | frag = rb_entry(next, struct jffs2_node_frag, rb); | 976 | frag = rb_entry(next, struct jffs2_node_frag, rb); |
616 | 977 | ||
617 | D2(printk(KERN_DEBUG "Considering frag %d-%d (%p). left %p, right %p\n", | ||
618 | frag->ofs, frag->ofs+frag->size, frag, frag->rb.rb_left, frag->rb.rb_right)); | ||
619 | if (frag->ofs + frag->size <= offset) { | 978 | if (frag->ofs + frag->size <= offset) { |
620 | D2(printk(KERN_DEBUG "Going right from frag %d-%d, before the region we care about\n", | ||
621 | frag->ofs, frag->ofs+frag->size)); | ||
622 | /* Remember the closest smaller match on the way down */ | 979 | /* Remember the closest smaller match on the way down */ |
623 | if (!prev || frag->ofs > prev->ofs) | 980 | if (!prev || frag->ofs > prev->ofs) |
624 | prev = frag; | 981 | prev = frag; |
625 | next = frag->rb.rb_right; | 982 | next = frag->rb.rb_right; |
626 | } else if (frag->ofs > offset) { | 983 | } else if (frag->ofs > offset) { |
627 | D2(printk(KERN_DEBUG "Going left from frag %d-%d, after the region we care about\n", | ||
628 | frag->ofs, frag->ofs+frag->size)); | ||
629 | next = frag->rb.rb_left; | 984 | next = frag->rb.rb_left; |
630 | } else { | 985 | } else { |
631 | D2(printk(KERN_DEBUG "Returning frag %d,%d, matched\n", | ||
632 | frag->ofs, frag->ofs+frag->size)); | ||
633 | return frag; | 986 | return frag; |
634 | } | 987 | } |
635 | } | 988 | } |
@@ -638,11 +991,11 @@ struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_ | |||
638 | and return the closest smaller one */ | 991 | and return the closest smaller one */ |
639 | 992 | ||
640 | if (prev) | 993 | if (prev) |
641 | D2(printk(KERN_DEBUG "No match. Returning frag %d,%d, closest previous\n", | 994 | dbg_fragtree2("no match. Returning frag %#04x-%#04x, closest previous\n", |
642 | prev->ofs, prev->ofs+prev->size)); | 995 | prev->ofs, prev->ofs+prev->size); |
643 | else | 996 | else |
644 | D2(printk(KERN_DEBUG "Returning NULL, empty fragtree\n")); | 997 | dbg_fragtree2("returning NULL, empty fragtree\n"); |
645 | 998 | ||
646 | return prev; | 999 | return prev; |
647 | } | 1000 | } |
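jffs2_lookup_node_frag() returns either the fragment containing the offset or, failing that, the closest fragment that starts below it. That lookup policy on a plain binary search tree, as a sketch with illustrative types:

#include <stdint.h>
#include <stddef.h>

struct lfrag {
        struct lfrag *left, *right;
        uint32_t ofs, size;
};

static struct lfrag *lookup_frag(struct lfrag *root, uint32_t offset)
{
        struct lfrag *prev = NULL;

        while (root) {
                if (root->ofs + root->size <= offset) {
                        /* entirely below: best "closest smaller" so far */
                        if (!prev || root->ofs > prev->ofs)
                                prev = root;
                        root = root->right;
                } else if (root->ofs > offset) {
                        root = root->left;
                } else {
                        return root;      /* ofs <= offset < ofs + size */
                }
        }
        return prev;   /* NULL for an empty tree or an offset below all data */
}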
648 | 1001 | ||
@@ -656,39 +1009,32 @@ void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c) | |||
656 | if (!root->rb_node) | 1009 | if (!root->rb_node) |
657 | return; | 1010 | return; |
658 | 1011 | ||
659 | frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb)); | 1012 | dbg_fragtree("killing\n"); |
660 | 1013 | ||
1014 | frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb)); | ||
661 | while(frag) { | 1015 | while(frag) { |
662 | if (frag->rb.rb_left) { | 1016 | if (frag->rb.rb_left) { |
663 | D2(printk(KERN_DEBUG "Going left from frag (%p) %d-%d\n", | ||
664 | frag, frag->ofs, frag->ofs+frag->size)); | ||
665 | frag = frag_left(frag); | 1017 | frag = frag_left(frag); |
666 | continue; | 1018 | continue; |
667 | } | 1019 | } |
668 | if (frag->rb.rb_right) { | 1020 | if (frag->rb.rb_right) { |
669 | D2(printk(KERN_DEBUG "Going right from frag (%p) %d-%d\n", | ||
670 | frag, frag->ofs, frag->ofs+frag->size)); | ||
671 | frag = frag_right(frag); | 1021 | frag = frag_right(frag); |
672 | continue; | 1022 | continue; |
673 | } | 1023 | } |
674 | 1024 | ||
675 | D2(printk(KERN_DEBUG "jffs2_kill_fragtree: frag at 0x%x-0x%x: node %p, frags %d--\n", | ||
676 | frag->ofs, frag->ofs+frag->size, frag->node, | ||
677 | frag->node?frag->node->frags:0)); | ||
678 | |||
679 | if (frag->node && !(--frag->node->frags)) { | 1025 | if (frag->node && !(--frag->node->frags)) { |
680 | /* Not a hole, and it's the final remaining frag | 1026 | /* Not a hole, and it's the final remaining frag |
681 | of this node. Free the node */ | 1027 | of this node. Free the node */ |
682 | if (c) | 1028 | if (c) |
683 | jffs2_mark_node_obsolete(c, frag->node->raw); | 1029 | jffs2_mark_node_obsolete(c, frag->node->raw); |
684 | 1030 | ||
685 | jffs2_free_full_dnode(frag->node); | 1031 | jffs2_free_full_dnode(frag->node); |
686 | } | 1032 | } |
687 | parent = frag_parent(frag); | 1033 | parent = frag_parent(frag); |
688 | if (parent) { | 1034 | if (parent) { |
689 | if (frag_left(parent) == frag) | 1035 | if (frag_left(parent) == frag) |
690 | parent->rb.rb_left = NULL; | 1036 | parent->rb.rb_left = NULL; |
691 | else | 1037 | else |
692 | parent->rb.rb_right = NULL; | 1038 | parent->rb.rb_right = NULL; |
693 | } | 1039 | } |
694 | 1040 | ||
@@ -698,29 +1044,3 @@ void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c) | |||
698 | cond_resched(); | 1044 | cond_resched(); |
699 | } | 1045 | } |
700 | } | 1046 | } |
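jffs2_kill_fragtree() tears the tree down without recursion: it dives to a leaf, frees it, detaches it from its parent and resumes from the parent. The same traversal on a plain BST with explicit parent pointers (a sketch; the kernel gets the parent from the rbtree node itself):

#include <stdlib.h>

struct kfrag {
        struct kfrag *left, *right, *parent;
};

/* Free a whole tree without recursion: always walk down to a leaf, free it,
   detach it from its parent, then continue from that parent. */
static void kill_tree(struct kfrag *root)
{
        struct kfrag *f = root;

        while (f) {
                if (f->left) {
                        f = f->left;
                        continue;
                }
                if (f->right) {
                        f = f->right;
                        continue;
                }
                /* 'f' is a leaf now */
                struct kfrag *parent = f->parent;

                if (parent) {
                        if (parent->left == f)
                                parent->left = NULL;
                        else
                                parent->right = NULL;
                }
                free(f);
                f = parent;
        }
}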
701 | |||
702 | void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base) | ||
703 | { | ||
704 | struct rb_node *parent = &base->rb; | ||
705 | struct rb_node **link = &parent; | ||
706 | |||
707 | D2(printk(KERN_DEBUG "jffs2_fragtree_insert(%p; %d-%d, %p)\n", newfrag, | ||
708 | newfrag->ofs, newfrag->ofs+newfrag->size, base)); | ||
709 | |||
710 | while (*link) { | ||
711 | parent = *link; | ||
712 | base = rb_entry(parent, struct jffs2_node_frag, rb); | ||
713 | |||
714 | D2(printk(KERN_DEBUG "fragtree_insert considering frag at 0x%x\n", base->ofs)); | ||
715 | if (newfrag->ofs > base->ofs) | ||
716 | link = &base->rb.rb_right; | ||
717 | else if (newfrag->ofs < base->ofs) | ||
718 | link = &base->rb.rb_left; | ||
719 | else { | ||
720 | printk(KERN_CRIT "Duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base); | ||
721 | BUG(); | ||
722 | } | ||
723 | } | ||
724 | |||
725 | rb_link_node(&newfrag->rb, &base->rb, link); | ||
726 | } | ||