 -rw-r--r--  fs/jffs2/nodelist.c  | 717
 -rw-r--r--  fs/jffs2/nodelist.h  |  10
 -rw-r--r--  fs/jffs2/readinode.c | 863
 3 files changed, 790 insertions(+), 800 deletions(-)
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 9d08d3388186..8373d312b195 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -7,7 +7,7 @@
  *
  * For licensing information, see the file 'LICENCE' in this directory.
  *
- * $Id: nodelist.c,v 1.100 2005/07/22 10:32:08 dedekind Exp $
+ * $Id: nodelist.c,v 1.101 2005/07/27 14:46:11 dedekind Exp $
  *
  */
 
@@ -55,515 +55,284 @@ void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new
55 | }); | 55 | }); |
56 | } | 56 | } |
57 | 57 | ||
58 | /* | 58 | void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this) |
59 | * Put a new tmp_dnode_info into the temporaty RB-tree, keeping the list in | ||
60 | * order of increasing version. | ||
61 | */ | ||
62 | static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list) | ||
63 | { | 59 | { |
64 | struct rb_node **p = &list->rb_node; | 60 | if (this->node) { |
65 | struct rb_node * parent = NULL; | 61 | this->node->frags--; |
66 | struct jffs2_tmp_dnode_info *this; | 62 | if (!this->node->frags) { |
67 | 63 | /* The node has no valid frags left. It's totally obsoleted */ | |
68 | while (*p) { | 64 | D2(printk(KERN_DEBUG "Marking old node @0x%08x (0x%04x-0x%04x) obsolete\n", |
69 | parent = *p; | 65 | ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size)); |
70 | this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb); | 66 | jffs2_mark_node_obsolete(c, this->node->raw); |
71 | 67 | jffs2_free_full_dnode(this->node); | |
72 | /* There may actually be a collision here, but it doesn't | 68 | } else { |
73 | actually matter. As long as the two nodes with the same | 69 | D2(printk(KERN_DEBUG "Marking old node @0x%08x (0x%04x-0x%04x) REF_NORMAL. frags is %d\n", |
74 | version are together, it's all fine. */ | 70 | ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size, |
75 | if (tn->version < this->version) | 71 | this->node->frags)); |
76 | p = &(*p)->rb_left; | 72 | mark_ref_normal(this->node->raw); |
77 | else | 73 | } |
78 | p = &(*p)->rb_right; | 74 | |
79 | } | 75 | } |
80 | 76 | jffs2_free_node_frag(this); | |
81 | rb_link_node(&tn->rb, parent, p); | ||
82 | rb_insert_color(&tn->rb, list); | ||
83 | } | 77 | } |
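
The right-hand (new) column above moves jffs2_obsolete_node_frag() into nodelist.c: each fragment holds a reference on its backing full dnode, and only when the last covering fragment is dropped is the on-flash node marked obsolete. A minimal standalone model of that reference-count rule, using invented stand-in structs rather than the kernel's jffs2_* types:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for jffs2_full_dnode / jffs2_node_frag; these are
 * not the kernel's types. node == NULL would model a hole fragment. */
struct dnode {
	int frags;			/* how many fragments still point here */
};

struct frag {
	struct dnode *node;
};

static void obsolete_frag(struct frag *f)
{
	if (f->node && --f->node->frags == 0) {
		/* last covering fragment gone: the flash node is obsolete */
		printf("dnode %p now obsolete\n", (void *)f->node);
		free(f->node);
	}
	free(f);
}

int main(void)
{
	struct dnode *d = malloc(sizeof(*d));
	struct frag *a = malloc(sizeof(*a));
	struct frag *b = malloc(sizeof(*b));

	d->frags = 2;			/* the dnode is covered by two fragments */
	a->node = d;
	b->node = d;

	obsolete_frag(a);		/* d survives, frags == 1 */
	obsolete_frag(b);		/* last reference: d is released too */
	return 0;
}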
84 | 78 | ||
85 | static void jffs2_free_tmp_dnode_info_list(struct rb_root *list) | 79 | static void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base) |
86 | { | 80 | { |
87 | struct rb_node *this; | 81 | struct rb_node *parent = &base->rb; |
88 | struct jffs2_tmp_dnode_info *tn; | 82 | struct rb_node **link = &parent; |
89 | 83 | ||
90 | this = list->rb_node; | 84 | D2(printk(KERN_DEBUG "jffs2_fragtree_insert(%p; %d-%d, %p)\n", newfrag, |
85 | newfrag->ofs, newfrag->ofs+newfrag->size, base)); | ||
91 | 86 | ||
92 | /* Now at bottom of tree */ | 87 | while (*link) { |
93 | while (this) { | 88 | parent = *link; |
94 | if (this->rb_left) | 89 | base = rb_entry(parent, struct jffs2_node_frag, rb); |
95 | this = this->rb_left; | 90 | |
96 | else if (this->rb_right) | 91 | D2(printk(KERN_DEBUG "fragtree_insert considering frag at 0x%x\n", base->ofs)); |
97 | this = this->rb_right; | 92 | if (newfrag->ofs > base->ofs) |
93 | link = &base->rb.rb_right; | ||
94 | else if (newfrag->ofs < base->ofs) | ||
95 | link = &base->rb.rb_left; | ||
98 | else { | 96 | else { |
99 | tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb); | 97 | printk(KERN_CRIT "Duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base); |
100 | jffs2_free_full_dnode(tn->fn); | 98 | BUG(); |
101 | jffs2_free_tmp_dnode_info(tn); | ||
102 | |||
103 | this = this->rb_parent; | ||
104 | if (!this) | ||
105 | break; | ||
106 | |||
107 | if (this->rb_left == &tn->rb) | ||
108 | this->rb_left = NULL; | ||
109 | else if (this->rb_right == &tn->rb) | ||
110 | this->rb_right = NULL; | ||
111 | else BUG(); | ||
112 | } | 99 | } |
113 | } | 100 | } |
114 | list->rb_node = NULL; | 101 | |
102 | rb_link_node(&newfrag->rb, &base->rb, link); | ||
115 | } | 103 | } |
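
jffs2_fragtree_insert(), re-added above as a static helper, is the usual rb-tree descend-and-link pattern keyed on fragment offset: walk down from a known parent, then rb_link_node() attaches the node and the caller's rb_insert_color() rebalances. A compile-and-run sketch of just the descend-and-link part, written as a plain unbalanced BST with made-up names (the recolouring step is deliberately omitted):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct tnode {
	uint32_t ofs;
	struct tnode *left, *right;
};

static void frag_insert(struct tnode **link, struct tnode *new)
{
	while (*link) {
		struct tnode *base = *link;

		if (new->ofs > base->ofs)
			link = &base->right;
		else if (new->ofs < base->ofs)
			link = &base->left;
		else {
			fprintf(stderr, "duplicate frag at %#x\n", (unsigned)new->ofs);
			abort();	/* the kernel version BUG()s here */
		}
	}
	*link = new;			/* corresponds to rb_link_node() */
}

int main(void)
{
	struct tnode n[3] = { { 4096, NULL, NULL }, { 0, NULL, NULL }, { 8192, NULL, NULL } };
	struct tnode *root = NULL;
	int i;

	for (i = 0; i < 3; i++)
		frag_insert(&root, &n[i]);
	printf("root frag at %#x\n", (unsigned)root->ofs);	/* 0x1000 */
	return 0;
}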
116 | 104 | ||
117 | static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd) | 105 | /* Doesn't set inode->i_size */ |
106 | static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *list, struct jffs2_node_frag *newfrag) | ||
118 | { | 107 | { |
119 | struct jffs2_full_dirent *next; | 108 | struct jffs2_node_frag *this; |
109 | uint32_t lastend; | ||
120 | 110 | ||
121 | while (fd) { | 111 | /* Skip all the nodes which are completed before this one starts */ |
122 | next = fd->next; | 112 | this = jffs2_lookup_node_frag(list, newfrag->node->ofs); |
123 | jffs2_free_full_dirent(fd); | ||
124 | fd = next; | ||
125 | } | ||
126 | } | ||
127 | 113 | ||
128 | /* Returns first valid node after 'ref'. May return 'ref' */ | 114 | if (this) { |
129 | static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref) | 115 | D2(printk(KERN_DEBUG "j_a_f_d_t_f: Lookup gave frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", |
130 | { | 116 | this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this)); |
131 | while (ref && ref->next_in_ino) { | 117 | lastend = this->ofs + this->size; |
132 | if (!ref_obsolete(ref)) | 118 | } else { |
133 | return ref; | 119 | D2(printk(KERN_DEBUG "j_a_f_d_t_f: Lookup gave no frag\n")); |
134 | D1(printk(KERN_DEBUG "node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref))); | 120 | lastend = 0; |
135 | ref = ref->next_in_ino; | ||
136 | } | 121 | } |
137 | return NULL; | 122 | |
138 | } | 123 | /* See if we ran off the end of the list */ |
124 | if (lastend <= newfrag->ofs) { | ||
125 | /* We did */ | ||
126 | |||
127 | /* Check if 'this' node was on the same page as the new node. | ||
128 | If so, both 'this' and the new node get marked REF_NORMAL so | ||
129 | the GC can take a look. | ||
130 | */ | ||
131 | if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) { | ||
132 | if (this->node) | ||
133 | mark_ref_normal(this->node->raw); | ||
134 | mark_ref_normal(newfrag->node->raw); | ||
135 | } | ||
139 | 136 | ||
140 | /* | 137 | if (lastend < newfrag->node->ofs) { |
141 | * Helper function for jffs2_get_inode_nodes(). | 138 | /* ... and we need to put a hole in before the new node */ |
142 | * It is called every time an directory entry node is found. | 139 | struct jffs2_node_frag *holefrag = jffs2_alloc_node_frag(); |
143 | * | 140 | if (!holefrag) { |
144 | * Returns: 0 on succes; | 141 | jffs2_free_node_frag(newfrag); |
145 | * 1 if the node should be marked obsolete; | 142 | return -ENOMEM; |
146 | * negative error code on failure. | 143 | } |
147 | */ | 144 | holefrag->ofs = lastend; |
148 | static inline int | 145 | holefrag->size = newfrag->node->ofs - lastend; |
149 | read_direntry(struct jffs2_sb_info *c, | 146 | holefrag->node = NULL; |
150 | struct jffs2_raw_node_ref *ref, | 147 | if (this) { |
151 | struct jffs2_raw_dirent *rd, | 148 | /* By definition, the 'this' node has no right-hand child, |
152 | uint32_t read, | 149 | because there are no frags with offset greater than it. |
153 | struct jffs2_full_dirent **fdp, | 150 | So that's where we want to put the hole */ |
154 | int32_t *latest_mctime, | 151 | D2(printk(KERN_DEBUG "Adding hole frag (%p) on right of node at (%p)\n", holefrag, this)); |
155 | uint32_t *mctime_ver) | 152 | rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right); |
156 | { | 153 | } else { |
157 | struct jffs2_full_dirent *fd; | 154 | D2(printk(KERN_DEBUG "Adding hole frag (%p) at root of tree\n", holefrag)); |
158 | 155 | rb_link_node(&holefrag->rb, NULL, &list->rb_node); | |
159 | /* The direntry nodes are checked during the flash scanning */ | 156 | } |
160 | BUG_ON(ref_flags(ref) == REF_UNCHECKED); | 157 | rb_insert_color(&holefrag->rb, list); |
161 | /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ | 158 | this = holefrag; |
162 | BUG_ON(ref_obsolete(ref)); | 159 | } |
163 | 160 | if (this) { | |
164 | /* Sanity check */ | 161 | /* By definition, the 'this' node has no right-hand child, |
165 | if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) { | 162 | because there are no frags with offset greater than it. |
166 | printk(KERN_ERR "Error! Illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n", | 163 | So that's where we want to put new fragment */ |
167 | ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen)); | 164 | D2(printk(KERN_DEBUG "Adding new frag (%p) on right of node at (%p)\n", newfrag, this)); |
168 | return 1; | 165 | rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right); |
166 | } else { | ||
167 | D2(printk(KERN_DEBUG "Adding new frag (%p) at root of tree\n", newfrag)); | ||
168 | rb_link_node(&newfrag->rb, NULL, &list->rb_node); | ||
169 | } | ||
170 | rb_insert_color(&newfrag->rb, list); | ||
171 | return 0; | ||
169 | } | 172 | } |
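
In the append-past-the-end branch just above, a NULL-node "hole" fragment is created to cover the gap between the end of the last existing fragment (lastend) and the new node's offset. A tiny worked example of that bookkeeping, with purely illustrative offsets:

#include <assert.h>
#include <stdint.h>

/* Hole bookkeeping when a write lands past the current end of data.
 * Illustrative values: existing data ends at 4096, the new node starts
 * at 8192. */
int main(void)
{
	uint32_t lastend = 4096;		/* end of the last existing frag */
	uint32_t new_ofs = 8192;

	if (lastend < new_ofs) {
		uint32_t hole_ofs  = lastend;		/* holefrag->ofs  */
		uint32_t hole_size = new_ofs - lastend;	/* holefrag->size */

		/* the hole butts up exactly against the new fragment */
		assert(hole_ofs + hole_size == new_ofs);
		assert(hole_size == 4096);
	}
	return 0;
}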
170 | |||
171 | fd = jffs2_alloc_full_dirent(rd->nsize + 1); | ||
172 | if (unlikely(!fd)) | ||
173 | return -ENOMEM; | ||
174 | |||
175 | fd->raw = ref; | ||
176 | fd->version = je32_to_cpu(rd->version); | ||
177 | fd->ino = je32_to_cpu(rd->ino); | ||
178 | fd->type = rd->type; | ||
179 | 173 | ||
180 | /* Pick out the mctime of the latest dirent */ | 174 | D2(printk(KERN_DEBUG "j_a_f_d_t_f: dealing with frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", |
181 | if(fd->version > *mctime_ver) { | 175 | this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this)); |
182 | *mctime_ver = fd->version; | ||
183 | *latest_mctime = je32_to_cpu(rd->mctime); | ||
184 | } | ||
185 | 176 | ||
186 | /* | 177 | /* OK. 'this' is pointing at the first frag that newfrag->ofs at least partially obsoletes, |
187 | * Copy as much of the name as possible from the raw | 178 | * - i.e. newfrag->ofs < this->ofs+this->size && newfrag->ofs >= this->ofs |
188 | * dirent we've already read from the flash. | ||
189 | */ | 179 | */ |
190 | if (read > sizeof(*rd)) | 180 | if (newfrag->ofs > this->ofs) { |
191 | memcpy(&fd->name[0], &rd->name[0], | 181 | /* This node isn't completely obsoleted. The start of it remains valid */ |
192 | min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) )); | 182 | |
193 | 183 | /* Mark the new node and the partially covered node REF_NORMAL -- let | |
194 | /* Do we need to copy any more of the name directly from the flash? */ | 184 | the GC take a look at them */ |
195 | if (rd->nsize + sizeof(*rd) > read) { | 185 | mark_ref_normal(newfrag->node->raw); |
196 | /* FIXME: point() */ | 186 | if (this->node) |
197 | int err; | 187 | mark_ref_normal(this->node->raw); |
198 | int already = read - sizeof(*rd); | 188 | |
189 | if (this->ofs + this->size > newfrag->ofs + newfrag->size) { | ||
190 | /* The new node splits 'this' frag into two */ | ||
191 | struct jffs2_node_frag *newfrag2 = jffs2_alloc_node_frag(); | ||
192 | if (!newfrag2) { | ||
193 | jffs2_free_node_frag(newfrag); | ||
194 | return -ENOMEM; | ||
195 | } | ||
196 | D2(printk(KERN_DEBUG "split old frag 0x%04x-0x%04x -->", this->ofs, this->ofs+this->size); | ||
197 | if (this->node) | ||
198 | printk("phys 0x%08x\n", ref_offset(this->node->raw)); | ||
199 | else | ||
200 | printk("hole\n"); | ||
201 | ) | ||
202 | |||
203 | /* New second frag pointing to this's node */ | ||
204 | newfrag2->ofs = newfrag->ofs + newfrag->size; | ||
205 | newfrag2->size = (this->ofs+this->size) - newfrag2->ofs; | ||
206 | newfrag2->node = this->node; | ||
207 | if (this->node) | ||
208 | this->node->frags++; | ||
209 | |||
210 | /* Adjust size of original 'this' */ | ||
211 | this->size = newfrag->ofs - this->ofs; | ||
212 | |||
213 | /* Now, we know there's no node with offset | ||
214 | greater than this->ofs but smaller than | ||
215 | newfrag2->ofs or newfrag->ofs, for obvious | ||
216 | reasons. So we can do a tree insert from | ||
217 | 'this' to insert newfrag, and a tree insert | ||
218 | from newfrag to insert newfrag2. */ | ||
219 | jffs2_fragtree_insert(newfrag, this); | ||
220 | rb_insert_color(&newfrag->rb, list); | ||
199 | 221 | ||
200 | err = jffs2_flash_read(c, (ref_offset(ref)) + read, | 222 | jffs2_fragtree_insert(newfrag2, newfrag); |
201 | rd->nsize - already, &read, &fd->name[already]); | 223 | rb_insert_color(&newfrag2->rb, list); |
202 | if (unlikely(read != rd->nsize - already) && likely(!err)) | ||
203 | return -EIO; | ||
204 | 224 | ||
205 | if (unlikely(err)) { | 225 | return 0; |
206 | printk(KERN_WARNING "Read remainder of name: error %d\n", err); | ||
207 | jffs2_free_full_dirent(fd); | ||
208 | return -EIO; | ||
209 | } | 226 | } |
210 | } | 227 | /* New node just reduces 'this' frag in size, doesn't split it */ |
211 | 228 | this->size = newfrag->ofs - this->ofs; | |
212 | fd->nhash = full_name_hash(fd->name, rd->nsize); | ||
213 | fd->next = NULL; | ||
214 | fd->name[rd->nsize] = '\0'; | ||
215 | |||
216 | /* | ||
217 | * Wheee. We now have a complete jffs2_full_dirent structure, with | ||
218 | * the name in it and everything. Link it into the list | ||
219 | */ | ||
220 | D1(printk(KERN_DEBUG "Adding fd \"%s\", ino #%u\n", fd->name, fd->ino)); | ||
221 | |||
222 | jffs2_add_fd_to_list(c, fd, fdp); | ||
223 | 229 | ||
224 | return 0; | 230 | /* Again, we know it lives down here in the tree */ |
225 | } | 231 | jffs2_fragtree_insert(newfrag, this); |
226 | 232 | rb_insert_color(&newfrag->rb, list); | |
227 | /* | 233 | } else { |
228 | * Helper function for jffs2_get_inode_nodes(). | 234 | /* New frag starts at the same point as 'this' used to. Replace |
229 | * It is called every time an inode node is found. | 235 | it in the tree without doing a delete and insertion */ |
230 | * | 236 | D2(printk(KERN_DEBUG "Inserting newfrag (*%p),%d-%d in before 'this' (*%p),%d-%d\n", |
231 | * Returns: 0 on succes; | 237 | newfrag, newfrag->ofs, newfrag->ofs+newfrag->size, |
232 | * 1 if the node should be marked obsolete; | 238 | this, this->ofs, this->ofs+this->size)); |
233 | * negative error code on failure. | ||
234 | */ | ||
235 | static inline int | ||
236 | read_dnode(struct jffs2_sb_info *c, | ||
237 | struct jffs2_raw_node_ref *ref, | ||
238 | struct jffs2_raw_inode *rd, | ||
239 | uint32_t read, | ||
240 | struct rb_root *tnp, | ||
241 | int32_t *latest_mctime, | ||
242 | uint32_t *mctime_ver) | ||
243 | { | ||
244 | struct jffs2_eraseblock *jeb; | ||
245 | struct jffs2_tmp_dnode_info *tn; | ||
246 | 239 | ||
247 | /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ | 240 | rb_replace_node(&this->rb, &newfrag->rb, list); |
248 | BUG_ON(ref_obsolete(ref)); | ||
249 | |||
250 | /* If we've never checked the CRCs on this node, check them now */ | ||
251 | if (ref_flags(ref) == REF_UNCHECKED) { | ||
252 | uint32_t crc, len; | ||
253 | |||
254 | crc = crc32(0, rd, sizeof(*rd) - 8); | ||
255 | if (unlikely(crc != je32_to_cpu(rd->node_crc))) { | ||
256 | printk(KERN_WARNING "Header CRC failed on node at %#08x: read %#08x, calculated %#08x\n", | ||
257 | ref_offset(ref), je32_to_cpu(rd->node_crc), crc); | ||
258 | return 1; | ||
259 | } | ||
260 | 241 | ||
261 | /* Sanity checks */ | 242 | if (newfrag->ofs + newfrag->size >= this->ofs+this->size) { |
262 | if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) || | 243 | D2(printk(KERN_DEBUG "Obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size)); |
263 | unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) { | 244 | jffs2_obsolete_node_frag(c, this); |
264 | printk(KERN_WARNING "Inode corrupted at %#08x, totlen %d, #ino %d, version %d, " | ||
265 | "isize %d, csize %d, dsize %d \n", | ||
266 | ref_offset(ref), je32_to_cpu(rd->totlen), je32_to_cpu(rd->ino), | ||
267 | je32_to_cpu(rd->version), je32_to_cpu(rd->isize), | ||
268 | je32_to_cpu(rd->csize), je32_to_cpu(rd->dsize)); | ||
269 | return 1; | ||
270 | } | ||
271 | |||
272 | if (rd->compr != JFFS2_COMPR_ZERO && je32_to_cpu(rd->csize)) { | ||
273 | unsigned char *buf = NULL; | ||
274 | uint32_t pointed = 0; | ||
275 | int err; | ||
276 | #ifndef __ECOS | ||
277 | if (c->mtd->point) { | ||
278 | err = c->mtd->point (c->mtd, ref_offset(ref) + sizeof(*rd), je32_to_cpu(rd->csize), | ||
279 | &read, &buf); | ||
280 | if (unlikely(read < je32_to_cpu(rd->csize)) && likely(!err)) { | ||
281 | D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", read)); | ||
282 | c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(*rd), | ||
283 | je32_to_cpu(rd->csize)); | ||
284 | } else if (unlikely(err)){ | ||
285 | D1(printk(KERN_DEBUG "MTD point failed %d\n", err)); | ||
286 | } else | ||
287 | pointed = 1; /* succefully pointed to device */ | ||
288 | } | ||
289 | #endif | ||
290 | if(!pointed){ | ||
291 | buf = kmalloc(je32_to_cpu(rd->csize), GFP_KERNEL); | ||
292 | if (!buf) | ||
293 | return -ENOMEM; | ||
294 | |||
295 | err = jffs2_flash_read(c, ref_offset(ref) + sizeof(*rd), je32_to_cpu(rd->csize), | ||
296 | &read, buf); | ||
297 | if (unlikely(read != je32_to_cpu(rd->csize)) && likely(!err)) | ||
298 | err = -EIO; | ||
299 | if (err) { | ||
300 | kfree(buf); | ||
301 | return err; | ||
302 | } | ||
303 | } | ||
304 | crc = crc32(0, buf, je32_to_cpu(rd->csize)); | ||
305 | if(!pointed) | ||
306 | kfree(buf); | ||
307 | #ifndef __ECOS | ||
308 | else | ||
309 | c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(*rd), je32_to_cpu(rd->csize)); | ||
310 | #endif | ||
311 | |||
312 | if (crc != je32_to_cpu(rd->data_crc)) { | ||
313 | printk(KERN_NOTICE "Data CRC failed on node at %#08x: read %#08x, calculated %#08x\n", | ||
314 | ref_offset(ref), je32_to_cpu(rd->data_crc), crc); | ||
315 | return 1; | ||
316 | } | ||
317 | |||
318 | } | ||
319 | |||
320 | /* Mark the node as having been checked and fix the accounting accordingly */ | ||
321 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | ||
322 | len = ref_totlen(c, jeb, ref); | ||
323 | |||
324 | spin_lock(&c->erase_completion_lock); | ||
325 | jeb->used_size += len; | ||
326 | jeb->unchecked_size -= len; | ||
327 | c->used_size += len; | ||
328 | c->unchecked_size -= len; | ||
329 | |||
330 | /* If node covers at least a whole page, or if it starts at the | ||
331 | beginning of a page and runs to the end of the file, or if | ||
332 | it's a hole node, mark it REF_PRISTINE, else REF_NORMAL. | ||
333 | |||
334 | If it's actually overlapped, it'll get made NORMAL (or OBSOLETE) | ||
335 | when the overlapping node(s) get added to the tree anyway. | ||
336 | */ | ||
337 | if ((je32_to_cpu(rd->dsize) >= PAGE_CACHE_SIZE) || | ||
338 | ( ((je32_to_cpu(rd->offset) & (PAGE_CACHE_SIZE-1))==0) && | ||
339 | (je32_to_cpu(rd->dsize) + je32_to_cpu(rd->offset) == je32_to_cpu(rd->isize)))) { | ||
340 | D1(printk(KERN_DEBUG "Marking node at %#08x REF_PRISTINE\n", ref_offset(ref))); | ||
341 | ref->flash_offset = ref_offset(ref) | REF_PRISTINE; | ||
342 | } else { | 245 | } else { |
343 | D1(printk(KERN_DEBUG "Marking node at %#08x REF_NORMAL\n", ref_offset(ref))); | 246 | this->ofs += newfrag->size; |
344 | ref->flash_offset = ref_offset(ref) | REF_NORMAL; | 247 | this->size -= newfrag->size; |
248 | |||
249 | jffs2_fragtree_insert(this, newfrag); | ||
250 | rb_insert_color(&this->rb, list); | ||
251 | return 0; | ||
345 | } | 252 | } |
346 | spin_unlock(&c->erase_completion_lock); | ||
347 | } | 253 | } |
348 | 254 | /* OK, now we have newfrag added in the correct place in the tree, but | |
349 | tn = jffs2_alloc_tmp_dnode_info(); | 255 | frag_next(newfrag) may be a fragment which is overlapped by it |
350 | if (!tn) { | 256 | */ |
351 | D1(printk(KERN_DEBUG "alloc tn failed\n")); | 257 | while ((this = frag_next(newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) { |
352 | return -ENOMEM; | 258 | /* 'this' frag is obsoleted completely. */ |
259 | D2(printk(KERN_DEBUG "Obsoleting node frag %p (%x-%x) and removing from tree\n", this, this->ofs, this->ofs+this->size)); | ||
260 | rb_erase(&this->rb, list); | ||
261 | jffs2_obsolete_node_frag(c, this); | ||
353 | } | 262 | } |
263 | /* Now we're pointing at the first frag which isn't totally obsoleted by | ||
264 | the new frag */ | ||
354 | 265 | ||
355 | tn->fn = jffs2_alloc_full_dnode(); | 266 | if (!this || newfrag->ofs + newfrag->size == this->ofs) { |
356 | if (!tn->fn) { | 267 | return 0; |
357 | D1(printk(KERN_DEBUG "alloc fn failed\n")); | ||
358 | jffs2_free_tmp_dnode_info(tn); | ||
359 | return -ENOMEM; | ||
360 | } | 268 | } |
361 | 269 | /* Still some overlap but we don't need to move it in the tree */ | |
362 | tn->version = je32_to_cpu(rd->version); | 270 | this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size); |
363 | tn->fn->ofs = je32_to_cpu(rd->offset); | 271 | this->ofs = newfrag->ofs + newfrag->size; |
364 | tn->fn->raw = ref; | 272 | |
365 | 273 | /* And mark them REF_NORMAL so the GC takes a look at them */ | |
366 | /* There was a bug where we wrote hole nodes out with | 274 | if (this->node) |
367 | csize/dsize swapped. Deal with it */ | 275 | mark_ref_normal(this->node->raw); |
368 | if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && je32_to_cpu(rd->csize)) | 276 | mark_ref_normal(newfrag->node->raw); |
369 | tn->fn->size = je32_to_cpu(rd->csize); | ||
370 | else // normal case... | ||
371 | tn->fn->size = je32_to_cpu(rd->dsize); | ||
372 | |||
373 | D1(printk(KERN_DEBUG "dnode @%08x: ver %u, offset %#04x, dsize %#04x\n", | ||
374 | ref_offset(ref), je32_to_cpu(rd->version), | ||
375 | je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize))); | ||
376 | |||
377 | jffs2_add_tn_to_tree(tn, tnp); | ||
378 | 277 | ||
379 | return 0; | 278 | return 0; |
380 | } | 279 | } |
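
The overlap handling in jffs2_add_frag_to_fragtree() (new column) either trims the existing fragment or, when the new write lands strictly inside it, splits it into a left remainder ('this', shrunk) and a right remainder (newfrag2). A standalone sketch of the split-case interval arithmetic, with a simplified stand-in struct and made-up offsets:

#include <stdio.h>
#include <stdint.h>

struct ival {
	uint32_t ofs, size;
};

/* Assumes new->ofs > old->ofs and the new end < the old end (the split case). */
static void split(const struct ival *old, const struct ival *new,
		  struct ival *left, struct ival *right)
{
	right->ofs  = new->ofs + new->size;			/* newfrag2->ofs  */
	right->size = (old->ofs + old->size) - right->ofs;	/* newfrag2->size */
	left->ofs   = old->ofs;					/* 'this' keeps its start */
	left->size  = new->ofs - old->ofs;			/* ...but shrinks  */
}

int main(void)
{
	struct ival old = { 0, 4096 };		/* existing frag 0x0000-0x1000 */
	struct ival new = { 1024, 512 };	/* new write     0x0400-0x0600 */
	struct ival l, r;

	split(&old, &new, &l, &r);
	printf("left  %u-%u\n", (unsigned)l.ofs, (unsigned)(l.ofs + l.size));		/* 0-1024    */
	printf("new   %u-%u\n", (unsigned)new.ofs, (unsigned)(new.ofs + new.size));	/* 1024-1536 */
	printf("right %u-%u\n", (unsigned)r.ofs, (unsigned)(r.ofs + r.size));		/* 1536-4096 */
	return 0;
}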
381 | 280 | ||
382 | /* | 281 | /* Given an inode, probably with existing list of fragments, add the new node |
383 | * Helper function for jffs2_get_inode_nodes(). | 282 | * to the fragment list. |
384 | * It is called every time an unknown node is found. | ||
385 | * | ||
386 | * Returns: 0 on succes; | ||
387 | * 1 if the node should be marked obsolete; | ||
388 | * negative error code on failure. | ||
389 | */ | 283 | */ |
390 | static inline int | 284 | int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn) |
391 | read_unknown(struct jffs2_sb_info *c, | ||
392 | struct jffs2_raw_node_ref *ref, | ||
393 | struct jffs2_unknown_node *un, | ||
394 | uint32_t read) | ||
395 | { | 285 | { |
396 | /* We don't mark unknown nodes as REF_UNCHECKED */ | 286 | int ret; |
397 | BUG_ON(ref_flags(ref) == REF_UNCHECKED); | 287 | struct jffs2_node_frag *newfrag; |
398 | |||
399 | un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype)); | ||
400 | 288 | ||
401 | if (crc32(0, un, sizeof(struct jffs2_unknown_node) - 4) != je32_to_cpu(un->hdr_crc)) { | 289 | D1(printk(KERN_DEBUG "jffs2_add_full_dnode_to_inode(ino #%u, f %p, fn %p)\n", f->inocache->ino, f, fn)); |
402 | 290 | ||
403 | /* Hmmm. This should have been caught at scan time. */ | 291 | if (unlikely(!fn->size)) |
404 | printk(KERN_WARNING "Warning! Node header CRC failed at %#08x. " | 292 | return 0; |
405 | "But it must have been OK earlier.\n", ref_offset(ref)); | ||
406 | D1(printk(KERN_DEBUG "Node was: { %#04x, %#04x, %#08x, %#08x }\n", | ||
407 | je16_to_cpu(un->magic), je16_to_cpu(un->nodetype), | ||
408 | je32_to_cpu(un->totlen), je32_to_cpu(un->hdr_crc))); | ||
409 | return 1; | ||
410 | } else { | ||
411 | switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) { | ||
412 | 293 | ||
413 | case JFFS2_FEATURE_INCOMPAT: | 294 | newfrag = jffs2_alloc_node_frag(); |
414 | printk(KERN_NOTICE "Unknown INCOMPAT nodetype %#04X at %#08x\n", | 295 | if (unlikely(!newfrag)) |
415 | je16_to_cpu(un->nodetype), ref_offset(ref)); | 296 | return -ENOMEM; |
416 | /* EEP */ | ||
417 | BUG(); | ||
418 | break; | ||
419 | |||
420 | case JFFS2_FEATURE_ROCOMPAT: | ||
421 | printk(KERN_NOTICE "Unknown ROCOMPAT nodetype %#04X at %#08x\n", | ||
422 | je16_to_cpu(un->nodetype), ref_offset(ref)); | ||
423 | BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO)); | ||
424 | break; | ||
425 | |||
426 | case JFFS2_FEATURE_RWCOMPAT_COPY: | ||
427 | printk(KERN_NOTICE "Unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n", | ||
428 | je16_to_cpu(un->nodetype), ref_offset(ref)); | ||
429 | break; | ||
430 | |||
431 | case JFFS2_FEATURE_RWCOMPAT_DELETE: | ||
432 | printk(KERN_NOTICE "Unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n", | ||
433 | je16_to_cpu(un->nodetype), ref_offset(ref)); | ||
434 | return 1; | ||
435 | } | ||
436 | } | ||
437 | |||
438 | return 0; | ||
439 | } | ||
440 | |||
441 | /* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated | ||
442 | with this ino, returning the former in order of version */ | ||
443 | 297 | ||
444 | int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | 298 | D2(printk(KERN_DEBUG "adding node %04x-%04x @0x%08x on flash, newfrag *%p\n", |
445 | struct rb_root *tnp, struct jffs2_full_dirent **fdp, | 299 | fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag)); |
446 | uint32_t *highest_version, uint32_t *latest_mctime, | ||
447 | uint32_t *mctime_ver) | ||
448 | { | ||
449 | struct jffs2_raw_node_ref *ref, *valid_ref; | ||
450 | struct rb_root ret_tn = RB_ROOT; | ||
451 | struct jffs2_full_dirent *ret_fd = NULL; | ||
452 | union jffs2_node_union node; | ||
453 | size_t retlen; | ||
454 | int err; | ||
455 | |||
456 | *mctime_ver = 0; | ||
457 | 300 | ||
458 | D1(printk(KERN_DEBUG "jffs2_get_inode_nodes(): ino #%u\n", f->inocache->ino)); | 301 | newfrag->ofs = fn->ofs; |
459 | 302 | newfrag->size = fn->size; | |
460 | spin_lock(&c->erase_completion_lock); | 303 | newfrag->node = fn; |
461 | 304 | newfrag->node->frags = 1; | |
462 | valid_ref = jffs2_first_valid_node(f->inocache->nodes); | 305 | |
463 | 306 | ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag); | |
464 | if (!valid_ref && (f->inocache->ino != 1)) | 307 | if (unlikely(ret)) |
465 | printk(KERN_WARNING "Eep. No valid nodes for ino #%u\n", f->inocache->ino); | 308 | return ret; |
466 | 309 | ||
467 | while (valid_ref) { | 310 | /* If we now share a page with other nodes, mark either previous |
468 | /* We can hold a pointer to a non-obsolete node without the spinlock, | 311 | or next node REF_NORMAL, as appropriate. */ |
469 | but _obsolete_ nodes may disappear at any time, if the block | 312 | if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) { |
470 | they're in gets erased. So if we mark 'ref' obsolete while we're | 313 | struct jffs2_node_frag *prev = frag_prev(newfrag); |
471 | not holding the lock, it can go away immediately. For that reason, | 314 | |
472 | we find the next valid node first, before processing 'ref'. | 315 | mark_ref_normal(fn->raw); |
473 | */ | 316 | /* If we don't start at zero there's _always_ a previous */ |
474 | ref = valid_ref; | 317 | if (prev->node) |
475 | valid_ref = jffs2_first_valid_node(ref->next_in_ino); | 318 | mark_ref_normal(prev->node->raw); |
476 | spin_unlock(&c->erase_completion_lock); | 319 | } |
477 | |||
478 | cond_resched(); | ||
479 | |||
480 | /* FIXME: point() */ | ||
481 | err = jffs2_flash_read(c, (ref_offset(ref)), | ||
482 | min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node)), | ||
483 | &retlen, (void *)&node); | ||
484 | if (err) { | ||
485 | printk(KERN_WARNING "error %d reading node at 0x%08x in get_inode_nodes()\n", err, ref_offset(ref)); | ||
486 | goto free_out; | ||
487 | } | ||
488 | |||
489 | switch (je16_to_cpu(node.u.nodetype)) { | ||
490 | |||
491 | case JFFS2_NODETYPE_DIRENT: | ||
492 | D1(printk(KERN_DEBUG "Node at %08x (%d) is a dirent node\n", ref_offset(ref), ref_flags(ref))); | ||
493 | |||
494 | if (retlen < sizeof(node.d)) { | ||
495 | printk(KERN_WARNING "Warning! Short read dirent at %#08x\n", ref_offset(ref)); | ||
496 | err = -EIO; | ||
497 | goto free_out; | ||
498 | } | ||
499 | |||
500 | err = read_direntry(c, ref, &node.d, retlen, &ret_fd, latest_mctime, mctime_ver); | ||
501 | if (err == 1) { | ||
502 | jffs2_mark_node_obsolete(c, ref); | ||
503 | break; | ||
504 | } else if (unlikely(err)) | ||
505 | goto free_out; | ||
506 | |||
507 | if (je32_to_cpu(node.d.version) > *highest_version) | ||
508 | *highest_version = je32_to_cpu(node.d.version); | ||
509 | |||
510 | break; | ||
511 | |||
512 | case JFFS2_NODETYPE_INODE: | ||
513 | D1(printk(KERN_DEBUG "Node at %08x (%d) is a data node\n", ref_offset(ref), ref_flags(ref))); | ||
514 | |||
515 | if (retlen < sizeof(node.i)) { | ||
516 | printk(KERN_WARNING "Warning! Short read dnode at %#08x\n", ref_offset(ref)); | ||
517 | err = -EIO; | ||
518 | goto free_out; | ||
519 | } | ||
520 | |||
521 | err = read_dnode(c, ref, &node.i, retlen, &ret_tn, latest_mctime, mctime_ver); | ||
522 | if (err == 1) { | ||
523 | jffs2_mark_node_obsolete(c, ref); | ||
524 | break; | ||
525 | } else if (unlikely(err)) | ||
526 | goto free_out; | ||
527 | |||
528 | if (je32_to_cpu(node.i.version) > *highest_version) | ||
529 | *highest_version = je32_to_cpu(node.i.version); | ||
530 | |||
531 | D1(printk(KERN_DEBUG "version %d, highest_version now %d\n", | ||
532 | je32_to_cpu(node.i.version), *highest_version)); | ||
533 | |||
534 | break; | ||
535 | |||
536 | default: | ||
537 | /* Check we've managed to read at least the common node header */ | ||
538 | if (retlen < sizeof(struct jffs2_unknown_node)) { | ||
539 | printk(KERN_WARNING "Warning! Short read unknown node at %#08x\n", | ||
540 | ref_offset(ref)); | ||
541 | return -EIO; | ||
542 | } | ||
543 | |||
544 | err = read_unknown(c, ref, &node.u, retlen); | ||
545 | if (err == 1) { | ||
546 | jffs2_mark_node_obsolete(c, ref); | ||
547 | break; | ||
548 | } else if (unlikely(err)) | ||
549 | goto free_out; | ||
550 | 320 | ||
321 | if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) { | ||
322 | struct jffs2_node_frag *next = frag_next(newfrag); | ||
323 | |||
324 | if (next) { | ||
325 | mark_ref_normal(fn->raw); | ||
326 | if (next->node) | ||
327 | mark_ref_normal(next->node->raw); | ||
551 | } | 328 | } |
552 | spin_lock(&c->erase_completion_lock); | ||
553 | |||
554 | } | 329 | } |
555 | spin_unlock(&c->erase_completion_lock); | 330 | jffs2_dbg_fragtree_paranoia_check_nolock(f); |
556 | *tnp = ret_tn; | 331 | jffs2_dbg_dump_fragtree_nolock(f); |
557 | *fdp = ret_fd; | ||
558 | |||
559 | return 0; | 332 | return 0; |
560 | |||
561 | free_out: | ||
562 | jffs2_free_tmp_dnode_info_list(&ret_tn); | ||
563 | jffs2_free_full_dirent_list(ret_fd); | ||
564 | return err; | ||
565 | } | 333 | } |
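
jffs2_add_full_dnode_to_inode() (new column above) downgrades nodes to REF_NORMAL when the new fragment does not start or end on a page boundary, because the GC may then want to merge it with the neighbour sharing that page. A small sketch of the two page-sharing tests, assuming a 4096-byte page in place of PAGE_CACHE_SIZE:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u			/* stands in for PAGE_CACHE_SIZE */

/* A frag shares its first page with earlier data if it does not start on a
 * page boundary, and shares its last page with later data if it does not
 * end on one. */
static int shares_page_before(uint32_t ofs)
{
	return (ofs & (PAGE_SIZE - 1)) != 0;
}

static int shares_page_after(uint32_t ofs, uint32_t size)
{
	return ((ofs + size) & (PAGE_SIZE - 1)) != 0;
}

int main(void)
{
	/* A 512-byte write at 6144: starts and ends mid-page. */
	printf("%d %d\n", shares_page_before(6144), shares_page_after(6144, 512));	/* 1 1 */
	/* A whole page written at 8192: touches nobody else's page. */
	printf("%d %d\n", shares_page_before(8192), shares_page_after(8192, 4096));	/* 0 0 */
	return 0;
}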
566 | 334 | ||
335 | |||
567 | void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state) | 336 | void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state) |
568 | { | 337 | { |
569 | spin_lock(&c->inocache_lock); | 338 | spin_lock(&c->inocache_lock); |
@@ -773,29 +542,3 @@ void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c)
 		cond_resched();
 	}
 }
-
-void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base)
-{
-	struct rb_node *parent = &base->rb;
-	struct rb_node **link = &parent;
-
-	D2(printk(KERN_DEBUG "jffs2_fragtree_insert(%p; %d-%d, %p)\n", newfrag,
-		  newfrag->ofs, newfrag->ofs+newfrag->size, base));
-
-	while (*link) {
-		parent = *link;
-		base = rb_entry(parent, struct jffs2_node_frag, rb);
-
-		D2(printk(KERN_DEBUG "fragtree_insert considering frag at 0x%x\n", base->ofs));
-		if (newfrag->ofs > base->ofs)
-			link = &base->rb.rb_right;
-		else if (newfrag->ofs < base->ofs)
-			link = &base->rb.rb_left;
-		else {
-			printk(KERN_CRIT "Duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base);
-			BUG();
-		}
-	}
-
-	rb_link_node(&newfrag->rb, &base->rb, link);
-}
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
index 0058e395641b..452fc81f391a 100644
--- a/fs/jffs2/nodelist.h
+++ b/fs/jffs2/nodelist.h
@@ -7,7 +7,7 @@
  *
  * For licensing information, see the file 'LICENCE' in this directory.
  *
- * $Id: nodelist.h,v 1.134 2005/07/24 15:29:56 dedekind Exp $
+ * $Id: nodelist.h,v 1.135 2005/07/27 14:46:11 dedekind Exp $
  *
  */
 
@@ -297,10 +297,6 @@ static inline struct jffs2_node_frag *frag_last(struct rb_root *root)
 
 /* nodelist.c */
 void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list);
-int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
-			  struct rb_root *tnp, struct jffs2_full_dirent **fdp,
-			  uint32_t *highest_version, uint32_t *latest_mctime,
-			  uint32_t *mctime_ver);
 void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state);
 struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino);
 void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new);
@@ -309,10 +305,11 @@ void jffs2_free_ino_caches(struct jffs2_sb_info *c);
 void jffs2_free_raw_node_refs(struct jffs2_sb_info *c);
 struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset);
 void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c_delete);
-void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base);
 struct rb_node *rb_next(struct rb_node *);
 struct rb_node *rb_prev(struct rb_node *);
 void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root);
+void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this);
+int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn);
 
 /* nodemgmt.c */
 int jffs2_thread_should_wake(struct jffs2_sb_info *c);
@@ -337,7 +334,6 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint
 
 /* readinode.c */
 void jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size);
-int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn);
 int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
 			uint32_t ino, struct jffs2_raw_inode *latest_node);
 int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 339ba46320fa..85a285b2a309 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -7,7 +7,7 @@
  *
  * For licensing information, see the file 'LICENCE' in this directory.
  *
- * $Id: readinode.c,v 1.130 2005/07/24 15:29:56 dedekind Exp $
+ * $Id: readinode.c,v 1.131 2005/07/27 14:46:11 dedekind Exp $
  *
  */
 
@@ -20,376 +20,537 @@
20 | #include <linux/compiler.h> | 20 | #include <linux/compiler.h> |
21 | #include "nodelist.h" | 21 | #include "nodelist.h" |
22 | 22 | ||
23 | static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *list, struct jffs2_node_frag *newfrag); | 23 | void jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size) |
24 | |||
25 | static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this) | ||
26 | { | 24 | { |
27 | if (this->node) { | 25 | struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size); |
28 | this->node->frags--; | 26 | |
29 | if (!this->node->frags) { | 27 | D1(printk(KERN_DEBUG "Truncating fraglist to 0x%08x bytes\n", size)); |
30 | /* The node has no valid frags left. It's totally obsoleted */ | 28 | |
31 | D2(printk(KERN_DEBUG "Marking old node @0x%08x (0x%04x-0x%04x) obsolete\n", | 29 | /* We know frag->ofs <= size. That's what lookup does for us */ |
32 | ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size)); | 30 | if (frag && frag->ofs != size) { |
33 | jffs2_mark_node_obsolete(c, this->node->raw); | 31 | if (frag->ofs+frag->size >= size) { |
34 | jffs2_free_full_dnode(this->node); | 32 | D1(printk(KERN_DEBUG "Truncating frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size)); |
35 | } else { | 33 | frag->size = size - frag->ofs; |
36 | D2(printk(KERN_DEBUG "Marking old node @0x%08x (0x%04x-0x%04x) REF_NORMAL. frags is %d\n", | ||
37 | ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size, | ||
38 | this->node->frags)); | ||
39 | mark_ref_normal(this->node->raw); | ||
40 | } | 34 | } |
41 | 35 | frag = frag_next(frag); | |
36 | } | ||
37 | while (frag && frag->ofs >= size) { | ||
38 | struct jffs2_node_frag *next = frag_next(frag); | ||
39 | |||
40 | D1(printk(KERN_DEBUG "Removing frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size)); | ||
41 | frag_erase(frag, list); | ||
42 | jffs2_obsolete_node_frag(c, frag); | ||
43 | frag = next; | ||
42 | } | 44 | } |
43 | jffs2_free_node_frag(this); | ||
44 | } | 45 | } |
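
jffs2_truncate_fragtree() (new column above) trims the fragment that straddles the new size and discards every fragment at or beyond it. A standalone model of the same bookkeeping over a sorted array instead of the kernel's rb-tree, with invented names:

#include <stdio.h>
#include <stdint.h>

struct frag {
	uint32_t ofs, size;
};

static int truncate_frags(struct frag *f, int n, uint32_t size)
{
	int i, kept = 0;

	for (i = 0; i < n; i++) {
		if (f[i].ofs >= size)
			continue;			/* wholly past new EOF: drop */
		if (f[i].ofs + f[i].size > size)
			f[i].size = size - f[i].ofs;	/* straddles new EOF: trim   */
		f[kept++] = f[i];
	}
	return kept;					/* fragments remaining       */
}

int main(void)
{
	struct frag f[] = { { 0, 4096 }, { 4096, 4096 }, { 8192, 4096 } };
	int n = truncate_frags(f, 3, 6000);

	printf("%d frags, data now ends at %u\n", n,
	       (unsigned)(f[n - 1].ofs + f[n - 1].size));	/* 2 frags, 6000 */
	return 0;
}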
45 | 46 | ||
46 | /* Given an inode, probably with existing list of fragments, add the new node | 47 | /* |
47 | * to the fragment list. | 48 | * Put a new tmp_dnode_info into the temporaty RB-tree, keeping the list in |
49 | * order of increasing version. | ||
48 | */ | 50 | */ |
49 | int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn) | 51 | static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list) |
50 | { | 52 | { |
51 | int ret; | 53 | struct rb_node **p = &list->rb_node; |
52 | struct jffs2_node_frag *newfrag; | 54 | struct rb_node * parent = NULL; |
55 | struct jffs2_tmp_dnode_info *this; | ||
56 | |||
57 | while (*p) { | ||
58 | parent = *p; | ||
59 | this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb); | ||
60 | |||
61 | /* There may actually be a collision here, but it doesn't | ||
62 | actually matter. As long as the two nodes with the same | ||
63 | version are together, it's all fine. */ | ||
64 | if (tn->version < this->version) | ||
65 | p = &(*p)->rb_left; | ||
66 | else | ||
67 | p = &(*p)->rb_right; | ||
68 | } | ||
69 | |||
70 | rb_link_node(&tn->rb, parent, p); | ||
71 | rb_insert_color(&tn->rb, list); | ||
72 | } | ||
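
jffs2_add_tn_to_tree(), moved here from nodelist.c, keeps tmp_dnode_info entries ordered by version and deliberately sends equal versions to the right so duplicates stay adjacent. A runnable sketch of that ordering rule as a plain unbalanced BST with stand-in types:

#include <stdio.h>
#include <stdint.h>

struct tn {
	uint32_t version;
	struct tn *l, *r;
};

static void add_tn(struct tn **p, struct tn *new)
{
	/* strictly smaller goes left; equal or greater goes right */
	while (*p)
		p = (new->version < (*p)->version) ? &(*p)->l : &(*p)->r;
	*p = new;
}

static void walk(const struct tn *t)	/* in-order: ascending version */
{
	if (!t)
		return;
	walk(t->l);
	printf("%u ", (unsigned)t->version);
	walk(t->r);
}

int main(void)
{
	struct tn n[4] = { { 5 }, { 2 }, { 5 }, { 9 } };
	struct tn *root = NULL;
	int i;

	for (i = 0; i < 4; i++)
		add_tn(&root, &n[i]);
	walk(root);			/* prints: 2 5 5 9 */
	printf("\n");
	return 0;
}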
53 | 73 | ||
54 | D1(printk(KERN_DEBUG "jffs2_add_full_dnode_to_inode(ino #%u, f %p, fn %p)\n", f->inocache->ino, f, fn)); | 74 | static void jffs2_free_tmp_dnode_info_list(struct rb_root *list) |
75 | { | ||
76 | struct rb_node *this; | ||
77 | struct jffs2_tmp_dnode_info *tn; | ||
78 | |||
79 | this = list->rb_node; | ||
80 | |||
81 | /* Now at bottom of tree */ | ||
82 | while (this) { | ||
83 | if (this->rb_left) | ||
84 | this = this->rb_left; | ||
85 | else if (this->rb_right) | ||
86 | this = this->rb_right; | ||
87 | else { | ||
88 | tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb); | ||
89 | jffs2_free_full_dnode(tn->fn); | ||
90 | jffs2_free_tmp_dnode_info(tn); | ||
91 | |||
92 | this = this->rb_parent; | ||
93 | if (!this) | ||
94 | break; | ||
95 | |||
96 | if (this->rb_left == &tn->rb) | ||
97 | this->rb_left = NULL; | ||
98 | else if (this->rb_right == &tn->rb) | ||
99 | this->rb_right = NULL; | ||
100 | else BUG(); | ||
101 | } | ||
102 | } | ||
103 | list->rb_node = NULL; | ||
104 | } | ||
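
jffs2_free_tmp_dnode_info_list() tears the temporary tree down without recursion: descend to a leaf, free it, detach it from its parent, then continue from the parent. A standalone model of that traversal with a simplified node type (the per-entry jffs2_free_*() calls are collapsed into a single free()):

#include <stdlib.h>

struct node {
	struct node *left, *right, *parent;
};

static void free_tree(struct node **root)
{
	struct node *this = *root;

	while (this) {
		if (this->left)
			this = this->left;
		else if (this->right)
			this = this->right;
		else {
			struct node *parent = this->parent;

			if (parent) {
				if (parent->left == this)
					parent->left = NULL;
				else
					parent->right = NULL;
			}
			free(this);
			this = parent;
		}
	}
	*root = NULL;
}

int main(void)
{
	struct node *root = calloc(1, sizeof(*root));
	struct node *leaf = calloc(1, sizeof(*leaf));

	root->left = leaf;
	leaf->parent = root;
	free_tree(&root);
	return root ? 1 : 0;
}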
55 | 105 | ||
56 | if (unlikely(!fn->size)) | 106 | static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd) |
57 | return 0; | 107 | { |
108 | struct jffs2_full_dirent *next; | ||
58 | 109 | ||
59 | newfrag = jffs2_alloc_node_frag(); | 110 | while (fd) { |
60 | if (unlikely(!newfrag)) | 111 | next = fd->next; |
61 | return -ENOMEM; | 112 | jffs2_free_full_dirent(fd); |
113 | fd = next; | ||
114 | } | ||
115 | } | ||
62 | 116 | ||
63 | D2(printk(KERN_DEBUG "adding node %04x-%04x @0x%08x on flash, newfrag *%p\n", | 117 | /* Returns first valid node after 'ref'. May return 'ref' */ |
64 | fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag)); | 118 | static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref) |
65 | 119 | { | |
66 | newfrag->ofs = fn->ofs; | 120 | while (ref && ref->next_in_ino) { |
67 | newfrag->size = fn->size; | 121 | if (!ref_obsolete(ref)) |
68 | newfrag->node = fn; | 122 | return ref; |
69 | newfrag->node->frags = 1; | 123 | D1(printk(KERN_DEBUG "node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref))); |
124 | ref = ref->next_in_ino; | ||
125 | } | ||
126 | return NULL; | ||
127 | } | ||
70 | 128 | ||
71 | ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag); | 129 | /* |
72 | if (ret) | 130 | * Helper function for jffs2_get_inode_nodes(). |
73 | return ret; | 131 | * It is called every time an directory entry node is found. |
132 | * | ||
133 | * Returns: 0 on succes; | ||
134 | * 1 if the node should be marked obsolete; | ||
135 | * negative error code on failure. | ||
136 | */ | ||
137 | static inline int | ||
138 | read_direntry(struct jffs2_sb_info *c, | ||
139 | struct jffs2_raw_node_ref *ref, | ||
140 | struct jffs2_raw_dirent *rd, | ||
141 | uint32_t read, | ||
142 | struct jffs2_full_dirent **fdp, | ||
143 | int32_t *latest_mctime, | ||
144 | uint32_t *mctime_ver) | ||
145 | { | ||
146 | struct jffs2_full_dirent *fd; | ||
147 | |||
148 | /* The direntry nodes are checked during the flash scanning */ | ||
149 | BUG_ON(ref_flags(ref) == REF_UNCHECKED); | ||
150 | /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ | ||
151 | BUG_ON(ref_obsolete(ref)); | ||
152 | |||
153 | /* Sanity check */ | ||
154 | if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) { | ||
155 | printk(KERN_ERR "Error! Illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n", | ||
156 | ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen)); | ||
157 | return 1; | ||
158 | } | ||
159 | |||
160 | fd = jffs2_alloc_full_dirent(rd->nsize + 1); | ||
161 | if (unlikely(!fd)) | ||
162 | return -ENOMEM; | ||
74 | 163 | ||
75 | /* If we now share a page with other nodes, mark either previous | 164 | fd->raw = ref; |
76 | or next node REF_NORMAL, as appropriate. */ | 165 | fd->version = je32_to_cpu(rd->version); |
77 | if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) { | 166 | fd->ino = je32_to_cpu(rd->ino); |
78 | struct jffs2_node_frag *prev = frag_prev(newfrag); | 167 | fd->type = rd->type; |
79 | 168 | ||
80 | mark_ref_normal(fn->raw); | 169 | /* Pick out the mctime of the latest dirent */ |
81 | /* If we don't start at zero there's _always_ a previous */ | 170 | if(fd->version > *mctime_ver) { |
82 | if (prev->node) | 171 | *mctime_ver = fd->version; |
83 | mark_ref_normal(prev->node->raw); | 172 | *latest_mctime = je32_to_cpu(rd->mctime); |
84 | } | 173 | } |
85 | 174 | ||
86 | if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) { | 175 | /* |
87 | struct jffs2_node_frag *next = frag_next(newfrag); | 176 | * Copy as much of the name as possible from the raw |
177 | * dirent we've already read from the flash. | ||
178 | */ | ||
179 | if (read > sizeof(*rd)) | ||
180 | memcpy(&fd->name[0], &rd->name[0], | ||
181 | min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) )); | ||
88 | 182 | ||
89 | if (next) { | 183 | /* Do we need to copy any more of the name directly from the flash? */ |
90 | mark_ref_normal(fn->raw); | 184 | if (rd->nsize + sizeof(*rd) > read) { |
91 | if (next->node) | 185 | /* FIXME: point() */ |
92 | mark_ref_normal(next->node->raw); | 186 | int err; |
187 | int already = read - sizeof(*rd); | ||
188 | |||
189 | err = jffs2_flash_read(c, (ref_offset(ref)) + read, | ||
190 | rd->nsize - already, &read, &fd->name[already]); | ||
191 | if (unlikely(read != rd->nsize - already) && likely(!err)) | ||
192 | return -EIO; | ||
193 | |||
194 | if (unlikely(err)) { | ||
195 | printk(KERN_WARNING "Read remainder of name: error %d\n", err); | ||
196 | jffs2_free_full_dirent(fd); | ||
197 | return -EIO; | ||
93 | } | 198 | } |
94 | } | 199 | } |
95 | jffs2_dbg_fragtree_paranoia_check_nolock(f); | 200 | |
96 | jffs2_dbg_dump_fragtree_nolock(f); | 201 | fd->nhash = full_name_hash(fd->name, rd->nsize); |
202 | fd->next = NULL; | ||
203 | fd->name[rd->nsize] = '\0'; | ||
204 | |||
205 | /* | ||
206 | * Wheee. We now have a complete jffs2_full_dirent structure, with | ||
207 | * the name in it and everything. Link it into the list | ||
208 | */ | ||
209 | D1(printk(KERN_DEBUG "Adding fd \"%s\", ino #%u\n", fd->name, fd->ino)); | ||
210 | |||
211 | jffs2_add_fd_to_list(c, fd, fdp); | ||
212 | |||
97 | return 0; | 213 | return 0; |
98 | } | 214 | } |
99 | 215 | ||
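
read_direntry() assembles the file name from two sources: whatever part of the name already sits in the partially-read node buffer, plus a second flash read for the remainder. A self-contained model of that copy logic, where a memcpy() stands in for jffs2_flash_read() and the 4-byte "HDR8" header and all sizes are invented:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	const char flash[] = "HDR8longfilename.txt";	/* fake header + 16-byte name */
	const uint32_t hdr = 4, nsize = 16;
	uint32_t read = 12;				/* header + first 8 name bytes already read */
	char name[32];

	uint32_t already = read - hdr;
	memcpy(name, flash + hdr, MIN(nsize, already));	/* from the buffer we already have */
	if (nsize > already)				/* fetch the rest "from flash" */
		memcpy(name + already, flash + hdr + already, nsize - already);
	name[nsize] = '\0';

	printf("%s\n", name);				/* longfilename.txt */
	return 0;
}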
100 | /* Doesn't set inode->i_size */ | 216 | /* |
101 | static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *list, struct jffs2_node_frag *newfrag) | 217 | * Helper function for jffs2_get_inode_nodes(). |
218 | * It is called every time an inode node is found. | ||
219 | * | ||
220 | * Returns: 0 on succes; | ||
221 | * 1 if the node should be marked obsolete; | ||
222 | * negative error code on failure. | ||
223 | */ | ||
224 | static inline int | ||
225 | read_dnode(struct jffs2_sb_info *c, | ||
226 | struct jffs2_raw_node_ref *ref, | ||
227 | struct jffs2_raw_inode *rd, | ||
228 | uint32_t read, | ||
229 | struct rb_root *tnp, | ||
230 | int32_t *latest_mctime, | ||
231 | uint32_t *mctime_ver) | ||
102 | { | 232 | { |
103 | struct jffs2_node_frag *this; | 233 | struct jffs2_eraseblock *jeb; |
104 | uint32_t lastend; | 234 | struct jffs2_tmp_dnode_info *tn; |
105 | 235 | ||
106 | /* Skip all the nodes which are completed before this one starts */ | 236 | /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ |
107 | this = jffs2_lookup_node_frag(list, newfrag->node->ofs); | 237 | BUG_ON(ref_obsolete(ref)); |
108 | 238 | ||
109 | if (this) { | 239 | /* If we've never checked the CRCs on this node, check them now */ |
110 | D2(printk(KERN_DEBUG "j_a_f_d_t_f: Lookup gave frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", | 240 | if (ref_flags(ref) == REF_UNCHECKED) { |
111 | this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this)); | 241 | uint32_t crc, len; |
112 | lastend = this->ofs + this->size; | 242 | |
113 | } else { | 243 | crc = crc32(0, rd, sizeof(*rd) - 8); |
114 | D2(printk(KERN_DEBUG "j_a_f_d_t_f: Lookup gave no frag\n")); | 244 | if (unlikely(crc != je32_to_cpu(rd->node_crc))) { |
115 | lastend = 0; | 245 | printk(KERN_WARNING "Header CRC failed on node at %#08x: read %#08x, calculated %#08x\n", |
116 | } | 246 | ref_offset(ref), je32_to_cpu(rd->node_crc), crc); |
117 | 247 | return 1; | |
118 | /* See if we ran off the end of the list */ | 248 | } |
119 | if (lastend <= newfrag->ofs) { | 249 | |
120 | /* We did */ | 250 | /* Sanity checks */ |
121 | 251 | if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) || | |
122 | /* Check if 'this' node was on the same page as the new node. | 252 | unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) { |
123 | If so, both 'this' and the new node get marked REF_NORMAL so | 253 | printk(KERN_WARNING "Inode corrupted at %#08x, totlen %d, #ino %d, version %d, " |
124 | the GC can take a look. | 254 | "isize %d, csize %d, dsize %d \n", |
125 | */ | 255 | ref_offset(ref), je32_to_cpu(rd->totlen), je32_to_cpu(rd->ino), |
126 | if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) { | 256 | je32_to_cpu(rd->version), je32_to_cpu(rd->isize), |
127 | if (this->node) | 257 | je32_to_cpu(rd->csize), je32_to_cpu(rd->dsize)); |
128 | mark_ref_normal(this->node->raw); | 258 | return 1; |
129 | mark_ref_normal(newfrag->node->raw); | ||
130 | } | 259 | } |
131 | 260 | ||
132 | if (lastend < newfrag->node->ofs) { | 261 | if (rd->compr != JFFS2_COMPR_ZERO && je32_to_cpu(rd->csize)) { |
133 | /* ... and we need to put a hole in before the new node */ | 262 | unsigned char *buf = NULL; |
134 | struct jffs2_node_frag *holefrag = jffs2_alloc_node_frag(); | 263 | uint32_t pointed = 0; |
135 | if (!holefrag) { | 264 | int err; |
136 | jffs2_free_node_frag(newfrag); | 265 | #ifndef __ECOS |
137 | return -ENOMEM; | 266 | if (c->mtd->point) { |
267 | err = c->mtd->point (c->mtd, ref_offset(ref) + sizeof(*rd), je32_to_cpu(rd->csize), | ||
268 | &read, &buf); | ||
269 | if (unlikely(read < je32_to_cpu(rd->csize)) && likely(!err)) { | ||
270 | D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", read)); | ||
271 | c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(*rd), | ||
272 | je32_to_cpu(rd->csize)); | ||
273 | } else if (unlikely(err)){ | ||
274 | D1(printk(KERN_DEBUG "MTD point failed %d\n", err)); | ||
275 | } else | ||
276 | pointed = 1; /* succefully pointed to device */ | ||
138 | } | 277 | } |
139 | holefrag->ofs = lastend; | 278 | #endif |
140 | holefrag->size = newfrag->node->ofs - lastend; | 279 | if(!pointed){ |
141 | holefrag->node = NULL; | 280 | buf = kmalloc(je32_to_cpu(rd->csize), GFP_KERNEL); |
142 | if (this) { | 281 | if (!buf) |
143 | /* By definition, the 'this' node has no right-hand child, | 282 | return -ENOMEM; |
144 | because there are no frags with offset greater than it. | 283 | |
145 | So that's where we want to put the hole */ | 284 | err = jffs2_flash_read(c, ref_offset(ref) + sizeof(*rd), je32_to_cpu(rd->csize), |
146 | D2(printk(KERN_DEBUG "Adding hole frag (%p) on right of node at (%p)\n", holefrag, this)); | 285 | &read, buf); |
147 | rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right); | 286 | if (unlikely(read != je32_to_cpu(rd->csize)) && likely(!err)) |
148 | } else { | 287 | err = -EIO; |
149 | D2(printk(KERN_DEBUG "Adding hole frag (%p) at root of tree\n", holefrag)); | 288 | if (err) { |
150 | rb_link_node(&holefrag->rb, NULL, &list->rb_node); | 289 | kfree(buf); |
290 | return err; | ||
291 | } | ||
151 | } | 292 | } |
152 | rb_insert_color(&holefrag->rb, list); | 293 | crc = crc32(0, buf, je32_to_cpu(rd->csize)); |
153 | this = holefrag; | 294 | if(!pointed) |
154 | } | 295 | kfree(buf); |
155 | if (this) { | 296 | #ifndef __ECOS |
156 | /* By definition, the 'this' node has no right-hand child, | 297 | else |
157 | because there are no frags with offset greater than it. | 298 | c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(*rd), je32_to_cpu(rd->csize)); |
158 | So that's where we want to put the hole */ | 299 | #endif |
159 | D2(printk(KERN_DEBUG "Adding new frag (%p) on right of node at (%p)\n", newfrag, this)); | 300 | |
160 | rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right); | 301 | if (crc != je32_to_cpu(rd->data_crc)) { |
161 | } else { | 302 | printk(KERN_NOTICE "Data CRC failed on node at %#08x: read %#08x, calculated %#08x\n", |
162 | D2(printk(KERN_DEBUG "Adding new frag (%p) at root of tree\n", newfrag)); | 303 | ref_offset(ref), je32_to_cpu(rd->data_crc), crc); |
163 | rb_link_node(&newfrag->rb, NULL, &list->rb_node); | 304 | return 1; |
164 | } | ||
165 | rb_insert_color(&newfrag->rb, list); | ||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | D2(printk(KERN_DEBUG "j_a_f_d_t_f: dealing with frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", | ||
170 | this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this)); | ||
171 | |||
172 | /* OK. 'this' is pointing at the first frag that newfrag->ofs at least partially obsoletes, | ||
173 | * - i.e. newfrag->ofs < this->ofs+this->size && newfrag->ofs >= this->ofs | ||
174 | */ | ||
175 | if (newfrag->ofs > this->ofs) { | ||
176 | /* This node isn't completely obsoleted. The start of it remains valid */ | ||
177 | |||
178 | /* Mark the new node and the partially covered node REF_NORMAL -- let | ||
179 | the GC take a look at them */ | ||
180 | mark_ref_normal(newfrag->node->raw); | ||
181 | if (this->node) | ||
182 | mark_ref_normal(this->node->raw); | ||
183 | |||
184 | if (this->ofs + this->size > newfrag->ofs + newfrag->size) { | ||
185 | /* The new node splits 'this' frag into two */ | ||
186 | struct jffs2_node_frag *newfrag2 = jffs2_alloc_node_frag(); | ||
187 | if (!newfrag2) { | ||
188 | jffs2_free_node_frag(newfrag); | ||
189 | return -ENOMEM; | ||
190 | } | 305 | } |
191 | D2(printk(KERN_DEBUG "split old frag 0x%04x-0x%04x -->", this->ofs, this->ofs+this->size); | ||
192 | if (this->node) | ||
193 | printk("phys 0x%08x\n", ref_offset(this->node->raw)); | ||
194 | else | ||
195 | printk("hole\n"); | ||
196 | ) | ||
197 | |||
198 | /* New second frag pointing to this's node */ | ||
199 | newfrag2->ofs = newfrag->ofs + newfrag->size; | ||
200 | newfrag2->size = (this->ofs+this->size) - newfrag2->ofs; | ||
201 | newfrag2->node = this->node; | ||
202 | if (this->node) | ||
203 | this->node->frags++; | ||
204 | |||
205 | /* Adjust size of original 'this' */ | ||
206 | this->size = newfrag->ofs - this->ofs; | ||
207 | |||
208 | /* Now, we know there's no node with offset | ||
209 | greater than this->ofs but smaller than | ||
210 | newfrag2->ofs or newfrag->ofs, for obvious | ||
211 | reasons. So we can do a tree insert from | ||
212 | 'this' to insert newfrag, and a tree insert | ||
213 | from newfrag to insert newfrag2. */ | ||
214 | jffs2_fragtree_insert(newfrag, this); | ||
215 | rb_insert_color(&newfrag->rb, list); | ||
216 | 306 | ||
217 | jffs2_fragtree_insert(newfrag2, newfrag); | ||
218 | rb_insert_color(&newfrag2->rb, list); | ||
219 | |||
220 | return 0; | ||
221 | } | 307 | } |
222 | /* New node just reduces 'this' frag in size, doesn't split it */ | ||
223 | this->size = newfrag->ofs - this->ofs; | ||
224 | 308 | ||
225 | /* Again, we know it lives down here in the tree */ | 309 | /* Mark the node as having been checked and fix the accounting accordingly */ |
226 | jffs2_fragtree_insert(newfrag, this); | 310 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; |
227 | rb_insert_color(&newfrag->rb, list); | 311 | len = ref_totlen(c, jeb, ref); |
228 | } else { | 312 | |
229 | /* New frag starts at the same point as 'this' used to. Replace | 313 | spin_lock(&c->erase_completion_lock); |
230 | it in the tree without doing a delete and insertion */ | 314 | jeb->used_size += len; |
231 | D2(printk(KERN_DEBUG "Inserting newfrag (*%p),%d-%d in before 'this' (*%p),%d-%d\n", | 315 | jeb->unchecked_size -= len; |
232 | newfrag, newfrag->ofs, newfrag->ofs+newfrag->size, | 316 | c->used_size += len; |
233 | this, this->ofs, this->ofs+this->size)); | 317 | c->unchecked_size -= len; |
234 | 318 | ||
235 | rb_replace_node(&this->rb, &newfrag->rb, list); | 319 | /* If node covers at least a whole page, or if it starts at the |
236 | 320 | beginning of a page and runs to the end of the file, or if | |
237 | if (newfrag->ofs + newfrag->size >= this->ofs+this->size) { | 321 | it's a hole node, mark it REF_PRISTINE, else REF_NORMAL. |
238 | D2(printk(KERN_DEBUG "Obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size)); | ||
239 | jffs2_obsolete_node_frag(c, this); | ||
240 | } else { | ||
241 | this->ofs += newfrag->size; | ||
242 | this->size -= newfrag->size; | ||
243 | 322 | ||
244 | jffs2_fragtree_insert(this, newfrag); | 323 | If it's actually overlapped, it'll get made NORMAL (or OBSOLETE) |
245 | rb_insert_color(&this->rb, list); | 324 | when the overlapping node(s) get added to the tree anyway. |
246 | return 0; | 325 | */ |
326 | if ((je32_to_cpu(rd->dsize) >= PAGE_CACHE_SIZE) || | ||
327 | ( ((je32_to_cpu(rd->offset) & (PAGE_CACHE_SIZE-1))==0) && | ||
328 | (je32_to_cpu(rd->dsize) + je32_to_cpu(rd->offset) == je32_to_cpu(rd->isize)))) { | ||
329 | D1(printk(KERN_DEBUG "Marking node at %#08x REF_PRISTINE\n", ref_offset(ref))); | ||
330 | ref->flash_offset = ref_offset(ref) | REF_PRISTINE; | ||
331 | } else { | ||
332 | D1(printk(KERN_DEBUG "Marking node at %#08x REF_NORMAL\n", ref_offset(ref))); | ||
333 | ref->flash_offset = ref_offset(ref) | REF_NORMAL; | ||
247 | } | 334 | } |
335 | spin_unlock(&c->erase_completion_lock); | ||
248 | } | 336 | } |
249 | /* OK, now we have newfrag added in the correct place in the tree, but | ||
250 | frag_next(newfrag) may be a fragment which is overlapped by it | ||
251 | */ | ||
252 | while ((this = frag_next(newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) { | ||
253 | /* 'this' frag is obsoleted completely. */ | ||
254 | D2(printk(KERN_DEBUG "Obsoleting node frag %p (%x-%x) and removing from tree\n", this, this->ofs, this->ofs+this->size)); | ||
255 | rb_erase(&this->rb, list); | ||
256 | jffs2_obsolete_node_frag(c, this); | ||
257 | } | ||
258 | /* Now we're pointing at the first frag which isn't totally obsoleted by | ||
259 | the new frag */ | ||
260 | 337 | ||
261 | if (!this || newfrag->ofs + newfrag->size == this->ofs) { | 338 | tn = jffs2_alloc_tmp_dnode_info(); |
262 | return 0; | 339 | if (!tn) { |
340 | D1(printk(KERN_DEBUG "alloc tn failed\n")); | ||
341 | return -ENOMEM; | ||
263 | } | 342 | } |
264 | /* Still some overlap but we don't need to move it in the tree */ | ||
265 | this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size); | ||
266 | this->ofs = newfrag->ofs + newfrag->size; | ||
267 | 343 | ||
268 | /* And mark them REF_NORMAL so the GC takes a look at them */ | 344 | tn->fn = jffs2_alloc_full_dnode(); |
269 | if (this->node) | 345 | if (!tn->fn) { |
270 | mark_ref_normal(this->node->raw); | 346 | D1(printk(KERN_DEBUG "alloc fn failed\n")); |
271 | mark_ref_normal(newfrag->node->raw); | 347 | jffs2_free_tmp_dnode_info(tn); |
348 | return -ENOMEM; | ||
349 | } | ||
350 | |||
351 | tn->version = je32_to_cpu(rd->version); | ||
352 | tn->fn->ofs = je32_to_cpu(rd->offset); | ||
353 | tn->fn->raw = ref; | ||
354 | |||
355 | /* There was a bug where we wrote hole nodes out with | ||
356 | csize/dsize swapped. Deal with it */ | ||
357 | if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && je32_to_cpu(rd->csize)) | ||
358 | tn->fn->size = je32_to_cpu(rd->csize); | ||
359 | else // normal case... | ||
360 | tn->fn->size = je32_to_cpu(rd->dsize); | ||
361 | |||
362 | D1(printk(KERN_DEBUG "dnode @%08x: ver %u, offset %#04x, dsize %#04x\n", | ||
363 | ref_offset(ref), je32_to_cpu(rd->version), | ||
364 | je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize))); | ||
365 | |||
366 | jffs2_add_tn_to_tree(tn, tnp); | ||
272 | 367 | ||
273 | return 0; | 368 | return 0; |
274 | } | 369 | } |
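
The fragment-overlap handling on the removed side of this hunk is the densest part of the change: a new fragment either splits the frag it lands in, shrinks it from one end, or obsoletes it outright. As a reading aid, here is a minimal userspace sketch of just that interval arithmetic. The struct, the overlap_case() helper and its return codes are all invented for the illustration; the real code additionally maintains the red-black tree, the per-node frag counts and the REF_NORMAL marking.

#include <stdint.h>
#include <stdio.h>

struct frag {
	uint32_t ofs;
	uint32_t size;
};

/*
 * Apply a new fragment against one existing fragment.
 * Returns: 0 = disjoint, 1 = 'old' shrunk (its head or tail survives),
 *          2 = 'old' split in two, surviving tail returned in *tail,
 *          3 = 'old' completely covered (would be obsoleted).
 */
static int overlap_case(struct frag *old, const struct frag *newf,
			struct frag *tail)
{
	uint32_t new_end = newf->ofs + newf->size;
	uint32_t old_end = old->ofs + old->size;

	if (newf->ofs >= old_end || new_end <= old->ofs)
		return 0;

	if (newf->ofs > old->ofs) {
		if (old_end > new_end) {
			/* new frag lands in the middle: split 'old' in two */
			tail->ofs = new_end;
			tail->size = old_end - new_end;
			old->size = newf->ofs - old->ofs;
			return 2;
		}
		/* new frag covers the tail of 'old': keep only the head */
		old->size = newf->ofs - old->ofs;
		return 1;
	}

	if (new_end >= old_end)
		return 3;

	/* new frag covers the head of 'old': keep only the tail */
	old->size = old_end - new_end;
	old->ofs = new_end;
	return 1;
}

int main(void)
{
	struct frag old  = { 0x000, 0x400 };
	struct frag newf = { 0x100, 0x100 };
	struct frag tail;

	int c = overlap_case(&old, &newf, &tail);
	printf("case %d: head 0x%03x-0x%03x\n", c,
	       (unsigned)old.ofs, (unsigned)(old.ofs + old.size));
	if (c == 2)
		printf("        tail 0x%03x-0x%03x\n",
		       (unsigned)tail.ofs, (unsigned)(tail.ofs + tail.size));
	/* prints: case 2, head 0x000-0x100, tail 0x200-0x400 */
	return 0;
}
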
275 | 370 | ||
276 | void jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size) | 371 | /* |
372 | * Helper function for jffs2_get_inode_nodes(). | ||
373 | * It is called every time an unknown node is found. | ||
374 | * | ||
375 | * Returns: 0 on success; | ||
376 | * 1 if the node should be marked obsolete; | ||
377 | * negative error code on failure. | ||
378 | */ | ||
379 | static inline int | ||
380 | read_unknown(struct jffs2_sb_info *c, | ||
381 | struct jffs2_raw_node_ref *ref, | ||
382 | struct jffs2_unknown_node *un, | ||
383 | uint32_t read) | ||
277 | { | 384 | { |
278 | struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size); | 385 | /* We don't mark unknown nodes as REF_UNCHECKED */ |
386 | BUG_ON(ref_flags(ref) == REF_UNCHECKED); | ||
387 | |||
388 | un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype)); | ||
279 | 389 | ||
280 | D1(printk(KERN_DEBUG "Truncating fraglist to 0x%08x bytes\n", size)); | 390 | if (crc32(0, un, sizeof(struct jffs2_unknown_node) - 4) != je32_to_cpu(un->hdr_crc)) { |
281 | 391 | ||
282 | /* We know frag->ofs <= size. That's what lookup does for us */ | 392 | /* Hmmm. This should have been caught at scan time. */ |
283 | if (frag && frag->ofs != size) { | 393 | printk(KERN_WARNING "Warning! Node header CRC failed at %#08x. " |
284 | if (frag->ofs+frag->size >= size) { | 394 | "But it must have been OK earlier.\n", ref_offset(ref)); |
285 | D1(printk(KERN_DEBUG "Truncating frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size)); | 395 | D1(printk(KERN_DEBUG "Node was: { %#04x, %#04x, %#08x, %#08x }\n", |
286 | frag->size = size - frag->ofs; | 396 | je16_to_cpu(un->magic), je16_to_cpu(un->nodetype), |
397 | je32_to_cpu(un->totlen), je32_to_cpu(un->hdr_crc))); | ||
398 | return 1; | ||
399 | } else { | ||
400 | switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) { | ||
401 | |||
402 | case JFFS2_FEATURE_INCOMPAT: | ||
403 | printk(KERN_NOTICE "Unknown INCOMPAT nodetype %#04X at %#08x\n", | ||
404 | je16_to_cpu(un->nodetype), ref_offset(ref)); | ||
405 | /* EEP */ | ||
406 | BUG(); | ||
407 | break; | ||
408 | |||
409 | case JFFS2_FEATURE_ROCOMPAT: | ||
410 | printk(KERN_NOTICE "Unknown ROCOMPAT nodetype %#04X at %#08x\n", | ||
411 | je16_to_cpu(un->nodetype), ref_offset(ref)); | ||
412 | BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO)); | ||
413 | break; | ||
414 | |||
415 | case JFFS2_FEATURE_RWCOMPAT_COPY: | ||
416 | printk(KERN_NOTICE "Unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n", | ||
417 | je16_to_cpu(un->nodetype), ref_offset(ref)); | ||
418 | break; | ||
419 | |||
420 | case JFFS2_FEATURE_RWCOMPAT_DELETE: | ||
421 | printk(KERN_NOTICE "Unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n", | ||
422 | je16_to_cpu(un->nodetype), ref_offset(ref)); | ||
423 | return 1; | ||
287 | } | 424 | } |
288 | frag = frag_next(frag); | ||
289 | } | 425 | } |
290 | while (frag && frag->ofs >= size) { | ||
291 | struct jffs2_node_frag *next = frag_next(frag); | ||
292 | 426 | ||
293 | D1(printk(KERN_DEBUG "Removing frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size)); | 427 | return 0; |
294 | frag_erase(frag, list); | ||
295 | jffs2_obsolete_node_frag(c, frag); | ||
296 | frag = next; | ||
297 | } | ||
298 | } | 428 | } |
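
read_unknown(), added above, decides what to do with an unrecognised node purely from the compatibility bits of its type field. The stripped-down userspace restatement below shows that decision in isolation; the numeric values are meant to mirror JFFS2_COMPAT_MASK and the JFFS2_FEATURE_* definitions in include/linux/jffs2.h, but they are repeated here from memory, so treat them as assumptions and trust the header, not this sketch.

#include <stdint.h>
#include <stdio.h>

/* Assumed to match JFFS2_COMPAT_MASK and JFFS2_FEATURE_* in jffs2.h */
#define COMPAT_MASK		0xc000
#define FEATURE_INCOMPAT	0xc000	/* cannot use the fs at all */
#define FEATURE_ROCOMPAT	0x8000	/* usable read-only */
#define FEATURE_RWCOMPAT_COPY	0x4000	/* keep node; GC must copy it verbatim */
#define FEATURE_RWCOMPAT_DELETE	0x0000	/* safe to mark obsolete */

static const char *classify(uint16_t nodetype)
{
	switch (nodetype & COMPAT_MASK) {
	case FEATURE_INCOMPAT:
		return "INCOMPAT: refuse to continue (the code above BUG()s)";
	case FEATURE_ROCOMPAT:
		return "ROCOMPAT: only acceptable on a read-only mount";
	case FEATURE_RWCOMPAT_COPY:
		return "RWCOMPAT_COPY: leave it alone, it must survive GC";
	default:
		return "RWCOMPAT_DELETE: fine to mark obsolete";
	}
}

int main(void)
{
	/* an unknown type whose top bits say 'delete if you like' */
	printf("%s\n", classify(0x2003));
	return 0;
}
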
299 | 429 | ||
300 | /* Scan the list of all nodes present for this ino, build map of versions, etc. */ | 430 | /* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated |
301 | 431 | with this ino, returning the former in order of version */ | |
302 | static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | ||
303 | struct jffs2_inode_info *f, | ||
304 | struct jffs2_raw_inode *latest_node); | ||
305 | 432 | ||
306 | int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | 433 | static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f, |
307 | uint32_t ino, struct jffs2_raw_inode *latest_node) | 434 | struct rb_root *tnp, struct jffs2_full_dirent **fdp, |
435 | uint32_t *highest_version, uint32_t *latest_mctime, | ||
436 | uint32_t *mctime_ver) | ||
308 | { | 437 | { |
309 | D2(printk(KERN_DEBUG "jffs2_do_read_inode(): getting inocache\n")); | 438 | struct jffs2_raw_node_ref *ref, *valid_ref; |
439 | struct rb_root ret_tn = RB_ROOT; | ||
440 | struct jffs2_full_dirent *ret_fd = NULL; | ||
441 | union jffs2_node_union node; | ||
442 | size_t retlen; | ||
443 | int err; | ||
310 | 444 | ||
311 | retry_inocache: | 445 | *mctime_ver = 0; |
312 | spin_lock(&c->inocache_lock); | 446 | |
313 | f->inocache = jffs2_get_ino_cache(c, ino); | 447 | D1(printk(KERN_DEBUG "jffs2_get_inode_nodes(): ino #%u\n", f->inocache->ino)); |
314 | 448 | ||
315 | D2(printk(KERN_DEBUG "jffs2_do_read_inode(): Got inocache at %p\n", f->inocache)); | 449 | spin_lock(&c->erase_completion_lock); |
316 | 450 | ||
317 | if (f->inocache) { | 451 | valid_ref = jffs2_first_valid_node(f->inocache->nodes); |
318 | /* Check its state. We may need to wait before we can use it */ | 452 | |
319 | switch(f->inocache->state) { | 453 | if (!valid_ref && (f->inocache->ino != 1)) |
320 | case INO_STATE_UNCHECKED: | 454 | printk(KERN_WARNING "Eep. No valid nodes for ino #%u\n", f->inocache->ino); |
321 | case INO_STATE_CHECKEDABSENT: | 455 | |
322 | f->inocache->state = INO_STATE_READING; | 456 | while (valid_ref) { |
323 | break; | 457 | /* We can hold a pointer to a non-obsolete node without the spinlock, |
458 | but _obsolete_ nodes may disappear at any time, if the block | ||
459 | they're in gets erased. So if we mark 'ref' obsolete while we're | ||
460 | not holding the lock, it can go away immediately. For that reason, | ||
461 | we find the next valid node first, before processing 'ref'. | ||
462 | */ | ||
463 | ref = valid_ref; | ||
464 | valid_ref = jffs2_first_valid_node(ref->next_in_ino); | ||
465 | spin_unlock(&c->erase_completion_lock); | ||
466 | |||
467 | cond_resched(); | ||
468 | |||
469 | /* FIXME: point() */ | ||
470 | err = jffs2_flash_read(c, (ref_offset(ref)), | ||
471 | min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node)), | ||
472 | &retlen, (void *)&node); | ||
473 | if (err) { | ||
474 | printk(KERN_WARNING "error %d reading node at 0x%08x in get_inode_nodes()\n", err, ref_offset(ref)); | ||
475 | goto free_out; | ||
476 | } | ||
324 | 477 | ||
325 | case INO_STATE_CHECKING: | 478 | switch (je16_to_cpu(node.u.nodetype)) { |
326 | case INO_STATE_GC: | 479 | |
327 | /* If it's in either of these states, we need | 480 | case JFFS2_NODETYPE_DIRENT: |
328 | to wait for whoever's got it to finish and | 481 | D1(printk(KERN_DEBUG "Node at %08x (%d) is a dirent node\n", ref_offset(ref), ref_flags(ref))); |
329 | put it back. */ | 482 | |
330 | D1(printk(KERN_DEBUG "jffs2_get_ino_cache_read waiting for ino #%u in state %d\n", | 483 | if (retlen < sizeof(node.d)) { |
331 | ino, f->inocache->state)); | 484 | printk(KERN_WARNING "Warning! Short read dirent at %#08x\n", ref_offset(ref)); |
332 | sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); | 485 | err = -EIO; |
333 | goto retry_inocache; | 486 | goto free_out; |
487 | } | ||
488 | |||
489 | err = read_direntry(c, ref, &node.d, retlen, &ret_fd, latest_mctime, mctime_ver); | ||
490 | if (err == 1) { | ||
491 | jffs2_mark_node_obsolete(c, ref); | ||
492 | break; | ||
493 | } else if (unlikely(err)) | ||
494 | goto free_out; | ||
495 | |||
496 | if (je32_to_cpu(node.d.version) > *highest_version) | ||
497 | *highest_version = je32_to_cpu(node.d.version); | ||
334 | 498 | ||
335 | case INO_STATE_READING: | ||
336 | case INO_STATE_PRESENT: | ||
337 | /* Eep. This should never happen. It can | ||
338 | happen if Linux calls read_inode() again | ||
339 | before clear_inode() has finished though. */ | ||
340 | printk(KERN_WARNING "Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state); | ||
341 | /* Fail. That's probably better than allowing it to succeed */ | ||
342 | f->inocache = NULL; | ||
343 | break; | 499 | break; |
344 | 500 | ||
345 | default: | 501 | case JFFS2_NODETYPE_INODE: |
346 | BUG(); | 502 | D1(printk(KERN_DEBUG "Node at %08x (%d) is a data node\n", ref_offset(ref), ref_flags(ref))); |
347 | } | 503 | |
348 | } | 504 | if (retlen < sizeof(node.i)) { |
349 | spin_unlock(&c->inocache_lock); | 505 | printk(KERN_WARNING "Warning! Short read dnode at %#08x\n", ref_offset(ref)); |
506 | err = -EIO; | ||
507 | goto free_out; | ||
508 | } | ||
350 | 509 | ||
351 | if (!f->inocache && ino == 1) { | 510 | err = read_dnode(c, ref, &node.i, retlen, &ret_tn, latest_mctime, mctime_ver); |
352 | /* Special case - no root inode on medium */ | 511 | if (err == 1) { |
353 | f->inocache = jffs2_alloc_inode_cache(); | 512 | jffs2_mark_node_obsolete(c, ref); |
354 | if (!f->inocache) { | 513 | break; |
355 | printk(KERN_CRIT "jffs2_do_read_inode(): Cannot allocate inocache for root inode\n"); | 514 | } else if (unlikely(err)) |
356 | return -ENOMEM; | 515 | goto free_out; |
357 | } | ||
358 | D1(printk(KERN_DEBUG "jffs2_do_read_inode(): Creating inocache for root inode\n")); | ||
359 | memset(f->inocache, 0, sizeof(struct jffs2_inode_cache)); | ||
360 | f->inocache->ino = f->inocache->nlink = 1; | ||
361 | f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache; | ||
362 | f->inocache->state = INO_STATE_READING; | ||
363 | jffs2_add_ino_cache(c, f->inocache); | ||
364 | } | ||
365 | if (!f->inocache) { | ||
366 | printk(KERN_WARNING "jffs2_do_read_inode() on nonexistent ino %u\n", ino); | ||
367 | return -ENOENT; | ||
368 | } | ||
369 | 516 | ||
370 | return jffs2_do_read_inode_internal(c, f, latest_node); | 517 | if (je32_to_cpu(node.i.version) > *highest_version) |
371 | } | 518 | *highest_version = je32_to_cpu(node.i.version); |
519 | |||
520 | D1(printk(KERN_DEBUG "version %d, highest_version now %d\n", | ||
521 | je32_to_cpu(node.i.version), *highest_version)); | ||
372 | 522 | ||
373 | int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) | 523 | break; |
374 | { | ||
375 | struct jffs2_raw_inode n; | ||
376 | struct jffs2_inode_info *f = kmalloc(sizeof(*f), GFP_KERNEL); | ||
377 | int ret; | ||
378 | 524 | ||
379 | if (!f) | 525 | default: |
380 | return -ENOMEM; | 526 | /* Check we've managed to read at least the common node header */ |
527 | if (retlen < sizeof(struct jffs2_unknown_node)) { | ||
528 | printk(KERN_WARNING "Warning! Short read unknown node at %#08x\n", | ||
529 | ref_offset(ref)); | ||
530 | return -EIO; | ||
531 | } | ||
381 | 532 | ||
382 | memset(f, 0, sizeof(*f)); | 533 | err = read_unknown(c, ref, &node.u, retlen); |
383 | init_MUTEX_LOCKED(&f->sem); | 534 | if (err == 1) { |
384 | f->inocache = ic; | 535 | jffs2_mark_node_obsolete(c, ref); |
536 | break; | ||
537 | } else if (unlikely(err)) | ||
538 | goto free_out; | ||
539 | |||
540 | } | ||
541 | spin_lock(&c->erase_completion_lock); | ||
385 | 542 | ||
386 | ret = jffs2_do_read_inode_internal(c, f, &n); | ||
387 | if (!ret) { | ||
388 | up(&f->sem); | ||
389 | jffs2_do_clear_inode(c, f); | ||
390 | } | 543 | } |
391 | kfree (f); | 544 | spin_unlock(&c->erase_completion_lock); |
392 | return ret; | 545 | *tnp = ret_tn; |
546 | *fdp = ret_fd; | ||
547 | |||
548 | return 0; | ||
549 | |||
550 | free_out: | ||
551 | jffs2_free_tmp_dnode_info_list(&ret_tn); | ||
552 | jffs2_free_full_dirent_list(ret_fd); | ||
553 | return err; | ||
393 | } | 554 | } |
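
The loop in jffs2_get_inode_nodes() above looks up the *next* valid node before it processes the current one, because processing may mark the current node obsolete, and an obsolete node can vanish as soon as the erase_completion lock is dropped. The same idiom in miniature, on an ordinary singly linked list whose visit step frees the element it is handed (all names are made up for the illustration, error handling omitted):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int value;
	struct node *next;
};

/* Consumes (frees) the node, the way marking a ref obsolete may
 * effectively invalidate it in the code above. */
static void visit(struct node *n)
{
	printf("visiting %d\n", n->value);
	free(n);
}

static void walk(struct node *head)
{
	struct node *cur, *next = head;

	while (next) {
		cur = next;
		next = cur->next;	/* fetch the successor first... */
		visit(cur);		/* ...so visit() may safely destroy 'cur' */
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 3; i > 0; i--) {
		struct node *n = malloc(sizeof(*n));
		n->value = i;
		n->next = head;
		head = n;
	}
	walk(head);	/* prints 1, 2, 3 */
	return 0;
}
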
394 | 555 | ||
395 | static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | 556 | static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, |
@@ -618,6 +779,96 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
618 | return 0; | 779 | return 0; |
619 | } | 780 | } |
620 | 781 | ||
782 | /* Scan the list of all nodes present for this ino, build map of versions, etc. */ | ||
783 | int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
784 | uint32_t ino, struct jffs2_raw_inode *latest_node) | ||
785 | { | ||
786 | D2(printk(KERN_DEBUG "jffs2_do_read_inode(): getting inocache\n")); | ||
787 | |||
788 | retry_inocache: | ||
789 | spin_lock(&c->inocache_lock); | ||
790 | f->inocache = jffs2_get_ino_cache(c, ino); | ||
791 | |||
792 | D2(printk(KERN_DEBUG "jffs2_do_read_inode(): Got inocache at %p\n", f->inocache)); | ||
793 | |||
794 | if (f->inocache) { | ||
795 | /* Check its state. We may need to wait before we can use it */ | ||
796 | switch(f->inocache->state) { | ||
797 | case INO_STATE_UNCHECKED: | ||
798 | case INO_STATE_CHECKEDABSENT: | ||
799 | f->inocache->state = INO_STATE_READING; | ||
800 | break; | ||
801 | |||
802 | case INO_STATE_CHECKING: | ||
803 | case INO_STATE_GC: | ||
804 | /* If it's in either of these states, we need | ||
805 | to wait for whoever's got it to finish and | ||
806 | put it back. */ | ||
807 | D1(printk(KERN_DEBUG "jffs2_get_ino_cache_read waiting for ino #%u in state %d\n", | ||
808 | ino, f->inocache->state)); | ||
809 | sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); | ||
810 | goto retry_inocache; | ||
811 | |||
812 | case INO_STATE_READING: | ||
813 | case INO_STATE_PRESENT: | ||
814 | /* Eep. This should never happen. It can | ||
815 | happen if Linux calls read_inode() again | ||
816 | before clear_inode() has finished though. */ | ||
817 | printk(KERN_WARNING "Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state); | ||
818 | /* Fail. That's probably better than allowing it to succeed */ | ||
819 | f->inocache = NULL; | ||
820 | break; | ||
821 | |||
822 | default: | ||
823 | BUG(); | ||
824 | } | ||
825 | } | ||
826 | spin_unlock(&c->inocache_lock); | ||
827 | |||
828 | if (!f->inocache && ino == 1) { | ||
829 | /* Special case - no root inode on medium */ | ||
830 | f->inocache = jffs2_alloc_inode_cache(); | ||
831 | if (!f->inocache) { | ||
832 | printk(KERN_CRIT "jffs2_do_read_inode(): Cannot allocate inocache for root inode\n"); | ||
833 | return -ENOMEM; | ||
834 | } | ||
835 | D1(printk(KERN_DEBUG "jffs2_do_read_inode(): Creating inocache for root inode\n")); | ||
836 | memset(f->inocache, 0, sizeof(struct jffs2_inode_cache)); | ||
837 | f->inocache->ino = f->inocache->nlink = 1; | ||
838 | f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache; | ||
839 | f->inocache->state = INO_STATE_READING; | ||
840 | jffs2_add_ino_cache(c, f->inocache); | ||
841 | } | ||
842 | if (!f->inocache) { | ||
843 | printk(KERN_WARNING "jffs2_do_read_inode() on nonexistent ino %u\n", ino); | ||
844 | return -ENOENT; | ||
845 | } | ||
846 | |||
847 | return jffs2_do_read_inode_internal(c, f, latest_node); | ||
848 | } | ||
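
The switch at the top of jffs2_do_read_inode() boils down to a three-way decision on the inocache state: claim the cache and proceed, sleep and retry, or refuse. The sketch below restates only that decision; the state names shadow the kernel's INO_STATE_* constants (their actual values are not assumed) and the outcome enum is invented for the illustration.

#include <stdio.h>

enum ino_state {		/* mirrors the INO_STATE_* names */
	INO_STATE_UNCHECKED,
	INO_STATE_CHECKEDABSENT,
	INO_STATE_CHECKING,
	INO_STATE_GC,
	INO_STATE_READING,
	INO_STATE_PRESENT,
};

enum read_decision {		/* invented for this sketch */
	READ_PROCEED,		/* claim the cache: state becomes READING */
	READ_WAIT_AND_RETRY,	/* someone else holds it; sleep, then retry */
	READ_REFUSE,		/* already being read or present: fail the call */
};

static enum read_decision decide(enum ino_state state)
{
	switch (state) {
	case INO_STATE_UNCHECKED:
	case INO_STATE_CHECKEDABSENT:
		return READ_PROCEED;
	case INO_STATE_CHECKING:
	case INO_STATE_GC:
		return READ_WAIT_AND_RETRY;
	case INO_STATE_READING:
	case INO_STATE_PRESENT:
		return READ_REFUSE;
	default:
		/* the kernel code above BUG()s on anything else */
		return READ_REFUSE;
	}
}

int main(void)
{
	printf("%d\n", decide(INO_STATE_GC));	/* 1: wait and retry */
	return 0;
}
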
849 | |||
850 | int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) | ||
851 | { | ||
852 | struct jffs2_raw_inode n; | ||
853 | struct jffs2_inode_info *f = kmalloc(sizeof(*f), GFP_KERNEL); | ||
854 | int ret; | ||
855 | |||
856 | if (!f) | ||
857 | return -ENOMEM; | ||
858 | |||
859 | memset(f, 0, sizeof(*f)); | ||
860 | init_MUTEX_LOCKED(&f->sem); | ||
861 | f->inocache = ic; | ||
862 | |||
863 | ret = jffs2_do_read_inode_internal(c, f, &n); | ||
864 | if (!ret) { | ||
865 | up(&f->sem); | ||
866 | jffs2_do_clear_inode(c, f); | ||
867 | } | ||
868 | kfree (f); | ||
869 | return ret; | ||
870 | } | ||
871 | |||
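
jffs2_do_crccheck_inode() above exists only to drive the normal read path for an inode nobody has opened yet, so that every REF_UNCHECKED node gets its CRCs verified and, as in the read_dnode() tail earlier in this hunk, its length migrates from the 'unchecked' to the 'used' totals of both the eraseblock and the filesystem. Reduced to that bookkeeping alone (toy struct, no locking, names invented for the illustration), the transition is:

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the per-eraseblock / per-fs size accounting. */
struct acct {
	uint32_t used_size;
	uint32_t unchecked_size;
};

/* A node of 'len' bytes passed its CRC check: it is no longer 'unchecked'. */
static void mark_checked(struct acct *jeb, struct acct *fs, uint32_t len)
{
	jeb->used_size      += len;
	jeb->unchecked_size -= len;
	fs->used_size       += len;
	fs->unchecked_size  -= len;
}

int main(void)
{
	struct acct jeb = { 0, 4096 }, fs = { 0, 4096 };

	mark_checked(&jeb, &fs, 68);	/* e.g. one small data node */
	printf("block: used %u, unchecked %u\n",
	       (unsigned)jeb.used_size, (unsigned)jeb.unchecked_size);
	return 0;
}
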
621 | void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f) | 872 | void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f) |
622 | { | 873 | { |
623 | struct jffs2_full_dirent *fd, *fds; | 874 | struct jffs2_full_dirent *fd, *fds; |