path: root/fs/jffs2/wbuf.c
author:    David Woodhouse <dwmw2@infradead.org>  2006-05-26 16:19:05 -0400
committer: David Woodhouse <dwmw2@infradead.org>  2006-05-26 16:19:05 -0400
commit:    9bfeb691e75b21fdaa80ffae719083200b190381 (patch)
tree:      3c828820f1385249835f85e5073b4ffd10fcd09c /fs/jffs2/wbuf.c
parent:    f75e5097ef298c5a0aa106faa211d1afdc92dc3d (diff)
[JFFS2] Switch to using an array of jffs2_raw_node_refs instead of a list.

This allows us to drop another pointer from the struct jffs2_raw_node_ref,
shrinking it to 8 bytes on 32-bit machines (if the TEST_TOTLEN paranoia
check is turned off, which will be committed soon).

Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Diffstat (limited to 'fs/jffs2/wbuf.c')
-rw-r--r--  fs/jffs2/wbuf.c  287

1 file changed, 203 insertions(+), 84 deletions(-)
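The struct change the log message describes, as a minimal sketch in C: the fields shown are the ones this diff actually touches (next_phys, next_in_ino, flash_offset) plus the TEST_TOTLEN option named above. The exact header layout at this commit is assumed, not quoted, and BEFORE_THIS_COMMIT is a hypothetical switch used only to show both revisions side by side.

#include <stdint.h>

#ifdef BEFORE_THIS_COMMIT	/* hypothetical switch, for illustration only */
/* Each ref carried a pointer to its physical successor on the medium. */
struct jffs2_raw_node_ref {
	struct jffs2_raw_node_ref *next_phys;	/* the pointer this commit drops */
	struct jffs2_raw_node_ref *next_in_ino;
	uint32_t flash_offset;	/* low bits double as REF_OBSOLETE etc. */
#ifdef TEST_TOTLEN
	uint32_t __totlen;	/* paranoia check named in the log */
#endif
};
#else
/* Refs for an eraseblock now live in preallocated arrays (note the
   jffs2_prealloc_raw_node_refs() calls below), so the physical
   successor is simply the next array slot, reached via ref_next().
   That leaves 8 bytes on 32-bit once TEST_TOTLEN is turned off. */
struct jffs2_raw_node_ref {
	struct jffs2_raw_node_ref *next_in_ino;
	uint32_t flash_offset;
#ifdef TEST_TOTLEN
	uint32_t __totlen;
#endif
};
#endif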
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index e16e45ea0474..2febece89062 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -156,72 +156,126 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock
 		jffs2_erase_pending_trigger(c);
 	}
 
-	/* Adjust its size counts accordingly */
-	c->wasted_size += jeb->free_size;
-	c->free_size -= jeb->free_size;
-	jeb->wasted_size += jeb->free_size;
-	jeb->free_size = 0;
+	if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
+		uint32_t oldfree = jeb->free_size;
+
+		jffs2_link_node_ref(c, jeb,
+				    (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
+				    oldfree, NULL);
+		/* convert to wasted */
+		c->wasted_size += oldfree;
+		jeb->wasted_size += oldfree;
+		c->dirty_size -= oldfree;
+		jeb->dirty_size -= oldfree;
+	}
 
 	jffs2_dbg_dump_block_lists_nolock(c);
 	jffs2_dbg_acct_sanity_check_nolock(c,jeb);
 	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
 }
 
+static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
+							    struct jffs2_inode_info *f,
+							    struct jffs2_raw_node_ref *raw,
+							    union jffs2_node_union *node)
+{
+	struct jffs2_node_frag *frag;
+	struct jffs2_full_dirent *fd;
+
+	dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
+		    node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));
+
+	BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
+	       je16_to_cpu(node->u.magic) != 0);
+
+	switch (je16_to_cpu(node->u.nodetype)) {
+	case JFFS2_NODETYPE_INODE:
+		frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
+		BUG_ON(!frag);
+		/* Find a frag which refers to the full_dnode we want to modify */
+		while (!frag->node || frag->node->raw != raw) {
+			frag = frag_next(frag);
+			BUG_ON(!frag);
+		}
+		dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
+		return &frag->node->raw;
+		break;
+
+	case JFFS2_NODETYPE_DIRENT:
+		for (fd = f->dents; fd; fd = fd->next) {
+			if (fd->raw == raw) {
+				dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
+				return &fd->raw;
+			}
+		}
+		BUG();
+	default:
+		dbg_noderef("Don't care about replacing raw for nodetype %x\n",
+			    je16_to_cpu(node->u.nodetype));
+		break;
+	}
+	return NULL;
+}
+
 /* Recover from failure to write wbuf. Recover the nodes up to the
  * wbuf, not the one which we were starting to try to write. */
 
 static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 {
 	struct jffs2_eraseblock *jeb, *new_jeb;
-	struct jffs2_raw_node_ref **first_raw, **raw;
+	struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
 	size_t retlen;
 	int ret;
+	int nr_refile = 0;
 	unsigned char *buf;
 	uint32_t start, end, ofs, len;
 
 	jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
 
-	if (jffs2_prealloc_raw_node_refs(c, jeb, c->reserved_refs + 1))
-		return;
-
 	spin_lock(&c->erase_completion_lock);
-
 	jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
+	spin_unlock(&c->erase_completion_lock);
+
+	BUG_ON(!ref_obsolete(jeb->last_node));
 
 	/* Find the first node to be recovered, by skipping over every
 	   node which ends before the wbuf starts, or which is obsolete. */
-	first_raw = &jeb->first_node;
-	while (*first_raw &&
-	       (ref_obsolete(*first_raw) ||
-		(ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) {
-		D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
-			  ref_offset(*first_raw), ref_flags(*first_raw),
-			  (ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw)),
-			  c->wbuf_ofs));
-		first_raw = &(*first_raw)->next_phys;
+	for (next = raw = jeb->first_node; next; raw = next) {
+		next = ref_next(raw);
+
+		if (ref_obsolete(raw) ||
+		    (next && ref_offset(next) <= c->wbuf_ofs)) {
+			dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
+				    ref_offset(raw), ref_flags(raw),
+				    (ref_offset(raw) + ref_totlen(c, jeb, raw)),
+				    c->wbuf_ofs);
+			continue;
+		}
+		dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
+			    ref_offset(raw), ref_flags(raw),
+			    (ref_offset(raw) + ref_totlen(c, jeb, raw)));
+
+		first_raw = raw;
+		break;
 	}
 
-	if (!*first_raw) {
+	if (!first_raw) {
 		/* All nodes were obsolete. Nothing to recover. */
 		D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
-		spin_unlock(&c->erase_completion_lock);
+		c->wbuf_len = 0;
 		return;
 	}
 
-	start = ref_offset(*first_raw);
-	end = ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw);
+	start = ref_offset(first_raw);
+	end = ref_offset(jeb->last_node);
+	nr_refile = 1;
 
-	/* Find the last node to be recovered */
-	raw = first_raw;
-	while ((*raw)) {
-		if (!ref_obsolete(*raw))
-			end = ref_offset(*raw) + ref_totlen(c, jeb, *raw);
+	/* Count the number of refs which need to be copied */
+	while ((raw = ref_next(raw)) != jeb->last_node)
+		nr_refile++;
 
-		raw = &(*raw)->next_phys;
-	}
-	spin_unlock(&c->erase_completion_lock);
-
-	D1(printk(KERN_DEBUG "wbuf recover %08x-%08x\n", start, end));
+	dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
+		    start, end, end - start, nr_refile);
 
 	buf = NULL;
 	if (start < c->wbuf_ofs) {
@@ -248,13 +302,24 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 			kfree(buf);
 			buf = NULL;
 		read_failed:
-			first_raw = &(*first_raw)->next_phys;
+			first_raw = ref_next(first_raw);
+			nr_refile--;
+			while (first_raw && ref_obsolete(first_raw)) {
+				first_raw = ref_next(first_raw);
+				nr_refile--;
+			}
+
 			/* If this was the only node to be recovered, give up */
-			if (!(*first_raw))
+			if (!first_raw) {
+				c->wbuf_len = 0;
 				return;
+			}
 
 			/* It wasn't. Go on and try to recover nodes complete in the wbuf */
-			start = ref_offset(*first_raw);
+			start = ref_offset(first_raw);
+			dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
+				    start, end, end - start, nr_refile);
+
 		} else {
 			/* Read succeeded. Copy the remaining data from the wbuf */
 			memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
@@ -263,7 +328,6 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 	/* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
 	   Either 'buf' contains the data, or we find it in the wbuf */
 
-
 	/* ... and get an allocation of space from a shiny new block instead */
 	ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
 	if (ret) {
@@ -271,6 +335,14 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 		kfree(buf);
 		return;
 	}
+
+	ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
+	if (ret) {
+		printk(KERN_WARNING "Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
+		kfree(buf);
+		return;
+	}
+
 	ofs = write_ofs(c);
 
 	if (end-start >= c->wbuf_pagesize) {
@@ -304,7 +376,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 			kfree(buf);
 
 			if (retlen)
-				jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, *first_raw), NULL);
+				jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);
 
 			return;
 		}
@@ -314,12 +386,10 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 		c->wbuf_ofs = ofs + towrite;
 		memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
 		/* Don't muck about with c->wbuf_inodes. False positives are harmless. */
-		kfree(buf);
 	} else {
 		/* OK, now we're left with the dregs in whichever buffer we're using */
 		if (buf) {
 			memcpy(c->wbuf, buf, end-start);
-			kfree(buf);
 		} else {
 			memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
 		}
@@ -331,62 +401,111 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 	new_jeb = &c->blocks[ofs / c->sector_size];
 
 	spin_lock(&c->erase_completion_lock);
-	if (new_jeb->first_node) {
-		/* Odd, but possible with ST flash later maybe */
-		new_jeb->last_node->next_phys = *first_raw;
-	} else {
-		new_jeb->first_node = *first_raw;
-	}
-
-	raw = first_raw;
-	while (*raw) {
-		uint32_t rawlen = ref_totlen(c, jeb, *raw);
+	for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
+		uint32_t rawlen = ref_totlen(c, jeb, raw);
+		struct jffs2_inode_cache *ic;
+		struct jffs2_raw_node_ref *new_ref;
+		struct jffs2_raw_node_ref **adjust_ref = NULL;
+		struct jffs2_inode_info *f = NULL;
 
 		D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
-			  rawlen, ref_offset(*raw), ref_flags(*raw), ofs));
+			  rawlen, ref_offset(raw), ref_flags(raw), ofs));
+
+		ic = jffs2_raw_ref_to_ic(raw);
+
+		/* Ick. This XATTR mess should be fixed shortly... */
+		if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
+			struct jffs2_xattr_datum *xd = (void *)ic;
+			BUG_ON(xd->node != raw);
+			adjust_ref = &xd->node;
+			raw->next_in_ino = NULL;
+			ic = NULL;
+		} else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
+			struct jffs2_xattr_datum *xr = (void *)ic;
+			BUG_ON(xr->node != raw);
+			adjust_ref = &xr->node;
+			raw->next_in_ino = NULL;
+			ic = NULL;
+		} else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
+			struct jffs2_raw_node_ref **p = &ic->nodes;
+
+			/* Remove the old node from the per-inode list */
+			while (*p && *p != (void *)ic) {
+				if (*p == raw) {
+					(*p) = (raw->next_in_ino);
+					raw->next_in_ino = NULL;
+					break;
+				}
+				p = &((*p)->next_in_ino);
+			}
 
-		if (ref_obsolete(*raw)) {
-			/* Shouldn't really happen much */
-			new_jeb->dirty_size += rawlen;
-			new_jeb->free_size -= rawlen;
-			c->dirty_size += rawlen;
-		} else {
-			new_jeb->used_size += rawlen;
-			new_jeb->free_size -= rawlen;
+			if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
+				/* If it's an in-core inode, then we have to adjust any
+				   full_dirent or full_dnode structure to point to the
+				   new version instead of the old */
+				f = jffs2_gc_fetch_inode(c, ic->ino, ic->nlink);
+				if (IS_ERR(f)) {
+					/* Should never happen; it _must_ be present */
+					JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
+						    ic->ino, PTR_ERR(f));
+					BUG();
+				}
+				/* We don't lock f->sem. There's a number of ways we could
+				   end up in here with it already being locked, and nobody's
+				   going to modify it on us anyway because we hold the
+				   alloc_sem. We're only changing one ->raw pointer too,
+				   which we can get away with without upsetting readers. */
+				adjust_ref = jffs2_incore_replace_raw(c, f, raw,
+								      (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
+			} else if (unlikely(ic->state != INO_STATE_PRESENT &&
+					    ic->state != INO_STATE_CHECKEDABSENT &&
+					    ic->state != INO_STATE_GC)) {
+				JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
+				BUG();
+			}
+		}
+
+		new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);
+
+		if (adjust_ref) {
+			BUG_ON(*adjust_ref != raw);
+			*adjust_ref = new_ref;
+		}
+		if (f)
+			jffs2_gc_release_inode(c, f);
+
+		if (!ref_obsolete(raw)) {
 			jeb->dirty_size += rawlen;
 			jeb->used_size -= rawlen;
 			c->dirty_size += rawlen;
+			c->used_size -= rawlen;
+			raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
+			BUG_ON(raw->next_in_ino);
 		}
-		c->free_size -= rawlen;
-		(*raw)->flash_offset = ofs | ref_flags(*raw);
 		ofs += rawlen;
-		new_jeb->last_node = *raw;
-
-		raw = &(*raw)->next_phys;
 	}
 
+	kfree(buf);
+
 	/* Fix up the original jeb now it's on the bad_list */
-	*first_raw = NULL;
-	if (first_raw == &jeb->first_node) {
-		jeb->last_node = NULL;
+	if (first_raw == jeb->first_node) {
 		D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
 		list_del(&jeb->list);
 		list_add(&jeb->list, &c->erase_pending_list);
 		c->nr_erasing_blocks++;
 		jffs2_erase_pending_trigger(c);
 	}
-	else
-		jeb->last_node = container_of(first_raw, struct jffs2_raw_node_ref, next_phys);
 
 	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
 	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
 
 	jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
 	jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);
 
 	spin_unlock(&c->erase_completion_lock);
 
-	D1(printk(KERN_DEBUG "wbuf recovery completed OK\n"));
+	D1(printk(KERN_DEBUG "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len));
+
 }
 
 /* Meaning of pad argument:
@@ -400,6 +519,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 
 static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
 {
+	struct jffs2_eraseblock *wbuf_jeb;
 	int ret;
 	size_t retlen;
 
@@ -417,7 +537,8 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
 	if (!c->wbuf_len)	/* already checked c->wbuf above */
 		return 0;
 
-	if (jffs2_prealloc_raw_node_refs(c, c->nextblock, c->reserved_refs + 1))
+	wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
+	if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
 		return -ENOMEM;
 
 	/* claim remaining space on the page
@@ -473,32 +594,29 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
 
 	/* Adjust free size of the block if we padded. */
 	if (pad) {
-		struct jffs2_eraseblock *jeb;
 		uint32_t waste = c->wbuf_pagesize - c->wbuf_len;
 
-		jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
-
 		D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
-			  (jeb==c->nextblock)?"next":"", jeb->offset));
+			  (wbuf_jeb==c->nextblock)?"next":"", wbuf_jeb->offset));
 
 		/* wbuf_pagesize - wbuf_len is the amount of space that's to be
 		   padded. If there is less free space in the block than that,
 		   something screwed up */
-		if (jeb->free_size < waste) {
+		if (wbuf_jeb->free_size < waste) {
 			printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
 			       c->wbuf_ofs, c->wbuf_len, waste);
 			printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
-			       jeb->offset, jeb->free_size);
+			       wbuf_jeb->offset, wbuf_jeb->free_size);
 			BUG();
 		}
 
 		spin_lock(&c->erase_completion_lock);
 
-		jffs2_link_node_ref(c, jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
+		jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
 		/* FIXME: that made it count as dirty. Convert to wasted */
-		jeb->dirty_size -= waste;
+		wbuf_jeb->dirty_size -= waste;
 		c->dirty_size -= waste;
-		jeb->wasted_size += waste;
+		wbuf_jeb->wasted_size += waste;
 		c->wasted_size += waste;
 	} else
 		spin_lock(&c->erase_completion_lock);
@@ -758,7 +876,8 @@ outerr:
  * This is the entry for flash write.
  * Check, if we work on NAND FLASH, if so build an kvec and write it via vritev
 */
-int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf)
+int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
+		      size_t *retlen, const u_char *buf)
 {
 	struct kvec vecs[1];
 
@@ -953,7 +1072,7 @@ int jffs2_check_nand_cleanmarker (struct jffs2_sb_info *c, struct jffs2_eraseblo
 	}
 	D1(if (retval == 1) {
 		printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): Cleanmarker node not detected in block at %08x\n", jeb->offset);
-		printk(KERN_WARNING "OOB at %08x was ", offset);
+		printk(KERN_WARNING "OOB at %08zx was ", offset);
 		for (i=0; i < oob_size; i++) {
 			printk("%02x ", buf[i]);
 		}
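Every former `raw = &(*raw)->next_phys` walk in the hunks above becomes a ref_next() call. That helper is defined outside this file; below is a plausible sketch, assuming (as the jffs2_prealloc_raw_node_refs() calls suggest) that refs are handed out in fixed-size blocks chained together by sentinel slots. REF_LINK_NODE and REF_EMPTY_NODE are assumed marker values here, not verified against the headers at this commit.

/* Sketch of array-based traversal; sentinel names are assumptions. */
static inline struct jffs2_raw_node_ref *ref_next(struct jffs2_raw_node_ref *ref)
{
	ref++;

	/* A link sentinel ends one allocation block; its next_in_ino
	   points at the first ref of the next block (or NULL). */
	if (ref->flash_offset == REF_LINK_NODE) {
		ref = ref->next_in_ino;
		if (!ref)
			return NULL;
	}

	/* An empty slot marks the current end of the chain. */
	if (ref->flash_offset == REF_EMPTY_NODE)
		return NULL;

	return ref;
}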