Diffstat (limited to 'fs/jffs2/nodemgmt.c')
-rw-r--r--   fs/jffs2/nodemgmt.c   67
1 file changed, 30 insertions(+), 37 deletions(-)
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index 49127a1f0458..9a0f312cfcda 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -374,7 +374,6 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uin
  * @c: superblock info
  * @new: new node reference to add
  * @len: length of this physical node
- * @dirty: dirty flag for new node
  *
  * Should only be used to report nodes for which space has been allocated
  * by jffs2_reserve_space.
@@ -382,13 +381,14 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uin
  * Must be called with the alloc_sem held.
  */
 
-int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
+int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new, uint32_t len)
 {
 	struct jffs2_eraseblock *jeb;
-	uint32_t len;
 
 	jeb = &c->blocks[new->flash_offset / c->sector_size];
-	len = ref_totlen(c, jeb, new);
+#ifdef TEST_TOTLEN
+	new->__totlen = len;
+#endif
 
 	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
 #if 1
@@ -403,21 +403,7 @@ int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_r
 #endif
 	spin_lock(&c->erase_completion_lock);
 
-	if (!jeb->first_node)
-		jeb->first_node = new;
-	if (jeb->last_node)
-		jeb->last_node->next_phys = new;
-	jeb->last_node = new;
-
-	jeb->free_size -= len;
-	c->free_size -= len;
-	if (ref_obsolete(new)) {
-		jeb->dirty_size += len;
-		c->dirty_size += len;
-	} else {
-		jeb->used_size += len;
-		c->used_size += len;
-	}
+	jffs2_link_node_ref(c, jeb, new, len);
 
 	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
 		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
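For context on the hunk above: the list linking and space accounting deleted here is presumably what the new jffs2_link_node_ref() helper now performs in one place. A minimal sketch of such a helper, reconstructed purely from the lines removed above and not taken from this commit (the real helper lives elsewhere in the JFFS2 tree and may handle additional cases):

	/* Sketch only, reconstructed from the accounting removed above. */
	void jffs2_link_node_ref(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_node_ref *ref, uint32_t len)
	{
		/* Append the new ref to the eraseblock's list of physical nodes */
		if (!jeb->first_node)
			jeb->first_node = ref;
		if (jeb->last_node)
			jeb->last_node->next_phys = ref;
		jeb->last_node = ref;

		/* The space is no longer free: account it as used or dirty */
		jeb->free_size -= len;
		c->free_size -= len;
		if (ref_obsolete(ref)) {
			jeb->dirty_size += len;
			c->dirty_size += len;
		} else {
			jeb->used_size += len;
			c->used_size += len;
		}
	}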
@@ -470,6 +456,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
 	struct jffs2_unknown_node n;
 	int ret, addedsize;
 	size_t retlen;
+	uint32_t freed_len;
 
 	if(!ref) {
 		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
@@ -499,32 +486,34 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
 
 	spin_lock(&c->erase_completion_lock);
 
+	freed_len = ref_totlen(c, jeb, ref);
+
 	if (ref_flags(ref) == REF_UNCHECKED) {
-		D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) {
+		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
 			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
-			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
+			       freed_len, blocknr, ref->flash_offset, jeb->used_size);
 			BUG();
 		})
-		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
-		jeb->unchecked_size -= ref_totlen(c, jeb, ref);
-		c->unchecked_size -= ref_totlen(c, jeb, ref);
+		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len));
+		jeb->unchecked_size -= freed_len;
+		c->unchecked_size -= freed_len;
 	} else {
-		D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) {
+		D1(if (unlikely(jeb->used_size < freed_len)) {
 			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
-			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
+			       freed_len, blocknr, ref->flash_offset, jeb->used_size);
 			BUG();
 		})
-		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
-		jeb->used_size -= ref_totlen(c, jeb, ref);
-		c->used_size -= ref_totlen(c, jeb, ref);
+		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
+		jeb->used_size -= freed_len;
+		c->used_size -= freed_len;
 	}
 
 	// Take care, that wasted size is taken into concern
-	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) {
+	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
 		D1(printk(KERN_DEBUG "Dirtying\n"));
-		addedsize = ref_totlen(c, jeb, ref);
-		jeb->dirty_size += ref_totlen(c, jeb, ref);
-		c->dirty_size += ref_totlen(c, jeb, ref);
+		addedsize = freed_len;
+		jeb->dirty_size += freed_len;
+		c->dirty_size += freed_len;
 
 		/* Convert wasted space to dirty, if not a bad block */
 		if (jeb->wasted_size) {
@@ -545,8 +534,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
 	} else {
 		D1(printk(KERN_DEBUG "Wasting\n"));
 		addedsize = 0;
-		jeb->wasted_size += ref_totlen(c, jeb, ref);
-		c->wasted_size += ref_totlen(c, jeb, ref);
+		jeb->wasted_size += freed_len;
+		c->wasted_size += freed_len;
 	}
 	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
 
@@ -634,8 +623,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
 		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
 		goto out_erase_sem;
 	}
-	if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) {
-		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref));
+	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
+		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
 		goto out_erase_sem;
 	}
 	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
@@ -692,7 +681,9 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
 
 	spin_lock(&c->erase_completion_lock);
 
+#ifdef TEST_TOTLEN
 	ref->__totlen += n->__totlen;
+#endif
 	ref->next_phys = n->next_phys;
 	if (jeb->last_node == n) jeb->last_node = ref;
 	if (jeb->gc_node == n) {
@@ -715,7 +706,9 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
 		p = p->next_phys;
 
 	if (ref_obsolete(p) && !ref->next_in_ino) {
+#ifdef TEST_TOTLEN
 		p->__totlen += ref->__totlen;
+#endif
 		if (jeb->last_node == ref) {
 			jeb->last_node = p;
 		}
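Note on the API change above: jffs2_add_physical_node_ref() now takes the node length from its caller instead of rederiving it with ref_totlen(), and it only stores that length in ref->__totlen when the TEST_TOTLEN debug option is defined. A hypothetical call site, purely for illustration and not part of this diff (the names `raw` and `len` are placeholders for the node reference and the length the caller already computed when reserving space):

	/* before: the length was looked up again inside the function */
	ret = jffs2_add_physical_node_ref(c, raw);

	/* after: the caller passes in the length it already knows */
	ret = jffs2_add_physical_node_ref(c, raw, len);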