author | Linus Torvalds <torvalds@g5.osdl.org> | 2005-11-07 13:24:08 -0500
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-11-07 13:24:08 -0500
commit | b3ce1debe2685383a9ad6ace9c49869c3968c013 (patch)
tree | dcb606fac467d6ce78a9c608a1e0d2323af44f2b /fs/jffs2/wbuf.c
parent | 5b2f7ffcb734d3046144dfbd5ac6d76254a9e522 (diff)
parent | c2965f1129ee54afcc4ef293ff0f25fa3a7e7392 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/tglx/mtd-2.6
Some manual fixups for clashing kfree() cleanups etc.
Diffstat (limited to 'fs/jffs2/wbuf.c')
-rw-r--r-- | fs/jffs2/wbuf.c | 212
1 file changed, 126 insertions, 86 deletions
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 7bc7f2d571f6..4cebf0e57c46 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -9,7 +9,7 @@
  *
  * For licensing information, see the file 'LICENCE' in this directory.
  *
- * $Id: wbuf.c,v 1.92 2005/04/05 12:51:54 dedekind Exp $
+ * $Id: wbuf.c,v 1.100 2005/09/30 13:59:13 dedekind Exp $
  *
  */
 
@@ -30,12 +30,12 @@
 static unsigned char *brokenbuf;
 #endif
 
+#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
+#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
+
 /* max. erase failures before we mark a block bad */
 #define MAX_ERASE_FAILURES	2
 
-/* two seconds timeout for timed wbuf-flushing */
-#define WBUF_FLUSH_TIMEOUT	2 * HZ
-
 struct jffs2_inodirty {
 	uint32_t ino;
 	struct jffs2_inodirty *next;
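The PAGE_DIV/PAGE_MOD macros added above round an offset down to the start of its write-buffer page and give the remainder within that page; divide/modulo is used rather than bit masks because the page size need not be a power of two. A minimal standalone sketch of that arithmetic, assuming a plain `pagesize` variable in place of `c->wbuf_pagesize` (illustration only, not kernel code):

```c
#include <stdio.h>

/* Stand-alone illustration of the PAGE_DIV/PAGE_MOD arithmetic from the
 * hunk above. `pagesize` stands in for c->wbuf_pagesize and may be a
 * non-power-of-two value such as a 528-byte DataFlash page. */
static unsigned long page_div(unsigned long x, unsigned long pagesize)
{
	return (x / pagesize) * pagesize;   /* start of the page containing x */
}

static unsigned long page_mod(unsigned long x, unsigned long pagesize)
{
	return x % pagesize;                /* offset of x within its page */
}

int main(void)
{
	unsigned long pagesize = 528;       /* typical DataFlash page size */
	unsigned long to = 1234;

	printf("page start: %lu, offset in page: %lu\n",
	       page_div(to, pagesize), page_mod(to, pagesize));
	/* prints: page start: 1056, offset in page: 178 */
	return 0;
}
```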
@@ -139,7 +139,6 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock
 {
 	D1(printk("About to refile bad block at %08x\n", jeb->offset));
 
-	D2(jffs2_dump_block_lists(c));
 	/* File the existing block on the bad_used_list.... */
 	if (c->nextblock == jeb)
 		c->nextblock = NULL;
@@ -156,7 +155,6 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock
 		c->nr_erasing_blocks++;
 		jffs2_erase_pending_trigger(c);
 	}
-	D2(jffs2_dump_block_lists(c));
 
 	/* Adjust its size counts accordingly */
 	c->wasted_size += jeb->free_size;
@@ -164,8 +162,9 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock
 	jeb->wasted_size += jeb->free_size;
 	jeb->free_size = 0;
 
-	ACCT_SANITY_CHECK(c,jeb);
-	D1(ACCT_PARANOIA_CHECK(jeb));
+	jffs2_dbg_dump_block_lists_nolock(c);
+	jffs2_dbg_acct_sanity_check_nolock(c,jeb);
+	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
 }
 
 /* Recover from failure to write wbuf. Recover the nodes up to the
@@ -189,7 +188,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 	/* Find the first node to be recovered, by skipping over every
 	   node which ends before the wbuf starts, or which is obsolete. */
 	first_raw = &jeb->first_node;
 	while (*first_raw &&
 	      (ref_obsolete(*first_raw) ||
 	      (ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) {
 		D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
@@ -238,7 +237,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 		ret = c->mtd->read_ecc(c->mtd, start, c->wbuf_ofs - start, &retlen, buf, NULL, c->oobinfo);
 	else
 		ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);
 
 	if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) {
 		/* ECC recovered */
 		ret = 0;
@@ -266,7 +265,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 
 
 	/* ... and get an allocation of space from a shiny new block instead */
-	ret = jffs2_reserve_space_gc(c, end-start, &ofs, &len);
+	ret = jffs2_reserve_space_gc(c, end-start, &ofs, &len, JFFS2_SUMMARY_NOSUM_SIZE);
 	if (ret) {
 		printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
 		kfree(buf);
@@ -275,15 +274,15 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 	if (end-start >= c->wbuf_pagesize) {
 		/* Need to do another write immediately, but it's possible
 		   that this is just because the wbuf itself is completely
 		   full, and there's nothing earlier read back from the
 		   flash. Hence 'buf' isn't necessarily what we're writing
 		   from. */
 		unsigned char *rewrite_buf = buf?:c->wbuf;
 		uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
 
 		D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
 			  towrite, ofs));
 
 #ifdef BREAKMEHEADER
 		static int breakme;
 		if (breakme++ == 20) {
@@ -391,11 +390,11 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 	else
 		jeb->last_node = container_of(first_raw, struct jffs2_raw_node_ref, next_phys);
 
-	ACCT_SANITY_CHECK(c,jeb);
-	D1(ACCT_PARANOIA_CHECK(jeb));
+	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
+	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
 
-	ACCT_SANITY_CHECK(c,new_jeb);
-	D1(ACCT_PARANOIA_CHECK(new_jeb));
+	jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
+	jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);
 
 	spin_unlock(&c->erase_completion_lock);
 
@@ -434,15 +433,15 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
 	   this happens, if we have a change to a new block,
 	   or if fsync forces us to flush the writebuffer.
 	   if we have a switch to next page, we will not have
 	   enough remaining space for this.
 	*/
-	if (pad && !jffs2_dataflash(c)) {
+	if (pad ) {
 		c->wbuf_len = PAD(c->wbuf_len);
 
 		/* Pad with JFFS2_DIRTY_BITMASK initially. this helps out ECC'd NOR
 		   with 8 byte page size */
 		memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);
 
 		if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
 			struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
 			padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
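When the buffer is flushed with padding, the tail of the page is zeroed and, if at least a node header fits, a padding node is placed there so the scanner later treats that area as dirty space. A host-side sketch of that shape of logic, using a simplified header struct and placeholder constants rather than the real definitions from include/linux/jffs2.h (assumptions for illustration; no endianness or CRC handling):

```c
#include <stdint.h>
#include <string.h>

/* Simplified stand-in for struct jffs2_unknown_node. The real layout and
 * constants live in include/linux/jffs2.h; values here are placeholders. */
struct pad_node_hdr {
	uint16_t magic;
	uint16_t nodetype;
	uint32_t totlen;
	uint32_t hdr_crc;
};

#define FAKE_MAGIC	0x1985	/* JFFS2_MAGIC_BITMASK uses this value */
#define FAKE_PADDING	0x2004	/* placeholder for JFFS2_NODETYPE_PADDING */

/* Pad the tail of a write-buffer page: zero it and, if a header fits,
 * drop in a padding node spanning the zeroed area. Mirrors the shape of
 * __jffs2_flush_wbuf() above, not its exact on-flash format. */
static void pad_wbuf(unsigned char *wbuf, size_t wbuf_len, size_t pagesize)
{
	memset(wbuf + wbuf_len, 0, pagesize - wbuf_len);

	if (wbuf_len + sizeof(struct pad_node_hdr) < pagesize) {
		struct pad_node_hdr *pad = (void *)(wbuf + wbuf_len);

		pad->magic = FAKE_MAGIC;
		pad->nodetype = FAKE_PADDING;
		pad->totlen = (uint32_t)(pagesize - wbuf_len);
		pad->hdr_crc = 0;	/* the kernel computes a real CRC32 here */
	}
}
```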
@@ -453,7 +452,7 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
 	}
 	/* else jffs2_flash_writev has actually filled in the rest of the
 	   buffer for us, and will deal with the node refs etc. later. */
 
 #ifdef BREAKME
 	static int breakme;
 	if (breakme++ == 20) {
@@ -462,9 +461,9 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
 		c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
 					&retlen, brokenbuf, NULL, c->oobinfo);
 		ret = -EIO;
 	} else
 #endif
 
 	if (jffs2_cleanmarker_oob(c))
 		ret = c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf, NULL, c->oobinfo);
 	else
@@ -487,7 +486,7 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
 	spin_lock(&c->erase_completion_lock);
 
 	/* Adjust free size of the block if we padded. */
-	if (pad && !jffs2_dataflash(c)) {
+	if (pad) {
 		struct jffs2_eraseblock *jeb;
 
 		jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
@@ -495,7 +494,7 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
 		D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
 			  (jeb==c->nextblock)?"next":"", jeb->offset));
 
 		/* wbuf_pagesize - wbuf_len is the amount of space that's to be
 		   padded. If there is less free space in the block than that,
 		   something screwed up */
 		if (jeb->free_size < (c->wbuf_pagesize - c->wbuf_len)) {
@@ -523,9 +522,9 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
 	return 0;
 }
 
 /* Trigger garbage collection to flush the write-buffer.
    If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
    outstanding. If ino arg non-zero, do it only if a write for the
    given inode is outstanding. */
 int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
 {
@@ -604,15 +603,6 @@ int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
 
 	return ret;
 }
-
-#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
-#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
-#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
-#else
-#define PAGE_DIV(x) ( (x) & (~(c->wbuf_pagesize - 1)) )
-#define PAGE_MOD(x) ( (x) & (c->wbuf_pagesize - 1) )
-#endif
-
 int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino)
 {
 	struct kvec outvecs[3];
@@ -629,13 +619,13 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
 	/* If not NAND flash, don't bother */
 	if (!jffs2_is_writebuffered(c))
 		return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
 
 	down_write(&c->wbuf_sem);
 
 	/* If wbuf_ofs is not initialized, set it to target address */
 	if (c->wbuf_ofs == 0xFFFFFFFF) {
 		c->wbuf_ofs = PAGE_DIV(to);
 		c->wbuf_len = PAGE_MOD(to);
 		memset(c->wbuf,0xff,c->wbuf_pagesize);
 	}
 
@@ -649,10 +639,10 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
 			memset(c->wbuf,0xff,c->wbuf_pagesize);
 		}
 	}
 
 	/* Sanity checks on target address.
 	   It's permitted to write at PAD(c->wbuf_len+c->wbuf_ofs),
 	   and it's permitted to write at the beginning of a new
 	   erase block. Anything else, and you die.
 	   New block starts at xxx000c (0-b = block header)
 	*/
@@ -670,8 +660,8 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
 		}
 		/* set pointer to new block */
 		c->wbuf_ofs = PAGE_DIV(to);
 		c->wbuf_len = PAGE_MOD(to);
 	}
 
 	if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
 		/* We're not writing immediately after the writebuffer. Bad. */
@@ -691,21 +681,21 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
 	invec = 0;
 	outvec = 0;
 
 	/* Fill writebuffer first, if already in use */
 	if (c->wbuf_len) {
 		uint32_t invec_ofs = 0;
 
 		/* adjust alignment offset */
 		if (c->wbuf_len != PAGE_MOD(to)) {
 			c->wbuf_len = PAGE_MOD(to);
 			/* take care of alignment to next page */
 			if (!c->wbuf_len)
 				c->wbuf_len = c->wbuf_pagesize;
 		}
 
 		while(c->wbuf_len < c->wbuf_pagesize) {
 			uint32_t thislen;
 
 			if (invec == count)
 				goto alldone;
 
@@ -713,17 +703,17 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
 
 			if (thislen >= invecs[invec].iov_len)
 				thislen = invecs[invec].iov_len;
 
 			invec_ofs = thislen;
 
 			memcpy(c->wbuf + c->wbuf_len, invecs[invec].iov_base, thislen);
 			c->wbuf_len += thislen;
 			donelen += thislen;
 			/* Get next invec, if actual did not fill the buffer */
 			if (c->wbuf_len < c->wbuf_pagesize)
 				invec++;
 		}
 
 		/* write buffer is full, flush buffer */
 		ret = __jffs2_flush_wbuf(c, NOPAD);
 		if (ret) {
@@ -782,10 +772,10 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
 
 		/* We did cross a page boundary, so we write some now */
 		if (jffs2_cleanmarker_oob(c))
 			ret = c->mtd->writev_ecc(c->mtd, outvecs, splitvec+1, outvec_to, &wbuf_retlen, NULL, c->oobinfo);
 		else
 			ret = jffs2_flash_direct_writev(c, outvecs, splitvec+1, outvec_to, &wbuf_retlen);
 
 		if (ret < 0 || wbuf_retlen != PAGE_DIV(totlen)) {
 			/* At this point we have no problem,
 			   c->wbuf is empty. However refile nextblock to avoid
@@ -802,7 +792,7 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
 			spin_unlock(&c->erase_completion_lock);
 			goto exit;
 		}
 
 		donelen += wbuf_retlen;
 		c->wbuf_ofs = PAGE_DIV(outvec_to) + PAGE_DIV(totlen);
 
@@ -836,11 +826,17 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
 alldone:
 	*retlen = donelen;
 
+	if (jffs2_sum_active()) {
+		int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
+		if (res)
+			return res;
+	}
+
 	if (c->wbuf_len && ino)
 		jffs2_wbuf_dirties_inode(c, ino);
 
 	ret = 0;
 
 exit:
 	up_write(&c->wbuf_sem);
 	return ret;
@@ -855,7 +851,7 @@ int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *r
 	struct kvec vecs[1];
 
 	if (!jffs2_is_writebuffered(c))
-		return c->mtd->write(c->mtd, ofs, len, retlen, buf);
+		return jffs2_flash_direct_write(c, ofs, len, retlen, buf);
 
 	vecs[0].iov_base = (unsigned char *) buf;
 	vecs[0].iov_len = len;
@@ -883,18 +879,18 @@ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *re
 	if ( (ret == -EBADMSG) && (*retlen == len) ) {
 		printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
 		       len, ofs);
 		/*
 		 * We have the raw data without ECC correction in the buffer, maybe
 		 * we are lucky and all data or parts are correct. We check the node.
 		 * If data are corrupted node check will sort it out.
 		 * We keep this block, it will fail on write or erase and the we
 		 * mark it bad. Or should we do that now? But we should give him a chance.
 		 * Maybe we had a system crash or power loss before the ecc write or
 		 * a erase was completed.
 		 * So we return success. :)
 		 */
 		ret = 0;
 	}
 
 	/* if no writebuffer available or write buffer empty, return */
 	if (!c->wbuf_pagesize || !c->wbuf_len)
@@ -909,16 +905,16 @@ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *re
 		if (owbf > c->wbuf_len)		/* is read beyond write buffer ? */
 			goto exit;
 		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
 		if (lwbf > len)
 			lwbf = len;
 	} else {
 		orbf = (c->wbuf_ofs - ofs);	/* offset in read buffer */
 		if (orbf > len)			/* is write beyond write buffer ? */
 			goto exit;
 		lwbf = len - orbf;		/* number of bytes to copy */
 		if (lwbf > c->wbuf_len)
 			lwbf = c->wbuf_len;
 	}
 	if (lwbf > 0)
 		memcpy(buf+orbf,c->wbuf+owbf,lwbf);
 
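jffs2_flash_read() has to merge data still sitting in the write buffer into what was just read from flash: the arithmetic above distinguishes reads that start inside the buffered region (owbf, offset into the write buffer) from reads that start before it (orbf, offset into the read buffer). A small standalone sketch of that overlap calculation, with hypothetical names mirroring the diff and simplified types (not the kernel function):

```c
#include <stddef.h>
#include <string.h>

/* Overlay pending write-buffer contents onto a read result.
 *   ofs/len    - range the caller read from flash into `buf`
 *   wbuf_ofs   - flash offset the write buffer will eventually hit
 *   wbuf/wlen  - buffered data not yet on flash */
static void overlay_wbuf(unsigned char *buf, size_t ofs, size_t len,
			 const unsigned char *wbuf, size_t wbuf_ofs, size_t wlen)
{
	size_t owbf, orbf, lwbf;

	if (ofs + len <= wbuf_ofs || ofs >= wbuf_ofs + wlen)
		return;				/* no overlap at all */

	if (ofs >= wbuf_ofs) {
		owbf = ofs - wbuf_ofs;		/* offset into the write buffer */
		orbf = 0;
		lwbf = wlen - owbf;		/* bytes available from there */
		if (lwbf > len)
			lwbf = len;
	} else {
		owbf = 0;
		orbf = wbuf_ofs - ofs;		/* offset into the read buffer */
		lwbf = len - orbf;		/* bytes of the read past wbuf_ofs */
		if (lwbf > wlen)
			lwbf = wlen;
	}
	memcpy(buf + orbf, wbuf + owbf, lwbf);
}
```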
@@ -946,7 +942,7 @@ int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb
 		printk(KERN_NOTICE "jffs2_check_oob_empty(): allocation of temporary data buffer for oob check failed\n");
 		return -ENOMEM;
 	}
 	/*
 	 * if mode = 0, we scan for a total empty oob area, else we have
 	 * to take care of the cleanmarker in the first page of the block
 	 */
@@ -955,41 +951,41 @@ int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb
 		D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
 		goto out;
 	}
 
 	if (retlen < len) {
 		D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB return short read "
 			  "(%zd bytes not %d) for block at %08x\n", retlen, len, jeb->offset));
 		ret = -EIO;
 		goto out;
 	}
 
 	/* Special check for first page */
 	for(i = 0; i < oob_size ; i++) {
 		/* Yeah, we know about the cleanmarker. */
 		if (mode && i >= c->fsdata_pos &&
 		    i < c->fsdata_pos + c->fsdata_len)
 			continue;
 
 		if (buf[i] != 0xFF) {
 			D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n",
-				  buf[page+i], page+i, jeb->offset));
+				  buf[i], i, jeb->offset));
 			ret = 1;
 			goto out;
 		}
 	}
 
 	/* we know, we are aligned :) */
 	for (page = oob_size; page < len; page += sizeof(long)) {
 		unsigned long dat = *(unsigned long *)(&buf[page]);
 		if(dat != -1) {
 			ret = 1;
 			goto out;
 		}
 	}
 
 out:
 	kfree(buf);
 
 	return ret;
 }
 
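The second loop in the hunk above checks the remaining OOB bytes a machine word at a time: erased flash reads back as all 0xFF, so any word that is not all-ones means the area is in use. A minimal userspace sketch of the same check (a hypothetical helper, not the kernel code; it adds a memcpy so unaligned buffers are also safe):

```c
#include <stddef.h>
#include <string.h>

/* Return 0 if the buffer is fully erased (all bytes 0xFF), 1 otherwise,
 * matching the convention used by jffs2_check_oob_empty() above. */
static int region_is_erased(const unsigned char *buf, size_t len)
{
	size_t i = 0;

	/* byte-wise until the remaining length is a whole number of words */
	while ((len - i) % sizeof(unsigned long)) {
		if (buf[i++] != 0xFF)
			return 1;
	}

	for (; i < len; i += sizeof(unsigned long)) {
		unsigned long dat;

		memcpy(&dat, buf + i, sizeof(dat));	/* avoid unaligned access */
		if (dat != (unsigned long)-1)
			return 1;
	}
	return 0;
}
```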
@@ -1071,7 +1067,7 @@ int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_erasebloc
 	n.totlen = cpu_to_je32(8);
 
 	ret = jffs2_flash_write_oob(c, jeb->offset + c->fsdata_pos, c->fsdata_len, &retlen, (unsigned char *)&n);
 
 	if (ret) {
 		D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
 		return ret;
@@ -1083,7 +1079,7 @@ int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_erasebloc
 	return 0;
 }
 
 /*
  * On NAND we try to mark this block bad. If the block was erased more
  * than MAX_ERASE_FAILURES we mark it finaly bad.
  * Don't care about failures. This block remains on the erase-pending
@@ -1104,7 +1100,7 @@ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *
 
 	D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset));
 	ret = c->mtd->block_markbad(c->mtd, bad_offset);
 
 	if (ret) {
 		D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
 		return ret;
@@ -1128,7 +1124,7 @@ static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c)
 	/* Do this only, if we have an oob buffer */
 	if (!c->mtd->oobsize)
 		return 0;
 
 	/* Cleanmarker is out-of-band, so inline size zero */
 	c->cleanmarker_size = 0;
 
@@ -1154,7 +1150,7 @@ static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c)
 		c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN;
 		c->badblock_pos = 15;
 		break;
 
 	default:
 		D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacment info found\n"));
 		return -EINVAL;
@@ -1171,7 +1167,7 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
 	init_rwsem(&c->wbuf_sem);
 	c->wbuf_pagesize = c->mtd->oobblock;
 	c->wbuf_ofs = 0xFFFFFFFF;
 
 	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
 	if (!c->wbuf)
 		return -ENOMEM;
@@ -1197,17 +1193,41 @@ void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
 
 int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
 	c->cleanmarker_size = 0;		/* No cleanmarkers needed */
 
 	/* Initialize write buffer */
 	init_rwsem(&c->wbuf_sem);
-	c->wbuf_pagesize = c->sector_size;
-	c->wbuf_ofs = 0xFFFFFFFF;
 
+
+	c->wbuf_pagesize = c->mtd->erasesize;
+
+	/* Find a suitable c->sector_size
+	 * - Not too much sectors
+	 * - Sectors have to be at least 4 K + some bytes
+	 * - All known dataflashes have erase sizes of 528 or 1056
+	 * - we take at least 8 eraseblocks and want to have at least 8K size
+	 * - The concatenation should be a power of 2
+	 */
+
+	c->sector_size = 8 * c->mtd->erasesize;
+
+	while (c->sector_size < 8192) {
+		c->sector_size *= 2;
+	}
+
+	/* It may be necessary to adjust the flash size */
+	c->flash_size = c->mtd->size;
+
+	if ((c->flash_size % c->sector_size) != 0) {
+		c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
+		printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size);
+	};
+
+	c->wbuf_ofs = 0xFFFFFFFF;
 	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
 	if (!c->wbuf)
 		return -ENOMEM;
 
-	printk(KERN_INFO "JFFS2 write-buffering enabled (%i)\n", c->wbuf_pagesize);
+	printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size);
 
 	return 0;
 }
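The new jffs2_dataflash_setup() above concatenates several physical DataFlash erase blocks into one virtual JFFS2 eraseblock: start from 8 × erasesize, keep doubling until the virtual block is at least 8 KiB, then trim the flash size down to a whole number of such blocks. A quick standalone check of that arithmetic for the 528- and 1056-byte page devices mentioned in the comment (hypothetical harness, not kernel code; the example device sizes are assumptions):

```c
#include <stdio.h>
#include <stdint.h>

/* Reproduce the virtual-eraseblock sizing from jffs2_dataflash_setup()
 * above for a given physical erase size and device size. */
static void dataflash_geometry(uint32_t erasesize, uint32_t flash_size)
{
	uint32_t sector_size = 8 * erasesize;

	while (sector_size < 8192)
		sector_size *= 2;

	/* trim to a whole number of virtual eraseblocks */
	if (flash_size % sector_size)
		flash_size = (flash_size / sector_size) * sector_size;

	printf("erasesize %u -> sector_size %u, usable size %u\n",
	       erasesize, sector_size, flash_size);
}

int main(void)
{
	dataflash_geometry(528, 8192 * 528);	/* 8*528=4224, doubled to 8448 */
	dataflash_geometry(1056, 8192 * 1056);	/* 8*1056=8448, already >= 8192 */
	return 0;
}
```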
@@ -1235,3 +1255,23 @@ int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c) {
 void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c) {
 	kfree(c->wbuf);
 }
+
+int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
+	/* Cleanmarker currently occupies a whole programming region */
+	c->cleanmarker_size = MTD_PROGREGION_SIZE(c->mtd);
+
+	/* Initialize write buffer */
+	init_rwsem(&c->wbuf_sem);
+	c->wbuf_pagesize = MTD_PROGREGION_SIZE(c->mtd);
+	c->wbuf_ofs = 0xFFFFFFFF;
+
+	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
+	if (!c->wbuf)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
+	kfree(c->wbuf);
+}