author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500
---|---|---
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500
commit | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch) |
tree | a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /fs/jffs2/wbuf.c |
parent | 406089d01562f1e2bf9f089fd7637009ebaad589 (diff) |
Patched in Tegra support.
Diffstat (limited to 'fs/jffs2/wbuf.c')
-rw-r--r-- | fs/jffs2/wbuf.c | 258 |
1 file changed, 101 insertions, 157 deletions
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index a6597d60d76..4515bea0268 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -11,8 +11,6 @@ | |||
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
15 | |||
16 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
17 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
18 | #include <linux/mtd/mtd.h> | 16 | #include <linux/mtd/mtd.h> |
@@ -20,7 +18,6 @@ | |||
20 | #include <linux/mtd/nand.h> | 18 | #include <linux/mtd/nand.h> |
21 | #include <linux/jiffies.h> | 19 | #include <linux/jiffies.h> |
22 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
23 | #include <linux/writeback.h> | ||
24 | 21 | ||
25 | #include "nodelist.h" | 22 | #include "nodelist.h" |
26 | 23 | ||
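
The two hunks above drop the file's `pr_fmt` prefix definition (its `pr_*` call sites are rewritten as raw `printk()` throughout the rest of the diff) and the `<linux/writeback.h>` include, which supplied `dirty_writeback_interval` for the delayed-flush code removed near the end of this diff. For context, `pr_fmt` is the kernel idiom that stamps a per-module prefix onto every `pr_warn()`/`pr_info()` in a file; a simplified sketch of how it composes (the real macros live in `<linux/printk.h>`):

```c
/* Simplified sketch of the pr_fmt convention; the real macros are in
 * <linux/printk.h>. A file defines pr_fmt() before its #includes, and
 * the pr_* helpers paste that prefix onto every format string. */
#define KBUILD_MODNAME "jffs2"            /* normally supplied by Kbuild */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define pr_warn(fmt, ...) \
	printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)

/* pr_warn("flash size adjusted to %dKiB\n", sz) thus expands to
 * printk(KERN_WARNING "jffs2: flash size adjusted to %dKiB\n", sz). */
```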
@@ -86,7 +83,7 @@ static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino) | |||
86 | { | 83 | { |
87 | struct jffs2_inodirty *new; | 84 | struct jffs2_inodirty *new; |
88 | 85 | ||
89 | /* Schedule delayed write-buffer write-out */ | 86 | /* Mark the superblock dirty so that kupdated will flush... */ |
90 | jffs2_dirty_trigger(c); | 87 | jffs2_dirty_trigger(c); |
91 | 88 | ||
92 | if (jffs2_wbuf_pending_for_ino(c, ino)) | 89 | if (jffs2_wbuf_pending_for_ino(c, ino)) |
@@ -94,7 +91,7 @@ static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino) | |||
94 | 91 | ||
95 | new = kmalloc(sizeof(*new), GFP_KERNEL); | 92 | new = kmalloc(sizeof(*new), GFP_KERNEL); |
96 | if (!new) { | 93 | if (!new) { |
97 | jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n"); | 94 | D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n")); |
98 | jffs2_clear_wbuf_ino_list(c); | 95 | jffs2_clear_wbuf_ino_list(c); |
99 | c->wbuf_inodes = &inodirty_nomem; | 96 | c->wbuf_inodes = &inodirty_nomem; |
100 | return; | 97 | return; |
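
A pattern repeated throughout this diff: printf-style `jffs2_dbg(1, ...)` calls are rewritten back to the older `D1(printk(KERN_DEBUG ...))` form. Both compile to nothing unless JFFS2 debugging is configured in; a sketch loosely based on fs/jffs2/debug.h (simplified, not the exact definitions):

```c
/* Sketch of the two jffs2 debug idioms, loosely based on
 * fs/jffs2/debug.h (simplified). CONFIG_JFFS2_FS_DEBUG is a Kconfig
 * integer (0, 1 or 2) selecting the verbosity level. */
#ifndef CONFIG_JFFS2_FS_DEBUG
#define CONFIG_JFFS2_FS_DEBUG 0        /* assumed default for the sketch */
#endif

#if CONFIG_JFFS2_FS_DEBUG >= 1
#define D1(x) x                        /* old style: wraps a statement */
#else
#define D1(x)
#endif

#define jffs2_dbg(level, fmt, ...)     /* newer, printf-like style */   \
do {                                                                    \
	if (CONFIG_JFFS2_FS_DEBUG >= (level))                           \
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);                  \
} while (0)
```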
@@ -116,20 +113,19 @@ static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c) | |||
116 | list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) { | 113 | list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) { |
117 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | 114 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); |
118 | 115 | ||
119 | jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", | 116 | D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset)); |
120 | jeb->offset); | ||
121 | list_del(this); | 117 | list_del(this); |
122 | if ((jiffies + (n++)) & 127) { | 118 | if ((jiffies + (n++)) & 127) { |
123 | /* Most of the time, we just erase it immediately. Otherwise we | 119 | /* Most of the time, we just erase it immediately. Otherwise we |
124 | spend ages scanning it on mount, etc. */ | 120 | spend ages scanning it on mount, etc. */ |
125 | jffs2_dbg(1, "...and adding to erase_pending_list\n"); | 121 | D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n")); |
126 | list_add_tail(&jeb->list, &c->erase_pending_list); | 122 | list_add_tail(&jeb->list, &c->erase_pending_list); |
127 | c->nr_erasing_blocks++; | 123 | c->nr_erasing_blocks++; |
128 | jffs2_garbage_collect_trigger(c); | 124 | jffs2_garbage_collect_trigger(c); |
129 | } else { | 125 | } else { |
130 | /* Sometimes, however, we leave it elsewhere so it doesn't get | 126 | /* Sometimes, however, we leave it elsewhere so it doesn't get |
131 | immediately reused, and we spread the load a bit. */ | 127 | immediately reused, and we spread the load a bit. */ |
132 | jffs2_dbg(1, "...and adding to erasable_list\n"); | 128 | D1(printk(KERN_DEBUG "...and adding to erasable_list\n")); |
133 | list_add_tail(&jeb->list, &c->erasable_list); | 129 | list_add_tail(&jeb->list, &c->erasable_list); |
134 | } | 130 | } |
135 | } | 131 | } |
@@ -140,7 +136,7 @@ static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c) | |||
140 | 136 | ||
141 | static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty) | 137 | static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty) |
142 | { | 138 | { |
143 | jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset); | 139 | D1(printk("About to refile bad block at %08x\n", jeb->offset)); |
144 | 140 | ||
145 | /* File the existing block on the bad_used_list.... */ | 141 | /* File the existing block on the bad_used_list.... */ |
146 | if (c->nextblock == jeb) | 142 | if (c->nextblock == jeb) |
@@ -148,14 +144,12 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock | |||
148 | else /* Not sure this should ever happen... need more coffee */ | 144 | else /* Not sure this should ever happen... need more coffee */ |
149 | list_del(&jeb->list); | 145 | list_del(&jeb->list); |
150 | if (jeb->first_node) { | 146 | if (jeb->first_node) { |
151 | jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n", | 147 | D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset)); |
152 | jeb->offset); | ||
153 | list_add(&jeb->list, &c->bad_used_list); | 148 | list_add(&jeb->list, &c->bad_used_list); |
154 | } else { | 149 | } else { |
155 | BUG_ON(allow_empty == REFILE_NOTEMPTY); | 150 | BUG_ON(allow_empty == REFILE_NOTEMPTY); |
156 | /* It has to have had some nodes or we couldn't be here */ | 151 | /* It has to have had some nodes or we couldn't be here */ |
157 | jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n", | 152 | D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset)); |
158 | jeb->offset); | ||
159 | list_add(&jeb->list, &c->erase_pending_list); | 153 | list_add(&jeb->list, &c->erase_pending_list); |
160 | c->nr_erasing_blocks++; | 154 | c->nr_erasing_blocks++; |
161 | jffs2_garbage_collect_trigger(c); | 155 | jffs2_garbage_collect_trigger(c); |
@@ -234,14 +228,12 @@ static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf, | |||
234 | size_t retlen; | 228 | size_t retlen; |
235 | char *eccstr; | 229 | char *eccstr; |
236 | 230 | ||
237 | ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify); | 231 | ret = c->mtd->read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify); |
238 | if (ret && ret != -EUCLEAN && ret != -EBADMSG) { | 232 | if (ret && ret != -EUCLEAN && ret != -EBADMSG) { |
239 | pr_warn("%s(): Read back of page at %08x failed: %d\n", | 233 | printk(KERN_WARNING "jffs2_verify_write(): Read back of page at %08x failed: %d\n", c->wbuf_ofs, ret); |
240 | __func__, c->wbuf_ofs, ret); | ||
241 | return ret; | 234 | return ret; |
242 | } else if (retlen != c->wbuf_pagesize) { | 235 | } else if (retlen != c->wbuf_pagesize) { |
243 | pr_warn("%s(): Read back of page at %08x gave short read: %zd not %d\n", | 236 | printk(KERN_WARNING "jffs2_verify_write(): Read back of page at %08x gave short read: %zd not %d.\n", ofs, retlen, c->wbuf_pagesize); |
244 | __func__, ofs, retlen, c->wbuf_pagesize); | ||
245 | return -EIO; | 237 | return -EIO; |
246 | } | 238 | } |
247 | if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize)) | 239 | if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize)) |
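
This hunk, and several more below, replaces `mtd_read()`/`mtd_write()` wrapper calls with direct calls through the `struct mtd_info` method pointers, which is how older kernels invoked MTD drivers. The wrappers are thin forwarding helpers; roughly their original shape (a sketch — the mainline versions in `<linux/mtd/mtd.h>` also grew argument checking):

```c
/* Sketch of the thin mtd_read()/mtd_write() wrappers (simplified from
 * <linux/mtd/mtd.h>; the mainline versions also validate arguments). */
static inline int mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, unsigned char *buf)
{
	return mtd->read(mtd, from, len, retlen, buf);
}

static inline int mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, const unsigned char *buf)
{
	return mtd->write(mtd, to, len, retlen, buf);
}
```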
@@ -254,12 +246,12 @@ static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf, | |||
254 | else | 246 | else |
255 | eccstr = "OK or unused"; | 247 | eccstr = "OK or unused"; |
256 | 248 | ||
257 | pr_warn("Write verify error (ECC %s) at %08x. Wrote:\n", | 249 | printk(KERN_WARNING "Write verify error (ECC %s) at %08x. Wrote:\n", |
258 | eccstr, c->wbuf_ofs); | 250 | eccstr, c->wbuf_ofs); |
259 | print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, | 251 | print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, |
260 | c->wbuf, c->wbuf_pagesize, 0); | 252 | c->wbuf, c->wbuf_pagesize, 0); |
261 | 253 | ||
262 | pr_warn("Read back:\n"); | 254 | printk(KERN_WARNING "Read back:\n"); |
263 | print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, | 255 | print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, |
264 | c->wbuf_verify, c->wbuf_pagesize, 0); | 256 | c->wbuf_verify, c->wbuf_pagesize, 0); |
265 | 257 | ||
@@ -316,7 +308,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
316 | 308 | ||
317 | if (!first_raw) { | 309 | if (!first_raw) { |
318 | /* All nodes were obsolete. Nothing to recover. */ | 310 | /* All nodes were obsolete. Nothing to recover. */ |
319 | jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n"); | 311 | D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n")); |
320 | c->wbuf_len = 0; | 312 | c->wbuf_len = 0; |
321 | return; | 313 | return; |
322 | } | 314 | } |
@@ -339,14 +331,13 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
339 | 331 | ||
340 | buf = kmalloc(end - start, GFP_KERNEL); | 332 | buf = kmalloc(end - start, GFP_KERNEL); |
341 | if (!buf) { | 333 | if (!buf) { |
342 | pr_crit("Malloc failure in wbuf recovery. Data loss ensues.\n"); | 334 | printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n"); |
343 | 335 | ||
344 | goto read_failed; | 336 | goto read_failed; |
345 | } | 337 | } |
346 | 338 | ||
347 | /* Do the read... */ | 339 | /* Do the read... */ |
348 | ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen, | 340 | ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf); |
349 | buf); | ||
350 | 341 | ||
351 | /* ECC recovered ? */ | 342 | /* ECC recovered ? */ |
352 | if ((ret == -EUCLEAN || ret == -EBADMSG) && | 343 | if ((ret == -EUCLEAN || ret == -EBADMSG) && |
@@ -354,7 +345,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
354 | ret = 0; | 345 | ret = 0; |
355 | 346 | ||
356 | if (ret || retlen != c->wbuf_ofs - start) { | 347 | if (ret || retlen != c->wbuf_ofs - start) { |
357 | pr_crit("Old data are already lost in wbuf recovery. Data loss ensues.\n"); | 348 | printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n"); |
358 | 349 | ||
359 | kfree(buf); | 350 | kfree(buf); |
360 | buf = NULL; | 351 | buf = NULL; |
@@ -388,7 +379,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
388 | /* ... and get an allocation of space from a shiny new block instead */ | 379 | /* ... and get an allocation of space from a shiny new block instead */ |
389 | ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE); | 380 | ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE); |
390 | if (ret) { | 381 | if (ret) { |
391 | pr_warn("Failed to allocate space for wbuf recovery. Data loss ensues.\n"); | 382 | printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n"); |
392 | kfree(buf); | 383 | kfree(buf); |
393 | return; | 384 | return; |
394 | } | 385 | } |
@@ -398,7 +389,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
398 | 389 | ||
399 | ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile); | 390 | ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile); |
400 | if (ret) { | 391 | if (ret) { |
401 | pr_warn("Failed to allocate node refs for wbuf recovery. Data loss ensues.\n"); | 392 | printk(KERN_WARNING "Failed to allocate node refs for wbuf recovery. Data loss ensues.\n"); |
402 | kfree(buf); | 393 | kfree(buf); |
403 | return; | 394 | return; |
404 | } | 395 | } |
@@ -414,24 +405,25 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
414 | unsigned char *rewrite_buf = buf?:c->wbuf; | 405 | unsigned char *rewrite_buf = buf?:c->wbuf; |
415 | uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize); | 406 | uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize); |
416 | 407 | ||
417 | jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n", | 408 | D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n", |
418 | towrite, ofs); | 409 | towrite, ofs)); |
419 | 410 | ||
420 | #ifdef BREAKMEHEADER | 411 | #ifdef BREAKMEHEADER |
421 | static int breakme; | 412 | static int breakme; |
422 | if (breakme++ == 20) { | 413 | if (breakme++ == 20) { |
423 | pr_notice("Faking write error at 0x%08x\n", ofs); | 414 | printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs); |
424 | breakme = 0; | 415 | breakme = 0; |
425 | mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf); | 416 | c->mtd->write(c->mtd, ofs, towrite, &retlen, |
417 | brokenbuf); | ||
426 | ret = -EIO; | 418 | ret = -EIO; |
427 | } else | 419 | } else |
428 | #endif | 420 | #endif |
429 | ret = mtd_write(c->mtd, ofs, towrite, &retlen, | 421 | ret = c->mtd->write(c->mtd, ofs, towrite, &retlen, |
430 | rewrite_buf); | 422 | rewrite_buf); |
431 | 423 | ||
432 | if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) { | 424 | if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) { |
433 | /* Argh. We tried. Really we did. */ | 425 | /* Argh. We tried. Really we did. */ |
434 | pr_crit("Recovery of wbuf failed due to a second write error\n"); | 426 | printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n"); |
435 | kfree(buf); | 427 | kfree(buf); |
436 | 428 | ||
437 | if (retlen) | 429 | if (retlen) |
@@ -439,7 +431,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
439 | 431 | ||
440 | return; | 432 | return; |
441 | } | 433 | } |
442 | pr_notice("Recovery of wbuf succeeded to %08x\n", ofs); | 434 | printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs); |
443 | 435 | ||
444 | c->wbuf_len = (end - start) - towrite; | 436 | c->wbuf_len = (end - start) - towrite; |
445 | c->wbuf_ofs = ofs + towrite; | 437 | c->wbuf_ofs = ofs + towrite; |
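
The `#ifdef BREAKMEHEADER` block above (and its twin `BREAKME` block further down) is a compile-time fault-injection hook: a static counter swaps in a garbage buffer for every 21st write so the recovery path being patched here can be exercised on demand. A standalone, userspace-runnable distillation of the idiom (all names here are illustrative):

```c
/* Userspace distillation of the BREAKME fault-injection idiom: build
 * with -DBREAKME to make every 21st "write" fail. Names illustrative. */
#include <stdio.h>

static int flash_write(int inject_fault)
{
	return inject_fault ? -5 /* -EIO */ : 0;
}

static int write_with_breakme(void)
{
	int ret;
#ifdef BREAKME
	static int breakme;             /* persists across calls */
	if (breakme++ == 20) {
		printf("Faking write error\n");
		breakme = 0;
		ret = flash_write(1);
	} else
#endif
		ret = flash_write(0);
	return ret;
}

int main(void)
{
	for (int i = 0; i < 42; i++)
		if (write_with_breakme() < 0)
			printf("write %d failed\n", i);
	return 0;
}
```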
@@ -467,8 +459,8 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
467 | struct jffs2_raw_node_ref **adjust_ref = NULL; | 459 | struct jffs2_raw_node_ref **adjust_ref = NULL; |
468 | struct jffs2_inode_info *f = NULL; | 460 | struct jffs2_inode_info *f = NULL; |
469 | 461 | ||
470 | jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n", | 462 | D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n", |
471 | rawlen, ref_offset(raw), ref_flags(raw), ofs); | 463 | rawlen, ref_offset(raw), ref_flags(raw), ofs)); |
472 | 464 | ||
473 | ic = jffs2_raw_ref_to_ic(raw); | 465 | ic = jffs2_raw_ref_to_ic(raw); |
474 | 466 | ||
@@ -548,8 +540,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
548 | 540 | ||
549 | /* Fix up the original jeb now it's on the bad_list */ | 541 | /* Fix up the original jeb now it's on the bad_list */ |
550 | if (first_raw == jeb->first_node) { | 542 | if (first_raw == jeb->first_node) { |
551 | jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n", | 543 | D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset)); |
552 | jeb->offset); | ||
553 | list_move(&jeb->list, &c->erase_pending_list); | 544 | list_move(&jeb->list, &c->erase_pending_list); |
554 | c->nr_erasing_blocks++; | 545 | c->nr_erasing_blocks++; |
555 | jffs2_garbage_collect_trigger(c); | 546 | jffs2_garbage_collect_trigger(c); |
@@ -563,8 +554,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
563 | 554 | ||
564 | spin_unlock(&c->erase_completion_lock); | 555 | spin_unlock(&c->erase_completion_lock); |
565 | 556 | ||
566 | jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", | 557 | D1(printk(KERN_DEBUG "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len)); |
567 | c->wbuf_ofs, c->wbuf_len); | ||
568 | 558 | ||
569 | } | 559 | } |
570 | 560 | ||
@@ -588,8 +578,9 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
588 | if (!jffs2_is_writebuffered(c)) | 578 | if (!jffs2_is_writebuffered(c)) |
589 | return 0; | 579 | return 0; |
590 | 580 | ||
591 | if (!mutex_is_locked(&c->alloc_sem)) { | 581 | if (mutex_trylock(&c->alloc_sem)) { |
592 | pr_crit("jffs2_flush_wbuf() called with alloc_sem not locked!\n"); | 582 | mutex_unlock(&c->alloc_sem); |
583 | printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n"); | ||
593 | BUG(); | 584 | BUG(); |
594 | } | 585 | } |
595 | 586 | ||
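
Both sides of this hunk assert that the caller already holds `alloc_sem`, via different idioms: the left side queries `mutex_is_locked()`, while the right side attempts a `mutex_trylock()` — if the trylock *succeeds*, nobody held the mutex, so it releases it and calls `BUG()`. The two idioms side by side, as a sketch (note that neither proves the *current* task is the holder, only that someone holds it; `lockdep_assert_held()` checks actual ownership where lockdep is enabled):

```c
/* The two lock-held assertions from this hunk, as standalone helpers.
 * Sketch only; neither checks *which* task holds the mutex. */
static void assert_alloc_sem_held_query(struct jffs2_sb_info *c)
{
	if (!mutex_is_locked(&c->alloc_sem))
		BUG();		/* free mutex: caller broke the contract */
}

static void assert_alloc_sem_held_trylock(struct jffs2_sb_info *c)
{
	if (mutex_trylock(&c->alloc_sem)) {
		/* We acquired it, so it was free: undo and complain. */
		mutex_unlock(&c->alloc_sem);
		BUG();
	}
}
```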
@@ -627,23 +618,22 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
627 | #ifdef BREAKME | 618 | #ifdef BREAKME |
628 | static int breakme; | 619 | static int breakme; |
629 | if (breakme++ == 20) { | 620 | if (breakme++ == 20) { |
630 | pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs); | 621 | printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs); |
631 | breakme = 0; | 622 | breakme = 0; |
632 | mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, | 623 | c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, |
633 | brokenbuf); | 624 | brokenbuf); |
634 | ret = -EIO; | 625 | ret = -EIO; |
635 | } else | 626 | } else |
636 | #endif | 627 | #endif |
637 | 628 | ||
638 | ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, | 629 | ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf); |
639 | &retlen, c->wbuf); | ||
640 | 630 | ||
641 | if (ret) { | 631 | if (ret) { |
642 | pr_warn("jffs2_flush_wbuf(): Write failed with %d\n", ret); | 632 | printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n", ret); |
643 | goto wfail; | 633 | goto wfail; |
644 | } else if (retlen != c->wbuf_pagesize) { | 634 | } else if (retlen != c->wbuf_pagesize) { |
645 | pr_warn("jffs2_flush_wbuf(): Write was short: %zd instead of %d\n", | 635 | printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n", |
646 | retlen, c->wbuf_pagesize); | 636 | retlen, c->wbuf_pagesize); |
647 | ret = -EIO; | 637 | ret = -EIO; |
648 | goto wfail; | 638 | goto wfail; |
649 | } else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) { | 639 | } else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) { |
@@ -657,18 +647,17 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
657 | if (pad) { | 647 | if (pad) { |
658 | uint32_t waste = c->wbuf_pagesize - c->wbuf_len; | 648 | uint32_t waste = c->wbuf_pagesize - c->wbuf_len; |
659 | 649 | ||
660 | jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n", | 650 | D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n", |
661 | (wbuf_jeb == c->nextblock) ? "next" : "", | 651 | (wbuf_jeb==c->nextblock)?"next":"", wbuf_jeb->offset)); |
662 | wbuf_jeb->offset); | ||
663 | 652 | ||
664 | /* wbuf_pagesize - wbuf_len is the amount of space that's to be | 653 | /* wbuf_pagesize - wbuf_len is the amount of space that's to be |
665 | padded. If there is less free space in the block than that, | 654 | padded. If there is less free space in the block than that, |
666 | something screwed up */ | 655 | something screwed up */ |
667 | if (wbuf_jeb->free_size < waste) { | 656 | if (wbuf_jeb->free_size < waste) { |
668 | pr_crit("jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n", | 657 | printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n", |
669 | c->wbuf_ofs, c->wbuf_len, waste); | 658 | c->wbuf_ofs, c->wbuf_len, waste); |
670 | pr_crit("jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n", | 659 | printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n", |
671 | wbuf_jeb->offset, wbuf_jeb->free_size); | 660 | wbuf_jeb->offset, wbuf_jeb->free_size); |
672 | BUG(); | 661 | BUG(); |
673 | } | 662 | } |
674 | 663 | ||
@@ -705,14 +694,14 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) | |||
705 | uint32_t old_wbuf_len; | 694 | uint32_t old_wbuf_len; |
706 | int ret = 0; | 695 | int ret = 0; |
707 | 696 | ||
708 | jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino); | 697 | D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino)); |
709 | 698 | ||
710 | if (!c->wbuf) | 699 | if (!c->wbuf) |
711 | return 0; | 700 | return 0; |
712 | 701 | ||
713 | mutex_lock(&c->alloc_sem); | 702 | mutex_lock(&c->alloc_sem); |
714 | if (!jffs2_wbuf_pending_for_ino(c, ino)) { | 703 | if (!jffs2_wbuf_pending_for_ino(c, ino)) { |
715 | jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino); | 704 | D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino)); |
716 | mutex_unlock(&c->alloc_sem); | 705 | mutex_unlock(&c->alloc_sem); |
717 | return 0; | 706 | return 0; |
718 | } | 707 | } |
@@ -722,8 +711,7 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) | |||
722 | 711 | ||
723 | if (c->unchecked_size) { | 712 | if (c->unchecked_size) { |
724 | /* GC won't make any progress for a while */ | 713 | /* GC won't make any progress for a while */ |
725 | jffs2_dbg(1, "%s(): padding. Not finished checking\n", | 714 | D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n")); |
726 | __func__); | ||
727 | down_write(&c->wbuf_sem); | 715 | down_write(&c->wbuf_sem); |
728 | ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING); | 716 | ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING); |
729 | /* retry flushing wbuf in case jffs2_wbuf_recover | 717 | /* retry flushing wbuf in case jffs2_wbuf_recover |
@@ -736,7 +724,7 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) | |||
736 | 724 | ||
737 | mutex_unlock(&c->alloc_sem); | 725 | mutex_unlock(&c->alloc_sem); |
738 | 726 | ||
739 | jffs2_dbg(1, "%s(): calls gc pass\n", __func__); | 727 | D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n")); |
740 | 728 | ||
741 | ret = jffs2_garbage_collect_pass(c); | 729 | ret = jffs2_garbage_collect_pass(c); |
742 | if (ret) { | 730 | if (ret) { |
@@ -754,7 +742,7 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) | |||
754 | mutex_lock(&c->alloc_sem); | 742 | mutex_lock(&c->alloc_sem); |
755 | } | 743 | } |
756 | 744 | ||
757 | jffs2_dbg(1, "%s(): ends...\n", __func__); | 745 | D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n")); |
758 | 746 | ||
759 | mutex_unlock(&c->alloc_sem); | 747 | mutex_unlock(&c->alloc_sem); |
760 | return ret; | 748 | return ret; |
@@ -823,8 +811,9 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, | |||
823 | if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) { | 811 | if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) { |
824 | /* It's a write to a new block */ | 812 | /* It's a write to a new block */ |
825 | if (c->wbuf_len) { | 813 | if (c->wbuf_len) { |
826 | jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n", | 814 | D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx " |
827 | __func__, (unsigned long)to, c->wbuf_ofs); | 815 | "causes flush of wbuf at 0x%08x\n", |
816 | (unsigned long)to, c->wbuf_ofs)); | ||
828 | ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); | 817 | ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); |
829 | if (ret) | 818 | if (ret) |
830 | goto outerr; | 819 | goto outerr; |
@@ -836,11 +825,11 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, | |||
836 | 825 | ||
837 | if (to != PAD(c->wbuf_ofs + c->wbuf_len)) { | 826 | if (to != PAD(c->wbuf_ofs + c->wbuf_len)) { |
838 | /* We're not writing immediately after the writebuffer. Bad. */ | 827 | /* We're not writing immediately after the writebuffer. Bad. */ |
839 | pr_crit("%s(): Non-contiguous write to %08lx\n", | 828 | printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write " |
840 | __func__, (unsigned long)to); | 829 | "to %08lx\n", (unsigned long)to); |
841 | if (c->wbuf_len) | 830 | if (c->wbuf_len) |
842 | pr_crit("wbuf was previously %08x-%08x\n", | 831 | printk(KERN_CRIT "wbuf was previously %08x-%08x\n", |
843 | c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len); | 832 | c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len); |
844 | BUG(); | 833 | BUG(); |
845 | } | 834 | } |
846 | 835 | ||
@@ -873,8 +862,8 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, | |||
873 | v += wbuf_retlen; | 862 | v += wbuf_retlen; |
874 | 863 | ||
875 | if (vlen >= c->wbuf_pagesize) { | 864 | if (vlen >= c->wbuf_pagesize) { |
876 | ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen), | 865 | ret = c->mtd->write(c->mtd, outvec_to, PAGE_DIV(vlen), |
877 | &wbuf_retlen, v); | 866 | &wbuf_retlen, v); |
878 | if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen)) | 867 | if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen)) |
879 | goto outfile; | 868 | goto outfile; |
880 | 869 | ||
@@ -960,16 +949,16 @@ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *re | |||
960 | int ret; | 949 | int ret; |
961 | 950 | ||
962 | if (!jffs2_is_writebuffered(c)) | 951 | if (!jffs2_is_writebuffered(c)) |
963 | return mtd_read(c->mtd, ofs, len, retlen, buf); | 952 | return c->mtd->read(c->mtd, ofs, len, retlen, buf); |
964 | 953 | ||
965 | /* Read flash */ | 954 | /* Read flash */ |
966 | down_read(&c->wbuf_sem); | 955 | down_read(&c->wbuf_sem); |
967 | ret = mtd_read(c->mtd, ofs, len, retlen, buf); | 956 | ret = c->mtd->read(c->mtd, ofs, len, retlen, buf); |
968 | 957 | ||
969 | if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) { | 958 | if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) { |
970 | if (ret == -EBADMSG) | 959 | if (ret == -EBADMSG) |
971 | pr_warn("mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n", | 960 | printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx)" |
972 | len, ofs); | 961 | " returned ECC error\n", len, ofs); |
973 | /* | 962 | /* |
974 | * We have the raw data without ECC correction in the buffer, | 963 | * We have the raw data without ECC correction in the buffer, |
975 | * maybe we are lucky and all data or parts are correct. We | 964 | * maybe we are lucky and all data or parts are correct. We |
@@ -1037,17 +1026,18 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c, | |||
1037 | int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); | 1026 | int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); |
1038 | struct mtd_oob_ops ops; | 1027 | struct mtd_oob_ops ops; |
1039 | 1028 | ||
1040 | ops.mode = MTD_OPS_AUTO_OOB; | 1029 | ops.mode = MTD_OOB_AUTO; |
1041 | ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail; | 1030 | ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail; |
1042 | ops.oobbuf = c->oobbuf; | 1031 | ops.oobbuf = c->oobbuf; |
1043 | ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; | 1032 | ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; |
1044 | ops.datbuf = NULL; | 1033 | ops.datbuf = NULL; |
1045 | 1034 | ||
1046 | ret = mtd_read_oob(c->mtd, jeb->offset, &ops); | 1035 | ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops); |
1047 | if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) { | 1036 | if (ret || ops.oobretlen != ops.ooblen) { |
1048 | pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n", | 1037 | printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd" |
1049 | jeb->offset, ops.ooblen, ops.oobretlen, ret); | 1038 | " bytes, read %zd bytes, error %d\n", |
1050 | if (!ret || mtd_is_bitflip(ret)) | 1039 | jeb->offset, ops.ooblen, ops.oobretlen, ret); |
1040 | if (!ret) | ||
1051 | ret = -EIO; | 1041 | ret = -EIO; |
1052 | return ret; | 1042 | return ret; |
1053 | } | 1043 | } |
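
The OOB hunks here and below also drop the `mtd_is_bitflip()` checks, so the older code on the right treats a bitflip that ECC already corrected (`-EUCLEAN`) as a hard read failure. For reference, the predicates are one-line helpers, as defined in `<linux/mtd/mtd.h>`:

```c
/* MTD ECC status predicates (as in <linux/mtd/mtd.h>):
 * -EUCLEAN means the data was corrected by ECC and is usable;
 * -EBADMSG means an uncorrectable ECC error. */
static inline int mtd_is_bitflip(int err)
{
	return err == -EUCLEAN;
}

static inline int mtd_is_eccerr(int err)
{
	return err == -EBADMSG;
}
```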
@@ -1058,8 +1048,8 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c, | |||
1058 | continue; | 1048 | continue; |
1059 | 1049 | ||
1060 | if (ops.oobbuf[i] != 0xFF) { | 1050 | if (ops.oobbuf[i] != 0xFF) { |
1061 | jffs2_dbg(2, "Found %02x at %x in OOB for " | 1051 | D2(printk(KERN_DEBUG "Found %02x at %x in OOB for " |
1062 | "%08x\n", ops.oobbuf[i], i, jeb->offset); | 1052 | "%08x\n", ops.oobbuf[i], i, jeb->offset)); |
1063 | return 1; | 1053 | return 1; |
1064 | } | 1054 | } |
1065 | } | 1055 | } |
@@ -1079,17 +1069,18 @@ int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, | |||
1079 | struct mtd_oob_ops ops; | 1069 | struct mtd_oob_ops ops; |
1080 | int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); | 1070 | int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); |
1081 | 1071 | ||
1082 | ops.mode = MTD_OPS_AUTO_OOB; | 1072 | ops.mode = MTD_OOB_AUTO; |
1083 | ops.ooblen = cmlen; | 1073 | ops.ooblen = cmlen; |
1084 | ops.oobbuf = c->oobbuf; | 1074 | ops.oobbuf = c->oobbuf; |
1085 | ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; | 1075 | ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; |
1086 | ops.datbuf = NULL; | 1076 | ops.datbuf = NULL; |
1087 | 1077 | ||
1088 | ret = mtd_read_oob(c->mtd, jeb->offset, &ops); | 1078 | ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops); |
1089 | if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) { | 1079 | if (ret || ops.oobretlen != ops.ooblen) { |
1090 | pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n", | 1080 | printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd" |
1091 | jeb->offset, ops.ooblen, ops.oobretlen, ret); | 1081 | " bytes, read %zd bytes, error %d\n", |
1092 | if (!ret || mtd_is_bitflip(ret)) | 1082 | jeb->offset, ops.ooblen, ops.oobretlen, ret); |
1083 | if (!ret) | ||
1093 | ret = -EIO; | 1084 | ret = -EIO; |
1094 | return ret; | 1085 | return ret; |
1095 | } | 1086 | } |
@@ -1104,16 +1095,17 @@ int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, | |||
1104 | struct mtd_oob_ops ops; | 1095 | struct mtd_oob_ops ops; |
1105 | int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); | 1096 | int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); |
1106 | 1097 | ||
1107 | ops.mode = MTD_OPS_AUTO_OOB; | 1098 | ops.mode = MTD_OOB_AUTO; |
1108 | ops.ooblen = cmlen; | 1099 | ops.ooblen = cmlen; |
1109 | ops.oobbuf = (uint8_t *)&oob_cleanmarker; | 1100 | ops.oobbuf = (uint8_t *)&oob_cleanmarker; |
1110 | ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; | 1101 | ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; |
1111 | ops.datbuf = NULL; | 1102 | ops.datbuf = NULL; |
1112 | 1103 | ||
1113 | ret = mtd_write_oob(c->mtd, jeb->offset, &ops); | 1104 | ret = c->mtd->write_oob(c->mtd, jeb->offset, &ops); |
1114 | if (ret || ops.oobretlen != ops.ooblen) { | 1105 | if (ret || ops.oobretlen != ops.ooblen) { |
1115 | pr_err("cannot write OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n", | 1106 | printk(KERN_ERR "cannot write OOB for EB at %08x, requested %zd" |
1116 | jeb->offset, ops.ooblen, ops.oobretlen, ret); | 1107 | " bytes, read %zd bytes, error %d\n", |
1108 | jeb->offset, ops.ooblen, ops.oobretlen, ret); | ||
1117 | if (!ret) | 1109 | if (!ret) |
1118 | ret = -EIO; | 1110 | ret = -EIO; |
1119 | return ret; | 1111 | return ret; |
@@ -1138,58 +1130,19 @@ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock * | |||
1138 | if( ++jeb->bad_count < MAX_ERASE_FAILURES) | 1130 | if( ++jeb->bad_count < MAX_ERASE_FAILURES) |
1139 | return 0; | 1131 | return 0; |
1140 | 1132 | ||
1141 | pr_warn("marking eraseblock at %08x as bad\n", bad_offset); | 1133 | if (!c->mtd->block_markbad) |
1142 | ret = mtd_block_markbad(c->mtd, bad_offset); | 1134 | return 1; // What else can we do? |
1135 | |||
1136 | printk(KERN_WARNING "JFFS2: marking eraseblock at %08x as bad\n", bad_offset); | ||
1137 | ret = c->mtd->block_markbad(c->mtd, bad_offset); | ||
1143 | 1138 | ||
1144 | if (ret) { | 1139 | if (ret) { |
1145 | jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n", | 1140 | D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret)); |
1146 | __func__, jeb->offset, ret); | ||
1147 | return ret; | 1141 | return ret; |
1148 | } | 1142 | } |
1149 | return 1; | 1143 | return 1; |
1150 | } | 1144 | } |
1151 | 1145 | ||
1152 | static struct jffs2_sb_info *work_to_sb(struct work_struct *work) | ||
1153 | { | ||
1154 | struct delayed_work *dwork; | ||
1155 | |||
1156 | dwork = container_of(work, struct delayed_work, work); | ||
1157 | return container_of(dwork, struct jffs2_sb_info, wbuf_dwork); | ||
1158 | } | ||
1159 | |||
1160 | static void delayed_wbuf_sync(struct work_struct *work) | ||
1161 | { | ||
1162 | struct jffs2_sb_info *c = work_to_sb(work); | ||
1163 | struct super_block *sb = OFNI_BS_2SFFJ(c); | ||
1164 | |||
1165 | spin_lock(&c->wbuf_dwork_lock); | ||
1166 | c->wbuf_queued = 0; | ||
1167 | spin_unlock(&c->wbuf_dwork_lock); | ||
1168 | |||
1169 | if (!(sb->s_flags & MS_RDONLY)) { | ||
1170 | jffs2_dbg(1, "%s()\n", __func__); | ||
1171 | jffs2_flush_wbuf_gc(c, 0); | ||
1172 | } | ||
1173 | } | ||
1174 | |||
1175 | void jffs2_dirty_trigger(struct jffs2_sb_info *c) | ||
1176 | { | ||
1177 | struct super_block *sb = OFNI_BS_2SFFJ(c); | ||
1178 | unsigned long delay; | ||
1179 | |||
1180 | if (sb->s_flags & MS_RDONLY) | ||
1181 | return; | ||
1182 | |||
1183 | spin_lock(&c->wbuf_dwork_lock); | ||
1184 | if (!c->wbuf_queued) { | ||
1185 | jffs2_dbg(1, "%s()\n", __func__); | ||
1186 | delay = msecs_to_jiffies(dirty_writeback_interval * 10); | ||
1187 | queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay); | ||
1188 | c->wbuf_queued = 1; | ||
1189 | } | ||
1190 | spin_unlock(&c->wbuf_dwork_lock); | ||
1191 | } | ||
1192 | |||
1193 | int jffs2_nand_flash_setup(struct jffs2_sb_info *c) | 1146 | int jffs2_nand_flash_setup(struct jffs2_sb_info *c) |
1194 | { | 1147 | { |
1195 | struct nand_ecclayout *oinfo = c->mtd->ecclayout; | 1148 | struct nand_ecclayout *oinfo = c->mtd->ecclayout; |
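
The large removal above strips out the delayed write-buffer flush machinery: `jffs2_dirty_trigger()` queued `wbuf_dwork` on `system_long_wq` so a dirty wbuf was flushed after roughly `dirty_writeback_interval * 10` msecs, with `delayed_wbuf_sync()` doing the flush — which is also why the `spin_lock_init()`/`INIT_DELAYED_WORK()` lines disappear from the setup functions below. The removed code follows the standard kernel delayed-work pattern; a compact sketch of that pattern with illustrative names (not the jffs2 code itself):

```c
/* Sketch of the delayed-work pattern the removed code used; names and
 * the flush callback body are illustrative, not the jffs2 code. */
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

struct my_ctx {
	struct delayed_work dwork;
	spinlock_t dwork_lock;
	int queued;
};

static void my_delayed_sync(struct work_struct *work)
{
	struct my_ctx *ctx =
		container_of(to_delayed_work(work), struct my_ctx, dwork);

	spin_lock(&ctx->dwork_lock);
	ctx->queued = 0;		/* allow the next trigger to requeue */
	spin_unlock(&ctx->dwork_lock);

	/* ... perform the deferred flush here ... */
}

static void my_dirty_trigger(struct my_ctx *ctx, unsigned int delay_ms)
{
	spin_lock(&ctx->dwork_lock);
	if (!ctx->queued) {		/* coalesce triggers into one flush */
		queue_delayed_work(system_long_wq, &ctx->dwork,
				   msecs_to_jiffies(delay_ms));
		ctx->queued = 1;
	}
	spin_unlock(&ctx->dwork_lock);
}

/* Setup, e.g. on mount:
 *	spin_lock_init(&ctx->dwork_lock);
 *	INIT_DELAYED_WORK(&ctx->dwork, my_delayed_sync);
 */
```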
@@ -1201,18 +1154,16 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c) | |||
1201 | c->cleanmarker_size = 0; | 1154 | c->cleanmarker_size = 0; |
1202 | 1155 | ||
1203 | if (!oinfo || oinfo->oobavail == 0) { | 1156 | if (!oinfo || oinfo->oobavail == 0) { |
1204 | pr_err("inconsistent device description\n"); | 1157 | printk(KERN_ERR "inconsistent device description\n"); |
1205 | return -EINVAL; | 1158 | return -EINVAL; |
1206 | } | 1159 | } |
1207 | 1160 | ||
1208 | jffs2_dbg(1, "using OOB on NAND\n"); | 1161 | D1(printk(KERN_DEBUG "JFFS2 using OOB on NAND\n")); |
1209 | 1162 | ||
1210 | c->oobavail = oinfo->oobavail; | 1163 | c->oobavail = oinfo->oobavail; |
1211 | 1164 | ||
1212 | /* Initialise write buffer */ | 1165 | /* Initialise write buffer */ |
1213 | init_rwsem(&c->wbuf_sem); | 1166 | init_rwsem(&c->wbuf_sem); |
1214 | spin_lock_init(&c->wbuf_dwork_lock); | ||
1215 | INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); | ||
1216 | c->wbuf_pagesize = c->mtd->writesize; | 1167 | c->wbuf_pagesize = c->mtd->writesize; |
1217 | c->wbuf_ofs = 0xFFFFFFFF; | 1168 | c->wbuf_ofs = 0xFFFFFFFF; |
1218 | 1169 | ||
@@ -1251,8 +1202,8 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) { | |||
1251 | 1202 | ||
1252 | /* Initialize write buffer */ | 1203 | /* Initialize write buffer */ |
1253 | init_rwsem(&c->wbuf_sem); | 1204 | init_rwsem(&c->wbuf_sem); |
1254 | spin_lock_init(&c->wbuf_dwork_lock); | 1205 | |
1255 | INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); | 1206 | |
1256 | c->wbuf_pagesize = c->mtd->erasesize; | 1207 | c->wbuf_pagesize = c->mtd->erasesize; |
1257 | 1208 | ||
1258 | /* Find a suitable c->sector_size | 1209 | /* Find a suitable c->sector_size |
@@ -1274,7 +1225,7 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) { | |||
1274 | 1225 | ||
1275 | if ((c->flash_size % c->sector_size) != 0) { | 1226 | if ((c->flash_size % c->sector_size) != 0) { |
1276 | c->flash_size = (c->flash_size / c->sector_size) * c->sector_size; | 1227 | c->flash_size = (c->flash_size / c->sector_size) * c->sector_size; |
1277 | pr_warn("flash size adjusted to %dKiB\n", c->flash_size); | 1228 | printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size); |
1278 | }; | 1229 | }; |
1279 | 1230 | ||
1280 | c->wbuf_ofs = 0xFFFFFFFF; | 1231 | c->wbuf_ofs = 0xFFFFFFFF; |
@@ -1291,8 +1242,7 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) { | |||
1291 | } | 1242 | } |
1292 | #endif | 1243 | #endif |
1293 | 1244 | ||
1294 | pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n", | 1245 | printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size); |
1295 | c->wbuf_pagesize, c->sector_size); | ||
1296 | 1246 | ||
1297 | return 0; | 1247 | return 0; |
1298 | } | 1248 | } |
@@ -1311,9 +1261,6 @@ int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) { | |||
1311 | 1261 | ||
1312 | /* Initialize write buffer */ | 1262 | /* Initialize write buffer */ |
1313 | init_rwsem(&c->wbuf_sem); | 1263 | init_rwsem(&c->wbuf_sem); |
1314 | spin_lock_init(&c->wbuf_dwork_lock); | ||
1315 | INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); | ||
1316 | |||
1317 | c->wbuf_pagesize = c->mtd->writesize; | 1264 | c->wbuf_pagesize = c->mtd->writesize; |
1318 | c->wbuf_ofs = 0xFFFFFFFF; | 1265 | c->wbuf_ofs = 0xFFFFFFFF; |
1319 | 1266 | ||
@@ -1346,8 +1293,6 @@ int jffs2_ubivol_setup(struct jffs2_sb_info *c) { | |||
1346 | return 0; | 1293 | return 0; |
1347 | 1294 | ||
1348 | init_rwsem(&c->wbuf_sem); | 1295 | init_rwsem(&c->wbuf_sem); |
1349 | spin_lock_init(&c->wbuf_dwork_lock); | ||
1350 | INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); | ||
1351 | 1296 | ||
1352 | c->wbuf_pagesize = c->mtd->writesize; | 1297 | c->wbuf_pagesize = c->mtd->writesize; |
1353 | c->wbuf_ofs = 0xFFFFFFFF; | 1298 | c->wbuf_ofs = 0xFFFFFFFF; |
@@ -1355,8 +1300,7 @@ int jffs2_ubivol_setup(struct jffs2_sb_info *c) { | |||
1355 | if (!c->wbuf) | 1300 | if (!c->wbuf) |
1356 | return -ENOMEM; | 1301 | return -ENOMEM; |
1357 | 1302 | ||
1358 | pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n", | 1303 | printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size); |
1359 | c->wbuf_pagesize, c->sector_size); | ||
1360 | 1304 | ||
1361 | return 0; | 1305 | return 0; |
1362 | } | 1306 | } |