path: root/fs/jffs2/wbuf.c
author    Joe Perches <joe@perches.com>                2012-02-15 18:56:43 -0500
committer David Woodhouse <David.Woodhouse@intel.com>  2012-03-26 19:39:24 -0400
commit    9c261b33a9c417ccaf07f41796be278d09d02d49 (patch)
tree      6cf47f47364647dfbba845c0fd3f05539072175a /fs/jffs2/wbuf.c
parent    bf011f2ed53d587fdd8148c173c4f09ed77bdf1a (diff)
jffs2: Convert most D1/D2 macros to jffs2_dbg
D1 and D2 macros are mostly used to emit debugging messages. Convert the
logging uses of D1 & D2 to jffs2_dbg(level, fmt, ...) to be a bit more
consistent in style with the rest of the kernel. All jffs2_dbg output is
now emitted at KERN_DEBUG, whereas some of the previous uses were emitted
at various KERN_<LEVEL>s.

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'fs/jffs2/wbuf.c')
-rw-r--r--  fs/jffs2/wbuf.c | 67
1 file changed, 37 insertions(+), 30 deletions(-)
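For context, the conversion replaces open-coded D1(printk(KERN_DEBUG ...)) and D2(printk(...)) call sites with a printf-like jffs2_dbg(level, fmt, ...) helper. The sketch below shows roughly what the old and new macros look like; it is an approximation of the JFFS2 debug header (fs/jffs2/debug.h) around this series, not a verbatim copy from this tree, so the exact definitions may differ.

#include <linux/printk.h>

/*
 * Old style: D1()/D2() simply compile their argument in or out depending
 * on CONFIG_JFFS2_FS_DEBUG, so callers wrote D1(printk(KERN_DEBUG "..."))
 * and picked the KERN_<LEVEL> themselves.
 */
#if CONFIG_JFFS2_FS_DEBUG > 0
#define D1(x) x
#else
#define D1(x)
#endif

#if CONFIG_JFFS2_FS_DEBUG > 1
#define D2(x) x
#else
#define D2(x)
#endif

/*
 * New style: a printf-like macro that takes the verbosity level as its
 * first argument and always prints via pr_debug(), i.e. at KERN_DEBUG,
 * which is what makes the output level consistent across call sites.
 */
#define jffs2_dbg(level, fmt, ...)			\
do {							\
	if (CONFIG_JFFS2_FS_DEBUG >= level)		\
		pr_debug(fmt, ##__VA_ARGS__);		\
} while (0)

With that in place, a call such as D1(printk(KERN_DEBUG "msg\n")) becomes jffs2_dbg(1, "msg\n") and a D2() use becomes jffs2_dbg(2, ...), which is the mapping the diff below applies throughout wbuf.c.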
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 30e8f47e8a23..d626eb2113e1 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -91,7 +91,7 @@ static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
 
 	new = kmalloc(sizeof(*new), GFP_KERNEL);
 	if (!new) {
-		D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n"));
+		jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n");
 		jffs2_clear_wbuf_ino_list(c);
 		c->wbuf_inodes = &inodirty_nomem;
 		return;
@@ -113,19 +113,20 @@ static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
 	list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
 		struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
 
-		D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset));
+		jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n",
+			  jeb->offset);
 		list_del(this);
 		if ((jiffies + (n++)) & 127) {
 			/* Most of the time, we just erase it immediately. Otherwise we
 			   spend ages scanning it on mount, etc. */
-			D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
+			jffs2_dbg(1, "...and adding to erase_pending_list\n");
 			list_add_tail(&jeb->list, &c->erase_pending_list);
 			c->nr_erasing_blocks++;
 			jffs2_garbage_collect_trigger(c);
 		} else {
 			/* Sometimes, however, we leave it elsewhere so it doesn't get
 			   immediately reused, and we spread the load a bit. */
-			D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
+			jffs2_dbg(1, "...and adding to erasable_list\n");
 			list_add_tail(&jeb->list, &c->erasable_list);
 		}
 	}
@@ -136,7 +137,7 @@ static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
 
 static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
 {
-	D1(printk("About to refile bad block at %08x\n", jeb->offset));
+	jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset);
 
 	/* File the existing block on the bad_used_list.... */
 	if (c->nextblock == jeb)
@@ -144,12 +145,14 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock
 	else /* Not sure this should ever happen... need more coffee */
 		list_del(&jeb->list);
 	if (jeb->first_node) {
-		D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
+		jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n",
+			  jeb->offset);
 		list_add(&jeb->list, &c->bad_used_list);
 	} else {
 		BUG_ON(allow_empty == REFILE_NOTEMPTY);
 		/* It has to have had some nodes or we couldn't be here */
-		D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
+		jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n",
+			  jeb->offset);
 		list_add(&jeb->list, &c->erase_pending_list);
 		c->nr_erasing_blocks++;
 		jffs2_garbage_collect_trigger(c);
@@ -308,7 +311,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 
 	if (!first_raw) {
 		/* All nodes were obsolete. Nothing to recover. */
-		D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
+		jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n");
 		c->wbuf_len = 0;
 		return;
 	}
@@ -406,8 +409,8 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 		unsigned char *rewrite_buf = buf?:c->wbuf;
 		uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
 
-		D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
-			  towrite, ofs));
+		jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n",
+			  towrite, ofs);
 
 #ifdef BREAKMEHEADER
 		static int breakme;
@@ -459,8 +462,8 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 		struct jffs2_raw_node_ref **adjust_ref = NULL;
 		struct jffs2_inode_info *f = NULL;
 
-		D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
-			  rawlen, ref_offset(raw), ref_flags(raw), ofs));
+		jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n",
+			  rawlen, ref_offset(raw), ref_flags(raw), ofs);
 
 		ic = jffs2_raw_ref_to_ic(raw);
 
@@ -540,7 +543,8 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 
 	/* Fix up the original jeb now it's on the bad_list */
 	if (first_raw == jeb->first_node) {
-		D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
+		jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n",
+			  jeb->offset);
 		list_move(&jeb->list, &c->erase_pending_list);
 		c->nr_erasing_blocks++;
 		jffs2_garbage_collect_trigger(c);
@@ -554,7 +558,8 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 
 	spin_unlock(&c->erase_completion_lock);
 
-	D1(printk(KERN_DEBUG "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len));
+	jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n",
+		  c->wbuf_ofs, c->wbuf_len);
 
 }
 
@@ -647,8 +652,9 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
 	if (pad) {
 		uint32_t waste = c->wbuf_pagesize - c->wbuf_len;
 
-		D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
-			  (wbuf_jeb==c->nextblock)?"next":"", wbuf_jeb->offset));
+		jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
+			  (wbuf_jeb == c->nextblock) ? "next" : "",
+			  wbuf_jeb->offset);
 
 		/* wbuf_pagesize - wbuf_len is the amount of space that's to be
 		   padded. If there is less free space in the block than that,
@@ -694,14 +700,14 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
 	uint32_t old_wbuf_len;
 	int ret = 0;
 
-	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino));
+	jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino);
 
 	if (!c->wbuf)
 		return 0;
 
 	mutex_lock(&c->alloc_sem);
 	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
-		D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
+		jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino);
 		mutex_unlock(&c->alloc_sem);
 		return 0;
 	}
@@ -711,7 +717,8 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
 
 	if (c->unchecked_size) {
 		/* GC won't make any progress for a while */
-		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
+		jffs2_dbg(1, "%s(): padding. Not finished checking\n",
+			  __func__);
 		down_write(&c->wbuf_sem);
 		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
 		/* retry flushing wbuf in case jffs2_wbuf_recover
@@ -724,7 +731,7 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
 
 		mutex_unlock(&c->alloc_sem);
 
-		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));
+		jffs2_dbg(1, "%s(): calls gc pass\n", __func__);
 
 		ret = jffs2_garbage_collect_pass(c);
 		if (ret) {
@@ -742,7 +749,7 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
 		mutex_lock(&c->alloc_sem);
 	}
 
-	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));
+	jffs2_dbg(1, "%s(): ends...\n", __func__);
 
 	mutex_unlock(&c->alloc_sem);
 	return ret;
@@ -811,9 +818,8 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
 	if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
 		/* It's a write to a new block */
 		if (c->wbuf_len) {
-			D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx "
-				  "causes flush of wbuf at 0x%08x\n",
-				  (unsigned long)to, c->wbuf_ofs));
+			jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n",
+				  __func__, (unsigned long)to, c->wbuf_ofs);
 			ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
 			if (ret)
 				goto outerr;
@@ -825,8 +831,8 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
 
 	if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
 		/* We're not writing immediately after the writebuffer. Bad. */
-		printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write "
-		       "to %08lx\n", (unsigned long)to);
+		printk(KERN_CRIT "%s(): Non-contiguous write to %08lx\n",
+		       __func__, (unsigned long)to);
 		if (c->wbuf_len)
 			printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
 			       c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
@@ -1048,8 +1054,8 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c,
 			continue;
 
 		if (ops.oobbuf[i] != 0xFF) {
-			D2(printk(KERN_DEBUG "Found %02x at %x in OOB for "
-				  "%08x\n", ops.oobbuf[i], i, jeb->offset));
+			jffs2_dbg(2, "Found %02x at %x in OOB for "
+				  "%08x\n", ops.oobbuf[i], i, jeb->offset);
 			return 1;
 		}
 	}
@@ -1134,7 +1140,8 @@ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *
 	ret = mtd_block_markbad(c->mtd, bad_offset);
 
 	if (ret) {
-		D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
+		jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n",
+			  __func__, jeb->offset, ret);
 		return ret;
 	}
 	return 1;
@@ -1155,7 +1162,7 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
 		return -EINVAL;
 	}
 
-	D1(printk(KERN_DEBUG "JFFS2 using OOB on NAND\n"));
+	jffs2_dbg(1, "JFFS2 using OOB on NAND\n");
 
 	c->oobavail = oinfo->oobavail;
 