author		Joe Perches <joe@perches.com>	2012-02-15 18:56:43 -0500
committer	David Woodhouse <David.Woodhouse@intel.com>	2012-03-26 19:39:24 -0400
commit		9c261b33a9c417ccaf07f41796be278d09d02d49 (patch)
tree		6cf47f47364647dfbba845c0fd3f05539072175a /fs/jffs2/gc.c
parent		bf011f2ed53d587fdd8148c173c4f09ed77bdf1a (diff)
jffs2: Convert most D1/D2 macros to jffs2_dbg
D1 and D2 macros are mostly used to emit debugging messages. Convert the logging uses of D1 & D2 to jffs2_dbg(level, fmt, ...) to be a bit more consistent in style with the rest of the kernel. All jffs2_dbg output is now at KERN_DEBUG, where some of the previous uses were emitted at various KERN_<LEVEL>s.

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
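For reference, below is a minimal sketch of the kind of jffs2_dbg() helper this patch converts to, together with the before/after pattern applied throughout gc.c. The macro body is an illustrative assumption, not a verbatim copy of the real definition (which lives in fs/jffs2/debug.h): the verbosity gate on CONFIG_JFFS2_FS_DEBUG and the printk(KERN_DEBUG ...) call are inferred from the commit message's statement that all output is emitted at KERN_DEBUG.

    /* Sketch only: gate on the configured verbosity, then log at KERN_DEBUG */
    #define jffs2_dbg(level, fmt, ...)				\
    do {							\
    	if (CONFIG_JFFS2_FS_DEBUG >= level)			\
    		printk(KERN_DEBUG fmt, ##__VA_ARGS__);		\
    } while (0)

    /* Before: debug printk wrapped in the D1() conditional-compilation macro */
    D1(printk(KERN_DEBUG "Picking block from bad_used_list to GC next\n"));

    /* After: always KERN_DEBUG, verbosity selected by the first argument */
    jffs2_dbg(1, "Picking block from bad_used_list to GC next\n");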
Diffstat (limited to 'fs/jffs2/gc.c')
-rw-r--r--	fs/jffs2/gc.c	162
1 file changed, 96 insertions, 66 deletions
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 31dce611337..85e703a2936 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -51,44 +51,44 @@ static struct jffs2_eraseblock *jffs2_find_gc_block(struct jffs2_sb_info *c)
 	   number of free blocks is low. */
 again:
 	if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) {
-		D1(printk(KERN_DEBUG "Picking block from bad_used_list to GC next\n"));
+		jffs2_dbg(1, "Picking block from bad_used_list to GC next\n");
 		nextlist = &c->bad_used_list;
 	} else if (n < 50 && !list_empty(&c->erasable_list)) {
 		/* Note that most of them will have gone directly to be erased.
 		   So don't favour the erasable_list _too_ much. */
-		D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next\n"));
+		jffs2_dbg(1, "Picking block from erasable_list to GC next\n");
 		nextlist = &c->erasable_list;
 	} else if (n < 110 && !list_empty(&c->very_dirty_list)) {
 		/* Most of the time, pick one off the very_dirty list */
-		D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next\n"));
+		jffs2_dbg(1, "Picking block from very_dirty_list to GC next\n");
 		nextlist = &c->very_dirty_list;
 	} else if (n < 126 && !list_empty(&c->dirty_list)) {
-		D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next\n"));
+		jffs2_dbg(1, "Picking block from dirty_list to GC next\n");
 		nextlist = &c->dirty_list;
 	} else if (!list_empty(&c->clean_list)) {
-		D1(printk(KERN_DEBUG "Picking block from clean_list to GC next\n"));
+		jffs2_dbg(1, "Picking block from clean_list to GC next\n");
 		nextlist = &c->clean_list;
 	} else if (!list_empty(&c->dirty_list)) {
-		D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next (clean_list was empty)\n"));
+		jffs2_dbg(1, "Picking block from dirty_list to GC next (clean_list was empty)\n");
 
 		nextlist = &c->dirty_list;
 	} else if (!list_empty(&c->very_dirty_list)) {
-		D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n"));
+		jffs2_dbg(1, "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n");
 		nextlist = &c->very_dirty_list;
 	} else if (!list_empty(&c->erasable_list)) {
-		D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n"));
+		jffs2_dbg(1, "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n");
 
 		nextlist = &c->erasable_list;
 	} else if (!list_empty(&c->erasable_pending_wbuf_list)) {
 		/* There are blocks are wating for the wbuf sync */
-		D1(printk(KERN_DEBUG "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n"));
+		jffs2_dbg(1, "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n");
 		spin_unlock(&c->erase_completion_lock);
 		jffs2_flush_wbuf_pad(c);
 		spin_lock(&c->erase_completion_lock);
 		goto again;
 	} else {
 		/* Eep. All were empty */
-		D1(printk(KERN_NOTICE "jffs2: No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n"));
+		jffs2_dbg(1, "jffs2: No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n");
 		return NULL;
 	}
 
@@ -103,7 +103,8 @@ again:
 
 	/* Have we accidentally picked a clean block with wasted space ? */
 	if (ret->wasted_size) {
-		D1(printk(KERN_DEBUG "Converting wasted_size %08x to dirty_size\n", ret->wasted_size));
+		jffs2_dbg(1, "Converting wasted_size %08x to dirty_size\n",
+			  ret->wasted_size);
 		ret->dirty_size += ret->wasted_size;
 		c->wasted_size -= ret->wasted_size;
 		c->dirty_size += ret->wasted_size;
@@ -163,8 +164,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
 		}
 
 		if (!ic->pino_nlink) {
-			D1(printk(KERN_DEBUG "Skipping check of ino #%d with nlink/pino zero\n",
-				  ic->ino));
+			jffs2_dbg(1, "Skipping check of ino #%d with nlink/pino zero\n",
+				  ic->ino);
 			spin_unlock(&c->inocache_lock);
 			jffs2_xattr_delete_inode(c, ic);
 			continue;
@@ -172,7 +173,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
 		switch(ic->state) {
 		case INO_STATE_CHECKEDABSENT:
 		case INO_STATE_PRESENT:
-			D1(printk(KERN_DEBUG "Skipping ino #%u already checked\n", ic->ino));
+			jffs2_dbg(1, "Skipping ino #%u already checked\n",
+				  ic->ino);
 			spin_unlock(&c->inocache_lock);
 			continue;
 
@@ -186,7 +188,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
 			/* We need to wait for it to finish, lest we move on
 			   and trigger the BUG() above while we haven't yet
 			   finished checking all its nodes */
-			D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino));
+			jffs2_dbg(1, "Waiting for ino #%u to finish reading\n",
+				  ic->ino);
 			/* We need to come back again for the _same_ inode. We've
 			   made no progress in this case, but that should be OK */
 			c->checked_ino--;
@@ -204,7 +207,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
 		ic->state = INO_STATE_CHECKING;
 		spin_unlock(&c->inocache_lock);
 
-		D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() triggering inode scan of ino#%u\n", ic->ino));
+		jffs2_dbg(1, "%s(): triggering inode scan of ino#%u\n",
+			  __func__, ic->ino);
 
 		ret = jffs2_do_crccheck_inode(c, ic);
 		if (ret)
@@ -220,11 +224,11 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
 		     !list_empty(&c->erase_pending_list)) {
 			spin_unlock(&c->erase_completion_lock);
 			mutex_unlock(&c->alloc_sem);
-			D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() erasing pending blocks\n"));
+			jffs2_dbg(1, "%s(): erasing pending blocks\n", __func__);
 			if (jffs2_erase_pending_blocks(c, 1))
 				return 0;
 
-			D1(printk(KERN_DEBUG "No progress from erasing blocks; doing GC anyway\n"));
+			jffs2_dbg(1, "No progress from erasing block; doing GC anyway\n");
 			spin_lock(&c->erase_completion_lock);
 			mutex_lock(&c->alloc_sem);
 		}
@@ -242,13 +246,14 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
 			mutex_unlock(&c->alloc_sem);
 			return -EAGAIN;
 		}
-		D1(printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n"));
+		jffs2_dbg(1, "jffs2: Couldn't find erase block to garbage collect!\n");
 		spin_unlock(&c->erase_completion_lock);
 		mutex_unlock(&c->alloc_sem);
 		return -EIO;
 	}
 
-	D1(printk(KERN_DEBUG "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size));
+	jffs2_dbg(1, "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n",
+		  jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size);
 	D1(if (c->nextblock)
 	   printk(KERN_DEBUG "Nextblock at %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size));
 
@@ -261,7 +266,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
 	gcblock_dirty = jeb->dirty_size;
 
 	while(ref_obsolete(raw)) {
-		D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw)));
+		jffs2_dbg(1, "Node at 0x%08x is obsolete... skipping\n",
+			  ref_offset(raw));
 		raw = ref_next(raw);
 		if (unlikely(!raw)) {
 			printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n");
@@ -275,7 +281,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
 	}
 	jeb->gc_node = raw;
 
-	D1(printk(KERN_DEBUG "Going to garbage collect node at 0x%08x\n", ref_offset(raw)));
+	jffs2_dbg(1, "Going to garbage collect node at 0x%08x\n",
+		  ref_offset(raw));
 
 	if (!raw->next_in_ino) {
 		/* Inode-less node. Clean marker, snapshot or something like that */
@@ -316,7 +323,9 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
 
 	spin_unlock(&c->erase_completion_lock);
 
-	D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n", jeb->offset, ref_offset(raw), ref_flags(raw), ic->ino));
+	jffs2_dbg(1, "%s(): collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n",
+		  __func__, jeb->offset, ref_offset(raw), ref_flags(raw),
+		  ic->ino);
 
 	/* Three possibilities:
 	   1. Inode is already in-core. We must iget it and do proper
@@ -336,8 +345,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
 		if (ref_flags(raw) == REF_PRISTINE)
 			ic->state = INO_STATE_GC;
 		else {
-			D1(printk(KERN_DEBUG "Ino #%u is absent but node not REF_PRISTINE. Reading.\n",
-				  ic->ino));
+			jffs2_dbg(1, "Ino #%u is absent but node not REF_PRISTINE. Reading.\n",
+				  ic->ino);
 		}
 		break;
 
@@ -367,8 +376,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
 		   drop the alloc_sem before sleeping. */
 
 		mutex_unlock(&c->alloc_sem);
-		D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n",
-			  ic->ino, ic->state));
+		jffs2_dbg(1, "%s(): waiting for ino #%u in state %d\n",
+			  __func__, ic->ino, ic->state);
 		sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
 		/* And because we dropped the alloc_sem we must start again from the
 		   beginning. Ponder chance of livelock here -- we're returning success
@@ -445,7 +454,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
 
 eraseit:
 	if (c->gcblock && !c->gcblock->used_size) {
-		D1(printk(KERN_DEBUG "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n", c->gcblock->offset));
+		jffs2_dbg(1, "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n",
+			  c->gcblock->offset);
 		/* We're GC'ing an empty block? */
 		list_add_tail(&c->gcblock->list, &c->erase_pending_list);
 		c->gcblock = NULL;
@@ -475,12 +485,12 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era
 
 	if (c->gcblock != jeb) {
 		spin_unlock(&c->erase_completion_lock);
-		D1(printk(KERN_DEBUG "GC block is no longer gcblock. Restart\n"));
+		jffs2_dbg(1, "GC block is no longer gcblock. Restart\n");
 		goto upnout;
 	}
 	if (ref_obsolete(raw)) {
 		spin_unlock(&c->erase_completion_lock);
-		D1(printk(KERN_DEBUG "node to be GC'd was obsoleted in the meantime.\n"));
+		jffs2_dbg(1, "node to be GC'd was obsoleted in the meantime.\n");
 		/* They'll call again */
 		goto upnout;
 	}
@@ -562,7 +572,8 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
 	uint32_t crc, rawlen;
 	int retried = 0;
 
-	D1(printk(KERN_DEBUG "Going to GC REF_PRISTINE node at 0x%08x\n", ref_offset(raw)));
+	jffs2_dbg(1, "Going to GC REF_PRISTINE node at 0x%08x\n",
+		  ref_offset(raw));
 
 	alloclen = rawlen = ref_totlen(c, c->gcblock, raw);
 
@@ -671,7 +682,7 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
 
 		retried = 1;
 
-		D1(printk(KERN_DEBUG "Retrying failed write of REF_PRISTINE node.\n"));
+		jffs2_dbg(1, "Retrying failed write of REF_PRISTINE node.\n");
 
 		jffs2_dbg_acct_sanity_check(c,jeb);
 		jffs2_dbg_acct_paranoia_check(c, jeb);
@@ -681,14 +692,16 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
 		   it is only an upper estimation */
 
 		if (!ret) {
-			D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", phys_ofs));
+			jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write.\n",
+				  phys_ofs);
 
 			jffs2_dbg_acct_sanity_check(c,jeb);
 			jffs2_dbg_acct_paranoia_check(c, jeb);
 
 			goto retry;
 		}
-		D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
+		jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n",
+			  ret);
 	}
 
 	if (!ret)
@@ -698,7 +711,8 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
 	jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, rawlen, ic);
 
 	jffs2_mark_node_obsolete(c, raw);
-	D1(printk(KERN_DEBUG "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n", ref_offset(raw)));
+	jffs2_dbg(1, "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n",
+		  ref_offset(raw));
 
  out_node:
 	kfree(node);
@@ -725,7 +739,8 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
 		/* For these, we don't actually need to read the old node */
 		mdatalen = jffs2_encode_dev(&dev, JFFS2_F_I_RDEV(f));
 		mdata = (char *)&dev;
-		D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of kdev_t\n", mdatalen));
+		jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
+			  __func__, mdatalen);
 	} else if (S_ISLNK(JFFS2_F_I_MODE(f))) {
 		mdatalen = fn->size;
 		mdata = kmalloc(fn->size, GFP_KERNEL);
@@ -739,7 +754,8 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
 			kfree(mdata);
 			return ret;
 		}
-		D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bites of symlink target\n", mdatalen));
+		jffs2_dbg(1, "%s(): Writing %d bites of symlink target\n",
+			  __func__, mdatalen);
 
 	}
 
@@ -887,7 +903,8 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct
 		if (SECTOR_ADDR(raw->flash_offset) == SECTOR_ADDR(fd->raw->flash_offset))
 			continue;
 
-		D1(printk(KERN_DEBUG "Check potential deletion dirent at %08x\n", ref_offset(raw)));
+		jffs2_dbg(1, "Check potential deletion dirent at %08x\n",
+			  ref_offset(raw));
 
 		/* This is an obsolete node belonging to the same directory, and it's of the right
 		   length. We need to take a closer look...*/
@@ -923,8 +940,9 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct
 			   a new deletion dirent to replace it */
 			mutex_unlock(&c->erase_free_sem);
 
-			D1(printk(KERN_DEBUG "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n",
-				  ref_offset(fd->raw), fd->name, ref_offset(raw), je32_to_cpu(rd->ino)));
+			jffs2_dbg(1, "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n",
+				  ref_offset(fd->raw), fd->name,
+				  ref_offset(raw), je32_to_cpu(rd->ino));
 			kfree(rd);
 
 			return jffs2_garbage_collect_dirent(c, jeb, f, fd);
@@ -964,8 +982,8 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
 	uint32_t alloclen, ilen;
 	int ret;
 
-	D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n",
-		  f->inocache->ino, start, end));
+	jffs2_dbg(1, "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n",
+		  f->inocache->ino, start, end);
 
 	memset(&ri, 0, sizeof(ri));
 
@@ -1117,8 +1135,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
 
 	memset(&ri, 0, sizeof(ri));
 
-	D1(printk(KERN_DEBUG "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n",
-		  f->inocache->ino, start, end));
+	jffs2_dbg(1, "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n",
+		  f->inocache->ino, start, end);
 
 	orig_end = end;
 	orig_start = start;
@@ -1149,15 +1167,15 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
 		/* If the previous frag doesn't even reach the beginning, there's
 		   excessive fragmentation. Just merge. */
 		if (frag->ofs > min) {
-			D1(printk(KERN_DEBUG "Expanding down to cover partial frag (0x%x-0x%x)\n",
-				  frag->ofs, frag->ofs+frag->size));
+			jffs2_dbg(1, "Expanding down to cover partial frag (0x%x-0x%x)\n",
+				  frag->ofs, frag->ofs+frag->size);
 			start = frag->ofs;
 			continue;
 		}
 		/* OK. This frag holds the first byte of the page. */
 		if (!frag->node || !frag->node->raw) {
-			D1(printk(KERN_DEBUG "First frag in page is hole (0x%x-0x%x). Not expanding down.\n",
-				  frag->ofs, frag->ofs+frag->size));
+			jffs2_dbg(1, "First frag in page is hole (0x%x-0x%x). Not expanding down.\n",
+				  frag->ofs, frag->ofs+frag->size);
 			break;
 		} else {
 
@@ -1171,19 +1189,25 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
 			jeb = &c->blocks[raw->flash_offset / c->sector_size];
 
 			if (jeb == c->gcblock) {
-				D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n",
-					  frag->ofs, frag->ofs+frag->size, ref_offset(raw)));
+				jffs2_dbg(1, "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n",
+					  frag->ofs,
+					  frag->ofs + frag->size,
+					  ref_offset(raw));
 				start = frag->ofs;
 				break;
 			}
 			if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
-				D1(printk(KERN_DEBUG "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n",
-					  frag->ofs, frag->ofs+frag->size, jeb->offset));
+				jffs2_dbg(1, "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n",
+					  frag->ofs,
+					  frag->ofs + frag->size,
+					  jeb->offset);
 				break;
 			}
 
-			D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n",
-				  frag->ofs, frag->ofs+frag->size, jeb->offset));
+			jffs2_dbg(1, "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n",
+				  frag->ofs,
+				  frag->ofs + frag->size,
+				  jeb->offset);
 			start = frag->ofs;
 			break;
 		}
@@ -1199,15 +1223,15 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
 		/* If the previous frag doesn't even reach the beginning, there's lots
 		   of fragmentation. Just merge. */
 		if (frag->ofs+frag->size < max) {
-			D1(printk(KERN_DEBUG "Expanding up to cover partial frag (0x%x-0x%x)\n",
-				  frag->ofs, frag->ofs+frag->size));
+			jffs2_dbg(1, "Expanding up to cover partial frag (0x%x-0x%x)\n",
+				  frag->ofs, frag->ofs+frag->size);
 			end = frag->ofs + frag->size;
 			continue;
 		}
 
 		if (!frag->node || !frag->node->raw) {
-			D1(printk(KERN_DEBUG "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n",
-				  frag->ofs, frag->ofs+frag->size));
+			jffs2_dbg(1, "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n",
+				  frag->ofs, frag->ofs+frag->size);
 			break;
 		} else {
 
@@ -1221,25 +1245,31 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
 			jeb = &c->blocks[raw->flash_offset / c->sector_size];
 
 			if (jeb == c->gcblock) {
-				D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n",
-					  frag->ofs, frag->ofs+frag->size, ref_offset(raw)));
+				jffs2_dbg(1, "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n",
+					  frag->ofs,
+					  frag->ofs + frag->size,
+					  ref_offset(raw));
 				end = frag->ofs + frag->size;
 				break;
 			}
 			if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
-				D1(printk(KERN_DEBUG "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n",
-					  frag->ofs, frag->ofs+frag->size, jeb->offset));
+				jffs2_dbg(1, "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n",
+					  frag->ofs,
+					  frag->ofs + frag->size,
+					  jeb->offset);
 				break;
 			}
 
-			D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n",
-				  frag->ofs, frag->ofs+frag->size, jeb->offset));
+			jffs2_dbg(1, "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n",
+				  frag->ofs,
+				  frag->ofs + frag->size,
+				  jeb->offset);
 			end = frag->ofs + frag->size;
 			break;
 		}
 	}
-	D1(printk(KERN_DEBUG "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n",
-		  orig_start, orig_end, start, end));
+	jffs2_dbg(1, "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n",
+		  orig_start, orig_end, start, end);
 
 	D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size));
 	BUG_ON(end < orig_end);