path: root/fs/jffs2/erase.c
author	Thomas Gleixner <tglx@tglx.tec.linutronix.de>	2005-07-15 02:14:44 -0400
committer	Thomas Gleixner <tglx@mtd.linutronix.de>	2005-07-15 02:14:44 -0400
commit	5d157885f383ccc0660c011fa488ae4edb77ab16 (patch)
tree	0caba31219184fcf47fb7f91ef330217812cc149 /fs/jffs2/erase.c
parent	ba460e48064edeb57e3398eb8972c58de33f11ea (diff)
[JFFS2] Fix node allocation leak
In the rare case of failing to write the cleanmarker, the allocated node was not freed.

Pointed out by Forrest Zhao. Initial cleanup by Joern Engel.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'fs/jffs2/erase.c')
-rw-r--r--	fs/jffs2/erase.c	174
1 files changed, 92 insertions, 82 deletions
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index 6a4c0a3685da..787d84ac2bcd 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -7,7 +7,7 @@
  *
  * For licensing information, see the file 'LICENCE' in this directory.
  *
- * $Id: erase.c,v 1.76 2005/05/03 15:11:40 dedekind Exp $
+ * $Id: erase.c,v 1.80 2005/07/14 19:46:24 joern Exp $
  *
  */
 
@@ -300,100 +300,86 @@ static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_erase
 	jeb->last_node = NULL;
 }
 
-static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset)
 {
-	struct jffs2_raw_node_ref *marker_ref = NULL;
-	unsigned char *ebuf;
+	void *ebuf;
+	uint32_t ofs;
 	size_t retlen;
-	int ret;
-	uint32_t bad_offset;
-
-	if ((!jffs2_cleanmarker_oob(c)) && (c->cleanmarker_size > 0)) {
-		marker_ref = jffs2_alloc_raw_node_ref();
-		if (!marker_ref) {
-			printk(KERN_WARNING "Failed to allocate raw node ref for clean marker\n");
-			/* Stick it back on the list from whence it came and come back later */
-			jffs2_erase_pending_trigger(c);
-			spin_lock(&c->erase_completion_lock);
-			list_add(&jeb->list, &c->erase_complete_list);
-			spin_unlock(&c->erase_completion_lock);
-			return;
-		}
-	}
+	int ret = -EIO;
+
 	ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!ebuf) {
-		printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Assuming it worked\n", jeb->offset);
-	} else {
-		uint32_t ofs = jeb->offset;
+		printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", jeb->offset);
+		return -EAGAIN;
+	}
 
-		D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset));
-		while(ofs < jeb->offset + c->sector_size) {
-			uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
-			int i;
+	D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset));
 
-			bad_offset = ofs;
+	for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) {
+		uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
+		int i;
 
-			ret = c->mtd->read(c->mtd, ofs, readlen, &retlen, ebuf);
+		*bad_offset = ofs;
 
-			if (ret) {
-				printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
-				goto bad;
-			}
-			if (retlen != readlen) {
-				printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen);
-				goto bad;
-			}
-			for (i=0; i<readlen; i += sizeof(unsigned long)) {
-				/* It's OK. We know it's properly aligned */
-				unsigned long datum = *(unsigned long *)(&ebuf[i]);
-				if (datum + 1) {
-					bad_offset += i;
-					printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", datum, bad_offset);
-				bad:
-					if ((!jffs2_cleanmarker_oob(c)) && (c->cleanmarker_size > 0))
-						jffs2_free_raw_node_ref(marker_ref);
-					kfree(ebuf);
-				bad2:
-					spin_lock(&c->erase_completion_lock);
-					/* Stick it on a list (any list) so
-					   erase_failed can take it right off
-					   again. Silly, but shouldn't happen
-					   often. */
-					list_add(&jeb->list, &c->erasing_list);
-					spin_unlock(&c->erase_completion_lock);
-					jffs2_erase_failed(c, jeb, bad_offset);
-					return;
-				}
+		ret = jffs2_flash_read(c, ofs, readlen, &retlen, ebuf);
+		if (ret) {
+			printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
+			goto fail;
+		}
+		if (retlen != readlen) {
+			printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen);
+			goto fail;
+		}
+		for (i=0; i<readlen; i += sizeof(unsigned long)) {
+			/* It's OK. We know it's properly aligned */
+			unsigned long *datum = ebuf + i;
+			if (*datum + 1) {
+				*bad_offset += i;
+				printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", *datum, *bad_offset);
+				goto fail;
 			}
-			ofs += readlen;
-			cond_resched();
 		}
-		kfree(ebuf);
+		ofs += readlen;
+		cond_resched();
 	}
+	ret = 0;
+fail:
+	kfree(ebuf);
+	return ret;
+}
 
-	bad_offset = jeb->offset;
+static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+	struct jffs2_raw_node_ref *marker_ref = NULL;
+	size_t retlen;
+	int ret;
+	uint32_t bad_offset;
+
+	switch (jffs2_block_check_erase(c, jeb, &bad_offset)) {
+	case -EAGAIN: goto refile;
+	case -EIO: goto filebad;
+	}
 
 	/* Write the erase complete marker */
 	D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset));
-	if (jffs2_cleanmarker_oob(c)) {
+	bad_offset = jeb->offset;
 
-		if (jffs2_write_nand_cleanmarker(c, jeb))
-			goto bad2;
-
-		jeb->first_node = jeb->last_node = NULL;
+	/* Cleanmarker in oob area or no cleanmarker at all ? */
+	if (jffs2_cleanmarker_oob(c) || c->cleanmarker_size == 0) {
 
-		jeb->free_size = c->sector_size;
-		jeb->used_size = 0;
-		jeb->dirty_size = 0;
-		jeb->wasted_size = 0;
-	} else if (c->cleanmarker_size == 0) {
-		jeb->first_node = jeb->last_node = NULL;
+		if (jffs2_cleanmarker_oob(c)) {
+			if (jffs2_write_nand_cleanmarker(c, jeb))
+				goto filebad;
+		}
 
+		jeb->first_node = jeb->last_node = NULL;
 		jeb->free_size = c->sector_size;
 		jeb->used_size = 0;
 		jeb->dirty_size = 0;
 		jeb->wasted_size = 0;
+
 	} else {
+
 		struct kvec vecs[1];
 		struct jffs2_unknown_node marker = {
 			.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
@@ -401,21 +387,28 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
 			.totlen = cpu_to_je32(c->cleanmarker_size)
 		};
 
+		marker_ref = jffs2_alloc_raw_node_ref();
+		if (!marker_ref) {
+			printk(KERN_WARNING "Failed to allocate raw node ref for clean marker. Refiling\n");
+			goto refile;
+		}
+
 		marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4));
 
 		vecs[0].iov_base = (unsigned char *) &marker;
 		vecs[0].iov_len = sizeof(marker);
 		ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen);
 
-		if (ret) {
-			printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n",
-			       jeb->offset, ret);
-			goto bad2;
-		}
-		if (retlen != sizeof(marker)) {
-			printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
-			       jeb->offset, sizeof(marker), retlen);
-			goto bad2;
+		if (ret || retlen != sizeof(marker)) {
+			if (ret)
+				printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n",
+				       jeb->offset, ret);
+			else
+				printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
+				       jeb->offset, sizeof(marker), retlen);
+
+			jffs2_free_raw_node_ref(marker_ref);
+			goto filebad;
 		}
 
 		marker_ref->next_in_ino = NULL;
@@ -444,5 +437,22 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
 	c->nr_free_blocks++;
 	spin_unlock(&c->erase_completion_lock);
 	wake_up(&c->erase_wait);
-}
+	return;
+
+filebad:
+	spin_lock(&c->erase_completion_lock);
+	/* Stick it on a list (any list) so erase_failed can take it
+	   right off again. Silly, but shouldn't happen often. */
+	list_add(&jeb->list, &c->erasing_list);
+	spin_unlock(&c->erase_completion_lock);
+	jffs2_erase_failed(c, jeb, bad_offset);
+	return;
 
+refile:
+	/* Stick it back on the list from whence it came and come back later */
+	jffs2_erase_pending_trigger(c);
+	spin_lock(&c->erase_completion_lock);
+	list_add(&jeb->list, &c->erase_complete_list);
+	spin_unlock(&c->erase_completion_lock);
+	return;
+}
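
For context, here is a minimal, self-contained sketch of the allocation-leak pattern the commit message describes, and of the fix. This is not the JFFS2 code itself: every name below (fake_alloc_ref, fake_free_ref, write_cleanmarker, mark_erased_old/new) is a stand-in invented for illustration; only the control flow mirrors the change, in which the failure path taken after a cleanmarker write error now releases the node reference that was allocated for it.

/* Sketch of the leak pattern fixed by this commit; all names are stand-ins. */
#include <stdlib.h>

struct raw_node_ref { int dummy; };

static struct raw_node_ref *fake_alloc_ref(void)  { return malloc(sizeof(struct raw_node_ref)); }
static void fake_free_ref(struct raw_node_ref *r) { free(r); }

/* Stand-in for the cleanmarker write; pretend it always fails. */
static int write_cleanmarker(void) { return -1; }

/* Old flow: the ref is allocated up front, and the write-failure path
 * returns without freeing it -- the leak pointed out by Forrest Zhao. */
static void mark_erased_old(void)
{
	struct raw_node_ref *marker_ref = fake_alloc_ref();

	if (!marker_ref)
		return;
	if (write_cleanmarker())
		return;			/* bug: marker_ref is never freed */
	/* ... would link marker_ref into the block here ... */
	fake_free_ref(marker_ref);
}

/* New flow: the ref is released explicitly before taking the failure path. */
static void mark_erased_new(void)
{
	struct raw_node_ref *marker_ref = fake_alloc_ref();

	if (!marker_ref)
		return;
	if (write_cleanmarker()) {
		fake_free_ref(marker_ref);	/* fix: release the ref on failure */
		return;
	}
	/* ... would link marker_ref into the block here ... */
	fake_free_ref(marker_ref);
}

int main(void)
{
	mark_erased_old();	/* leaks one ref when the write fails */
	mark_erased_new();	/* does not */
	return 0;
}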