author		Linus Torvalds <torvalds@g5.osdl.org>	2005-07-16 13:24:32 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-07-16 13:24:32 -0400
commit		1fa4aad496b9c96fcde6c8f905a43ae6733e5a79 (patch)
tree		46fed6895f9b25d8525a40c2221a2335c15257be
parent		9fb1759a3102c26cd8f64254a7c3e532782c2bb8 (diff)
parent		19870da7ea2fc483bf73a189046a430fd9b01391 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/tglx/mtd-2.6
-rw-r--r--	drivers/mtd/nand/nand_base.c	 22
-rw-r--r--	drivers/mtd/nand/nand_bbt.c	 20
-rw-r--r--	fs/jffs2/erase.c	174
3 files changed, 112 insertions(+), 104 deletions(-)
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 1bd71a598c79..eee5115658c8 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -59,7 +59,7 @@
  * The AG-AND chips have nice features for speed improvement,
  * which are not supported yet. Read / program 4 pages in one go.
  *
- * $Id: nand_base.c,v 1.146 2005/06/17 15:02:06 gleixner Exp $
+ * $Id: nand_base.c,v 1.147 2005/07/15 07:18:06 gleixner Exp $
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -1409,16 +1409,6 @@ static int nand_read_oob (struct mtd_info *mtd, loff_t from, size_t len, size_t
 		thislen = min_t(int, thislen, len);
 		this->read_buf(mtd, &buf[i], thislen);
 		i += thislen;
-
-		/* Apply delay or wait for ready/busy pin
-		 * Do this before the AUTOINCR check, so no problems
-		 * arise if a chip which does auto increment
-		 * is marked as NOAUTOINCR by the board driver.
-		 */
-		if (!this->dev_ready)
-			udelay (this->chip_delay);
-		else
-			nand_wait_ready(mtd);
 
 		/* Read more ? */
 		if (i < len) {
@@ -1432,6 +1422,16 @@ static int nand_read_oob (struct mtd_info *mtd, loff_t from, size_t len, size_t
 			this->select_chip(mtd, chipnr);
 		}
 
+		/* Apply delay or wait for ready/busy pin
+		 * Do this before the AUTOINCR check, so no problems
+		 * arise if a chip which does auto increment
+		 * is marked as NOAUTOINCR by the board driver.
+		 */
+		if (!this->dev_ready)
+			udelay (this->chip_delay);
+		else
+			nand_wait_ready(mtd);
+
 		/* Check, if the chip supports auto page increment
 		 * or if we have hit a block boundary.
 		 */
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 5ac2d2962220..7535ef53685e 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -6,7 +6,7 @@
  *
  * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
  *
- * $Id: nand_bbt.c,v 1.33 2005/06/14 15:47:56 gleixner Exp $
+ * $Id: nand_bbt.c,v 1.35 2005/07/15 13:53:47 gleixner Exp $
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -109,24 +109,21 @@ static int check_pattern (uint8_t *buf, int len, int paglen, struct nand_bbt_des
 /**
  * check_short_pattern - [GENERIC] check if a pattern is in the buffer
  * @buf:	the buffer to search
- * @len:	the length of buffer to search
- * @paglen:	the pagelength
  * @td:	search pattern descriptor
  *
  * Check for a pattern at the given place. Used to search bad block
  * tables and good / bad block identifiers. Same as check_pattern, but
- * no optional empty check and the pattern is expected to start
- * at offset 0.
+ * no optional empty check
  *
  */
-static int check_short_pattern (uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
+static int check_short_pattern (uint8_t *buf, struct nand_bbt_descr *td)
 {
 	int i;
 	uint8_t *p = buf;
 
 	/* Compare the pattern */
 	for (i = 0; i < td->len; i++) {
-		if (p[i] != td->pattern[i])
+		if (p[td->offs + i] != td->pattern[i])
 			return -1;
 	}
 	return 0;
@@ -337,13 +334,14 @@ static int create_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
 		if (!(bd->options & NAND_BBT_SCANEMPTY)) {
 			size_t retlen;
 
-			/* No need to read pages fully, just read required OOB bytes */
-			ret = mtd->read_oob(mtd, from + j * mtd->oobblock + bd->offs,
-					readlen, &retlen, &buf[0]);
+			/* Read the full oob until read_oob is fixed to
+			 * handle single byte reads for 16 bit buswidth */
+			ret = mtd->read_oob(mtd, from + j * mtd->oobblock,
+					mtd->oobsize, &retlen, buf);
 			if (ret)
 				return ret;
 
-			if (check_short_pattern (&buf[j * scanlen], scanlen, mtd->oobblock, bd)) {
+			if (check_short_pattern (buf, bd)) {
 				this->bbt[i >> 3] |= 0x03 << (i & 0x6);
 				printk (KERN_WARNING "Bad eraseblock %d at 0x%08x\n",
 					i >> 1, (unsigned int) from);
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index 6a4c0a3685da..787d84ac2bcd 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -7,7 +7,7 @@
  *
  * For licensing information, see the file 'LICENCE' in this directory.
  *
- * $Id: erase.c,v 1.76 2005/05/03 15:11:40 dedekind Exp $
+ * $Id: erase.c,v 1.80 2005/07/14 19:46:24 joern Exp $
  *
  */
 
@@ -300,100 +300,86 @@ static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_erase
 	jeb->last_node = NULL;
 }
 
-static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset)
 {
-	struct jffs2_raw_node_ref *marker_ref = NULL;
-	unsigned char *ebuf;
+	void *ebuf;
+	uint32_t ofs;
 	size_t retlen;
-	int ret;
-	uint32_t bad_offset;
-
-	if ((!jffs2_cleanmarker_oob(c)) && (c->cleanmarker_size > 0)) {
-		marker_ref = jffs2_alloc_raw_node_ref();
-		if (!marker_ref) {
-			printk(KERN_WARNING "Failed to allocate raw node ref for clean marker\n");
-			/* Stick it back on the list from whence it came and come back later */
-			jffs2_erase_pending_trigger(c);
-			spin_lock(&c->erase_completion_lock);
-			list_add(&jeb->list, &c->erase_complete_list);
-			spin_unlock(&c->erase_completion_lock);
-			return;
-		}
-	}
+	int ret = -EIO;
+
 	ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!ebuf) {
-		printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Assuming it worked\n", jeb->offset);
-	} else {
-		uint32_t ofs = jeb->offset;
+		printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", jeb->offset);
+		return -EAGAIN;
+	}
 
 	D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset));
-		while(ofs < jeb->offset + c->sector_size) {
-			uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
-			int i;
 
-			bad_offset = ofs;
+	for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) {
+		uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
+		int i;
 
-			ret = c->mtd->read(c->mtd, ofs, readlen, &retlen, ebuf);
+		*bad_offset = ofs;
 
-			if (ret) {
-				printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
-				goto bad;
-			}
-			if (retlen != readlen) {
-				printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen);
-				goto bad;
-			}
-			for (i=0; i<readlen; i += sizeof(unsigned long)) {
-				/* It's OK. We know it's properly aligned */
-				unsigned long datum = *(unsigned long *)(&ebuf[i]);
-				if (datum + 1) {
-					bad_offset += i;
-					printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", datum, bad_offset);
-				bad:
-					if ((!jffs2_cleanmarker_oob(c)) && (c->cleanmarker_size > 0))
-						jffs2_free_raw_node_ref(marker_ref);
-					kfree(ebuf);
-				bad2:
-					spin_lock(&c->erase_completion_lock);
-					/* Stick it on a list (any list) so
-					   erase_failed can take it right off
-					   again. Silly, but shouldn't happen
-					   often. */
-					list_add(&jeb->list, &c->erasing_list);
-					spin_unlock(&c->erase_completion_lock);
-					jffs2_erase_failed(c, jeb, bad_offset);
-					return;
-				}
+		ret = jffs2_flash_read(c, ofs, readlen, &retlen, ebuf);
+		if (ret) {
+			printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
+			goto fail;
+		}
+		if (retlen != readlen) {
+			printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen);
+			goto fail;
+		}
+		for (i=0; i<readlen; i += sizeof(unsigned long)) {
+			/* It's OK. We know it's properly aligned */
+			unsigned long *datum = ebuf + i;
+			if (*datum + 1) {
+				*bad_offset += i;
+				printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", *datum, *bad_offset);
+				goto fail;
 			}
-			ofs += readlen;
-			cond_resched();
 		}
-		kfree(ebuf);
+		ofs += readlen;
+		cond_resched();
 	}
+	ret = 0;
+ fail:
+	kfree(ebuf);
+	return ret;
+}
 
-	bad_offset = jeb->offset;
+static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+	struct jffs2_raw_node_ref *marker_ref = NULL;
+	size_t retlen;
+	int ret;
+	uint32_t bad_offset;
+
+	switch (jffs2_block_check_erase(c, jeb, &bad_offset)) {
+	case -EAGAIN: goto refile;
+	case -EIO: goto filebad;
+	}
 
 	/* Write the erase complete marker */
 	D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset));
-	if (jffs2_cleanmarker_oob(c)) {
+	bad_offset = jeb->offset;
 
-		if (jffs2_write_nand_cleanmarker(c, jeb))
-			goto bad2;
-
-		jeb->first_node = jeb->last_node = NULL;
+	/* Cleanmarker in oob area or no cleanmarker at all ? */
+	if (jffs2_cleanmarker_oob(c) || c->cleanmarker_size == 0) {
 
-		jeb->free_size = c->sector_size;
-		jeb->used_size = 0;
-		jeb->dirty_size = 0;
-		jeb->wasted_size = 0;
-	} else if (c->cleanmarker_size == 0) {
-		jeb->first_node = jeb->last_node = NULL;
+		if (jffs2_cleanmarker_oob(c)) {
+			if (jffs2_write_nand_cleanmarker(c, jeb))
+				goto filebad;
+		}
 
+		jeb->first_node = jeb->last_node = NULL;
 		jeb->free_size = c->sector_size;
 		jeb->used_size = 0;
 		jeb->dirty_size = 0;
 		jeb->wasted_size = 0;
+
 	} else {
+
 		struct kvec vecs[1];
 		struct jffs2_unknown_node marker = {
 			.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
@@ -401,21 +387,28 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
 			.totlen = cpu_to_je32(c->cleanmarker_size)
 		};
 
+		marker_ref = jffs2_alloc_raw_node_ref();
+		if (!marker_ref) {
+			printk(KERN_WARNING "Failed to allocate raw node ref for clean marker. Refiling\n");
+			goto refile;
+		}
+
 		marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4));
 
 		vecs[0].iov_base = (unsigned char *) &marker;
 		vecs[0].iov_len = sizeof(marker);
 		ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen);
 
-		if (ret) {
-			printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n",
-			       jeb->offset, ret);
-			goto bad2;
-		}
-		if (retlen != sizeof(marker)) {
-			printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
-			       jeb->offset, sizeof(marker), retlen);
-			goto bad2;
+		if (ret || retlen != sizeof(marker)) {
+			if (ret)
+				printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n",
+				       jeb->offset, ret);
+			else
+				printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
+				       jeb->offset, sizeof(marker), retlen);
+
+			jffs2_free_raw_node_ref(marker_ref);
+			goto filebad;
 		}
 
 		marker_ref->next_in_ino = NULL;
@@ -444,5 +437,22 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
 	c->nr_free_blocks++;
 	spin_unlock(&c->erase_completion_lock);
 	wake_up(&c->erase_wait);
-}
+	return;
+
+filebad:
+	spin_lock(&c->erase_completion_lock);
+	/* Stick it on a list (any list) so erase_failed can take it
+	   right off again. Silly, but shouldn't happen often. */
+	list_add(&jeb->list, &c->erasing_list);
+	spin_unlock(&c->erase_completion_lock);
+	jffs2_erase_failed(c, jeb, bad_offset);
+	return;
 
+refile:
+	/* Stick it back on the list from whence it came and come back later */
+	jffs2_erase_pending_trigger(c);
+	spin_lock(&c->erase_completion_lock);
+	list_add(&jeb->list, &c->erase_complete_list);
+	spin_unlock(&c->erase_completion_lock);
+	return;
+}