Diffstat (limited to 'drivers/mtd/ftl.c')
 -rw-r--r--  drivers/mtd/ftl.c | 128
 1 files changed, 64 insertions, 64 deletions
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index d32c1b3a8ce3..de7e231d6d18 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -1,5 +1,5 @@
 /* This version ported to the Linux-MTD system by dwmw2@infradead.org
- * $Id: ftl.c,v 1.55 2005/01/17 13:47:21 hvr Exp $
+ * $Id: ftl.c,v 1.58 2005/11/07 11:14:19 gleixner Exp $
  *
  * Fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
  * - fixes some leaks on failure in build_maps and ftl_notify_add, cleanups
@@ -53,7 +53,7 @@
     Use of the FTL format for non-PCMCIA applications may be an
     infringement of these patents. For additional information,
     contact M-Systems (http://www.m-sys.com) directly.

 ======================================================================*/
 #include <linux/mtd/blktrans.h>
 #include <linux/module.h>
@@ -160,7 +160,7 @@ static void ftl_erase_callback(struct erase_info *done);
     Scan_header() checks to see if a memory region contains an FTL
     partition. build_maps() reads all the erase unit headers, builds
     the erase unit map, and then builds the virtual page map.

 ======================================================================*/

 static int scan_header(partition_t *part)
@@ -176,10 +176,10 @@ static int scan_header(partition_t *part)
          (offset + sizeof(header)) < max_offset;
          offset += part->mbd.mtd->erasesize ? : 0x2000) {

         err = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &ret,
                                   (unsigned char *)&header);

         if (err)
             return err;

         if (strcmp(header.DataOrgTuple+3, "FTL100") == 0) break;
@@ -232,10 +232,10 @@ static int build_maps(partition_t *part)
     for (i = 0; i < le16_to_cpu(part->header.NumEraseUnits); i++) {
         offset = ((i + le16_to_cpu(part->header.FirstPhysicalEUN))
                   << part->header.EraseUnitSize);
         ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &retval,
                                   (unsigned char *)&header);

         if (ret)
             goto out_XferInfo;

         ret = -1;
@@ -274,7 +274,7 @@ static int build_maps(partition_t *part)
                "don't add up!\n");
         goto out_XferInfo;
     }

     /* Set up virtual page map */
     blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize;
     part->VirtualBlockMap = vmalloc(blocks * sizeof(u_int32_t));
@@ -296,12 +296,12 @@ static int build_maps(partition_t *part)
         part->EUNInfo[i].Free = 0;
         part->EUNInfo[i].Deleted = 0;
         offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset);

         ret = part->mbd.mtd->read(part->mbd.mtd, offset,
                                   part->BlocksPerUnit * sizeof(u_int32_t), &retval,
                                   (unsigned char *)part->bam_cache);

         if (ret)
             goto out_bam_cache;

         for (j = 0; j < part->BlocksPerUnit; j++) {
@@ -316,7 +316,7 @@ static int build_maps(partition_t *part)
                 part->EUNInfo[i].Deleted++;
         }
     }

     ret = 0;
     goto out;

@@ -336,7 +336,7 @@ out:

     Erase_xfer() schedules an asynchronous erase operation for a
     transfer unit.

 ======================================================================*/

 static int erase_xfer(partition_t *part,
@@ -351,10 +351,10 @@ static int erase_xfer(partition_t *part,
     xfer->state = XFER_ERASING;

     /* Is there a free erase slot? Always in MTD. */


     erase=kmalloc(sizeof(struct erase_info), GFP_KERNEL);
     if (!erase)
         return -ENOMEM;

     erase->mtd = part->mbd.mtd;
@@ -362,7 +362,7 @@ static int erase_xfer(partition_t *part,
     erase->addr = xfer->Offset;
     erase->len = 1 << part->header.EraseUnitSize;
     erase->priv = (u_long)part;

     ret = part->mbd.mtd->erase(part->mbd.mtd, erase);

     if (!ret)
@@ -377,7 +377,7 @@ static int erase_xfer(partition_t *part,

     Prepare_xfer() takes a freshly erased transfer unit and gives
     it an appropriate header.

 ======================================================================*/

 static void ftl_erase_callback(struct erase_info *erase)
@@ -385,7 +385,7 @@ static void ftl_erase_callback(struct erase_info *erase)
     partition_t *part;
     struct xfer_info_t *xfer;
     int i;

     /* Look up the transfer unit */
     part = (partition_t *)(erase->priv);

@@ -422,7 +422,7 @@ static int prepare_xfer(partition_t *part, int i)

     xfer = &part->XferInfo[i];
     xfer->state = XFER_FAILED;

     DEBUG(1, "ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset);

     /* Write the transfer unit header */
@@ -446,7 +446,7 @@ static int prepare_xfer(partition_t *part, int i)

     for (i = 0; i < nbam; i++, offset += sizeof(u_int32_t)) {

         ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int32_t),
                                    &retlen, (u_char *)&ctl);

         if (ret)
@@ -454,7 +454,7 @@ static int prepare_xfer(partition_t *part, int i)
     }
     xfer->state = XFER_PREPARED;
     return 0;

 } /* prepare_xfer */

 /*======================================================================
@@ -466,7 +466,7 @@ static int prepare_xfer(partition_t *part, int i)
     All data blocks are copied to the corresponding blocks in the
     target unit, so the virtual block map does not need to be
     updated.

 ======================================================================*/

 static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
@@ -486,14 +486,14 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
     xfer = &part->XferInfo[xferunit];
     DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
           eun->Offset, xfer->Offset);


     /* Read current BAM */
     if (part->bam_index != srcunit) {

         offset = eun->Offset + le32_to_cpu(part->header.BAMOffset);

         ret = part->mbd.mtd->read(part->mbd.mtd, offset,
                                   part->BlocksPerUnit * sizeof(u_int32_t),
                                   &retlen, (u_char *) (part->bam_cache));

@@ -501,11 +501,11 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
         part->bam_index = 0xffff;

         if (ret) {
             printk( KERN_WARNING "ftl: Failed to read BAM cache in copy_erase_unit()!\n");
             return ret;
         }
     }

     /* Write the LogicalEUN for the transfer unit */
     xfer->state = XFER_UNKNOWN;
     offset = xfer->Offset + 20; /* Bad! */
@@ -513,12 +513,12 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,

     ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int16_t),
                                &retlen, (u_char *) &unit);

     if (ret) {
         printk( KERN_WARNING "ftl: Failed to write back to BAM cache in copy_erase_unit()!\n");
         return ret;
     }

     /* Copy all data blocks from source unit to transfer unit */
     src = eun->Offset; dest = xfer->Offset;

@@ -558,15 +558,15 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
     }

     /* Write the BAM to the transfer unit */
     ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + le32_to_cpu(part->header.BAMOffset),
                                part->BlocksPerUnit * sizeof(int32_t), &retlen,
                                (u_char *)part->bam_cache);
     if (ret) {
         printk( KERN_WARNING "ftl: Error writing BAM in copy_erase_unit\n");
         return ret;
     }


     /* All clear? Then update the LogicalEUN again */
     ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20, sizeof(u_int16_t),
                                &retlen, (u_char *)&srcunitswap);
@@ -574,9 +574,9 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
     if (ret) {
         printk(KERN_WARNING "ftl: Error writing new LogicalEUN in copy_erase_unit\n");
         return ret;
     }


     /* Update the maps and usage stats*/
     i = xfer->EraseCount;
     xfer->EraseCount = eun->EraseCount;
@@ -588,10 +588,10 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
     part->FreeTotal += free;
     eun->Free = free;
     eun->Deleted = 0;

     /* Now, the cache should be valid for the new block */
     part->bam_index = srcunit;

     return 0;
 } /* copy_erase_unit */

@@ -608,7 +608,7 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
     oldest data unit instead. This means that we generally postpone
     the next reclaimation as long as possible, but shuffle static
     stuff around a bit for wear leveling.

 ======================================================================*/

 static int reclaim_block(partition_t *part)
@@ -666,7 +666,7 @@ static int reclaim_block(partition_t *part)
         else
             DEBUG(1, "ftl_cs: reclaim failed: no "
                   "suitable transfer units!\n");

         return -EIO;
     }
 }
@@ -715,7 +715,7 @@ static int reclaim_block(partition_t *part)
     returns the block index -- the erase unit is just the currently
     cached unit. If there are no free blocks, it returns 0 -- this
     is never a valid data block because it contains the header.

 ======================================================================*/

 #ifdef PSYCHO_DEBUG
@@ -737,7 +737,7 @@ static u_int32_t find_free(partition_t *part)
     u_int32_t blk;
     size_t retlen;
     int ret;

     /* Find an erase unit with some free space */
     stop = (part->bam_index == 0xffff) ? 0 : part->bam_index;
     eun = stop;
@@ -749,17 +749,17 @@ static u_int32_t find_free(partition_t *part)

     if (part->EUNInfo[eun].Free == 0)
         return 0;

     /* Is this unit's BAM cached? */
     if (eun != part->bam_index) {
         /* Invalidate cache */
         part->bam_index = 0xffff;

         ret = part->mbd.mtd->read(part->mbd.mtd,
                                   part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
                                   part->BlocksPerUnit * sizeof(u_int32_t),
                                   &retlen, (u_char *) (part->bam_cache));

         if (ret) {
             printk(KERN_WARNING"ftl: Error reading BAM in find_free\n");
             return 0;
@@ -781,14 +781,14 @@ static u_int32_t find_free(partition_t *part)
     }
     DEBUG(2, "ftl_cs: found free block at %d in %d\n", blk, eun);
     return blk;

 } /* find_free */


 /*======================================================================

     Read a series of sectors from an FTL partition.

 ======================================================================*/

 static int ftl_read(partition_t *part, caddr_t buffer,
@@ -798,7 +798,7 @@ static int ftl_read(partition_t *part, caddr_t buffer,
     u_long i;
     int ret;
     size_t offset, retlen;

     DEBUG(2, "ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n",
           part, sector, nblocks);
     if (!(part->state & FTL_FORMATTED)) {
@@ -834,7 +834,7 @@ static int ftl_read(partition_t *part, caddr_t buffer,
 /*======================================================================

     Write a series of sectors to an FTL partition

 ======================================================================*/

 static int set_bam_entry(partition_t *part, u_int32_t log_addr,
@@ -855,7 +855,7 @@ static int set_bam_entry(partition_t *part, u_int32_t log_addr,
     blk = (log_addr % bsize) / SECTOR_SIZE;
     offset = (part->EUNInfo[eun].Offset + blk * sizeof(u_int32_t) +
               le32_to_cpu(part->header.BAMOffset));

 #ifdef PSYCHO_DEBUG
     ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(u_int32_t),
                               &retlen, (u_char *)&old_addr);
@@ -925,7 +925,7 @@ static int ftl_write(partition_t *part, caddr_t buffer,
         if (ret)
             return ret;
     }

     bsize = 1 << part->header.EraseUnitSize;

     virt_addr = sector * SECTOR_SIZE | BLOCK_DATA;
@@ -949,12 +949,12 @@ static int ftl_write(partition_t *part, caddr_t buffer,
         log_addr = part->bam_index * bsize + blk * SECTOR_SIZE;
         part->EUNInfo[part->bam_index].Free--;
         part->FreeTotal--;
         if (set_bam_entry(part, log_addr, 0xfffffffe))
             return -EIO;
         part->EUNInfo[part->bam_index].Deleted++;
         offset = (part->EUNInfo[part->bam_index].Offset +
                   blk * SECTOR_SIZE);
         ret = part->mbd.mtd->write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,
                                    buffer);

         if (ret) {
@@ -964,7 +964,7 @@ static int ftl_write(partition_t *part, caddr_t buffer,
                    offset);
             return -EIO;
         }

         /* Only delete the old entry when the new entry is ready */
         old_addr = part->VirtualBlockMap[sector+i];
         if (old_addr != 0xffffffff) {
@@ -979,7 +979,7 @@ static int ftl_write(partition_t *part, caddr_t buffer,
             return -EIO;
         part->VirtualBlockMap[sector+i] = log_addr;
         part->EUNInfo[part->bam_index].Deleted--;

         buffer += SECTOR_SIZE;
         virt_addr += SECTOR_SIZE;
     }
@@ -1034,20 +1034,20 @@ static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
     partition_t *partition;

     partition = kmalloc(sizeof(partition_t), GFP_KERNEL);

     if (!partition) {
         printk(KERN_WARNING "No memory to scan for FTL on %s\n",
                mtd->name);
         return;
     }

     memset(partition, 0, sizeof(partition_t));

     partition->mbd.mtd = mtd;

     if ((scan_header(partition) == 0) &&
         (build_maps(partition) == 0)) {

         partition->state = FTL_FORMATTED;
 #ifdef PCMCIA_DEBUG
         printk(KERN_INFO "ftl_cs: opening %d KiB FTL partition\n",
@@ -1086,7 +1086,7 @@ struct mtd_blktrans_ops ftl_tr = {

 int init_ftl(void)
 {
-    DEBUG(0, "$Id: ftl.c,v 1.55 2005/01/17 13:47:21 hvr Exp $\n");
+    DEBUG(0, "$Id: ftl.c,v 1.58 2005/11/07 11:14:19 gleixner Exp $\n");

     return register_mtd_blktrans(&ftl_tr);
 }