author     Thomas Gleixner <tglx@cruncher.tec.linutronix.de>   2006-05-23 05:49:14 -0400
committer  Thomas Gleixner <tglx@cruncher.tec.linutronix.de>   2006-05-23 05:49:14 -0400
commit     dcb0932884b801290efd80fbc37630297b98181f (patch)
tree       c6519feda290a963087958f1e196abec11866d49 /fs
parent     ce4c61f184864991881ec789f7524f4b332eaafc (diff)
[JFFS2] Simplify writebuffer handling
The writev-based write buffer implementation was far too complex, as in most
use cases the write buffer had to be handled anyway. Simplify the write buffer
handling and use mtd->write instead. Extensive testing has shown no
performance impact.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'fs')
-rw-r--r--   fs/jffs2/wbuf.c   272
1 file changed, 102 insertions(+), 170 deletions(-)
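The patch replaces the old outvecs/splitvec bookkeeping with a small helper that tops up the page-sized write buffer, plus a per-iovec loop that flushes full pages and pushes whole-page runs straight to the device via mtd->write. Below is a minimal, self-contained C sketch of that fill-and-flush pattern; the wbuf_t type, fill_wbuf(), flush_wbuf() and the 16-byte page size are illustrative stand-ins for this note, not the real JFFS2 structures or the MTD API.

/* Stand-alone sketch of the fill-and-flush pattern adopted by this patch.
 * wbuf_t, fill_wbuf() and flush_wbuf() are hypothetical placeholders. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGESIZE 16

typedef struct {
        uint8_t buf[PAGESIZE];
        size_t len;             /* bytes currently buffered */
} wbuf_t;

/* Copy at most enough bytes to top up the page buffer. */
static size_t fill_wbuf(wbuf_t *w, const uint8_t *src, size_t len)
{
        /* Nothing buffered and at least a full page available: let the
         * caller write the aligned part directly to the device. */
        if (len && !w->len && len >= PAGESIZE)
                return 0;
        if (len > PAGESIZE - w->len)
                len = PAGESIZE - w->len;
        memcpy(w->buf + w->len, src, len);
        w->len += len;
        return len;
}

/* Pretend "device write": just report and reset the buffer. */
static void flush_wbuf(wbuf_t *w)
{
        printf("flush %zu bytes\n", w->len);
        w->len = 0;
}

int main(void)
{
        wbuf_t w = { .len = 0 };
        const uint8_t data[40] = { 0 };
        size_t done = 0, n;

        /* Head: top up any partially filled page. */
        n = fill_wbuf(&w, data, sizeof(data));
        if (w.len == PAGESIZE)
                flush_wbuf(&w);
        done += n;

        /* Middle: whole pages would go straight to the device;
         * emulated here by consuming page-sized chunks. */
        while (sizeof(data) - done >= PAGESIZE) {
                printf("direct write %d bytes\n", PAGESIZE);
                done += PAGESIZE;
        }

        /* Tail: whatever is left stays in the write buffer. */
        done += fill_wbuf(&w, data + done, sizeof(data) - done);
        printf("buffered tail: %zu bytes, total done: %zu\n", w.len, done);
        return 0;
}

In the patched jffs2_flash_writev() the same three steps appear per iovec: jffs2_fill_wbuf() for the unaligned head, c->mtd->write() of PAGE_DIV(vlen) bytes for the aligned middle, and jffs2_fill_wbuf() again for the tail, with __jffs2_flush_wbuf(c, NOPAD) whenever the buffer reaches a full page.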
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 45e3573cf107..0442a5753d33 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -613,20 +613,30 @@ int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
 
         return ret;
 }
-int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino)
+
+static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
+                              size_t len)
 {
-        struct kvec outvecs[3];
-        uint32_t totlen = 0;
-        uint32_t split_ofs = 0;
-        uint32_t old_totlen;
-        int ret, splitvec = -1;
-        int invec, outvec;
-        size_t wbuf_retlen;
-        unsigned char *wbuf_ptr;
-        size_t donelen = 0;
+        if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
+                return 0;
+
+        if (len > (c->wbuf_pagesize - c->wbuf_len))
+                len = c->wbuf_pagesize - c->wbuf_len;
+        memcpy(c->wbuf + c->wbuf_len, buf, len);
+        c->wbuf_len += (uint32_t) len;
+        return len;
+}
+
+int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
+                       unsigned long count, loff_t to, size_t *retlen,
+                       uint32_t ino)
+{
+        struct jffs2_eraseblock *jeb;
+        size_t wbuf_retlen, donelen = 0;
         uint32_t outvec_to = to;
+        int ret, invec;
 
-        /* If not NAND flash, don't bother */
+        /* If not writebuffered flash, don't bother */
         if (!jffs2_is_writebuffered(c))
                 return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
 
@@ -639,9 +649,11 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
                 memset(c->wbuf,0xff,c->wbuf_pagesize);
         }
 
-        /* Fixup the wbuf if we are moving to a new eraseblock. The checks below
-           fail for ECC'd NOR because cleanmarker == 16, so a block starts at
-           xxx0010. */
+        /*
+         * Fixup the wbuf if we are moving to a new eraseblock. The
+         * checks below fail for ECC'd NOR because cleanmarker == 16,
+         * so a block starts at xxx0010.
+         */
         if (jffs2_nor_ecc(c)) {
                 if (((c->wbuf_ofs % c->sector_size) == 0) && !c->wbuf_len) {
                         c->wbuf_ofs = PAGE_DIV(to);
@@ -650,23 +662,22 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
                 }
         }
 
-        /* Sanity checks on target address.
-           It's permitted to write at PAD(c->wbuf_len+c->wbuf_ofs),
-           and it's permitted to write at the beginning of a new
-           erase block. Anything else, and you die.
-           New block starts at xxx000c (0-b = block header)
-        */
+        /*
+         * Sanity checks on target address. It's permitted to write
+         * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
+         * write at the beginning of a new erase block. Anything else,
+         * and you die. New block starts at xxx000c (0-b = block
+         * header)
+         */
         if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
                 /* It's a write to a new block */
                 if (c->wbuf_len) {
-                        D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx causes flush of wbuf at 0x%08x\n", (unsigned long)to, c->wbuf_ofs));
+                        D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx "
+                                  "causes flush of wbuf at 0x%08x\n",
+                                  (unsigned long)to, c->wbuf_ofs));
                         ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
-                        if (ret) {
-                                /* the underlying layer has to check wbuf_len to do the cleanup */
-                                D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret));
-                                *retlen = 0;
-                                goto exit;
-                        }
+                        if (ret)
+                                goto outerr;
                 }
                 /* set pointer to new block */
                 c->wbuf_ofs = PAGE_DIV(to);
@@ -675,165 +686,70 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
 
         if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
                 /* We're not writing immediately after the writebuffer. Bad. */
-                printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write to %08lx\n", (unsigned long)to);
+                printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write "
+                       "to %08lx\n", (unsigned long)to);
                 if (c->wbuf_len)
                         printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
                                c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
-                BUG();
-        }
-
-        /* Note outvecs[3] above. We know count is never greater than 2 */
-        if (count > 2) {
-                printk(KERN_CRIT "jffs2_flash_writev(): count is %ld\n", count);
                 BUG();
         }
 
-        invec = 0;
-        outvec = 0;
-
-        /* Fill writebuffer first, if already in use */
-        if (c->wbuf_len) {
-                uint32_t invec_ofs = 0;
-
-                /* adjust alignment offset */
-                if (c->wbuf_len != PAGE_MOD(to)) {
-                        c->wbuf_len = PAGE_MOD(to);
-                        /* take care of alignment to next page */
-                        if (!c->wbuf_len)
-                                c->wbuf_len = c->wbuf_pagesize;
-                }
-
-                while(c->wbuf_len < c->wbuf_pagesize) {
-                        uint32_t thislen;
-
-                        if (invec == count)
-                                goto alldone;
-
-                        thislen = c->wbuf_pagesize - c->wbuf_len;
-
-                        if (thislen >= invecs[invec].iov_len)
-                                thislen = invecs[invec].iov_len;
-
-                        invec_ofs = thislen;
-
-                        memcpy(c->wbuf + c->wbuf_len, invecs[invec].iov_base, thislen);
-                        c->wbuf_len += thislen;
-                        donelen += thislen;
-                        /* Get next invec, if actual did not fill the buffer */
-                        if (c->wbuf_len < c->wbuf_pagesize)
-                                invec++;
-                }
-
-                /* write buffer is full, flush buffer */
-                ret = __jffs2_flush_wbuf(c, NOPAD);
-                if (ret) {
-                        /* the underlying layer has to check wbuf_len to do the cleanup */
-                        D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret));
-                        /* Retlen zero to make sure our caller doesn't mark the space dirty.
-                           We've already done everything that's necessary */
-                        *retlen = 0;
-                        goto exit;
-                }
-                outvec_to += donelen;
-                c->wbuf_ofs = outvec_to;
-
-                /* All invecs done ? */
-                if (invec == count)
-                        goto alldone;
-
-                /* Set up the first outvec, containing the remainder of the
-                   invec we partially used */
-                if (invecs[invec].iov_len > invec_ofs) {
-                        outvecs[0].iov_base = invecs[invec].iov_base+invec_ofs;
-                        totlen = outvecs[0].iov_len = invecs[invec].iov_len-invec_ofs;
-                        if (totlen > c->wbuf_pagesize) {
-                                splitvec = outvec;
-                                split_ofs = outvecs[0].iov_len - PAGE_MOD(totlen);
-                        }
-                        outvec++;
-                }
-                invec++;
-        }
-
-        /* OK, now we've flushed the wbuf and the start of the bits
-           we have been asked to write, now to write the rest.... */
-
-        /* totlen holds the amount of data still to be written */
-        old_totlen = totlen;
-        for ( ; invec < count; invec++,outvec++ ) {
-                outvecs[outvec].iov_base = invecs[invec].iov_base;
-                totlen += outvecs[outvec].iov_len = invecs[invec].iov_len;
-                if (PAGE_DIV(totlen) != PAGE_DIV(old_totlen)) {
-                        splitvec = outvec;
-                        split_ofs = outvecs[outvec].iov_len - PAGE_MOD(totlen);
-                        old_totlen = totlen;
+        /* adjust alignment offset */
+        if (c->wbuf_len != PAGE_MOD(to)) {
+                c->wbuf_len = PAGE_MOD(to);
+                /* take care of alignment to next page */
+                if (!c->wbuf_len) {
+                        c->wbuf_len = c->wbuf_pagesize;
+                        ret = __jffs2_flush_wbuf(c, NOPAD);
+                        if (ret)
+                                goto outerr;
                 }
         }
 
-        /* Now the outvecs array holds all the remaining data to write */
-        /* Up to splitvec,split_ofs is to be written immediately. The rest
-           goes into the (now-empty) wbuf */
-
-        if (splitvec != -1) {
-                uint32_t remainder;
-
-                remainder = outvecs[splitvec].iov_len - split_ofs;
-                outvecs[splitvec].iov_len = split_ofs;
-
-                /* We did cross a page boundary, so we write some now */
-                if (jffs2_cleanmarker_oob(c))
-                        ret = c->mtd->writev_ecc(c->mtd, outvecs, splitvec+1, outvec_to, &wbuf_retlen, NULL, c->oobinfo);
-                else
-                        ret = jffs2_flash_direct_writev(c, outvecs, splitvec+1, outvec_to, &wbuf_retlen);
-
-                if (ret < 0 || wbuf_retlen != PAGE_DIV(totlen)) {
-                        /* At this point we have no problem,
-                           c->wbuf is empty. However refile nextblock to avoid
-                           writing again to same address.
-                        */
-                        struct jffs2_eraseblock *jeb;
+        for (invec = 0; invec < count; invec++) {
+                int vlen = invecs[invec].iov_len;
+                uint8_t *v = invecs[invec].iov_base;
 
-                        spin_lock(&c->erase_completion_lock);
+                wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
 
-                        jeb = &c->blocks[outvec_to / c->sector_size];
-                        jffs2_block_refile(c, jeb, REFILE_ANYWAY);
-
-                        *retlen = 0;
-                        spin_unlock(&c->erase_completion_lock);
-                        goto exit;
+                if (c->wbuf_len == c->wbuf_pagesize) {
+                        ret = __jffs2_flush_wbuf(c, NOPAD);
+                        if (ret)
+                                goto outerr;
                 }
-
+                vlen -= wbuf_retlen;
+                outvec_to += wbuf_retlen;
                 donelen += wbuf_retlen;
-                c->wbuf_ofs = PAGE_DIV(outvec_to) + PAGE_DIV(totlen);
+                v += wbuf_retlen;
 
-                if (remainder) {
-                        outvecs[splitvec].iov_base += split_ofs;
-                        outvecs[splitvec].iov_len = remainder;
-                } else {
-                        splitvec++;
+                if (vlen >= c->wbuf_pagesize) {
+                        ret = c->mtd->write(c->mtd, outvec_to, PAGE_DIV(vlen),
+                                            &wbuf_retlen, v);
+                        if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
+                                goto outfile;
+
+                        vlen -= wbuf_retlen;
+                        outvec_to += wbuf_retlen;
+                        c->wbuf_ofs = outvec_to;
+                        donelen += wbuf_retlen;
+                        v += wbuf_retlen;
                 }
 
-        } else {
-                splitvec = 0;
-        }
-
-        /* Now splitvec points to the start of the bits we have to copy
-           into the wbuf */
-        wbuf_ptr = c->wbuf;
+                wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
+                if (c->wbuf_len == c->wbuf_pagesize) {
+                        ret = __jffs2_flush_wbuf(c, NOPAD);
+                        if (ret)
+                                goto outerr;
+                }
 
-        for ( ; splitvec < outvec; splitvec++) {
-                /* Don't copy the wbuf into itself */
-                if (outvecs[splitvec].iov_base == c->wbuf)
-                        continue;
-                memcpy(wbuf_ptr, outvecs[splitvec].iov_base, outvecs[splitvec].iov_len);
-                wbuf_ptr += outvecs[splitvec].iov_len;
-                donelen += outvecs[splitvec].iov_len;
+                outvec_to += wbuf_retlen;
+                donelen += wbuf_retlen;
         }
-        c->wbuf_len = wbuf_ptr - c->wbuf;
 
-        /* If there's a remainder in the wbuf and it's a non-GC write,
-           remember that the wbuf affects this ino */
-alldone:
+        /*
+         * If there's a remainder in the wbuf and it's a non-GC write,
+         * remember that the wbuf affects this ino
+         */
         *retlen = donelen;
 
         if (jffs2_sum_active()) {
@@ -846,8 +762,24 @@ alldone:
                 jffs2_wbuf_dirties_inode(c, ino);
 
         ret = 0;
+        up_write(&c->wbuf_sem);
+        return ret;
 
-exit:
+outfile:
+        /*
+         * At this point we have no problem, c->wbuf is empty. However
+         * refile nextblock to avoid writing again to same address.
+         */
+
+        spin_lock(&c->erase_completion_lock);
+
+        jeb = &c->blocks[outvec_to / c->sector_size];
+        jffs2_block_refile(c, jeb, REFILE_ANYWAY);
+
+        spin_unlock(&c->erase_completion_lock);
+
+outerr:
+        *retlen = 0;
         up_write(&c->wbuf_sem);
         return ret;
 }