author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2008-10-22 14:34:09 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2008-10-22 14:34:09 -0400
commit		f20e3b5fe7ead0615309433260b9784d8da0bbbd (patch)
tree		eabb2e47a0355ac4e8024b7087b4e7cb9f324358 /fs/ubifs/file.c
parent		bcbfe664e7af019e698cef2feb85ac2b4f1ac11d (diff)
parent		f030d7b65e4e6399f23de2a41a58d1b607b6bd89 (diff)
Merge branch 'for-rmk' of git://git.android.com/kernel into devel
Diffstat (limited to 'fs/ubifs/file.c')
-rw-r--r--	fs/ubifs/file.c	260
1 file changed, 260 insertions(+), 0 deletions(-)
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 3d698e2022b1..51cf511d44d9 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -147,6 +147,12 @@ static int do_readpage(struct page *page)
 				err = ret;
 				if (err != -ENOENT)
 					break;
+			} else if (block + 1 == beyond) {
+				int dlen = le32_to_cpu(dn->size);
+				int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);
+
+				if (ilen && ilen < dlen)
+					memset(addr + ilen, 0, dlen - ilen);
 			}
 		}
 		if (++i >= UBIFS_BLOCKS_PER_PAGE)
@@ -577,8 +583,262 @@ out:
 	return copied;
 }
 
+/**
+ * populate_page - copy data nodes into a page for bulk-read.
+ * @c: UBIFS file-system description object
+ * @page: page
+ * @bu: bulk-read information
+ * @n: next zbranch slot
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+static int populate_page(struct ubifs_info *c, struct page *page,
+			 struct bu_info *bu, int *n)
+{
+	int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
+	struct inode *inode = page->mapping->host;
+	loff_t i_size = i_size_read(inode);
+	unsigned int page_block;
+	void *addr, *zaddr;
+	pgoff_t end_index;
+
+	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
+		inode->i_ino, page->index, i_size, page->flags);
+
+	addr = zaddr = kmap(page);
+
+	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+	if (!i_size || page->index > end_index) {
+		hole = 1;
+		memset(addr, 0, PAGE_CACHE_SIZE);
+		goto out_hole;
+	}
+
+	page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
+	while (1) {
+		int err, len, out_len, dlen;
+
+		if (nn >= bu->cnt) {
+			hole = 1;
+			memset(addr, 0, UBIFS_BLOCK_SIZE);
+		} else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
+			struct ubifs_data_node *dn;
+
+			dn = bu->buf + (bu->zbranch[nn].offs - offs);
+
+			ubifs_assert(dn->ch.sqnum >
+				     ubifs_inode(inode)->creat_sqnum);
+
+			len = le32_to_cpu(dn->size);
+			if (len <= 0 || len > UBIFS_BLOCK_SIZE)
+				goto out_err;
+
+			dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
+			out_len = UBIFS_BLOCK_SIZE;
+			err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
+					       le16_to_cpu(dn->compr_type));
+			if (err || len != out_len)
+				goto out_err;
+
+			if (len < UBIFS_BLOCK_SIZE)
+				memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);
+
+			nn += 1;
+			read = (i << UBIFS_BLOCK_SHIFT) + len;
+		} else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
+			nn += 1;
+			continue;
+		} else {
+			hole = 1;
+			memset(addr, 0, UBIFS_BLOCK_SIZE);
+		}
+		if (++i >= UBIFS_BLOCKS_PER_PAGE)
+			break;
+		addr += UBIFS_BLOCK_SIZE;
+		page_block += 1;
+	}
+
+	if (end_index == page->index) {
+		int len = i_size & (PAGE_CACHE_SIZE - 1);
+
+		if (len && len < read)
+			memset(zaddr + len, 0, read - len);
+	}
+
+out_hole:
+	if (hole) {
+		SetPageChecked(page);
+		dbg_gen("hole");
+	}
+
+	SetPageUptodate(page);
+	ClearPageError(page);
+	flush_dcache_page(page);
+	kunmap(page);
+	*n = nn;
+	return 0;
+
+out_err:
+	ClearPageUptodate(page);
+	SetPageError(page);
+	flush_dcache_page(page);
+	kunmap(page);
+	ubifs_err("bad data node (block %u, inode %lu)",
+		  page_block, inode->i_ino);
+	return -EINVAL;
+}
+
+/**
+ * ubifs_do_bulk_read - do bulk-read.
+ * @c: UBIFS file-system description object
+ * @page1: first page
+ *
+ * This function returns %1 if the bulk-read is done, otherwise %0 is returned.
+ */
+static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
+{
+	pgoff_t offset = page1->index, end_index;
+	struct address_space *mapping = page1->mapping;
+	struct inode *inode = mapping->host;
+	struct ubifs_inode *ui = ubifs_inode(inode);
+	struct bu_info *bu;
+	int err, page_idx, page_cnt, ret = 0, n = 0;
+	loff_t isize;
+
+	bu = kmalloc(sizeof(struct bu_info), GFP_NOFS);
+	if (!bu)
+		return 0;
+
+	bu->buf_len = c->bulk_read_buf_size;
+	bu->buf = kmalloc(bu->buf_len, GFP_NOFS);
+	if (!bu->buf)
+		goto out_free;
+
+	data_key_init(c, &bu->key, inode->i_ino,
+		      offset << UBIFS_BLOCKS_PER_PAGE_SHIFT);
+
+	err = ubifs_tnc_get_bu_keys(c, bu);
+	if (err)
+		goto out_warn;
+
+	if (bu->eof) {
+		/* Turn off bulk-read at the end of the file */
+		ui->read_in_a_row = 1;
+		ui->bulk_read = 0;
+	}
+
+	page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
+	if (!page_cnt) {
+		/*
+		 * This happens when there are multiple blocks per page and the
+		 * blocks for the first page we are looking for, are not
+		 * together. If all the pages were like this, bulk-read would
+		 * reduce performance, so we turn it off for a while.
+		 */
+		ui->read_in_a_row = 0;
+		ui->bulk_read = 0;
+		goto out_free;
+	}
+
+	if (bu->cnt) {
+		err = ubifs_tnc_bulk_read(c, bu);
+		if (err)
+			goto out_warn;
+	}
+
+	err = populate_page(c, page1, bu, &n);
+	if (err)
+		goto out_warn;
+
+	unlock_page(page1);
+	ret = 1;
+
+	isize = i_size_read(inode);
+	if (isize == 0)
+		goto out_free;
+	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
+
+	for (page_idx = 1; page_idx < page_cnt; page_idx++) {
+		pgoff_t page_offset = offset + page_idx;
+		struct page *page;
+
+		if (page_offset > end_index)
+			break;
+		page = find_or_create_page(mapping, page_offset,
+					   GFP_NOFS | __GFP_COLD);
+		if (!page)
+			break;
+		if (!PageUptodate(page))
+			err = populate_page(c, page, bu, &n);
+		unlock_page(page);
+		page_cache_release(page);
+		if (err)
+			break;
+	}
+
+	ui->last_page_read = offset + page_idx - 1;
+
+out_free:
+	kfree(bu->buf);
+	kfree(bu);
+	return ret;
+
+out_warn:
+	ubifs_warn("ignoring error %d and skipping bulk-read", err);
+	goto out_free;
+}
+
+/**
+ * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
+ * @page: page from which to start bulk-read.
+ *
+ * Some flash media are capable of reading sequentially at faster rates. UBIFS
+ * bulk-read facility is designed to take advantage of that, by reading in one
+ * go consecutive data nodes that are also located consecutively in the same
+ * LEB. This function returns %1 if a bulk-read is done and %0 otherwise.
+ */
+static int ubifs_bulk_read(struct page *page)
+{
+	struct inode *inode = page->mapping->host;
+	struct ubifs_info *c = inode->i_sb->s_fs_info;
+	struct ubifs_inode *ui = ubifs_inode(inode);
+	pgoff_t index = page->index, last_page_read = ui->last_page_read;
+	int ret = 0;
+
+	ui->last_page_read = index;
+
+	if (!c->bulk_read)
+		return 0;
+	/*
+	 * Bulk-read is protected by ui_mutex, but it is an optimization, so
+	 * don't bother if we cannot lock the mutex.
+	 */
+	if (!mutex_trylock(&ui->ui_mutex))
+		return 0;
+	if (index != last_page_read + 1) {
+		/* Turn off bulk-read if we stop reading sequentially */
+		ui->read_in_a_row = 1;
+		if (ui->bulk_read)
+			ui->bulk_read = 0;
+		goto out_unlock;
+	}
+	if (!ui->bulk_read) {
+		ui->read_in_a_row += 1;
+		if (ui->read_in_a_row < 3)
+			goto out_unlock;
+		/* Three reads in a row, so switch on bulk-read */
+		ui->bulk_read = 1;
+	}
+	ret = ubifs_do_bulk_read(c, page);
+out_unlock:
+	mutex_unlock(&ui->ui_mutex);
+	return ret;
+}
+
 static int ubifs_readpage(struct file *file, struct page *page)
 {
+	if (ubifs_bulk_read(page))
+		return 0;
 	do_readpage(page);
 	unlock_page(page);
 	return 0;
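
The fs/ubifs/file.c side of this merge is the UBIFS bulk-read facility, and its trigger policy is compact: bulk-read stays off until an inode sees three strictly sequential page reads, and any non-sequential read (or hitting EOF) switches it back off. Below is a minimal stand-alone C sketch of that heuristic, for illustration only: toy_inode and want_bulk_read are hypothetical names, not UBIFS API, and the real ubifs_bulk_read() above additionally honours the filesystem-wide c->bulk_read switch and takes ui->ui_mutex via mutex_trylock(), skipping the optimization when the lock is contended.

/*
 * Stand-alone sketch of the bulk-read trigger heuristic in this diff.
 * toy_inode and want_bulk_read are illustrative stand-ins, not UBIFS
 * API; locking and the per-filesystem enable switch are omitted.
 */
#include <stdio.h>

struct toy_inode {
	unsigned long last_page_read; /* index of the last page read */
	unsigned int read_in_a_row;   /* consecutive sequential reads */
	int bulk_read;                /* heuristic currently enabled? */
};

/* Returns 1 when a read of page 'index' should take the bulk-read path. */
static int want_bulk_read(struct toy_inode *ui, unsigned long index)
{
	unsigned long last = ui->last_page_read;

	ui->last_page_read = index;

	if (index != last + 1) {
		/* Sequential streak broken: reset and turn bulk-read off */
		ui->read_in_a_row = 1;
		ui->bulk_read = 0;
		return 0;
	}
	if (!ui->bulk_read) {
		/* As in the patch, three reads in a row switch it on */
		if (++ui->read_in_a_row < 3)
			return 0;
		ui->bulk_read = 1;
	}
	return 1;
}

int main(void)
{
	struct toy_inode ui = { .last_page_read = 0, .read_in_a_row = 1 };
	unsigned long pages[] = { 1, 2, 3, 4, 9, 10 };

	for (unsigned int i = 0; i < sizeof(pages) / sizeof(pages[0]); i++)
		printf("page %lu -> %s\n", pages[i],
		       want_bulk_read(&ui, pages[i]) ? "bulk-read" : "normal");
	return 0;
}

Running it on the page sequence 1, 2, 3, 4, 9, 10 shows the switch-on after the third sequential read and the reset after the jump to page 9, mirroring the read_in_a_row bookkeeping in ubifs_bulk_read().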