Diffstat (limited to 'fs/ubifs/file.c')
-rw-r--r--   fs/ubifs/file.c | 70 ++++++++++++++++++++++++++++++++++++++++++++++------------------------
1 file changed, 46 insertions(+), 24 deletions(-)
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 8be827cc7078..0c5c27d63f6e 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -691,32 +691,22 @@ out_err:
 /**
  * ubifs_do_bulk_read - do bulk-read.
  * @c: UBIFS file-system description object
- * @page1: first page
+ * @bu: bulk-read information
+ * @page1: first page to read
  *
  * This function returns %1 if the bulk-read is done, otherwise %0 is returned.
  */
-static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
+static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
+			      struct page *page1)
 {
 	pgoff_t offset = page1->index, end_index;
 	struct address_space *mapping = page1->mapping;
 	struct inode *inode = mapping->host;
 	struct ubifs_inode *ui = ubifs_inode(inode);
-	struct bu_info *bu;
 	int err, page_idx, page_cnt, ret = 0, n = 0;
+	int allocate = bu->buf ? 0 : 1;
 	loff_t isize;
 
-	bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
-	if (!bu)
-		return 0;
-
-	bu->buf_len = c->bulk_read_buf_size;
-	bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
-	if (!bu->buf)
-		goto out_free;
-
-	data_key_init(c, &bu->key, inode->i_ino,
-		      offset << UBIFS_BLOCKS_PER_PAGE_SHIFT);
-
 	err = ubifs_tnc_get_bu_keys(c, bu);
 	if (err)
 		goto out_warn;
@@ -735,12 +725,25 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
 		 * together. If all the pages were like this, bulk-read would
 		 * reduce performance, so we turn it off for a while.
 		 */
-		ui->read_in_a_row = 0;
-		ui->bulk_read = 0;
-		goto out_free;
+		goto out_bu_off;
 	}
 
 	if (bu->cnt) {
+		if (allocate) {
+			/*
+			 * Allocate bulk-read buffer depending on how many data
+			 * nodes we are going to read.
+			 */
+			bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
+				      bu->zbranch[bu->cnt - 1].len -
+				      bu->zbranch[0].offs;
+			ubifs_assert(bu->buf_len > 0);
+			ubifs_assert(bu->buf_len <= c->leb_size);
+			bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
+			if (!bu->buf)
+				goto out_bu_off;
+		}
+
 		err = ubifs_tnc_bulk_read(c, bu);
 		if (err)
 			goto out_warn;
@@ -779,13 +782,17 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
 	ui->last_page_read = offset + page_idx - 1;
 
 out_free:
-	kfree(bu->buf);
-	kfree(bu);
+	if (allocate)
+		kfree(bu->buf);
 	return ret;
 
 out_warn:
 	ubifs_warn("ignoring error %d and skipping bulk-read", err);
 	goto out_free;
+
+out_bu_off:
+	ui->read_in_a_row = ui->bulk_read = 0;
+	goto out_free;
 }
 
 /**
@@ -803,18 +810,20 @@ static int ubifs_bulk_read(struct page *page)
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	pgoff_t index = page->index, last_page_read = ui->last_page_read;
-	int ret = 0;
+	struct bu_info *bu;
+	int err = 0;
 
 	ui->last_page_read = index;
-
 	if (!c->bulk_read)
 		return 0;
+
 	/*
 	 * Bulk-read is protected by ui_mutex, but it is an optimization, so
 	 * don't bother if we cannot lock the mutex.
 	 */
 	if (!mutex_trylock(&ui->ui_mutex))
 		return 0;
+
 	if (index != last_page_read + 1) {
 		/* Turn off bulk-read if we stop reading sequentially */
 		ui->read_in_a_row = 1;
@@ -822,6 +831,7 @@ static int ubifs_bulk_read(struct page *page)
 		ui->bulk_read = 0;
 		goto out_unlock;
 	}
+
 	if (!ui->bulk_read) {
 		ui->read_in_a_row += 1;
 		if (ui->read_in_a_row < 3)
@@ -829,10 +839,22 @@ static int ubifs_bulk_read(struct page *page)
 		/* Three reads in a row, so switch on bulk-read */
 		ui->bulk_read = 1;
 	}
-	ret = ubifs_do_bulk_read(c, page);
+
+	bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
+	if (!bu)
+		return 0;
+
+	bu->buf = NULL;
+	bu->buf_len = c->max_bu_buf_len;
+	data_key_init(c, &bu->key, inode->i_ino,
+		      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
+
+	err = ubifs_do_bulk_read(c, bu, page);
+	kfree(bu);
+
 out_unlock:
 	mutex_unlock(&ui->ui_mutex);
-	return ret;
+	return err;
 }
 
 static int ubifs_readpage(struct file *file, struct page *page)
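
Note on the sizing logic introduced above: instead of always allocating a worst-case
buffer of c->max_bu_buf_len, the patch computes bu->buf_len from the first and last
entries of the bu->zbranch array filled in by ubifs_tnc_get_bu_keys(). The following
is a minimal user-space sketch of that computation, not kernel code; struct
zbranch_stub, bu_buf_len(), and the sample offsets are hypothetical stand-ins for
the relevant fields of UBIFS's struct ubifs_zbranch.

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for the struct ubifs_zbranch fields used here */
struct zbranch_stub {
	int offs;	/* data node offset within the LEB */
	int len;	/* data node length */
};

/*
 * Mirror of the patch's buf_len computation: the buffer must span from
 * the start of the first collected data node to the end of the last one.
 */
static int bu_buf_len(const struct zbranch_stub *zbranch, int cnt)
{
	int len = zbranch[cnt - 1].offs + zbranch[cnt - 1].len -
		  zbranch[0].offs;

	assert(len > 0);	/* the kernel uses ubifs_assert() here */
	return len;
}

int main(void)
{
	/* Three data nodes at made-up offsets within one LEB */
	const struct zbranch_stub z[] = { {0, 160}, {160, 160}, {512, 160} };

	/* 512 + 160 - 0 = 672 bytes, rather than a worst-case allocation */
	printf("buf_len = %d\n", bu_buf_len(z, 3));
	return 0;
}

The design point visible in the arithmetic: the buffer covers only the range of
data nodes that will actually be read in this pass, which is typically much
smaller than the maximum bulk-read buffer, so memory is allocated on demand and
only freed by ubifs_do_bulk_read() when it allocated the buffer itself.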