Diffstat (limited to 'fs')
-rw-r--r--   fs/ubifs/file.c    70
-rw-r--r--   fs/ubifs/super.c   12
-rw-r--r--   fs/ubifs/tnc.c      7
-rw-r--r--   fs/ubifs/ubifs.h    4
4 files changed, 60 insertions, 33 deletions
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 8be827cc7078..0c5c27d63f6e 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -691,32 +691,22 @@ out_err:
 /**
  * ubifs_do_bulk_read - do bulk-read.
  * @c: UBIFS file-system description object
- * @page1: first page
+ * @bu: bulk-read information
+ * @page1: first page to read
  *
  * This function returns %1 if the bulk-read is done, otherwise %0 is returned.
  */
-static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
+static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
+                              struct page *page1)
 {
         pgoff_t offset = page1->index, end_index;
         struct address_space *mapping = page1->mapping;
         struct inode *inode = mapping->host;
         struct ubifs_inode *ui = ubifs_inode(inode);
-        struct bu_info *bu;
         int err, page_idx, page_cnt, ret = 0, n = 0;
+        int allocate = bu->buf ? 0 : 1;
         loff_t isize;
 
-        bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
-        if (!bu)
-                return 0;
-
-        bu->buf_len = c->bulk_read_buf_size;
-        bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
-        if (!bu->buf)
-                goto out_free;
-
-        data_key_init(c, &bu->key, inode->i_ino,
-                      offset << UBIFS_BLOCKS_PER_PAGE_SHIFT);
-
         err = ubifs_tnc_get_bu_keys(c, bu);
         if (err)
                 goto out_warn;
@@ -735,12 +725,25 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
                  * together. If all the pages were like this, bulk-read would
                  * reduce performance, so we turn it off for a while.
                  */
-                ui->read_in_a_row = 0;
-                ui->bulk_read = 0;
-                goto out_free;
+                goto out_bu_off;
         }
 
         if (bu->cnt) {
+                if (allocate) {
+                        /*
+                         * Allocate bulk-read buffer depending on how many data
+                         * nodes we are going to read.
+                         */
+                        bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
+                                      bu->zbranch[bu->cnt - 1].len -
+                                      bu->zbranch[0].offs;
+                        ubifs_assert(bu->buf_len > 0);
+                        ubifs_assert(bu->buf_len <= c->leb_size);
+                        bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
+                        if (!bu->buf)
+                                goto out_bu_off;
+                }
+
                 err = ubifs_tnc_bulk_read(c, bu);
                 if (err)
                         goto out_warn;
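
Note: the sizing logic above allocates only the LEB span the looked-up nodes actually cover, instead of the former fixed c->bulk_read_buf_size. A minimal sketch of the same arithmetic, assuming only that struct ubifs_zbranch carries the node offset (offs) and length (len) as declared in ubifs.h:

        /*
         * Sketch: bytes spanned by n consecutive zbranches in one LEB -
         * from the first node's offset to the end byte of the last node,
         * which is exactly what bu->buf_len is set to above.
         */
        static int bu_span(const struct ubifs_zbranch *zbr, int n)
        {
                return zbr[n - 1].offs + zbr[n - 1].len - zbr[0].offs;
        }

For example, three 4096-byte nodes at offsets 0, 4096 and 8192 give a 12288-byte buffer rather than the full max_bu_buf_len.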
@@ -779,13 +782,17 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
         ui->last_page_read = offset + page_idx - 1;
 
 out_free:
-        kfree(bu->buf);
-        kfree(bu);
+        if (allocate)
+                kfree(bu->buf);
         return ret;
 
 out_warn:
         ubifs_warn("ignoring error %d and skipping bulk-read", err);
         goto out_free;
+
+out_bu_off:
+        ui->read_in_a_row = ui->bulk_read = 0;
+        goto out_free;
 }
 
 /**
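
Note: with the new allocate flag, buffer ownership becomes explicit - out_free releases bu->buf only when this function kmalloc'ed it itself, so a caller that passes in a preallocated buffer keeps ownership. A hedged sketch of that pattern in isolation (do_work and its -ENOMEM handling are illustrative, not from the patch):

        /* Free the buffer only if this function, not the caller, allocated it. */
        static int do_work(struct bu_info *bu, int len)
        {
                int allocate = bu->buf ? 0 : 1;

                if (allocate) {
                        bu->buf = kmalloc(len, GFP_NOFS | __GFP_NOWARN);
                        if (!bu->buf)
                                return -ENOMEM;
                }
                /* ... use bu->buf ... */
                if (allocate)
                        kfree(bu->buf);
                return 0;
        }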
@@ -803,18 +810,20 @@ static int ubifs_bulk_read(struct page *page)
         struct ubifs_info *c = inode->i_sb->s_fs_info;
         struct ubifs_inode *ui = ubifs_inode(inode);
         pgoff_t index = page->index, last_page_read = ui->last_page_read;
-        int ret = 0;
+        struct bu_info *bu;
+        int err = 0;
 
         ui->last_page_read = index;
-
         if (!c->bulk_read)
                 return 0;
+
         /*
          * Bulk-read is protected by ui_mutex, but it is an optimization, so
          * don't bother if we cannot lock the mutex.
          */
         if (!mutex_trylock(&ui->ui_mutex))
                 return 0;
+
         if (index != last_page_read + 1) {
                 /* Turn off bulk-read if we stop reading sequentially */
                 ui->read_in_a_row = 1;
@@ -822,6 +831,7 @@ static int ubifs_bulk_read(struct page *page)
                 ui->bulk_read = 0;
                 goto out_unlock;
         }
+
         if (!ui->bulk_read) {
                 ui->read_in_a_row += 1;
                 if (ui->read_in_a_row < 3)
@@ -829,10 +839,22 @@ static int ubifs_bulk_read(struct page *page)
                 /* Three reads in a row, so switch on bulk-read */
                 ui->bulk_read = 1;
         }
-        ret = ubifs_do_bulk_read(c, page);
+
+        bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
+        if (!bu)
+                return 0;
+
+        bu->buf = NULL;
+        bu->buf_len = c->max_bu_buf_len;
+        data_key_init(c, &bu->key, inode->i_ino,
+                      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
+
+        err = ubifs_do_bulk_read(c, bu, page);
+        kfree(bu);
+
 out_unlock:
         mutex_unlock(&ui->ui_mutex);
-        return ret;
+        return err;
 }
 
 static int ubifs_readpage(struct file *file, struct page *page)
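
Note: for reference, the enable/disable heuristic that ubifs_bulk_read() implements can be summarized outside the kernel context; the struct and function names below are invented for illustration:

        /* Bulk-read switches on after three strictly sequential page reads
         * and switches off again on the first non-sequential access.
         */
        struct seq_state { unsigned long last; int streak; int bulk; };

        static void on_read(struct seq_state *s, unsigned long index)
        {
                if (index != s->last + 1) {
                        s->streak = 1;          /* sequence broken: restart */
                        s->bulk = 0;
                } else if (!s->bulk && ++s->streak >= 3)
                        s->bulk = 1;            /* three in a row: enable */
                s->last = index;
        }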
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index ea493e6f2652..1d511569c035 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -569,16 +569,16 @@ static int init_constants_early(struct ubifs_info *c)
         c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ;
 
         /* Buffer size for bulk-reads */
-        c->bulk_read_buf_size = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
-        if (c->bulk_read_buf_size > c->leb_size)
-                c->bulk_read_buf_size = c->leb_size;
-        if (c->bulk_read_buf_size > UBIFS_KMALLOC_OK) {
+        c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
+        if (c->max_bu_buf_len > c->leb_size)
+                c->max_bu_buf_len = c->leb_size;
+        if (c->max_bu_buf_len > UBIFS_KMALLOC_OK) {
                 /* Check if we can kmalloc that much */
-                void *try = kmalloc(c->bulk_read_buf_size,
+                void *try = kmalloc(c->max_bu_buf_len,
                                     GFP_KERNEL | __GFP_NOWARN);
                 kfree(try);
                 if (!try)
-                        c->bulk_read_buf_size = UBIFS_KMALLOC_OK;
+                        c->max_bu_buf_len = UBIFS_KMALLOC_OK;
         }
         return 0;
 }
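
Note: the mount-time logic above first caps the bulk-read buffer at one LEB, then probes whether kmalloc() can actually serve that size and falls back to UBIFS_KMALLOC_OK if not; kfree(NULL) is a no-op, so the probe is safe even on failure. A standalone sketch of the same probe-and-clamp idea (clamp_buf_len is a hypothetical name):

        static int clamp_buf_len(int want, int safe)
        {
                void *probe = kmalloc(want, GFP_KERNEL | __GFP_NOWARN);

                kfree(probe);           /* only the pointer value is tested */
                return probe ? want : safe;
        }

init_constants_early() then amounts to c->max_bu_buf_len = clamp_buf_len(c->max_bu_buf_len, UBIFS_KMALLOC_OK).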
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 99e9a744cfd0..6eef5344a145 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -1501,7 +1501,12 @@ out:
  * @bu: bulk-read parameters and results
  *
  * Lookup consecutive data node keys for the same inode that reside
- * consecutively in the same LEB.
+ * consecutively in the same LEB. This function returns zero in case of success
+ * and a negative error code in case of failure.
+ *
+ * Note, if the bulk-read buffer length (@bu->buf_len) is known, this function
+ * makes sure bulk-read nodes fit the buffer. Otherwise, this function prepares
+ * maximum possible amount of nodes for bulk-read.
  */
 int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
 {
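
Note: together with the file.c changes, the documented contract implies a two-phase caller: look the keys up first, then allocate a right-sized buffer and bulk-read into it. A hedged sketch of phase 1 (the function name and the block variable are illustrative, not from the patch):

        /* Phase 1: fill bu->zbranch[] and bu->cnt, bounded by bu->buf_len. */
        static int prepare_bu(struct ubifs_info *c, struct bu_info *bu,
                              struct inode *inode, unsigned int block)
        {
                bu->buf = NULL;                   /* callee sizes the buffer */
                bu->buf_len = c->max_bu_buf_len;  /* lookup upper bound */
                data_key_init(c, &bu->key, inode->i_ino, block);
                return ubifs_tnc_get_bu_keys(c, bu);
        }

Phase 2 is then ubifs_tnc_bulk_read(c, bu) into a buffer spanning bu->zbranch[0..cnt-1], as ubifs_do_bulk_read() does above.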
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 06ba51efd65d..870b5c479e95 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -969,7 +969,7 @@ struct ubifs_mount_opts {
  * @mst_node: master node
  * @mst_offs: offset of valid master node
  * @mst_mutex: protects the master node area, @mst_node, and @mst_offs
- * @bulk_read_buf_size: buffer size for bulk-reads
+ * @max_bu_buf_len: maximum bulk-read buffer length
  *
  * @log_lebs: number of logical eraseblocks in the log
  * @log_bytes: log size in bytes
@@ -1217,7 +1217,7 @@ struct ubifs_info {
         struct ubifs_mst_node *mst_node;
         int mst_offs;
         struct mutex mst_mutex;
-        int bulk_read_buf_size;
+        int max_bu_buf_len;
 
         int log_lebs;
         long long log_bytes;