author     Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2008-11-19 04:53:15 -0500
committer  Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2008-11-21 11:59:33 -0500
commit     3477d204658733aa3a87d3ae03b0327c1e599517
tree       1c9ba659f76c09a19b98f4bcbfac6fc67db43112 /fs/ubifs/file.c
parent     6c0c42cdfd73fb161417403d8d077cb136e10bbf
UBIFS: pre-allocate bulk-read buffer
To avoid memory allocation failures during bulk-read, pre-allocate a bulk-read buffer, so that if there is only one bulk-reader at a time, it just uses the pre-allocated buffer and does no memory allocation at all. However, if there is more than one bulk-reader, only one of them uses the pre-allocated buffer, while the others allocate buffers for themselves.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
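The diffstat below is limited to fs/ubifs/file.c, so only the consumer side of the pre-allocated buffer appears in this view. As a minimal sketch of the mount-time counterpart it relies on, assuming the shared state is set up once while mounting (the fields c->bu, c->bu_mutex and c->max_bu_buf_len are taken from the hunks below; the function name is hypothetical):

	/* Hypothetical mount-time setup of the shared bulk-read state;
	 * assumes kernel context (linux/slab.h, linux/mutex.h). */
	static int ubifs_prealloc_bu(struct ubifs_info *c)
	{
		mutex_init(&c->bu_mutex);
		/* One worst-case-sized buffer, kept for the lifetime of
		 * the mount and handed out under c->bu_mutex. */
		c->bu.buf = kmalloc(c->max_bu_buf_len, GFP_KERNEL);
		if (!c->bu.buf)
			return -ENOMEM;
		return 0;
	}

Note that the fallback path in the hunk below sets bu->buf = NULL so that a private buffer is allocated later, whereas the trylock path leaves bu->buf pointing at the buffer set up here and so skips allocation entirely.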
Diffstat (limited to 'fs/ubifs/file.c')
-rw-r--r--  fs/ubifs/file.c | 31 ++++++++++++++++++++++---------
 1 file changed, 22 insertions(+), 9 deletions(-)
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 0c5c27d63f6e..2624411d9758 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -811,15 +811,15 @@ static int ubifs_bulk_read(struct page *page)
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	pgoff_t index = page->index, last_page_read = ui->last_page_read;
 	struct bu_info *bu;
-	int err = 0;
+	int err = 0, allocated = 0;
 
 	ui->last_page_read = index;
 	if (!c->bulk_read)
 		return 0;
 
 	/*
-	 * Bulk-read is protected by ui_mutex, but it is an optimization, so
-	 * don't bother if we cannot lock the mutex.
+	 * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
+	 * so don't bother if we cannot lock the mutex.
 	 */
 	if (!mutex_trylock(&ui->ui_mutex))
 		return 0;
@@ -840,17 +840,30 @@ static int ubifs_bulk_read(struct page *page)
 		ui->bulk_read = 1;
 	}
 
-	bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
-	if (!bu)
-		return 0;
+	/*
+	 * If possible, try to use pre-allocated bulk-read information, which
+	 * is protected by @c->bu_mutex.
+	 */
+	if (mutex_trylock(&c->bu_mutex))
+		bu = &c->bu;
+	else {
+		bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
+		if (!bu)
+			goto out_unlock;
+
+		bu->buf = NULL;
+		allocated = 1;
+	}
 
-	bu->buf = NULL;
 	bu->buf_len = c->max_bu_buf_len;
 	data_key_init(c, &bu->key, inode->i_ino,
 		      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
-
 	err = ubifs_do_bulk_read(c, bu, page);
-	kfree(bu);
+
+	if (!allocated)
+		mutex_unlock(&c->bu_mutex);
+	else
+		kfree(bu);
 
 out_unlock:
 	mutex_unlock(&ui->ui_mutex);
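For readers unfamiliar with the idiom, here is a minimal userspace analogue of the trylock-or-allocate pattern above (an illustration only, not UBIFS code; the buffer size and all names are made up):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define BUF_LEN 4096

	static pthread_mutex_t buf_mutex = PTHREAD_MUTEX_INITIALIZER;
	static char prealloc_buf[BUF_LEN];	/* stands in for the mount-time buffer */

	static void bulk_read(long id)
	{
		char *buf;
		int allocated = 0;

		if (pthread_mutex_trylock(&buf_mutex) == 0) {
			buf = prealloc_buf;	/* fast path: no allocation */
		} else {
			buf = malloc(BUF_LEN);	/* contended: private buffer */
			if (!buf)
				return;		/* degrade gracefully */
			allocated = 1;
		}

		snprintf(buf, BUF_LEN, "reader %ld", id);	/* stand-in for real I/O */
		printf("%s used %s buffer\n", buf,
		       allocated ? "a private" : "the shared");

		if (allocated)
			free(buf);
		else
			pthread_mutex_unlock(&buf_mutex);
	}

	static void *worker(void *arg)
	{
		bulk_read((long)arg);
		return NULL;
	}

	int main(void)
	{
		pthread_t t[4];

		for (long i = 0; i < 4; i++)
			pthread_create(&t[i], NULL, worker, (void *)i);
		for (int i = 0; i < 4; i++)
			pthread_join(t[i], NULL);
		return 0;
	}

As in the patch, the uncontended reader never allocates, and contention degrades to a plain heap allocation rather than making readers block on one another.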