author     Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2008-11-19 04:53:15 -0500
committer  Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2008-11-21 11:59:33 -0500
commit     3477d204658733aa3a87d3ae03b0327c1e599517
tree       1c9ba659f76c09a19b98f4bcbfac6fc67db43112
parent     6c0c42cdfd73fb161417403d8d077cb136e10bbf
UBIFS: pre-allocate bulk-read buffer
To avoid memory allocation failure during bulk-read, pre-allocate
a bulk-read buffer, so that if there is only one bulk-reader at a
time, it just uses the pre-allocated buffer and does not do any
memory allocation. However, if there is more than one bulk-reader,
then only one reader uses the pre-allocated buffer, while the
others allocate buffers for themselves.
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
 fs/ubifs/file.c  | 31
 fs/ubifs/super.c | 57
 fs/ubifs/ubifs.h |  6
 3 files changed, 76 insertions(+), 18 deletions(-)
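The pattern the commit message describes — one shared, pre-allocated buffer handed out under a try-lock, with a per-caller fallback allocation — is generic and can be illustrated outside the kernel. The sketch below is a minimal user-space analogue using pthreads, not UBIFS code; the names (shared_buf, get_buf, put_buf) and the buffer size are illustrative assumptions.

/* Minimal sketch of the "pre-allocated buffer with fallback" pattern:
 * a single shared buffer is protected by a mutex; a reader that cannot
 * take the lock allocates a private buffer instead, so readers never
 * block on each other and the common single-reader case allocates nothing. */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#define SHARED_BUF_LEN (128 * 1024)	/* illustrative size */

static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;
static char shared_buf[SHARED_BUF_LEN];

struct buf_ref {
	char *buf;
	int allocated;	/* 1 if this reader had to allocate a private buffer */
};

/* Try the shared buffer first; fall back to a private allocation. */
static int get_buf(struct buf_ref *ref, size_t len)
{
	if (len <= SHARED_BUF_LEN && pthread_mutex_trylock(&shared_lock) == 0) {
		ref->buf = shared_buf;
		ref->allocated = 0;
		return 0;
	}
	ref->buf = malloc(len);
	if (!ref->buf)
		return -1;	/* caller degrades gracefully, e.g. skips the optimization */
	ref->allocated = 1;
	return 0;
}

static void put_buf(struct buf_ref *ref)
{
	if (ref->allocated)
		free(ref->buf);
	else
		pthread_mutex_unlock(&shared_lock);
}

int main(void)
{
	struct buf_ref ref;

	if (get_buf(&ref, 64 * 1024) == 0) {
		memset(ref.buf, 0, 64 * 1024);	/* stand-in for the bulk-read work */
		put_buf(&ref);
	}
	return 0;
}

As in the UBIFS change below, only concurrent readers pay the allocation cost, and only when the shared buffer is already taken.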
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 0c5c27d63f6e..2624411d9758 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -811,15 +811,15 @@ static int ubifs_bulk_read(struct page *page)
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	pgoff_t index = page->index, last_page_read = ui->last_page_read;
 	struct bu_info *bu;
-	int err = 0;
+	int err = 0, allocated = 0;
 
 	ui->last_page_read = index;
 	if (!c->bulk_read)
 		return 0;
 
 	/*
-	 * Bulk-read is protected by ui_mutex, but it is an optimization, so
-	 * don't bother if we cannot lock the mutex.
+	 * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
+	 * so don't bother if we cannot lock the mutex.
 	 */
 	if (!mutex_trylock(&ui->ui_mutex))
 		return 0;
@@ -840,17 +840,30 @@ static int ubifs_bulk_read(struct page *page)
 		ui->bulk_read = 1;
 	}
 
-	bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
-	if (!bu)
-		return 0;
+	/*
+	 * If possible, try to use pre-allocated bulk-read information, which
+	 * is protected by @c->bu_mutex.
+	 */
+	if (mutex_trylock(&c->bu_mutex))
+		bu = &c->bu;
+	else {
+		bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
+		if (!bu)
+			goto out_unlock;
+
+		bu->buf = NULL;
+		allocated = 1;
+	}
 
-	bu->buf = NULL;
 	bu->buf_len = c->max_bu_buf_len;
 	data_key_init(c, &bu->key, inode->i_ino,
 		      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
-
 	err = ubifs_do_bulk_read(c, bu, page);
-	kfree(bu);
+
+	if (!allocated)
+		mutex_unlock(&c->bu_mutex);
+	else
+		kfree(bu);
 
 out_unlock:
 	mutex_unlock(&ui->ui_mutex);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 1d511569c035..d80b2aef42b6 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -572,14 +572,6 @@ static int init_constants_early(struct ubifs_info *c)
 	c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
 	if (c->max_bu_buf_len > c->leb_size)
 		c->max_bu_buf_len = c->leb_size;
-	if (c->max_bu_buf_len > UBIFS_KMALLOC_OK) {
-		/* Check if we can kmalloc that much */
-		void *try = kmalloc(c->max_bu_buf_len,
-				    GFP_KERNEL | __GFP_NOWARN);
-		kfree(try);
-		if (!try)
-			c->max_bu_buf_len = UBIFS_KMALLOC_OK;
-	}
 	return 0;
 }
 
@@ -999,6 +991,34 @@ static void destroy_journal(struct ubifs_info *c)
 }
 
 /**
+ * bu_init - initialize bulk-read information.
+ * @c: UBIFS file-system description object
+ */
+static void bu_init(struct ubifs_info *c)
+{
+	ubifs_assert(c->bulk_read == 1);
+
+	if (c->bu.buf)
+		return; /* Already initialized */
+
+again:
+	c->bu.buf = kmalloc(c->max_bu_buf_len, GFP_KERNEL | __GFP_NOWARN);
+	if (!c->bu.buf) {
+		if (c->max_bu_buf_len > UBIFS_KMALLOC_OK) {
+			c->max_bu_buf_len = UBIFS_KMALLOC_OK;
+			goto again;
+		}
+
+		/* Just disable bulk-read */
+		ubifs_warn("Cannot allocate %d bytes of memory for bulk-read, "
+			   "disabling it", c->max_bu_buf_len);
+		c->mount_opts.bulk_read = 1;
+		c->bulk_read = 0;
+		return;
+	}
+}
+
+/**
  * mount_ubifs - mount UBIFS file-system.
  * @c: UBIFS file-system description object
  *
@@ -1066,6 +1086,13 @@ static int mount_ubifs(struct ubifs_info *c)
 		goto out_free;
 	}
 
+	if (c->bulk_read == 1)
+		bu_init(c);
+
+	/*
+	 * We have to check all CRCs, even for data nodes, when we mount the FS
+	 * (specifically, when we are replaying).
+	 */
 	c->always_chk_crc = 1;
 
 	err = ubifs_read_superblock(c);
@@ -1296,6 +1323,7 @@ out_cbuf:
 out_dereg:
 	dbg_failure_mode_deregistration(c);
 out_free:
+	kfree(c->bu.buf);
 	vfree(c->ileb_buf);
 	vfree(c->sbuf);
 	kfree(c->bottom_up_buf);
@@ -1332,10 +1360,11 @@ static void ubifs_umount(struct ubifs_info *c)
 	kfree(c->cbuf);
 	kfree(c->rcvrd_mst_node);
 	kfree(c->mst_node);
+	kfree(c->bu.buf);
+	vfree(c->ileb_buf);
 	vfree(c->sbuf);
 	kfree(c->bottom_up_buf);
 	UBIFS_DBG(vfree(c->dbg_buf));
-	vfree(c->ileb_buf);
 	dbg_failure_mode_deregistration(c);
 }
 
@@ -1633,6 +1662,7 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
 		ubifs_err("invalid or unknown remount parameter");
 		return err;
 	}
+
 	if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
 		err = ubifs_remount_rw(c);
 		if (err)
@@ -1640,6 +1670,14 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
 	} else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY))
 		ubifs_remount_ro(c);
 
+	if (c->bulk_read == 1)
+		bu_init(c);
+	else {
+		dbg_gen("disable bulk-read");
+		kfree(c->bu.buf);
+		c->bu.buf = NULL;
+	}
+
 	return 0;
 }
 
@@ -1730,6 +1768,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
 	mutex_init(&c->log_mutex);
 	mutex_init(&c->mst_mutex);
 	mutex_init(&c->umount_mutex);
+	mutex_init(&c->bu_mutex);
 	init_waitqueue_head(&c->cmt_wq);
 	c->buds = RB_ROOT;
 	c->old_idx = RB_ROOT;
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 870b5c479e95..46b172560a06 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -969,7 +969,10 @@ struct ubifs_mount_opts {
  * @mst_node: master node
  * @mst_offs: offset of valid master node
  * @mst_mutex: protects the master node area, @mst_node, and @mst_offs
+ *
  * @max_bu_buf_len: maximum bulk-read buffer length
+ * @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu
+ * @bu: pre-allocated bulk-read information
  *
  * @log_lebs: number of logical eraseblocks in the log
  * @log_bytes: log size in bytes
@@ -1217,7 +1220,10 @@ struct ubifs_info {
 	struct ubifs_mst_node *mst_node;
 	int mst_offs;
 	struct mutex mst_mutex;
+
 	int max_bu_buf_len;
+	struct mutex bu_mutex;
+	struct bu_info bu;
 
 	int log_lebs;
 	long long log_bytes;
