path: root/fs/ubifs/super.c
author	Artem Bityutskiy <Artem.Bityutskiy@nokia.com>	2008-11-18 13:20:05 -0500
committer	Artem Bityutskiy <Artem.Bityutskiy@nokia.com>	2008-11-21 11:59:25 -0500
commit	6c0c42cdfd73fb161417403d8d077cb136e10bbf (patch)
tree	343de0cb98df07295bc3e03eee083012ac12bae7 /fs/ubifs/super.c
parent	39ce81ce7168aa7226fb9f182c3a2b57060d0905 (diff)
UBIFS: do not allocate too much
Bulk-read allocates 128KiB or more using kmalloc. The allocation starts failing often when memory gets fragmented. UBIFS still works fine in this case because it falls back to the standard (non-optimized) read method.

This patch teaches bulk-read to allocate exactly the amount of memory it needs, instead of allocating 128KiB every time.

This patch is also a preparation for a further fix where we'll have a pre-allocated bulk-read buffer as well. For example, the @bu object is now prepared in 'ubifs_bulk_read()', so we can later pass either pre-allocated or freshly allocated information to 'ubifs_do_bulk_read()', or teach 'ubifs_do_bulk_read()' not to allocate 'bu->buf' if it is already there.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
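The following is a minimal userspace C sketch (not the actual UBIFS code) of the idea described above: size the bulk-read buffer to what the request actually needs, cap it at a maximum, let the caller optionally supply a pre-allocated buffer, and fall back to the plain read path when the allocation fails. The names bulk_read(), do_bulk_read(), read_plain() and MAX_BU_BUF_LEN are illustrative stand-ins, not kernel identifiers.

	/*
	 * Illustrative sketch only; names and structure are hypothetical
	 * analogues of the UBIFS bulk-read path, not the real kernel code.
	 */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define MAX_BU_BUF_LEN (128 * 1024)	/* stand-in for c->max_bu_buf_len */

	struct bu_info {
		void *buf;	/* bulk-read buffer, may be pre-allocated by the caller */
		size_t buf_len;
	};

	/* Fallback: plain, non-optimized read path (stub for illustration). */
	static int read_plain(size_t len)
	{
		printf("falling back to plain reads for %zu bytes\n", len);
		return 0;
	}

	static int do_bulk_read(struct bu_info *bu, size_t need)
	{
		/* Allocate only if the caller did not supply a buffer already. */
		if (!bu->buf) {
			bu->buf = malloc(need);
			if (!bu->buf)
				return -1;	/* caller falls back to plain reads */
			bu->buf_len = need;
		}
		memset(bu->buf, 0, need);	/* pretend we filled it from flash */
		printf("bulk read of %zu bytes done\n", need);
		return 0;
	}

	static int bulk_read(size_t request_len)
	{
		struct bu_info bu = { .buf = NULL, .buf_len = 0 };
		/* Allocate only what this request needs, capped at the maximum. */
		size_t need = request_len < MAX_BU_BUF_LEN ? request_len : MAX_BU_BUF_LEN;
		int err = do_bulk_read(&bu, need);

		free(bu.buf);
		return err ? read_plain(request_len) : 0;
	}

	int main(void)
	{
		return bulk_read(32 * 1024);
	}

Because the bu object is prepared in the caller, the same do_bulk_read() can later be handed a pre-allocated buffer instead of allocating one itself, which mirrors the preparation this patch makes for the follow-up fix.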
Diffstat (limited to 'fs/ubifs/super.c')
-rw-r--r--	fs/ubifs/super.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index ea493e6f2652..1d511569c035 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -569,16 +569,16 @@ static int init_constants_early(struct ubifs_info *c)
 	c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ;
 
 	/* Buffer size for bulk-reads */
-	c->bulk_read_buf_size = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
-	if (c->bulk_read_buf_size > c->leb_size)
-		c->bulk_read_buf_size = c->leb_size;
-	if (c->bulk_read_buf_size > UBIFS_KMALLOC_OK) {
+	c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
+	if (c->max_bu_buf_len > c->leb_size)
+		c->max_bu_buf_len = c->leb_size;
+	if (c->max_bu_buf_len > UBIFS_KMALLOC_OK) {
 		/* Check if we can kmalloc that much */
-		void *try = kmalloc(c->bulk_read_buf_size,
+		void *try = kmalloc(c->max_bu_buf_len,
 				    GFP_KERNEL | __GFP_NOWARN);
 		kfree(try);
 		if (!try)
-			c->bulk_read_buf_size = UBIFS_KMALLOC_OK;
+			c->max_bu_buf_len = UBIFS_KMALLOC_OK;
 	}
 	return 0;
 }