author     Alexey Korolev <akorolev@infradead.org>    2008-11-13 08:40:38 -0500
committer  David Woodhouse <David.Woodhouse@intel.com>    2008-12-10 08:35:23 -0500
commit     8a4c2495b142fe612b291a810d9e695f269c26db (patch)
tree       e68d387a13d82918c5d744040ff9bf0973230f80 /drivers/mtd
parent     a9fc8991883cdf029bd373a82cbc2d12a10799dd (diff)
MTD: nandsim: use less RAM
Nandsim consumes roughly twice as much RAM as the density of the simulated device. This becomes critical when we need to simulate a 256MB NAND and run stress tests on it.

We investigated the reasons. Nandsim allocates space for pages with kmalloc(). The size of an LP NAND page is 2112 bytes, and kmalloc() takes memory from the slab pools in power-of-two chunks, so a request for 2112 bytes actually consumes 4096 bytes. The best way to avoid this is to use kmem_cache allocations, a mechanism designed specifically for arrays of equally-sized objects.

Signed-off-by: Alexey Korolev <akorolev@infradead.org>
Tested-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Acked-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
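For reference, below is a minimal sketch of the kmem_cache pattern this patch adopts. It is not part of the commit; the module and identifier names (demo_cache, DEMO_OBJ_SIZE, demo_pages) are illustrative, and only the 2112-byte object size is taken from the commit message.

#include <linux/module.h>
#include <linux/slab.h>

#define DEMO_OBJ_SIZE 2112	/* LP NAND page + OOB, as described above */

static struct kmem_cache *demo_cache;	/* illustrative name, not from nandsim */
static void *demo_obj;

static int __init demo_init(void)
{
	/*
	 * Objects come from a cache of exactly DEMO_OBJ_SIZE-byte slots
	 * (plus alignment), instead of kmalloc()'s 4096-byte
	 * power-of-two bucket.
	 */
	demo_cache = kmem_cache_create("demo_pages", DEMO_OBJ_SIZE, 0, 0, NULL);
	if (!demo_cache)
		return -ENOMEM;

	demo_obj = kmem_cache_alloc(demo_cache, GFP_NOFS);
	if (!demo_obj) {
		kmem_cache_destroy(demo_cache);
		return -ENOMEM;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	kmem_cache_free(demo_cache, demo_obj);
	kmem_cache_destroy(demo_cache);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

With a dedicated cache the slab allocator packs objects at their actual size, whereas kmalloc(2112, ...) falls into the 4096-byte bucket; that rounding is where nandsim's ~2x RAM overhead came from.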
Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/nand/nandsim.c | 25
1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index baa6f95e9621..43ce26d0f928 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -303,6 +303,9 @@ struct nandsim {
 	/* The simulated NAND flash pages array */
 	union ns_mem *pages;
 
+	/* Slab allocator for nand pages */
+	struct kmem_cache *nand_pages_slab;
+
 	/* Internal buffer of page + OOB size bytes */
 	union ns_mem buf;
 
@@ -435,8 +438,8 @@ static struct mtd_info *nsmtd;
 static u_char ns_verify_buf[NS_LARGEST_PAGE_SIZE];
 
 /*
- * Allocate array of page pointers and initialize the array to NULL
- * pointers.
+ * Allocate array of page pointers, create slab allocation for an array
+ * and initialize the array by NULL pointers.
  *
  * RETURNS: 0 if success, -ENOMEM if memory alloc fails.
  */
@@ -484,6 +487,12 @@ static int alloc_device(struct nandsim *ns)
 	for (i = 0; i < ns->geom.pgnum; i++) {
 		ns->pages[i].byte = NULL;
 	}
+	ns->nand_pages_slab = kmem_cache_create("nandsim",
+						ns->geom.pgszoob, 0, 0, NULL);
+	if (!ns->nand_pages_slab) {
+		NS_ERR("cache_create: unable to create kmem_cache\n");
+		return -ENOMEM;
+	}
 
 	return 0;
 
@@ -511,8 +520,10 @@ static void free_device(struct nandsim *ns)
 	if (ns->pages) {
 		for (i = 0; i < ns->geom.pgnum; i++) {
 			if (ns->pages[i].byte)
-				kfree(ns->pages[i].byte);
+				kmem_cache_free(ns->nand_pages_slab,
+						ns->pages[i].byte);
 		}
+		kmem_cache_destroy(ns->nand_pages_slab);
 		vfree(ns->pages);
 	}
 }
@@ -1475,7 +1486,7 @@ static void erase_sector(struct nandsim *ns)
 	for (i = 0; i < ns->geom.pgsec; i++) {
 		if (mypage->byte != NULL) {
 			NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i);
-			kfree(mypage->byte);
+			kmem_cache_free(ns->nand_pages_slab, mypage->byte);
 			mypage->byte = NULL;
 		}
 		mypage++;
@@ -1538,10 +1549,10 @@ static int prog_page(struct nandsim *ns, int num)
 	/*
 	 * We allocate memory with GFP_NOFS because a flash FS may
 	 * utilize this. If it is holding an FS lock, then gets here,
-	 * then kmalloc runs writeback which goes to the FS again
-	 * and deadlocks. This was seen in practice.
+	 * then kernel memory alloc runs writeback which goes to the FS
+	 * again and deadlocks. This was seen in practice.
 	 */
-	mypage->byte = kmalloc(ns->geom.pgszoob, GFP_NOFS);
+	mypage->byte = kmem_cache_alloc(ns->nand_pages_slab, GFP_NOFS);
 	if (mypage->byte == NULL) {
 		NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row);
 		return -1;