Diffstat (limited to 'mm/slab.c')
 mm/slab.c | 74
 1 file changed, 38 insertions(+), 36 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 22bfb0b2ac8b..e291f5e1afbb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -368,7 +368,7 @@ static inline void kmem_list3_init(struct kmem_list3 *parent)
  * manages a cache.
  */
 
-struct kmem_cache_s {
+struct kmem_cache {
 /* 1) per-cpu data, touched during every alloc/free */
 	struct array_cache *array[NR_CPUS];
 	unsigned int batchcount;
@@ -1502,6 +1502,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 {
 	size_t left_over, slab_size, ralign;
 	kmem_cache_t *cachep = NULL;
+	struct list_head *p;
 
 	/*
 	 * Sanity checks... these are all serious usage bugs.
@@ -1516,6 +1517,35 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		BUG();
 	}
 
+	down(&cache_chain_sem);
+
+	list_for_each(p, &cache_chain) {
+		kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
+		mm_segment_t old_fs = get_fs();
+		char tmp;
+		int res;
+
+		/*
+		 * This happens when the module gets unloaded and doesn't
+		 * destroy its slab cache and no-one else reuses the vmalloc
+		 * area of the module.  Print a warning.
+		 */
+		set_fs(KERNEL_DS);
+		res = __get_user(tmp, pc->name);
+		set_fs(old_fs);
+		if (res) {
+			printk("SLAB: cache with size %d has lost its name\n",
+			       pc->objsize);
+			continue;
+		}
+
+		if (!strcmp(pc->name,name)) {
+			printk("kmem_cache_create: duplicate cache %s\n", name);
+			dump_stack();
+			goto oops;
+		}
+	}
+
 #if DEBUG
 	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
 	if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
@@ -1592,7 +1622,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	/* Get cache's description obj. */
 	cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
 	if (!cachep)
-		goto opps;
+		goto oops;
 	memset(cachep, 0, sizeof(kmem_cache_t));
 
 #if DEBUG
@@ -1686,7 +1716,7 @@ next:
 		printk("kmem_cache_create: couldn't create cache %s.\n", name);
 		kmem_cache_free(&cache_cache, cachep);
 		cachep = NULL;
-		goto opps;
+		goto oops;
 	}
 	slab_size = ALIGN(cachep->num*sizeof(kmem_bufctl_t)
 			  + sizeof(struct slab), align);
@@ -1781,43 +1811,14 @@ next:
 		cachep->limit = BOOT_CPUCACHE_ENTRIES;
 	}
 
-	/* Need the semaphore to access the chain. */
-	down(&cache_chain_sem);
-	{
-		struct list_head *p;
-		mm_segment_t old_fs;
-
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		list_for_each(p, &cache_chain) {
-			kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
-			char tmp;
-			/* This happens when the module gets unloaded and doesn't
-			   destroy its slab cache and noone else reuses the vmalloc
-			   area of the module. Print a warning. */
-			if (__get_user(tmp,pc->name)) {
-				printk("SLAB: cache with size %d has lost its name\n",
-					pc->objsize);
-				continue;
-			}
-			if (!strcmp(pc->name,name)) {
-				printk("kmem_cache_create: duplicate cache %s\n",name);
-				up(&cache_chain_sem);
-				unlock_cpu_hotplug();
-				BUG();
-			}
-		}
-		set_fs(old_fs);
-	}
-
 	/* cache setup completed, link it into the list */
 	list_add(&cachep->next, &cache_chain);
-	up(&cache_chain_sem);
 	unlock_cpu_hotplug();
-opps:
+oops:
 	if (!cachep && (flags & SLAB_PANIC))
 		panic("kmem_cache_create(): failed to create slab `%s'\n",
 			name);
+	up(&cache_chain_sem);
 	return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
@@ -3262,6 +3263,7 @@ static void drain_array_locked(kmem_cache_t *cachep,
 
 /**
  * cache_reap - Reclaim memory from caches.
+ * @unused: unused parameter
  *
  * Called from workqueue/eventd every few seconds.
  * Purpose:
@@ -3278,7 +3280,7 @@ static void cache_reap(void *unused)
 
 	if (down_trylock(&cache_chain_sem)) {
 		/* Give up. Setup the next iteration. */
-		schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC + smp_processor_id());
+		schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
 		return;
 	}
 
@@ -3347,7 +3349,7 @@ next:
 	up(&cache_chain_sem);
 	drain_remote_pages();
 	/* Setup the next iteration */
-	schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC + smp_processor_id());
+	schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
 }
 
 #ifdef CONFIG_PROC_FS