-rw-r--r--	include/linux/slab.h	23
-rw-r--r--	mm/slab.c	45
2 files changed, 46 insertions(+), 22 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 3e3c3ab8ff94..7d66385ae750 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -62,16 +62,9 @@ extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned lo
 extern int kmem_cache_destroy(kmem_cache_t *);
 extern int kmem_cache_shrink(kmem_cache_t *);
 extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast);
-#ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node(kmem_cache_t *, int);
-#else
-static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int node)
-{
-	return kmem_cache_alloc(cachep, GFP_KERNEL);
-}
-#endif
 extern void kmem_cache_free(kmem_cache_t *, void *);
 extern unsigned int kmem_cache_size(kmem_cache_t *);
+extern kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags);
 
 /* Size description struct for general caches. */
 struct cache_sizes {
@@ -109,6 +102,20 @@ extern void *kcalloc(size_t, size_t, unsigned int __nocast);
 extern void kfree(const void *);
 extern unsigned int ksize(const void *);
 
+#ifdef CONFIG_NUMA
+extern void *kmem_cache_alloc_node(kmem_cache_t *, int flags, int node);
+extern void *kmalloc_node(size_t size, int flags, int node);
+#else
+static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node)
+{
+	return kmem_cache_alloc(cachep, flags);
+}
+static inline void *kmalloc_node(size_t size, int flags, int node)
+{
+	return kmalloc(size, flags);
+}
+#endif
+
 extern int FASTCALL(kmem_cache_reap(int));
 extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
 
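
For context, a minimal caller sketch of the reworked header interface (not part
of the patch; foo_ctl and foo_alloc are hypothetical names). The point it
illustrates: kmem_cache_alloc_node() and kmalloc_node() now take the caller's
gfp flags, and on !CONFIG_NUMA kernels the inline stubs above fall back to
kmem_cache_alloc()/kmalloc() with those flags rather than a hardcoded
GFP_KERNEL, so the same source builds and behaves uniformly either way:

	#include <linux/slab.h>

	struct foo_ctl {		/* hypothetical structure */
		int node;
	};

	/* Allocate the control structure on a given node; identical
	 * source for NUMA and non-NUMA builds. */
	static struct foo_ctl *foo_alloc(int node)
	{
		struct foo_ctl *ctl;

		/* gfp flags are passed through; GFP_ATOMIC etc. would
		 * also be honored under the new prototypes */
		ctl = kmalloc_node(sizeof(*ctl), GFP_KERNEL, node);
		if (ctl)
			ctl->node = node;
		return ctl;
	}
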
diff --git a/mm/slab.c b/mm/slab.c
index ec660d85ddd7..771cc09f9f1a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -583,7 +583,7 @@ static inline struct array_cache *ac_data(kmem_cache_t *cachep)
 	return cachep->array[smp_processor_id()];
 }
 
-static inline kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags)
+static inline kmem_cache_t *__find_general_cachep(size_t size, int gfpflags)
 {
 	struct cache_sizes *csizep = malloc_sizes;
 
@@ -607,6 +607,12 @@ static inline kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags)
 	return csizep->cs_cachep;
 }
 
+kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags)
+{
+	return __find_general_cachep(size, gfpflags);
+}
+EXPORT_SYMBOL(kmem_find_general_cachep);
+
 /* Cal the num objs, wastage, and bytes left over for a given slab size. */
 static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
 		int flags, size_t *left_over, unsigned int *num)
@@ -672,14 +678,11 @@ static struct array_cache *alloc_arraycache(int cpu, int entries,
 	int memsize = sizeof(void*)*entries+sizeof(struct array_cache);
 	struct array_cache *nc = NULL;
 
-	if (cpu != -1) {
-		kmem_cache_t *cachep;
-		cachep = kmem_find_general_cachep(memsize, GFP_KERNEL);
-		if (cachep)
-			nc = kmem_cache_alloc_node(cachep, cpu_to_node(cpu));
-	}
-	if (!nc)
+	if (cpu == -1)
 		nc = kmalloc(memsize, GFP_KERNEL);
+	else
+		nc = kmalloc_node(memsize, GFP_KERNEL, cpu_to_node(cpu));
+
 	if (nc) {
 		nc->avail = 0;
 		nc->limit = entries;
@@ -2361,7 +2364,7 @@ out:
  * and can sleep. And it will allocate memory on the given node, which
  * can improve the performance for cpu bound structures.
  */
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, int nodeid)
+void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int nodeid)
 {
 	int loop;
 	void *objp;
@@ -2393,7 +2396,7 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, int nodeid)
 	spin_unlock_irq(&cachep->spinlock);
 
 	local_irq_disable();
-	if (!cache_grow(cachep, GFP_KERNEL, nodeid)) {
+	if (!cache_grow(cachep, flags, nodeid)) {
 		local_irq_enable();
 		return NULL;
 	}
@@ -2435,6 +2438,16 @@ got_slabp:
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+void *kmalloc_node(size_t size, int flags, int node)
+{
+	kmem_cache_t *cachep;
+
+	cachep = kmem_find_general_cachep(size, flags);
+	if (unlikely(cachep == NULL))
+		return NULL;
+	return kmem_cache_alloc_node(cachep, flags, node);
+}
+EXPORT_SYMBOL(kmalloc_node);
 #endif
 
 /**
@@ -2462,7 +2475,12 @@ void *__kmalloc(size_t size, unsigned int __nocast flags)
 {
 	kmem_cache_t *cachep;
 
-	cachep = kmem_find_general_cachep(size, flags);
+	/* If you want to save a few bytes .text space: replace
+	 * __ with kmem_.
+	 * Then kmalloc uses the uninlined functions instead of the inline
+	 * functions.
+	 */
+	cachep = __find_general_cachep(size, flags);
 	if (unlikely(cachep == NULL))
 		return NULL;
 	return __cache_alloc(cachep, flags);
@@ -2489,9 +2507,8 @@ void *__alloc_percpu(size_t size, size_t align)
 	for (i = 0; i < NR_CPUS; i++) {
 		if (!cpu_possible(i))
 			continue;
-		pdata->ptrs[i] = kmem_cache_alloc_node(
-					kmem_find_general_cachep(size, GFP_KERNEL),
-					cpu_to_node(i));
+		pdata->ptrs[i] = kmalloc_node(size, GFP_KERNEL,
+				cpu_to_node(i));
 
 		if (!pdata->ptrs[i])
 			goto unwind_oom;
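
The __alloc_percpu() conversion above is the usage pattern this patch is after:
one kmalloc_node() call per cpu, placed on that cpu's home node, instead of the
old kmem_find_general_cachep() + kmem_cache_alloc_node() pairing. A standalone
sketch of the same idiom under the new interface (example_alloc_percpu and its
ptrs array are hypothetical, includes approximate for a 2.6-era tree, and the
unwind path is simplified):

	#include <linux/slab.h>
	#include <linux/smp.h>
	#include <linux/errno.h>
	#include <linux/topology.h>	/* cpu_to_node() */

	static void *ptrs[NR_CPUS];	/* hypothetical per-cpu pointers */

	static int example_alloc_percpu(size_t size)
	{
		int i;

		for (i = 0; i < NR_CPUS; i++) {
			if (!cpu_possible(i))
				continue;
			/* node-local allocation for cpu i */
			ptrs[i] = kmalloc_node(size, GFP_KERNEL,
						cpu_to_node(i));
			if (!ptrs[i])
				goto unwind;
		}
		return 0;

	unwind:
		/* kfree(NULL) is a no-op, so impossible cpus whose
		 * slots stayed NULL are safe to pass through here */
		while (--i >= 0)
			kfree(ptrs[i]);
		return -ENOMEM;
	}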