-rw-r--r--  include/linux/slab.h | 23
-rw-r--r--  mm/slab.c            | 45
2 files changed, 46 insertions, 22 deletions

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 3e3c3ab8ff94..7d66385ae750 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -62,16 +62,9 @@ extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned lo
 extern int kmem_cache_destroy(kmem_cache_t *);
 extern int kmem_cache_shrink(kmem_cache_t *);
 extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast);
-#ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node(kmem_cache_t *, int);
-#else
-static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int node)
-{
-	return kmem_cache_alloc(cachep, GFP_KERNEL);
-}
-#endif
 extern void kmem_cache_free(kmem_cache_t *, void *);
 extern unsigned int kmem_cache_size(kmem_cache_t *);
+extern kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags);
 
 /* Size description struct for general caches. */
 struct cache_sizes {
@@ -109,6 +102,20 @@ extern void *kcalloc(size_t, size_t, unsigned int __nocast);
 extern void kfree(const void *);
 extern unsigned int ksize(const void *);
 
+#ifdef CONFIG_NUMA
+extern void *kmem_cache_alloc_node(kmem_cache_t *, int flags, int node);
+extern void *kmalloc_node(size_t size, int flags, int node);
+#else
+static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node)
+{
+	return kmem_cache_alloc(cachep, flags);
+}
+static inline void *kmalloc_node(size_t size, int flags, int node)
+{
+	return kmalloc(size, flags);
+}
+#endif
+
 extern int FASTCALL(kmem_cache_reap(int));
 extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
 
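For orientation (not part of the patch): a minimal caller sketch against the declarations above. The foo_* names are hypothetical; the calls match the header. On !CONFIG_NUMA builds the inline stubs above simply drop the node argument and fall back to kmem_cache_alloc()/kmalloc().

	#include <linux/slab.h>

	/* hypothetical cache, created elsewhere via kmem_cache_create() */
	extern kmem_cache_t *foo_cachep;

	static void *foo_obj_near(int node)
	{
		/* object from a dedicated cache, placed on <node> */
		return kmem_cache_alloc_node(foo_cachep, GFP_KERNEL, node);
	}

	static void *foo_buf_near(size_t size, int node)
	{
		/* buffer from the matching general cache on <node>;
		 * freed with plain kfree(), like any kmalloc() result */
		return kmalloc_node(size, GFP_KERNEL, node);
	}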
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -583,7 +583,7 @@ static inline struct array_cache *ac_data(kmem_cache_t *cachep)
 	return cachep->array[smp_processor_id()];
 }
 
-static inline kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags)
+static inline kmem_cache_t *__find_general_cachep(size_t size, int gfpflags)
 {
 	struct cache_sizes *csizep = malloc_sizes;
 
@@ -607,6 +607,12 @@ static inline kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags)
 	return csizep->cs_cachep;
 }
 
+kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags)
+{
+	return __find_general_cachep(size, gfpflags);
+}
+EXPORT_SYMBOL(kmem_find_general_cachep);
+
 /* Cal the num objs, wastage, and bytes left over for a given slab size. */
 static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
 		int flags, size_t *left_over, unsigned int *num)
@@ -672,14 +678,11 @@ static struct array_cache *alloc_arraycache(int cpu, int entries,
 	int memsize = sizeof(void*)*entries+sizeof(struct array_cache);
 	struct array_cache *nc = NULL;
 
-	if (cpu != -1) {
-		kmem_cache_t *cachep;
-		cachep = kmem_find_general_cachep(memsize, GFP_KERNEL);
-		if (cachep)
-			nc = kmem_cache_alloc_node(cachep, cpu_to_node(cpu));
-	}
-	if (!nc)
+	if (cpu == -1)
 		nc = kmalloc(memsize, GFP_KERNEL);
+	else
+		nc = kmalloc_node(memsize, GFP_KERNEL, cpu_to_node(cpu));
+
 	if (nc) {
 		nc->avail = 0;
 		nc->limit = entries;
@@ -2361,7 +2364,7 @@ out:
  * and can sleep. And it will allocate memory on the given node, which
  * can improve the performance for cpu bound structures.
  */
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, int nodeid)
+void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int nodeid)
 {
 	int loop;
 	void *objp;
@@ -2393,7 +2396,7 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, int nodeid)
 	spin_unlock_irq(&cachep->spinlock);
 
 	local_irq_disable();
-	if (!cache_grow(cachep, GFP_KERNEL, nodeid)) {
+	if (!cache_grow(cachep, flags, nodeid)) {
 		local_irq_enable();
 		return NULL;
 	}
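What the flags plumbing buys, as a hedged sketch (hypothetical caller, not from the patch): before this hunk cache_grow() was always invoked with GFP_KERNEL, so a node-targeted allocation could sleep regardless of the caller's context. Now the caller's gfp flags reach the page allocator, which makes atomic-context use expressible:

	/* hypothetical: safe in atomic context only because the caller's
	 * flags now propagate into cache_grow() */
	static void *foo_grab_atomic(kmem_cache_t *cachep, int nodeid)
	{
		return kmem_cache_alloc_node(cachep, GFP_ATOMIC, nodeid);
	}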
@@ -2435,6 +2438,16 @@ got_slabp:
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+void *kmalloc_node(size_t size, int flags, int node)
+{
+	kmem_cache_t *cachep;
+
+	cachep = kmem_find_general_cachep(size, flags);
+	if (unlikely(cachep == NULL))
+		return NULL;
+	return kmem_cache_alloc_node(cachep, flags, node);
+}
+EXPORT_SYMBOL(kmalloc_node);
 #endif
 
 /**
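kmalloc_node() is kmalloc() with a node hint: it resolves the general cache for the requested size through the newly exported kmem_find_general_cachep() and hands it to kmem_cache_alloc_node(). A usage sketch (the foo_stats structure is invented for illustration):

	struct foo_stats {
		unsigned long hits;
		unsigned long misses;
	};

	static struct foo_stats *foo_stats_alloc(int node)
	{
		struct foo_stats *s;

		s = kmalloc_node(sizeof(*s), GFP_KERNEL, node);
		if (s == NULL)
			return NULL;	/* no general cache fits, or allocation failed */
		s->hits = 0;
		s->misses = 0;
		return s;		/* released later with plain kfree(s) */
	}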
@@ -2462,7 +2475,12 @@ void *__kmalloc(size_t size, unsigned int __nocast flags)
 {
 	kmem_cache_t *cachep;
 
-	cachep = kmem_find_general_cachep(size, flags);
+	/* If you want to save a few bytes .text space: replace
+	 * __ with kmem_.
+	 * Then kmalloc uses the uninlined functions instead of the inline
+	 * functions.
+	 */
+	cachep = __find_general_cachep(size, flags);
 	if (unlikely(cachep == NULL))
 		return NULL;
 	return __cache_alloc(cachep, flags);
@@ -2489,9 +2507,8 @@ void *__alloc_percpu(size_t size, size_t align)
 	for (i = 0; i < NR_CPUS; i++) {
 		if (!cpu_possible(i))
 			continue;
-		pdata->ptrs[i] = kmem_cache_alloc_node(
-				kmem_find_general_cachep(size, GFP_KERNEL),
-				cpu_to_node(i));
+		pdata->ptrs[i] = kmalloc_node(size, GFP_KERNEL,
+				cpu_to_node(i));
 
 		if (!pdata->ptrs[i])
 			goto unwind_oom;
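The replacement above also folds in a NULL-cache check the old open-coded pair lacked: kmem_find_general_cachep() can return NULL, and it was passed straight into kmem_cache_alloc_node(). For code outside slab.c the same per-CPU placement pattern now reduces to the following sketch (foo_tbl and foo_tbl_alloc are hypothetical):

	static void *foo_tbl[NR_CPUS];

	static int foo_tbl_alloc(size_t size)
	{
		int i;

		for (i = 0; i < NR_CPUS; i++) {
			if (!cpu_possible(i))
				continue;
			foo_tbl[i] = kmalloc_node(size, GFP_KERNEL, cpu_to_node(i));
			if (!foo_tbl[i])
				return -ENOMEM;	/* caller frees the partial table */
		}
		return 0;
	}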