Diffstat (limited to 'mm/slab.c')
 -rw-r--r--  mm/slab.c | 55 ++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 32 insertions(+), 23 deletions(-)
@@ -650,8 +650,7 @@ static inline struct array_cache *ac_data(kmem_cache_t *cachep)
 	return cachep->array[smp_processor_id()];
 }
 
-static inline kmem_cache_t *__find_general_cachep(size_t size,
-				unsigned int __nocast gfpflags)
+static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
 {
 	struct cache_sizes *csizep = malloc_sizes;
 
@@ -675,8 +674,7 @@ static inline kmem_cache_t *__find_general_cachep(size_t size,
 	return csizep->cs_cachep;
 }
 
-kmem_cache_t *kmem_find_general_cachep(size_t size,
-			unsigned int __nocast gfpflags)
+kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 {
 	return __find_general_cachep(size, gfpflags);
 }
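Both hunks above are pure type changes: gfp_t has the same representation as unsigned int, so the generated code is identical. What the dedicated typedef buys is static checking: annotated for sparse (via __nocast at the time of this series, via __bitwise in later kernels), it lets the checker warn when a caller passes a bare integer or an unrelated flag word where allocator flags belong. A minimal user-space sketch of the mechanism follows; the flag value and the my_alloc/MY_GFP_KERNEL names are illustrative, and only gfp_t, __bitwise, __force and __CHECKER__ mirror the kernel's:

	#include <stdlib.h>

	#ifdef __CHECKER__                 /* defined while sparse runs */
	# define __bitwise __attribute__((bitwise))
	# define __force   __attribute__((force))
	#else                              /* plain compiler: no-ops */
	# define __bitwise
	# define __force
	#endif

	typedef unsigned int __bitwise gfp_t;   /* distinct type for sparse */

	#define MY_GFP_KERNEL ((__force gfp_t)0x10u)  /* illustrative value */

	static void *my_alloc(unsigned long size, gfp_t flags)
	{
		(void)flags;               /* a real allocator would honour these */
		return malloc(size);
	}

	/* my_alloc(64, MY_GFP_KERNEL)  passes sparse cleanly          */
	/* my_alloc(64, 0x10)           warns: incorrect type in arg 2 */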
@@ -1185,7 +1183,7 @@ __initcall(cpucache_init);
  * did not request dmaable memory, we might get it, but that
  * would be relatively rare and ignorable.
  */
-static void *kmem_getpages(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
+static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
 	struct page *page;
 	void *addr;
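The comment fragment above is making the point that a cache created without a DMA requirement may still be handed pages that happen to lie in the DMA zone; the zone acts as a capability bound, not an exact placement, so this is harmless. Just below this hunk, kmem_getpages folds the cache's own GFP bits into the per-call flags before asking the page allocator; roughly, as a sketch of the 2.6.14-era body rather than a verbatim quote:

	flags |= cachep->gfpflags;     /* e.g. GFP_DMA for SLAB_CACHE_DMA caches */
	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
	if (!page)
		return NULL;
	addr = page_address(page);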
@@ -2048,7 +2046,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 
 /* Get the memory for a slab management obj. */
 static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
-			int colour_off, unsigned int __nocast local_flags)
+			int colour_off, gfp_t local_flags)
 {
 	struct slab *slabp;
 
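For context on local_flags: the slab management structure either lives at the (colour-offset) start of the slab's own pages, or, for large objects, comes from a separate cache so it does not waste a stripe of every slab. Only the off-slab case performs a real allocation, which is why GFP flags reach this helper at all. A rough sketch of that choice, assuming the era's OFF_SLAB() test and slabp_cache field:

	if (OFF_SLAB(cachep)) {
		/* management data allocated from a separate cache */
		slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
		if (!slabp)
			return NULL;
	} else {
		/* management data embedded at the start of the slab */
		slabp = objp + colour_off;
		colour_off += cachep->slab_size;
	}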
@@ -2149,7 +2147,7 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
  * Grow (by 1) the number of slabs within a cache. This is called by
  * kmem_cache_alloc() when there are no active objs left in a cache.
  */
-static int cache_grow(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
+static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
 	struct slab *slabp;
 	void *objp;
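cache_grow is the slow path behind every allocation entry point in this file: when the per-CPU array and the partial/free slab lists are exhausted, one more slab's worth of pages is requested and carved into objects. Its skeleton, with locking, locals and error handling elided (a sketch, not verbatim):

	objp = kmem_getpages(cachep, flags, nodeid);     /* fresh pages      */
	slabp = alloc_slabmgmt(cachep, objp, offset, local_flags);
	set_slab_attr(cachep, slabp, objp);              /* map page -> slab */
	cache_init_objs(cachep, slabp, ctor_flags);      /* run constructors */
	list_add_tail(&slabp->list, &l3->slabs_free);    /* publish the slab */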
@@ -2356,7 +2354,7 @@ bad:
 #define check_slabp(x,y) do { } while(0)
 #endif
 
-static void *cache_alloc_refill(kmem_cache_t *cachep, unsigned int __nocast flags)
+static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
 {
 	int batchcount;
 	struct kmem_list3 *l3;
@@ -2456,7 +2454,7 @@ alloc_done:
 }
 
 static inline void
-cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags)
+cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
 {
 	might_sleep_if(flags & __GFP_WAIT);
 #if DEBUG
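The might_sleep_if(flags & __GFP_WAIT) line is the contract check: passing a blocking flag set asserts that the caller runs where sleeping is legal, and debug builds complain at the offending call site otherwise. In caller terms:

	buf = kmalloc(128, GFP_KERNEL);  /* __GFP_WAIT set: may sleep,
	                                    process context only */
	buf = kmalloc(128, GFP_ATOMIC);  /* no __GFP_WAIT: usable from
	                                    interrupt/atomic context */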
@@ -2467,7 +2465,7 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags)
 #if DEBUG
 static void *
 cache_alloc_debugcheck_after(kmem_cache_t *cachep,
-			unsigned int __nocast flags, void *objp, void *caller)
+			gfp_t flags, void *objp, void *caller)
 {
 	if (!objp)
 		return objp;
@@ -2510,16 +2508,12 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep,
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
 
-
-static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
+static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 {
-	unsigned long save_flags;
 	void* objp;
 	struct array_cache *ac;
 
-	cache_alloc_debugcheck_before(cachep, flags);
-
-	local_irq_save(save_flags);
+	check_irq_off();
 	ac = ac_data(cachep);
 	if (likely(ac->avail)) {
 		STATS_INC_ALLOCHIT(cachep);
@@ -2529,6 +2523,18 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast fl
 		STATS_INC_ALLOCMISS(cachep);
 		objp = cache_alloc_refill(cachep, flags);
 	}
+	return objp;
+}
+
+static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+{
+	unsigned long save_flags;
+	void* objp;
+
+	cache_alloc_debugcheck_before(cachep, flags);
+
+	local_irq_save(save_flags);
+	objp = ____cache_alloc(cachep, flags);
 	local_irq_restore(save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp,
 						__builtin_return_address(0));
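These two hunks are the core refactor of the patch. The old __cache_alloc mixed two concerns: debug hooks plus interrupt discipline on one hand, and the per-CPU array-cache lookup on the other. The lookup now lives in ____cache_alloc, which merely asserts check_irq_off() instead of doing its own local_irq_save, so any caller that already has interrupts disabled can reuse the fast path; __cache_alloc survives as the wrapper that establishes that context. The shape of the pattern, with hypothetical worker/wrapper names:

	/* worker: caller guarantees local interrupts are off */
	static inline void *worker(kmem_cache_t *cachep, gfp_t flags)
	{
		void *objp = NULL;        /* per-CPU fast path or refill here */

		check_irq_off();          /* assert the context, don't create it */
		return objp;
	}

	/* wrapper: establishes the context the worker assumes */
	static inline void *wrapper(kmem_cache_t *cachep, gfp_t flags)
	{
		unsigned long save_flags;
		void *objp;

		local_irq_save(save_flags);
		objp = worker(cachep, flags);
		local_irq_restore(save_flags);
		return objp;
	}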
@@ -2779,7 +2785,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
  * Allocate an object from this cache. The flags are only relevant
  * if the cache has no available objects.
  */
-void *kmem_cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
+void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 {
 	return __cache_alloc(cachep, flags);
 }
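Only the prototype changes here; callers already using the GFP_* constants compile unchanged. A hypothetical caller against the 2.6.14-era API (kmem_cache_create still took constructor and destructor arguments then; the foo names are illustrative):

	static kmem_cache_t *foo_cachep;
	struct foo *f;

	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
				       0, 0, NULL, NULL);
	f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
	if (f) {
		/* ... use f ... */
		kmem_cache_free(foo_cachep, f);
	}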
@@ -2840,7 +2846,7 @@ out:
  * New and improved: it will now make sure that the object gets
  * put on the correct node list so that there is no false sharing.
  */
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
+void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
 	unsigned long save_flags;
 	void *ptr;
@@ -2856,7 +2862,10 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, i
 
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
-	ptr = __cache_alloc_node(cachep, flags, nodeid);
+	if (nodeid == numa_node_id())
+		ptr = ____cache_alloc(cachep, flags);
+	else
+		ptr = __cache_alloc_node(cachep, flags, nodeid);
 	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, __builtin_return_address(0));
 
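This is the one behavioural change in the patch: a request for the local node used to go through the remote-node machinery anyway, whereas now the nodeid == numa_node_id() case takes the same per-CPU fast path as kmem_cache_alloc. A hypothetical caller that spreads per-node state, where foo_cachep, struct foo_state and per_node_state are illustrative:

	int node;

	for_each_online_node(node) {
		struct foo_state *st;

		st = kmem_cache_alloc_node(foo_cachep, GFP_KERNEL, node);
		if (!st)
			goto fail;
		per_node_state[node] = st;  /* node-local: no cross-node
					       traffic on the hot path */
	}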
@@ -2864,7 +2873,7 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, i
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-void *kmalloc_node(size_t size, unsigned int __nocast flags, int node)
+void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	kmem_cache_t *cachep;
 
@@ -2897,7 +2906,7 @@ EXPORT_SYMBOL(kmalloc_node);
  * platforms. For example, on i386, it means that the memory must come
  * from the first 16MB.
  */
-void *__kmalloc(size_t size, unsigned int __nocast flags)
+void *__kmalloc(size_t size, gfp_t flags)
 {
 	kmem_cache_t *cachep;
 
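The GFP_DMA behaviour the comment describes is unchanged by this patch; the bit simply rides inside gfp_t now. In caller terms, for a device limited to ISA-style 24-bit addressing:

	/* buffer must be reachable below 16MB on i386 */
	void *dma_buf = kmalloc(512, GFP_KERNEL | GFP_DMA);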
@@ -2986,7 +2995,7 @@ EXPORT_SYMBOL(kmem_cache_free);
  * @size: how many bytes of memory are required.
  * @flags: the type of memory to allocate.
  */
-void *kzalloc(size_t size, unsigned int __nocast flags)
+void *kzalloc(size_t size, gfp_t flags)
 {
 	void *ret = kmalloc(size, flags);
 	if (ret)
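kzalloc is exactly the two-step the hunk suggests: a kmalloc followed by a memset on success, i.e. the kernel-side analogue of calloc with an explicit flags argument. The open-coded equivalent of kzalloc(size, flags):

	p = kmalloc(size, flags);
	if (p)
		memset(p, 0, size);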
@@ -3592,7 +3601,7 @@ unsigned int ksize(const void *objp)
  * @s: the string to duplicate
  * @gfp: the GFP mask used in the kmalloc() call when allocating memory
  */
-char *kstrdup(const char *s, unsigned int __nocast gfp)
+char *kstrdup(const char *s, gfp_t gfp)
 {
 	size_t len;
 	char *buf;
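kstrdup rounds out the conversion: string helpers take the same gfp_t as the allocators they wrap, so the caller's context constraints flow all the way down. Typical use, with illustrative names:

	char *label;

	label = kstrdup(src_name, GFP_KERNEL);
	if (!label)
		return -ENOMEM;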
