Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	34
1 files changed, 16 insertions, 18 deletions
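
This commit is one step of the tree-wide gfp_t conversion: every slab-allocator parameter that carries allocation flags changes from the open-coded "unsigned int __nocast" annotation to the gfp_t typedef. As a minimal sketch of what the new prototypes rely on, assuming the sparse annotation macros of this era (simplified; the authoritative definitions live in the kernel headers, not in this diff):

	/*
	 * Sketch, not verbatim: under sparse (__CHECKER__), the nocast
	 * attribute makes gfp_t resist silent mixing with ordinary
	 * integers, so a prototype such as kmalloc(size_t, gfp_t) both
	 * documents and type-checks its flags argument. Later in this
	 * series the typedef was tightened further to "unsigned __bitwise".
	 */
	#ifdef __CHECKER__
	#define __nocast __attribute__((nocast))
	#else
	#define __nocast
	#endif

	typedef unsigned int __nocast gfp_t;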
diff --git a/mm/slab.c b/mm/slab.c
index 5cbbdfa6dd0e..d05c678bceb3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -650,8 +650,7 @@ static inline struct array_cache *ac_data(kmem_cache_t *cachep)
 	return cachep->array[smp_processor_id()];
 }
 
-static inline kmem_cache_t *__find_general_cachep(size_t size,
-		unsigned int __nocast gfpflags)
+static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
 {
 	struct cache_sizes *csizep = malloc_sizes;
 
@@ -675,8 +674,7 @@ static inline kmem_cache_t *__find_general_cachep(size_t size,
 	return csizep->cs_cachep;
 }
 
-kmem_cache_t *kmem_find_general_cachep(size_t size,
-		unsigned int __nocast gfpflags)
+kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 {
 	return __find_general_cachep(size, gfpflags);
 }
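
The body elided between these two hunks (lines 657-674) is where __find_general_cachep() maps a request size onto one of the fixed-size kmalloc caches. A rough sketch of that lookup, inferred from the visible context lines (malloc_sizes, cs_cachep) rather than quoted from the elided diff:

	/* Walk to the first generic cache large enough for the request.
	 * The malloc_sizes[] table is terminated by a huge cs_size, so
	 * the loop always stops; DMA requests get the DMA twin cache.
	 */
	while (size > csizep->cs_size)
		csizep++;
	if (gfpflags & GFP_DMA)
		return csizep->cs_dmacachep;
	return csizep->cs_cachep;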
@@ -1185,7 +1183,7 @@ __initcall(cpucache_init);
  * did not request dmaable memory, we might get it, but that
  * would be relatively rare and ignorable.
  */
-static void *kmem_getpages(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
+static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
 	struct page *page;
 	void *addr;
@@ -2048,7 +2046,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 
 /* Get the memory for a slab management obj. */
 static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
-			int colour_off, unsigned int __nocast local_flags)
+			int colour_off, gfp_t local_flags)
 {
 	struct slab *slabp;
 
@@ -2149,7 +2147,7 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
  * Grow (by 1) the number of slabs within a cache. This is called by
  * kmem_cache_alloc() when there are no active objs left in a cache.
  */
-static int cache_grow(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
+static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
 	struct slab *slabp;
 	void *objp;
@@ -2356,7 +2354,7 @@ bad:
 #define check_slabp(x,y) do { } while(0)
 #endif
 
-static void *cache_alloc_refill(kmem_cache_t *cachep, unsigned int __nocast flags)
+static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
 {
 	int batchcount;
 	struct kmem_list3 *l3;
@@ -2456,7 +2454,7 @@ alloc_done:
 }
 
 static inline void
-cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags)
+cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
 {
 	might_sleep_if(flags & __GFP_WAIT);
 #if DEBUG
@@ -2467,7 +2465,7 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags)
 #if DEBUG
 static void *
 cache_alloc_debugcheck_after(kmem_cache_t *cachep,
-			unsigned int __nocast flags, void *objp, void *caller)
+			gfp_t flags, void *objp, void *caller)
 {
 	if (!objp)
 		return objp;
@@ -2510,7 +2508,7 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep,
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
 
-static inline void *____cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
+static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 {
 	void* objp;
 	struct array_cache *ac;
@@ -2528,7 +2526,7 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, unsigned int __nocast
 	return objp;
 }
 
-static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
+static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 {
 	unsigned long save_flags;
 	void* objp;
@@ -2787,7 +2785,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
  * Allocate an object from this cache. The flags are only relevant
  * if the cache has no available objects.
  */
-void *kmem_cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
+void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 {
 	return __cache_alloc(cachep, flags);
 }
@@ -2848,7 +2846,7 @@ out:
  * New and improved: it will now make sure that the object gets
  * put on the correct node list so that there is no false sharing.
  */
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
+void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
 	unsigned long save_flags;
 	void *ptr;
@@ -2875,7 +2873,7 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, i
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-void *kmalloc_node(size_t size, unsigned int __nocast flags, int node)
+void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	kmem_cache_t *cachep;
 
@@ -2908,7 +2906,7 @@ EXPORT_SYMBOL(kmalloc_node);
  * platforms. For example, on i386, it means that the memory must come
  * from the first 16MB.
  */
-void *__kmalloc(size_t size, unsigned int __nocast flags)
+void *__kmalloc(size_t size, gfp_t flags)
 {
 	kmem_cache_t *cachep;
 
@@ -2997,7 +2995,7 @@ EXPORT_SYMBOL(kmem_cache_free);
  * @size: how many bytes of memory are required.
  * @flags: the type of memory to allocate.
  */
-void *kzalloc(size_t size, unsigned int __nocast flags)
+void *kzalloc(size_t size, gfp_t flags)
 {
 	void *ret = kmalloc(size, flags);
 	if (ret)
@@ -3603,7 +3601,7 @@ unsigned int ksize(const void *objp)
  * @s: the string to duplicate
  * @gfp: the GFP mask used in the kmalloc() call when allocating memory
  */
-char *kstrdup(const char *s, unsigned int __nocast gfp)
+char *kstrdup(const char *s, gfp_t gfp)
 {
 	size_t len;
 	char *buf;
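
Call sites are unchanged at the source level by this conversion; only the declared type of the flags argument differs. For illustration, hypothetical callers against the converted prototypes (struct foo and the node id 0 are invented for the sketch):

	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);    /* may sleep */
	struct foo *a = kmalloc(sizeof(*a), GFP_ATOMIC);    /* no sleeping, e.g. IRQ context */
	void *near = kmalloc_node(64, GFP_KERNEL, 0);       /* prefer memory on node 0 */
	char *name = kstrdup("example", GFP_KERNEL);        /* kmalloc()-backed copy */

Once the typedef was later tightened to __bitwise, sparse flags any bare integer passed where one of these gfp_t parameters is expected, which is the payoff of threading the typedef through every signature above.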