Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c | 100
1 files changed, 55 insertions, 45 deletions
@@ -308,12 +308,12 @@ struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
 #define SIZE_L3 (1 + MAX_NUMNODES)
 
 /*
- * This function may be completely optimized away if
+ * This function must be completely optimized away if
  * a constant is passed to it. Mostly the same as
  * what is in linux/slab.h except it returns an
  * index.
  */
-static inline int index_of(const size_t size)
+static __always_inline int index_of(const size_t size)
 {
         if (__builtin_constant_p(size)) {
                 int i = 0;
@@ -329,7 +329,8 @@ static inline int index_of(const size_t size)
                         extern void __bad_size(void);
                         __bad_size();
                 }
-        }
+        } else
+                BUG();
         return 0;
 }
 
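A note on the two index_of() changes above: __builtin_constant_p() lets the compiler fold the size-comparison chain down to a single integer whenever the caller passes a constant size, and switching to __always_inline plus the added "else BUG()" makes explicit that the function is only meant to be used that way (the undefined __bad_size() reference becomes a link error if the folding ever fails). The sketch below is a stand-alone userspace illustration of the same folding technique; the thresholds and names are invented for illustration and are not the kernel's.

/* Minimal sketch (not the kernel code) of the technique index_of() relies on:
 * when the argument is a compile-time constant, __builtin_constant_p() lets
 * the whole if-chain fold to a single integer, and forcing the inline means
 * no out-of-line copy of the function survives. */
#include <stdio.h>

static inline __attribute__((always_inline)) int size_to_index(unsigned long size)
{
        if (__builtin_constant_p(size)) {
                if (size <= 32)
                        return 0;
                if (size <= 64)
                        return 1;
                if (size <= 128)
                        return 2;
                /* the kernel version diverges to a link error here */
        }
        return -1;      /* runtime fallback; the patched kernel code BUG()s instead */
}

int main(void)
{
        /* folds to "return 1" at compile time with any optimizing gcc/clang */
        printf("%d\n", size_to_index(64));
        return 0;
}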
@@ -639,7 +640,7 @@ static enum {
 
 static DEFINE_PER_CPU(struct work_struct, reap_work);
 
-static void free_block(kmem_cache_t* cachep, void** objpp, int len);
+static void free_block(kmem_cache_t* cachep, void** objpp, int len, int node);
 static void enable_cpucache (kmem_cache_t *cachep);
 static void cache_reap (void *unused);
 static int __node_shrink(kmem_cache_t *cachep, int node);
@@ -649,8 +650,7 @@ static inline struct array_cache *ac_data(kmem_cache_t *cachep)
         return cachep->array[smp_processor_id()];
 }
 
-static inline kmem_cache_t *__find_general_cachep(size_t size,
-                unsigned int __nocast gfpflags)
+static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
 {
         struct cache_sizes *csizep = malloc_sizes;
 
@@ -674,8 +674,7 @@ static inline kmem_cache_t *__find_general_cachep(size_t size,
         return csizep->cs_cachep;
 }
 
-kmem_cache_t *kmem_find_general_cachep(size_t size,
-                unsigned int __nocast gfpflags)
+kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 {
         return __find_general_cachep(size, gfpflags);
 }
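Many of the remaining hunks are the mechanical conversion of allocation-flag parameters from "unsigned int __nocast" to gfp_t. The snippet below sketches, in simplified form, what that type amounts to: under sparse ("make C=1") the annotation makes gfp_t behave as a distinct type, so mixing allocation flags with arbitrary integers can be reported, while an ordinary compile sees a plain unsigned int. The exact definitions live in the kernel headers and may differ in detail from this sketch; toy_alloc() is a hypothetical prototype added only to show the intent.

/* Simplified sketch of the gfp_t annotation this conversion relies on. */
#ifdef __CHECKER__
#define __nocast __attribute__((nocast))        /* seen only by sparse */
#else
#define __nocast                                /* compiles away otherwise */
#endif

typedef unsigned int __nocast gfp_t;

/* Hypothetical prototype: callers must now pass something that is already
 * a gfp_t, not an arbitrary unsigned int. */
void *toy_alloc(unsigned long size, gfp_t flags);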
@@ -804,7 +803,7 @@ static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache
 
         if (ac->avail) {
                 spin_lock(&rl3->list_lock);
-                free_block(cachep, ac->entry, ac->avail);
+                free_block(cachep, ac->entry, ac->avail, node);
                 ac->avail = 0;
                 spin_unlock(&rl3->list_lock);
         }
@@ -925,7 +924,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                 /* Free limit for this kmem_list3 */
                 l3->free_limit -= cachep->batchcount;
                 if (nc)
-                        free_block(cachep, nc->entry, nc->avail);
+                        free_block(cachep, nc->entry, nc->avail, node);
 
                 if (!cpus_empty(mask)) {
                         spin_unlock(&l3->list_lock);
@@ -934,7 +933,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 
                 if (l3->shared) {
                         free_block(cachep, l3->shared->entry,
-                                        l3->shared->avail);
+                                        l3->shared->avail, node);
                         kfree(l3->shared);
                         l3->shared = NULL;
                 }
@@ -1184,7 +1183,7 @@ __initcall(cpucache_init);
  * did not request dmaable memory, we might get it, but that
  * would be relatively rare and ignorable.
  */
-static void *kmem_getpages(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
+static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
         struct page *page;
         void *addr;
@@ -1882,12 +1881,13 @@ static void do_drain(void *arg)
 {
         kmem_cache_t *cachep = (kmem_cache_t*)arg;
         struct array_cache *ac;
+        int node = numa_node_id();
 
         check_irq_off();
         ac = ac_data(cachep);
-        spin_lock(&cachep->nodelists[numa_node_id()]->list_lock);
-        free_block(cachep, ac->entry, ac->avail);
-        spin_unlock(&cachep->nodelists[numa_node_id()]->list_lock);
+        spin_lock(&cachep->nodelists[node]->list_lock);
+        free_block(cachep, ac->entry, ac->avail, node);
+        spin_unlock(&cachep->nodelists[node]->list_lock);
         ac->avail = 0;
 }
 
@@ -2046,7 +2046,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 
 /* Get the memory for a slab management obj. */
 static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
-                        int colour_off, unsigned int __nocast local_flags)
+                        int colour_off, gfp_t local_flags)
 {
         struct slab *slabp;
 
@@ -2147,7 +2147,7 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
  * Grow (by 1) the number of slabs within a cache. This is called by
  * kmem_cache_alloc() when there are no active objs left in a cache.
  */
-static int cache_grow(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
+static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
         struct slab *slabp;
         void *objp;
@@ -2354,7 +2354,7 @@ bad:
 #define check_slabp(x,y) do { } while(0)
 #endif
 
-static void *cache_alloc_refill(kmem_cache_t *cachep, unsigned int __nocast flags)
+static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
 {
         int batchcount;
         struct kmem_list3 *l3;
@@ -2454,7 +2454,7 @@ alloc_done:
 }
 
 static inline void
-cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags)
+cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
 {
         might_sleep_if(flags & __GFP_WAIT);
 #if DEBUG
@@ -2465,7 +2465,7 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags)
 #if DEBUG
 static void *
 cache_alloc_debugcheck_after(kmem_cache_t *cachep,
-                        unsigned int __nocast flags, void *objp, void *caller)
+                        gfp_t flags, void *objp, void *caller)
 {
         if (!objp)
                 return objp;
@@ -2508,16 +2508,12 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep,
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
 
-
-static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
+static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 {
-        unsigned long save_flags;
         void* objp;
         struct array_cache *ac;
 
-        cache_alloc_debugcheck_before(cachep, flags);
-
-        local_irq_save(save_flags);
+        check_irq_off();
         ac = ac_data(cachep);
         if (likely(ac->avail)) {
                 STATS_INC_ALLOCHIT(cachep);
@@ -2527,6 +2523,18 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast fl
                 STATS_INC_ALLOCMISS(cachep);
                 objp = cache_alloc_refill(cachep, flags);
         }
+        return objp;
+}
+
+static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+{
+        unsigned long save_flags;
+        void* objp;
+
+        cache_alloc_debugcheck_before(cachep, flags);
+
+        local_irq_save(save_flags);
+        objp = ____cache_alloc(cachep, flags);
         local_irq_restore(save_flags);
         objp = cache_alloc_debugcheck_after(cachep, flags, objp,
                                         __builtin_return_address(0));
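The two hunks above split the old __cache_alloc() into an irq-off fast path, ____cache_alloc(), and a wrapper that keeps the debug check plus the local_irq_save()/restore() pair. The point of the split is that kmem_cache_alloc_node(), further down in this diff, can reuse the per-CPU fast path for local-node requests while it already has interrupts disabled. Below is a small stand-alone userspace analogue of the pattern; every name in it is invented for illustration and none of it is kernel code.

/* Toy model of the refactor: the fast path assumes the caller already
 * disabled "interrupts", and the public wrapper owns the save/restore. */
#include <stdio.h>

static int irqs_off;                            /* stand-in for local irq state */

static void fake_irq_save(void)    { irqs_off = 1; }
static void fake_irq_restore(void) { irqs_off = 0; }

/* fast path: caller must have "interrupts" disabled already */
static void *alloc_fast_path(void)
{
        if (!irqs_off) {
                fprintf(stderr, "bug: fast path entered with irqs on\n");
                return NULL;
        }
        return &irqs_off;                       /* dummy object */
}

/* public wrapper: disables, calls the fast path, re-enables */
static void *alloc_wrapper(void)
{
        void *obj;

        fake_irq_save();
        obj = alloc_fast_path();
        fake_irq_restore();
        return obj;
}

int main(void)
{
        printf("got %p\n", alloc_wrapper());
        return 0;
}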
@@ -2608,7 +2616,7 @@ done:
 /*
  * Caller needs to acquire correct kmem_list's list_lock
  */
-static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects)
+static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int node)
 {
         int i;
         struct kmem_list3 *l3;
@@ -2617,14 +2625,12 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects)
                 void *objp = objpp[i];
                 struct slab *slabp;
                 unsigned int objnr;
-                int nodeid = 0;
 
                 slabp = GET_PAGE_SLAB(virt_to_page(objp));
-                nodeid = slabp->nodeid;
-                l3 = cachep->nodelists[nodeid];
+                l3 = cachep->nodelists[node];
                 list_del(&slabp->list);
                 objnr = (objp - slabp->s_mem) / cachep->objsize;
-                check_spinlock_acquired_node(cachep, nodeid);
+                check_spinlock_acquired_node(cachep, node);
                 check_slabp(cachep, slabp);
 
 
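Taken together, the two free_block() hunks above change its contract: the caller now names the node whose kmem_list3 it has locked and whose slabs the objects being returned belong to, and the per-object slabp->nodeid lookup disappears from the loop. The block below merely restates the new declaration from earlier in this diff with descriptive comments; it is annotation, not additional patch content.

static void free_block(kmem_cache_t *cachep,    /* cache the objects belong to        */
                       void **objpp,            /* array of objects to give back      */
                       int nr_objects,          /* number of objects in objpp         */
                       int node);               /* node whose list_lock the caller holds */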
@@ -2664,13 +2670,14 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
 {
         int batchcount;
         struct kmem_list3 *l3;
+        int node = numa_node_id();
 
         batchcount = ac->batchcount;
 #if DEBUG
         BUG_ON(!batchcount || batchcount > ac->avail);
 #endif
         check_irq_off();
-        l3 = cachep->nodelists[numa_node_id()];
+        l3 = cachep->nodelists[node];
         spin_lock(&l3->list_lock);
         if (l3->shared) {
                 struct array_cache *shared_array = l3->shared;
@@ -2686,7 +2693,7 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
                 }
         }
 
-        free_block(cachep, ac->entry, batchcount);
+        free_block(cachep, ac->entry, batchcount, node);
 free_done:
 #if STATS
         {
@@ -2751,7 +2758,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
                 } else {
                         spin_lock(&(cachep->nodelists[nodeid])->
                                         list_lock);
-                        free_block(cachep, &objp, 1);
+                        free_block(cachep, &objp, 1, nodeid);
                         spin_unlock(&(cachep->nodelists[nodeid])->
                                         list_lock);
                 }
@@ -2778,7 +2785,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
  * Allocate an object from this cache. The flags are only relevant
  * if the cache has no available objects.
  */
-void *kmem_cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
+void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 {
         return __cache_alloc(cachep, flags);
 }
@@ -2839,12 +2846,12 @@ out:
  * New and improved: it will now make sure that the object gets
  * put on the correct node list so that there is no false sharing.
  */
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
+void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
         unsigned long save_flags;
         void *ptr;
 
-        if (nodeid == numa_node_id() || nodeid == -1)
+        if (nodeid == -1)
                 return __cache_alloc(cachep, flags);
 
         if (unlikely(!cachep->nodelists[nodeid])) {
@@ -2855,7 +2862,10 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, i
 
         cache_alloc_debugcheck_before(cachep, flags);
         local_irq_save(save_flags);
-        ptr = __cache_alloc_node(cachep, flags, nodeid);
+        if (nodeid == numa_node_id())
+                ptr = ____cache_alloc(cachep, flags);
+        else
+                ptr = __cache_alloc_node(cachep, flags, nodeid);
         local_irq_restore(save_flags);
         ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, __builtin_return_address(0));
 
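With the hunk above, a request for the caller's own node no longer falls back to the slower __cache_alloc_node() list walk: the nodeid == numa_node_id() case is detected after interrupts are disabled and routed through ____cache_alloc(), the same per-CPU array fast path used by kmem_cache_alloc(). Callers are unaffected at the source level; the fragment below is only a usage illustration, with my_cachep standing in for a cache created elsewhere.

/* Usage is unchanged; only the internal path taken differs.
 * 'my_cachep' is a placeholder cache pointer, not from this patch. */
void *obj = kmem_cache_alloc_node(my_cachep, GFP_KERNEL, numa_node_id());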
@@ -2863,7 +2873,7 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, i
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-void *kmalloc_node(size_t size, unsigned int __nocast flags, int node)
+void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
         kmem_cache_t *cachep;
 
@@ -2896,7 +2906,7 @@ EXPORT_SYMBOL(kmalloc_node);
  * platforms. For example, on i386, it means that the memory must come
  * from the first 16MB.
  */
-void *__kmalloc(size_t size, unsigned int __nocast flags)
+void *__kmalloc(size_t size, gfp_t flags)
 {
         kmem_cache_t *cachep;
 
@@ -2985,7 +2995,7 @@ EXPORT_SYMBOL(kmem_cache_free);
  * @size: how many bytes of memory are required.
  * @flags: the type of memory to allocate.
  */
-void *kzalloc(size_t size, unsigned int __nocast flags)
+void *kzalloc(size_t size, gfp_t flags)
 {
         void *ret = kmalloc(size, flags);
         if (ret)
@@ -3079,7 +3089,7 @@ static int alloc_kmemlist(kmem_cache_t *cachep)
 
                         if ((nc = cachep->nodelists[node]->shared))
                                 free_block(cachep, nc->entry,
-                                                        nc->avail);
+                                                        nc->avail, node);
 
                         l3->shared = new;
                         if (!cachep->nodelists[node]->alien) {
@@ -3160,7 +3170,7 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
                 if (!ccold)
                         continue;
                 spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
-                free_block(cachep, ccold->entry, ccold->avail);
+                free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
                 spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
                 kfree(ccold);
         }
@@ -3240,7 +3250,7 @@ static void drain_array_locked(kmem_cache_t *cachep,
                 if (tofree > ac->avail) {
                         tofree = (ac->avail+1)/2;
                 }
-                free_block(cachep, ac->entry, tofree);
+                free_block(cachep, ac->entry, tofree, node);
                 ac->avail -= tofree;
                 memmove(ac->entry, &(ac->entry[tofree]),
                                         sizeof(void*)*ac->avail);
@@ -3591,7 +3601,7 @@ unsigned int ksize(const void *objp)
  * @s: the string to duplicate
  * @gfp: the GFP mask used in the kmalloc() call when allocating memory
  */
-char *kstrdup(const char *s, unsigned int __nocast gfp)
+char *kstrdup(const char *s, gfp_t gfp)
 {
         size_t len;
         char *buf;