Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	304
1 file changed, 211 insertions(+), 93 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 7cb4bf9ae320..46997517406e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1065,11 +1065,15 @@ bad:
 	return 0;
 }
 
+/* Supports checking bulk free of a constructed freelist */
 static noinline struct kmem_cache_node *free_debug_processing(
-	struct kmem_cache *s, struct page *page, void *object,
+	struct kmem_cache *s, struct page *page,
+	void *head, void *tail, int bulk_cnt,
 	unsigned long addr, unsigned long *flags)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+	void *object = head;
+	int cnt = 0;
 
 	spin_lock_irqsave(&n->list_lock, *flags);
 	slab_lock(page);
@@ -1077,6 +1081,9 @@ static noinline struct kmem_cache_node *free_debug_processing(
 	if (!check_slab(s, page))
 		goto fail;
 
+next_object:
+	cnt++;
+
 	if (!check_valid_pointer(s, page, object)) {
 		slab_err(s, page, "Invalid object pointer 0x%p", object);
 		goto fail;
@@ -1107,8 +1114,19 @@ static noinline struct kmem_cache_node *free_debug_processing(
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, object, TRACK_FREE, addr);
 	trace(s, page, object, 0);
+	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
 	init_object(s, object, SLUB_RED_INACTIVE);
+
+	/* Reached end of constructed freelist yet? */
+	if (object != tail) {
+		object = get_freepointer(s, object);
+		goto next_object;
+	}
 out:
+	if (cnt != bulk_cnt)
+		slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
+			 bulk_cnt, cnt);
+
 	slab_unlock(page);
 	/*
 	 * Keep node_lock to preserve integrity
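Editorial aside: the hunk above makes free_debug_processing() walk a caller-constructed freelist from head to tail, counting objects and comparing the count against bulk_cnt. A minimal user-space sketch of that walk-and-count check follows; struct obj and its next field are hypothetical stand-ins for a slab object and the free pointer that get_freepointer() would read, not kernel code.

/*
 * User-space sketch only: "struct obj" and "next" stand in for a slab object
 * and its in-object free pointer; this is not kernel code.
 */
#include <stdio.h>

struct obj {
	struct obj *next;	/* stand-in for the in-object free pointer */
};

/* Return 0 when the chain from head to tail holds exactly bulk_cnt objects. */
static int check_bulk_freelist(struct obj *head, struct obj *tail, int bulk_cnt)
{
	struct obj *object = head;
	int cnt = 0;

	for (;;) {
		cnt++;			/* one object visited */
		if (object == tail)	/* reached end of constructed freelist */
			break;
		object = object->next;
		if (!object)		/* malformed chain: tail never reached */
			return -1;
	}
	return (cnt == bulk_cnt) ? 0 : -1;
}

int main(void)
{
	struct obj a, b, c;

	a.next = &b;
	b.next = &c;
	c.next = NULL;
	printf("count matches: %s\n",
	       check_bulk_freelist(&a, &c, 3) == 0 ? "yes" : "no");
	return 0;
}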
@@ -1204,7 +1222,7 @@ unsigned long kmem_cache_flags(unsigned long object_size,
 
 	return flags;
 }
-#else
+#else /* !CONFIG_SLUB_DEBUG */
 static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}
 
@@ -1212,7 +1230,8 @@ static inline int alloc_debug_processing(struct kmem_cache *s,
 	struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline struct kmem_cache_node *free_debug_processing(
-	struct kmem_cache *s, struct page *page, void *object,
+	struct kmem_cache *s, struct page *page,
+	void *head, void *tail, int bulk_cnt,
 	unsigned long addr, unsigned long *flags) { return NULL; }
 
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
@@ -1273,14 +1292,21 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
 	return memcg_kmem_get_cache(s, flags);
 }
 
-static inline void slab_post_alloc_hook(struct kmem_cache *s,
-					gfp_t flags, void *object)
+static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
+					size_t size, void **p)
 {
+	size_t i;
+
 	flags &= gfp_allowed_mask;
-	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
-	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
+	for (i = 0; i < size; i++) {
+		void *object = p[i];
+
+		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
+		kmemleak_alloc_recursive(object, s->object_size, 1,
+					 s->flags, flags);
+		kasan_slab_alloc(s, object);
+	}
 	memcg_kmem_put_cache(s);
-	kasan_slab_alloc(s, object);
 }
 
 static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -1308,6 +1334,29 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 	kasan_slab_free(s, x);
 }
 
+static inline void slab_free_freelist_hook(struct kmem_cache *s,
+					   void *head, void *tail)
+{
+/*
+ * Compiler cannot detect this function can be removed if slab_free_hook()
+ * evaluates to nothing. Thus, catch all relevant config debug options here.
+ */
+#if defined(CONFIG_KMEMCHECK) ||		\
+	defined(CONFIG_LOCKDEP)	||		\
+	defined(CONFIG_DEBUG_KMEMLEAK) ||	\
+	defined(CONFIG_DEBUG_OBJECTS_FREE) ||	\
+	defined(CONFIG_KASAN)
+
+	void *object = head;
+	void *tail_obj = tail ? : head;
+
+	do {
+		slab_free_hook(s, object);
+	} while ((object != tail_obj) &&
+		 (object = get_freepointer(s, object)));
+#endif
+}
+
 static void setup_object(struct kmem_cache *s, struct page *page,
 				void *object)
 {
@@ -2295,23 +2344,15 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
  * And if we were unable to get a new slab from the partial slab lists then
  * we need to allocate a new slab. This is the slowest path since it involves
  * a call to the page allocator and the setup of a new slab.
+ *
+ * Version of __slab_alloc to use when we know that interrupts are
+ * already disabled (which is the case for bulk allocation).
  */
-static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			  unsigned long addr, struct kmem_cache_cpu *c)
 {
 	void *freelist;
 	struct page *page;
-	unsigned long flags;
-
-	local_irq_save(flags);
-#ifdef CONFIG_PREEMPT
-	/*
-	 * We may have been preempted and rescheduled on a different
-	 * cpu before disabling interrupts. Need to reload cpu area
-	 * pointer.
-	 */
-	c = this_cpu_ptr(s->cpu_slab);
-#endif
 
 	page = c->page;
 	if (!page)
@@ -2369,7 +2410,6 @@ load_freelist:
 	VM_BUG_ON(!c->page->frozen);
 	c->freelist = get_freepointer(s, freelist);
 	c->tid = next_tid(c->tid);
-	local_irq_restore(flags);
 	return freelist;
 
 new_slab:
@@ -2386,7 +2426,6 @@ new_slab:
 
 	if (unlikely(!freelist)) {
 		slab_out_of_memory(s, gfpflags, node);
-		local_irq_restore(flags);
 		return NULL;
 	}
 
@@ -2402,11 +2441,35 @@ new_slab:
 	deactivate_slab(s, page, get_freepointer(s, freelist));
 	c->page = NULL;
 	c->freelist = NULL;
-	local_irq_restore(flags);
 	return freelist;
 }
 
 /*
+ * Another one that disabled interrupt and compensates for possible
+ * cpu changes by refetching the per cpu area pointer.
+ */
+static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+			  unsigned long addr, struct kmem_cache_cpu *c)
+{
+	void *p;
+	unsigned long flags;
+
+	local_irq_save(flags);
+#ifdef CONFIG_PREEMPT
+	/*
+	 * We may have been preempted and rescheduled on a different
+	 * cpu before disabling interrupts. Need to reload cpu area
+	 * pointer.
+	 */
+	c = this_cpu_ptr(s->cpu_slab);
+#endif
+
+	p = ___slab_alloc(s, gfpflags, node, addr, c);
+	local_irq_restore(flags);
+	return p;
+}
+
+/*
  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
  * have the fastpath folded into their functions. So no function call
  * overhead for requests that can be satisfied on the fastpath.
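Editorial aside: the two hunks above split the allocation slowpath into ___slab_alloc(), which assumes interrupts are already disabled, and a thin __slab_alloc() wrapper that disables them and refetches the per-cpu pointer. A rough user-space analogy, assuming a locked/unlocked function pair in place of the interrupt toggling (illustration only, not the kernel mechanism):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Worker: caller must already hold pool_lock (like irqs-off for ___slab_alloc). */
static void *pool_refill_locked(size_t size)
{
	return malloc(size);	/* placeholder for the real refill work */
}

/* Wrapper: takes the lock itself (like __slab_alloc disabling interrupts). */
static void *pool_refill(size_t size)
{
	void *p;

	pthread_mutex_lock(&pool_lock);
	p = pool_refill_locked(size);
	pthread_mutex_unlock(&pool_lock);
	return p;
}

int main(void)
{
	void *one, *two;

	one = pool_refill(64);		/* one-off callers use the wrapper */

	pthread_mutex_lock(&pool_lock);	/* bulk callers hold the lock across */
	two = pool_refill_locked(64);	/* repeated worker calls */
	pthread_mutex_unlock(&pool_lock);

	free(one);
	free(two);
	return 0;
}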
@@ -2419,7 +2482,7 @@ new_slab:
 static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 		gfp_t gfpflags, int node, unsigned long addr)
 {
-	void **object;
+	void *object;
 	struct kmem_cache_cpu *c;
 	struct page *page;
 	unsigned long tid;
@@ -2498,7 +2561,7 @@ redo:
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
 		memset(object, 0, s->object_size);
 
-	slab_post_alloc_hook(s, gfpflags, object);
+	slab_post_alloc_hook(s, gfpflags, 1, &object);
 
 	return object;
 }
@@ -2569,10 +2632,11 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-			void *x, unsigned long addr)
+			void *head, void *tail, int cnt,
+			unsigned long addr)
+
 {
 	void *prior;
-	void **object = (void *)x;
 	int was_frozen;
 	struct page new;
 	unsigned long counters;
@@ -2582,7 +2646,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	stat(s, FREE_SLOWPATH);
 
 	if (kmem_cache_debug(s) &&
-	    !(n = free_debug_processing(s, page, x, addr, &flags)))
+	    !(n = free_debug_processing(s, page, head, tail, cnt,
+					addr, &flags)))
 		return;
 
 	do {
@@ -2592,10 +2657,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		}
 		prior = page->freelist;
 		counters = page->counters;
-		set_freepointer(s, object, prior);
+		set_freepointer(s, tail, prior);
 		new.counters = counters;
 		was_frozen = new.frozen;
-		new.inuse--;
+		new.inuse -= cnt;
 		if ((!new.inuse || !prior) && !was_frozen) {
 
 			if (kmem_cache_has_cpu_partial(s) && !prior) {
@@ -2626,7 +2691,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 	} while (!cmpxchg_double_slab(s, page,
 		prior, counters,
-		object, new.counters,
+		head, new.counters,
 		"__slab_free"));
 
 	if (likely(!n)) {
@@ -2691,15 +2756,20 @@ slab_empty:
  *
  * If fastpath is not possible then fall back to __slab_free where we deal
  * with all sorts of special processing.
+ *
+ * Bulk free of a freelist with several objects (all pointing to the
+ * same page) possible by specifying head and tail ptr, plus objects
+ * count (cnt). Bulk free indicated by tail pointer being set.
  */
-static __always_inline void slab_free(struct kmem_cache *s,
-			struct page *page, void *x, unsigned long addr)
+static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
+			void *head, void *tail, int cnt,
+			unsigned long addr)
 {
-	void **object = (void *)x;
+	void *tail_obj = tail ? : head;
 	struct kmem_cache_cpu *c;
 	unsigned long tid;
 
-	slab_free_hook(s, x);
+	slab_free_freelist_hook(s, head, tail);
 
 redo:
 	/*
@@ -2718,19 +2788,19 @@ redo:
 	barrier();
 
 	if (likely(page == c->page)) {
-		set_freepointer(s, object, c->freelist);
+		set_freepointer(s, tail_obj, c->freelist);
 
 		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
 				c->freelist, tid,
-				object, next_tid(tid)))) {
+				head, next_tid(tid)))) {
 
 			note_cmpxchg_failure("slab_free", s, tid);
 			goto redo;
 		}
 		stat(s, FREE_FASTPATH);
 	} else
-		__slab_free(s, page, x, addr);
+		__slab_free(s, page, head, tail_obj, cnt, addr);
 
 }
 
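Editorial aside: with head/tail in hand, the fastpath above splices an entire pre-linked chain onto the per-cpu freelist with one cmpxchg, since only the tail's free pointer needs rewriting. A minimal C11 sketch of that splice on a plain atomic list head follows; struct node is hypothetical, and the kernel's pairing of the pointer with a transaction id via this_cpu_cmpxchg_double() is not modelled here.

#include <stdatomic.h>
#include <stdio.h>

struct node {
	struct node *next;
};

static _Atomic(struct node *) freelist;

/* Push a pre-linked [head, tail] chain with a single compare-and-swap. */
static void free_bulk(struct node *head, struct node *tail)
{
	struct node *old = atomic_load(&freelist);

	do {
		tail->next = old;	/* only the tail is rewritten */
	} while (!atomic_compare_exchange_weak(&freelist, &old, head));
}

int main(void)
{
	struct node a, b, c;

	a.next = &b;
	b.next = &c;		/* pre-linked chain a -> b -> c */
	free_bulk(&a, &c);
	printf("new head: %p\n", (void *)atomic_load(&freelist));
	return 0;
}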
@@ -2739,59 +2809,116 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	s = cache_from_obj(s, x);
 	if (!s)
 		return;
-	slab_free(s, virt_to_head_page(x), x, _RET_IP_);
+	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
 	trace_kmem_cache_free(_RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-/* Note that interrupts must be enabled when calling this function. */
-void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
-{
-	struct kmem_cache_cpu *c;
+struct detached_freelist {
 	struct page *page;
-	int i;
+	void *tail;
+	void *freelist;
+	int cnt;
+};
 
-	local_irq_disable();
-	c = this_cpu_ptr(s->cpu_slab);
+/*
+ * This function progressively scans the array with free objects (with
+ * a limited look ahead) and extract objects belonging to the same
+ * page.  It builds a detached freelist directly within the given
+ * page/objects.  This can happen without any need for
+ * synchronization, because the objects are owned by running process.
+ * The freelist is build up as a single linked list in the objects.
+ * The idea is, that this detached freelist can then be bulk
+ * transferred to the real freelist(s), but only requiring a single
+ * synchronization primitive.  Look ahead in the array is limited due
+ * to performance reasons.
+ */
+static int build_detached_freelist(struct kmem_cache *s, size_t size,
+				   void **p, struct detached_freelist *df)
+{
+	size_t first_skipped_index = 0;
+	int lookahead = 3;
+	void *object;
 
-	for (i = 0; i < size; i++) {
-		void *object = p[i];
+	/* Always re-init detached_freelist */
+	df->page = NULL;
 
-		BUG_ON(!object);
-		/* kmem cache debug support */
-		s = cache_from_obj(s, object);
-		if (unlikely(!s))
-			goto exit;
-		slab_free_hook(s, object);
+	do {
+		object = p[--size];
+	} while (!object && size);
 
-		page = virt_to_head_page(object);
+	if (!object)
+		return 0;
 
-		if (c->page == page) {
-			/* Fastpath: local CPU free */
-			set_freepointer(s, object, c->freelist);
-			c->freelist = object;
-		} else {
-			c->tid = next_tid(c->tid);
-			local_irq_enable();
-			/* Slowpath: overhead locked cmpxchg_double_slab */
-			__slab_free(s, page, object, _RET_IP_);
-			local_irq_disable();
-			c = this_cpu_ptr(s->cpu_slab);
+	/* Start new detached freelist */
+	set_freepointer(s, object, NULL);
+	df->page = virt_to_head_page(object);
+	df->tail = object;
+	df->freelist = object;
+	p[size] = NULL; /* mark object processed */
+	df->cnt = 1;
+
+	while (size) {
+		object = p[--size];
+		if (!object)
+			continue; /* Skip processed objects */
+
+		/* df->page is always set at this point */
+		if (df->page == virt_to_head_page(object)) {
+			/* Opportunity build freelist */
+			set_freepointer(s, object, df->freelist);
+			df->freelist = object;
+			df->cnt++;
+			p[size] = NULL; /* mark object processed */
+
+			continue;
 		}
+
+		/* Limit look ahead search */
+		if (!--lookahead)
+			break;
+
+		if (!first_skipped_index)
+			first_skipped_index = size + 1;
 	}
-exit:
-	c->tid = next_tid(c->tid);
-	local_irq_enable();
+
+	return first_skipped_index;
+}
+
+
+/* Note that interrupts must be enabled when calling this function. */
+void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
+{
+	if (WARN_ON(!size))
+		return;
+
+	do {
+		struct detached_freelist df;
+		struct kmem_cache *s;
+
+		/* Support for memcg */
+		s = cache_from_obj(orig_s, p[size - 1]);
+
+		size = build_detached_freelist(s, size, p, &df);
+		if (unlikely(!df.page))
+			continue;
+
+		slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
+	} while (likely(size));
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
 /* Note that interrupts must be enabled when calling this function. */
-bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			  void **p)
 {
 	struct kmem_cache_cpu *c;
 	int i;
 
+	/* memcg and kmem_cache debug support */
+	s = slab_pre_alloc_hook(s, flags);
+	if (unlikely(!s))
+		return false;
 	/*
 	 * Drain objects in the per cpu slab, while disabling local
 	 * IRQs, which protects against PREEMPT and interrupts
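Editorial aside: the comment above build_detached_freelist() describes scanning the array from the end, chaining together objects that share a backing page, marking processed slots NULL and bounding the search with a small lookahead. A user-space sketch of that grouping strategy follows; struct item, page_id and page_of() are invented stand-ins for a slab object, its backing page and virt_to_head_page().

#include <stddef.h>
#include <stdio.h>

struct item {
	struct item *next;	/* stand-in for the in-object free pointer */
	int page_id;		/* stand-in for virt_to_head_page() */
};

static int page_of(struct item *it) { return it->page_id; }

/* Returns the index to resume from (like first_skipped_index), 0 when done. */
static size_t build_group(size_t size, struct item **p,
			  struct item **group, int *cnt)
{
	size_t first_skipped_index = 0;
	int lookahead = 3;
	struct item *it;

	*group = NULL;
	do {
		it = p[--size];
	} while (!it && size);
	if (!it)
		return 0;

	it->next = NULL;		/* start a new detached list */
	*group = it;
	*cnt = 1;
	p[size] = NULL;			/* mark slot processed */

	while (size) {
		struct item *cand = p[--size];

		if (!cand)
			continue;	/* already consumed by an earlier pass */
		if (page_of(cand) == page_of(*group)) {
			cand->next = *group;
			*group = cand;
			(*cnt)++;
			p[size] = NULL;
			continue;
		}
		if (!--lookahead)	/* bounded probing, like the kernel */
			break;
		if (!first_skipped_index)
			first_skipped_index = size + 1;
	}
	return first_skipped_index;
}

int main(void)
{
	struct item a = { .page_id = 1 }, b = { .page_id = 2 }, c = { .page_id = 1 };
	struct item *p[] = { &a, &b, &c };
	size_t size = 3;

	do {
		struct item *group;
		int cnt;

		size = build_group(size, p, &group, &cnt);
		if (group)
			printf("group on page %d with %d object(s)\n",
			       page_of(group), cnt);
	} while (size);
	return 0;
}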
@@ -2804,36 +2931,20 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 		void *object = c->freelist;
 
 		if (unlikely(!object)) {
-			local_irq_enable();
 			/*
 			 * Invoking slow path likely have side-effect
 			 * of re-populating per CPU c->freelist
 			 */
-			p[i] = __slab_alloc(s, flags, NUMA_NO_NODE,
+			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
 					    _RET_IP_, c);
-			if (unlikely(!p[i])) {
-				__kmem_cache_free_bulk(s, i, p);
-				return false;
-			}
-			local_irq_disable();
+			if (unlikely(!p[i]))
+				goto error;
+
 			c = this_cpu_ptr(s->cpu_slab);
 			continue; /* goto for-loop */
 		}
-
-		/* kmem_cache debug support */
-		s = slab_pre_alloc_hook(s, flags);
-		if (unlikely(!s)) {
-			__kmem_cache_free_bulk(s, i, p);
-			c->tid = next_tid(c->tid);
-			local_irq_enable();
-			return false;
-		}
-
 		c->freelist = get_freepointer(s, object);
 		p[i] = object;
-
-		/* kmem_cache debug support */
-		slab_post_alloc_hook(s, flags, object);
 	}
 	c->tid = next_tid(c->tid);
 	local_irq_enable();
@@ -2846,7 +2957,14 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			memset(p[j], 0, s->object_size);
 	}
 
-	return true;
+	/* memcg and kmem_cache debug support */
+	slab_post_alloc_hook(s, flags, size, p);
+	return i;
+error:
+	local_irq_enable();
+	slab_post_alloc_hook(s, flags, i, p);
+	__kmem_cache_free_bulk(s, i, p);
+	return 0;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
 
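Editorial aside: after this series kmem_cache_alloc_bulk() returns the number of objects allocated (0 on failure, with any partial allocation already freed) and kmem_cache_free_bulk() takes the same array back. A hedged kernel-style usage sketch, with a made-up cache pointer and batch size:

#include <linux/errno.h>
#include <linux/slab.h>

#define NR_OBJS 16	/* made-up batch size */

static int example_use_bulk(struct kmem_cache *example_cachep)
{
	void *objs[NR_OBJS];

	/*
	 * Returns the number of objects allocated; 0 means the whole request
	 * failed and any partial allocation has already been freed.
	 */
	if (kmem_cache_alloc_bulk(example_cachep, GFP_KERNEL, NR_OBJS, objs) == 0)
		return -ENOMEM;

	/* ... use objs[0] .. objs[NR_OBJS - 1] ... */

	/* Interrupts must be enabled here, as noted above. */
	kmem_cache_free_bulk(example_cachep, NR_OBJS, objs);
	return 0;
}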
@@ -3511,7 +3629,7 @@ void kfree(const void *x)
 		__free_kmem_pages(page, compound_order(page));
 		return;
 	}
-	slab_free(page->slab_cache, page, object, _RET_IP_);
+	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
 