author		Linus Torvalds <torvalds@linux-foundation.org>	2015-11-22 18:21:40 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-11-22 18:21:40 -0500
commit		104e2a6f8b3df8b8200c7f4ac23feb079a44e4f3 (patch)
tree		245a6d039b608784ac982acd4a7191b2cf15cc6c /mm
parent		dcfeda9d5f52bf43ae4297d8102a76d88b778d93 (diff)
parent		865762a8119e74b5f0e236d2d8eaaf8be9292a06 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge slub bulk allocator updates from Andrew Morton:
"This missed the merge window because I was waiting for some repairs to
come in. Nothing actually uses the bulk allocator yet and the changes
to other code paths are pretty small. And the net guys are waiting
for this so they can start merging the client code"
More comments from Jesper Dangaard Brouer:
"The kmem_cache_alloc_bulk() call, in mm/slub.c, were included in
previous kernel. The present version contains a bug. Vladimir
Davydov noticed it contained a bug, when kernel is compiled with
CONFIG_MEMCG_KMEM (see commit 03ec0ed57ffc: "slub: fix kmem cgroup
bug in kmem_cache_alloc_bulk"). Plus the mem cgroup counterpart in
kmem_cache_free_bulk() were missing (see commit 033745189b1b "slub:
add missing kmem cgroup support to kmem_cache_free_bulk").
I don't consider the fix stable-material because there are no in-tree
users of the API.
But with known bugs (for memcg) I cannot start using the API in the
net-tree"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
slab/slub: adjust kmem_cache_alloc_bulk API
slub: add missing kmem cgroup support to kmem_cache_free_bulk
slub: fix kmem cgroup bug in kmem_cache_alloc_bulk
slub: optimize bulk slowpath free by detached freelist
slub: support for bulk free with SLUB freelists
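For callers, a minimal usage sketch of the adjusted API, where
kmem_cache_alloc_bulk() now returns the number of objects allocated
(0 on failure) instead of a bool. The cache name my_cache and the
function demo_use_bulk() are hypothetical, not part of this series:

	/* Hedged sketch: a hypothetical client of the reworked bulk API. */
	#include <linux/slab.h>
	#include <linux/errno.h>

	#define DEMO_BATCH 16

	static int demo_use_bulk(struct kmem_cache *my_cache)
	{
		void *objs[DEMO_BATCH];
		int n;

		/* Returns the number of objects stored in objs[]; 0 means total
		 * failure (any partially allocated objects were already freed). */
		n = kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, DEMO_BATCH, objs);
		if (!n)
			return -ENOMEM;

		/* ... use objs[0..n-1] here ... */

		/* Interrupts must be enabled around both bulk calls. */
		kmem_cache_free_bulk(my_cache, n, objs);
		return 0;
	}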
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	2
-rw-r--r--	mm/slab.h	2
-rw-r--r--	mm/slab_common.c	6
-rw-r--r--	mm/slob.c	2
-rw-r--r--	mm/slub.c	244
5 files changed, 181 insertions(+), 75 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3419,7 +3419,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
-bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 								void **p)
 {
 	return __kmem_cache_alloc_bulk(s, flags, size, p);
diff --git a/mm/slab.h b/mm/slab.h
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -170,7 +170,7 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
  * may be allocated or freed using these operations.
  */
 void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-bool __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
 
 #ifdef CONFIG_MEMCG_KMEM
 /*
diff --git a/mm/slab_common.c b/mm/slab_common.c
index d88e97c10a2e..3c6a86b4ec25 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -112,7 +112,7 @@ void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
 		kmem_cache_free(s, p[i]);
 }
 
-bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
+int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 								void **p)
 {
 	size_t i;
@@ -121,10 +121,10 @@ bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 		void *x = p[i] = kmem_cache_alloc(s, flags);
 		if (!x) {
 			__kmem_cache_free_bulk(s, i, p);
-			return false;
+			return 0;
 		}
 	}
-	return true;
+	return i;
 }
 
 #ifdef CONFIG_MEMCG_KMEM
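The generic fallback above is what gives the API its all-or-nothing
contract: either all nr objects are allocated and nr is returned, or
everything obtained so far is freed and 0 is returned. A user-space
model of that contract (illustrative names only, with malloc standing
in for kmem_cache_alloc):

	/* User-space model of the generic fallback's all-or-nothing contract. */
	#include <stdlib.h>
	#include <stddef.h>

	static int alloc_bulk_fallback(size_t nr, void **p, size_t objsize)
	{
		size_t i;

		for (i = 0; i < nr; i++) {
			p[i] = malloc(objsize);
			if (!p[i]) {
				/* Unwind: free what was obtained, report total failure. */
				while (i--)
					free(p[i]);
				return 0;
			}
		}
		return i;	/* == nr: the caller never sees a partial batch */
	}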
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -617,7 +617,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
-bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 								void **p)
 {
 	return __kmem_cache_alloc_bulk(s, flags, size, p);
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1065,11 +1065,15 @@ bad:
 	return 0;
 }
 
+/* Supports checking bulk free of a constructed freelist */
 static noinline struct kmem_cache_node *free_debug_processing(
-	struct kmem_cache *s, struct page *page, void *object,
+	struct kmem_cache *s, struct page *page,
+	void *head, void *tail, int bulk_cnt,
 	unsigned long addr, unsigned long *flags)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+	void *object = head;
+	int cnt = 0;
 
 	spin_lock_irqsave(&n->list_lock, *flags);
 	slab_lock(page);
@@ -1077,6 +1081,9 @@ static noinline struct kmem_cache_node *free_debug_processing(
 	if (!check_slab(s, page))
 		goto fail;
 
+next_object:
+	cnt++;
+
 	if (!check_valid_pointer(s, page, object)) {
 		slab_err(s, page, "Invalid object pointer 0x%p", object);
 		goto fail;
@@ -1107,8 +1114,19 @@ static noinline struct kmem_cache_node *free_debug_processing(
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, object, TRACK_FREE, addr);
 	trace(s, page, object, 0);
+	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
 	init_object(s, object, SLUB_RED_INACTIVE);
+
+	/* Reached end of constructed freelist yet? */
+	if (object != tail) {
+		object = get_freepointer(s, object);
+		goto next_object;
+	}
 out:
+	if (cnt != bulk_cnt)
+		slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
+			 bulk_cnt, cnt);
+
 	slab_unlock(page);
 	/*
 	 * Keep node_lock to preserve integrity
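To make the counting logic above concrete, here is a hedged
user-space model (not kernel code; all names illustrative) of how
free_debug_processing() walks a constructed freelist from head to
tail via the per-object free pointer, verifying that the chain length
matches bulk_cnt:

	/* User-space model of walking a constructed SLUB-style freelist. */
	#include <stdio.h>
	#include <stddef.h>

	struct object {
		void *freeptr;		/* stands in for the freepointer at s->offset */
		char payload[24];
	};

	/* Model of get_freepointer(): read the next-object link. */
	static void *get_freepointer(struct object *obj)
	{
		return obj->freeptr;
	}

	/* Walk head..tail, counting objects; mimic the cnt != bulk_cnt check. */
	static int check_bulk_count(struct object *head, struct object *tail,
				    int bulk_cnt)
	{
		struct object *obj = head;
		int cnt = 0;

		for (;;) {
			cnt++;
			if (obj == tail)
				break;
			obj = get_freepointer(obj);
		}
		if (cnt != bulk_cnt) {
			fprintf(stderr, "Bulk freelist count(%d) invalid(%d)\n",
				bulk_cnt, cnt);
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		struct object a, b, c;

		/* Build the detached list a -> b -> c. */
		a.freeptr = &b;
		b.freeptr = &c;
		c.freeptr = NULL;

		return check_bulk_count(&a, &c, 3) ? 1 : 0;
	}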
@@ -1212,7 +1230,8 @@ static inline int alloc_debug_processing(struct kmem_cache *s,
 	struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline struct kmem_cache_node *free_debug_processing(
-	struct kmem_cache *s, struct page *page, void *object,
+	struct kmem_cache *s, struct page *page,
+	void *head, void *tail, int bulk_cnt,
 	unsigned long addr, unsigned long *flags) { return NULL; }
 
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
@@ -1273,14 +1292,21 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
 	return memcg_kmem_get_cache(s, flags);
 }
 
-static inline void slab_post_alloc_hook(struct kmem_cache *s,
-					gfp_t flags, void *object)
+static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
+					size_t size, void **p)
 {
+	size_t i;
+
 	flags &= gfp_allowed_mask;
-	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
-	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
+	for (i = 0; i < size; i++) {
+		void *object = p[i];
+
+		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
+		kmemleak_alloc_recursive(object, s->object_size, 1,
+					 s->flags, flags);
+		kasan_slab_alloc(s, object);
+	}
 	memcg_kmem_put_cache(s);
-	kasan_slab_alloc(s, object);
 }
 
 static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -1308,6 +1334,29 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 	kasan_slab_free(s, x);
 }
 
+static inline void slab_free_freelist_hook(struct kmem_cache *s,
+					   void *head, void *tail)
+{
+/*
+ * Compiler cannot detect this function can be removed if slab_free_hook()
+ * evaluates to nothing. Thus, catch all relevant config debug options here.
+ */
+#if defined(CONFIG_KMEMCHECK) ||		\
+	defined(CONFIG_LOCKDEP)	||		\
+	defined(CONFIG_DEBUG_KMEMLEAK) ||	\
+	defined(CONFIG_DEBUG_OBJECTS_FREE) ||	\
+	defined(CONFIG_KASAN)
+
+	void *object = head;
+	void *tail_obj = tail ? : head;
+
+	do {
+		slab_free_hook(s, object);
+	} while ((object != tail_obj) &&
+		 (object = get_freepointer(s, object)));
+#endif
+}
+
 static void setup_object(struct kmem_cache *s, struct page *page,
 				void *object)
 {
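One construct above merits a note: tail ? : head uses GCC's
conditional operator with an omitted middle operand, so it is
equivalent to the line sketched below. With a NULL tail (the
single-object free path) the do/while loop therefore degenerates to a
single slab_free_hook() call.

	void *tail_obj = tail ? tail : head;	/* what `tail ? : head` expands to */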
@@ -2433,7 +2482,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 		gfp_t gfpflags, int node, unsigned long addr)
 {
-	void **object;
+	void *object;
 	struct kmem_cache_cpu *c;
 	struct page *page;
 	unsigned long tid;
@@ -2512,7 +2561,7 @@ redo:
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
 		memset(object, 0, s->object_size);
 
-	slab_post_alloc_hook(s, gfpflags, object);
+	slab_post_alloc_hook(s, gfpflags, 1, &object);
 
 	return object;
 }
@@ -2583,10 +2632,11 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-			void *x, unsigned long addr)
+			void *head, void *tail, int cnt,
+			unsigned long addr)
+
 {
 	void *prior;
-	void **object = (void *)x;
 	int was_frozen;
 	struct page new;
 	unsigned long counters;
@@ -2596,7 +2646,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	stat(s, FREE_SLOWPATH);
 
 	if (kmem_cache_debug(s) &&
-	    !(n = free_debug_processing(s, page, x, addr, &flags)))
+	    !(n = free_debug_processing(s, page, head, tail, cnt,
+					addr, &flags)))
 		return;
 
 	do {
@@ -2606,10 +2657,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		}
 		prior = page->freelist;
 		counters = page->counters;
-		set_freepointer(s, object, prior);
+		set_freepointer(s, tail, prior);
 		new.counters = counters;
 		was_frozen = new.frozen;
-		new.inuse--;
+		new.inuse -= cnt;
 		if ((!new.inuse || !prior) && !was_frozen) {
 
 			if (kmem_cache_has_cpu_partial(s) && !prior) {
@@ -2640,7 +2691,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 	} while (!cmpxchg_double_slab(s, page,
 		prior, counters,
-		object, new.counters,
+		head, new.counters,
 		"__slab_free"));
 
 	if (likely(!n)) {
@@ -2705,15 +2756,20 @@ slab_empty:
  *
  * If fastpath is not possible then fall back to __slab_free where we deal
  * with all sorts of special processing.
+ *
+ * Bulk free of a freelist with several objects (all pointing to the
+ * same page) possible by specifying head and tail ptr, plus objects
+ * count (cnt). Bulk free indicated by tail pointer being set.
  */
-static __always_inline void slab_free(struct kmem_cache *s,
-			struct page *page, void *x, unsigned long addr)
+static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
+				      void *head, void *tail, int cnt,
+				      unsigned long addr)
 {
-	void **object = (void *)x;
+	void *tail_obj = tail ? : head;
 	struct kmem_cache_cpu *c;
 	unsigned long tid;
 
-	slab_free_hook(s, x);
+	slab_free_freelist_hook(s, head, tail);
 
 redo:
 	/*
@@ -2732,19 +2788,19 @@ redo:
 	barrier();
 
 	if (likely(page == c->page)) {
-		set_freepointer(s, object, c->freelist);
+		set_freepointer(s, tail_obj, c->freelist);
 
 		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
 				c->freelist, tid,
-				object, next_tid(tid)))) {
+				head, next_tid(tid)))) {
 
 			note_cmpxchg_failure("slab_free", s, tid);
 			goto redo;
 		}
 		stat(s, FREE_FASTPATH);
 	} else
-		__slab_free(s, page, x, addr);
+		__slab_free(s, page, head, tail_obj, cnt, addr);
 
 }
 
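As a hedged illustration of the head/tail/cnt convention described in
the comment above, this user-space fragment (illustrative names only)
links three same-page objects into a list and shows the single
pointer splice that the fast path performs; the kernel pairs the
final store with the transaction id via cmpxchg, not a plain write:

	/* User-space model: splice a pre-linked list onto a freelist head. */
	#include <stddef.h>

	struct object { void *freeptr; };

	static void bulk_splice(struct object **freelist,
				struct object *head, struct object *tail)
	{
		/* set_freepointer(s, tail_obj, c->freelist) analogue ... */
		tail->freeptr = *freelist;
		/* ... then publish head as the new freelist in one update. */
		*freelist = head;
	}

	int main(void)
	{
		struct object a, b, c, *freelist = NULL;

		a.freeptr = &b;		/* head = &a */
		b.freeptr = &c;		/* tail = &c, cnt = 3 */
		c.freeptr = NULL;

		bulk_splice(&freelist, &a, &c);
		return freelist != &a;	/* 0 on success */
	}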
@@ -2753,59 +2809,116 @@ void kmem_cache_free(struct kmem_cache *s, void *x) | |||
2753 | s = cache_from_obj(s, x); | 2809 | s = cache_from_obj(s, x); |
2754 | if (!s) | 2810 | if (!s) |
2755 | return; | 2811 | return; |
2756 | slab_free(s, virt_to_head_page(x), x, _RET_IP_); | 2812 | slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_); |
2757 | trace_kmem_cache_free(_RET_IP_, x); | 2813 | trace_kmem_cache_free(_RET_IP_, x); |
2758 | } | 2814 | } |
2759 | EXPORT_SYMBOL(kmem_cache_free); | 2815 | EXPORT_SYMBOL(kmem_cache_free); |
2760 | 2816 | ||
2761 | /* Note that interrupts must be enabled when calling this function. */ | 2817 | struct detached_freelist { |
2762 | void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) | ||
2763 | { | ||
2764 | struct kmem_cache_cpu *c; | ||
2765 | struct page *page; | 2818 | struct page *page; |
2766 | int i; | 2819 | void *tail; |
2820 | void *freelist; | ||
2821 | int cnt; | ||
2822 | }; | ||
2767 | 2823 | ||
2768 | local_irq_disable(); | 2824 | /* |
2769 | c = this_cpu_ptr(s->cpu_slab); | 2825 | * This function progressively scans the array with free objects (with |
2826 | * a limited look ahead) and extract objects belonging to the same | ||
2827 | * page. It builds a detached freelist directly within the given | ||
2828 | * page/objects. This can happen without any need for | ||
2829 | * synchronization, because the objects are owned by running process. | ||
2830 | * The freelist is build up as a single linked list in the objects. | ||
2831 | * The idea is, that this detached freelist can then be bulk | ||
2832 | * transferred to the real freelist(s), but only requiring a single | ||
2833 | * synchronization primitive. Look ahead in the array is limited due | ||
2834 | * to performance reasons. | ||
2835 | */ | ||
2836 | static int build_detached_freelist(struct kmem_cache *s, size_t size, | ||
2837 | void **p, struct detached_freelist *df) | ||
2838 | { | ||
2839 | size_t first_skipped_index = 0; | ||
2840 | int lookahead = 3; | ||
2841 | void *object; | ||
2770 | 2842 | ||
2771 | for (i = 0; i < size; i++) { | 2843 | /* Always re-init detached_freelist */ |
2772 | void *object = p[i]; | 2844 | df->page = NULL; |
2773 | 2845 | ||
2774 | BUG_ON(!object); | 2846 | do { |
2775 | /* kmem cache debug support */ | 2847 | object = p[--size]; |
2776 | s = cache_from_obj(s, object); | 2848 | } while (!object && size); |
2777 | if (unlikely(!s)) | ||
2778 | goto exit; | ||
2779 | slab_free_hook(s, object); | ||
2780 | 2849 | ||
2781 | page = virt_to_head_page(object); | 2850 | if (!object) |
2851 | return 0; | ||
2782 | 2852 | ||
2783 | if (c->page == page) { | 2853 | /* Start new detached freelist */ |
2784 | /* Fastpath: local CPU free */ | 2854 | set_freepointer(s, object, NULL); |
2785 | set_freepointer(s, object, c->freelist); | 2855 | df->page = virt_to_head_page(object); |
2786 | c->freelist = object; | 2856 | df->tail = object; |
2787 | } else { | 2857 | df->freelist = object; |
2788 | c->tid = next_tid(c->tid); | 2858 | p[size] = NULL; /* mark object processed */ |
2789 | local_irq_enable(); | 2859 | df->cnt = 1; |
2790 | /* Slowpath: overhead locked cmpxchg_double_slab */ | 2860 | |
2791 | __slab_free(s, page, object, _RET_IP_); | 2861 | while (size) { |
2792 | local_irq_disable(); | 2862 | object = p[--size]; |
2793 | c = this_cpu_ptr(s->cpu_slab); | 2863 | if (!object) |
2864 | continue; /* Skip processed objects */ | ||
2865 | |||
2866 | /* df->page is always set at this point */ | ||
2867 | if (df->page == virt_to_head_page(object)) { | ||
2868 | /* Opportunity build freelist */ | ||
2869 | set_freepointer(s, object, df->freelist); | ||
2870 | df->freelist = object; | ||
2871 | df->cnt++; | ||
2872 | p[size] = NULL; /* mark object processed */ | ||
2873 | |||
2874 | continue; | ||
2794 | } | 2875 | } |
2876 | |||
2877 | /* Limit look ahead search */ | ||
2878 | if (!--lookahead) | ||
2879 | break; | ||
2880 | |||
2881 | if (!first_skipped_index) | ||
2882 | first_skipped_index = size + 1; | ||
2795 | } | 2883 | } |
2796 | exit: | 2884 | |
2797 | c->tid = next_tid(c->tid); | 2885 | return first_skipped_index; |
2798 | local_irq_enable(); | 2886 | } |
2887 | |||
2888 | |||
2889 | /* Note that interrupts must be enabled when calling this function. */ | ||
2890 | void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p) | ||
2891 | { | ||
2892 | if (WARN_ON(!size)) | ||
2893 | return; | ||
2894 | |||
2895 | do { | ||
2896 | struct detached_freelist df; | ||
2897 | struct kmem_cache *s; | ||
2898 | |||
2899 | /* Support for memcg */ | ||
2900 | s = cache_from_obj(orig_s, p[size - 1]); | ||
2901 | |||
2902 | size = build_detached_freelist(s, size, p, &df); | ||
2903 | if (unlikely(!df.page)) | ||
2904 | continue; | ||
2905 | |||
2906 | slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_); | ||
2907 | } while (likely(size)); | ||
2799 | } | 2908 | } |
2800 | EXPORT_SYMBOL(kmem_cache_free_bulk); | 2909 | EXPORT_SYMBOL(kmem_cache_free_bulk); |
2801 | 2910 | ||
2802 | /* Note that interrupts must be enabled when calling this function. */ | 2911 | /* Note that interrupts must be enabled when calling this function. */ |
2803 | bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, | 2912 | int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, |
2804 | void **p) | 2913 | void **p) |
2805 | { | 2914 | { |
2806 | struct kmem_cache_cpu *c; | 2915 | struct kmem_cache_cpu *c; |
2807 | int i; | 2916 | int i; |
2808 | 2917 | ||
2918 | /* memcg and kmem_cache debug support */ | ||
2919 | s = slab_pre_alloc_hook(s, flags); | ||
2920 | if (unlikely(!s)) | ||
2921 | return false; | ||
2809 | /* | 2922 | /* |
2810 | * Drain objects in the per cpu slab, while disabling local | 2923 | * Drain objects in the per cpu slab, while disabling local |
2811 | * IRQs, which protects against PREEMPT and interrupts | 2924 | * IRQs, which protects against PREEMPT and interrupts |
@@ -2830,17 +2943,8 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			c = this_cpu_ptr(s->cpu_slab);
 			continue; /* goto for-loop */
 		}
-
-		/* kmem_cache debug support */
-		s = slab_pre_alloc_hook(s, flags);
-		if (unlikely(!s))
-			goto error;
-
 		c->freelist = get_freepointer(s, object);
 		p[i] = object;
-
-		/* kmem_cache debug support */
-		slab_post_alloc_hook(s, flags, object);
 	}
 	c->tid = next_tid(c->tid);
 	local_irq_enable();
@@ -2853,12 +2957,14 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			memset(p[j], 0, s->object_size);
 	}
 
-	return true;
-
+	/* memcg and kmem_cache debug support */
+	slab_post_alloc_hook(s, flags, size, p);
+	return i;
 error:
-	__kmem_cache_free_bulk(s, i, p);
 	local_irq_enable();
-	return false;
+	slab_post_alloc_hook(s, flags, i, p);
+	__kmem_cache_free_bulk(s, i, p);
+	return 0;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
 
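To see how the lookahead-limited grouping in build_detached_freelist()
drains the array, here is a hedged user-space model (page identity
reduced to an integer tag; all names illustrative). It repeatedly
peels off one same-"page" group, NULLing processed slots, until the
array is empty:

	/* User-space model of the detached-freelist grouping pass. */
	#include <stdio.h>
	#include <stddef.h>

	struct object {
		struct object *freeptr;
		int page;			/* stands in for virt_to_head_page() */
	};

	struct detached_freelist {
		int page;
		struct object *freelist;	/* head of the detached list */
		struct object *tail;
		int cnt;
	};

	static size_t build_detached_freelist(size_t size, struct object **p,
					      struct detached_freelist *df)
	{
		size_t first_skipped_index = 0;
		int lookahead = 3;
		struct object *object;

		df->freelist = NULL;	/* re-init marker, like df->page = NULL */

		/* Find the last unprocessed slot. */
		do {
			object = p[--size];
		} while (!object && size);
		if (!object)
			return 0;

		/* Start a new detached freelist from that object. */
		object->freeptr = NULL;
		df->page = object->page;
		df->tail = object;
		df->freelist = object;
		df->cnt = 1;
		p[size] = NULL;			/* mark processed */

		while (size) {
			object = p[--size];
			if (!object)
				continue;	/* already processed */

			if (object->page == df->page) {
				/* Same page: push onto the detached list. */
				object->freeptr = df->freelist;
				df->freelist = object;
				df->cnt++;
				p[size] = NULL;
				continue;
			}
			/* Different page: limited look ahead, then give up. */
			if (!--lookahead)
				break;
			if (!first_skipped_index)
				first_skipped_index = size + 1;
		}
		return first_skipped_index;	/* new effective array size */
	}

	int main(void)
	{
		struct object o[4] = { {0, 1}, {0, 2}, {0, 1}, {0, 2} };
		struct object *p[4] = { &o[0], &o[1], &o[2], &o[3] };
		size_t size = 4;

		do {
			struct detached_freelist df;

			size = build_detached_freelist(size, p, &df);
			if (!df.freelist)
				continue;
			printf("page %d: %d object(s)\n", df.page, df.cnt);
		} while (size);
		return 0;
	}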
@@ -3523,7 +3629,7 @@ void kfree(const void *x)
 		__free_kmem_pages(page, compound_order(page));
 		return;
 	}
-	slab_free(page->slab_cache, page, object, _RET_IP_);
+	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
 