diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-25 19:59:11 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-25 19:59:11 -0400 |
| commit | 606c61a0579669c292dc5f5e1cf898edecfc0d53 (patch) | |
| tree | 569aa7e9b99571890bfccd7278bbc303cfa0a919 /lib | |
| parent | 15dbc136dff62ebefb03353cfb7d308d49b275f3 (diff) | |
| parent | 0fda2788b03c1868e2f20b3b7995b8cc2adf4715 (diff) | |
Merge branch 'akpm' (patches from Andrew)
Merge fourth patch-bomb from Andrew Morton:
"A lot more stuff than expected, sorry. A bunch of ocfs2 reviewing was
finished off.
- mhocko's oom-reaper out-of-memory-handler changes
- ocfs2 fixes and features
- KASAN feature work
- various fixes"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (42 commits)
thp: fix typo in khugepaged_scan_pmd()
MAINTAINERS: fill entries for KASAN
mm/filemap: generic_file_read_iter(): check for zero reads unconditionally
kasan: test fix: warn if the UAF could not be detected in kmalloc_uaf2
mm, kasan: stackdepot implementation. Enable stackdepot for SLAB
arch, ftrace: for KASAN put hard/soft IRQ entries into separate sections
mm, kasan: add GFP flags to KASAN API
mm, kasan: SLAB support
kasan: modify kmalloc_large_oob_right(), add kmalloc_pagealloc_oob_right()
include/linux/oom.h: remove undefined oom_kills_count()/note_oom_kill()
mm/page_alloc: prevent merging between isolated and other pageblocks
drivers/memstick/host/r592.c: avoid gcc-6 warning
ocfs2: extend enough credits for freeing one truncate record while replaying truncate records
ocfs2: extend transaction for ocfs2_remove_rightmost_path() and ocfs2_update_edge_lengths() before to avoid inconsistency between inode and et
ocfs2/dlm: move lock to the tail of grant queue while doing in-place convert
ocfs2: solve a problem of crossing the boundary in updating backups
ocfs2: fix occurring deadlock by changing ocfs2_wq from global to local
ocfs2/dlm: fix BUG in dlm_move_lockres_to_recovery_list
ocfs2/dlm: fix race between convert and recovery
ocfs2: fix a deadlock issue in ocfs2_dio_end_io_write()
...
Diffstat (limited to 'lib')
| -rw-r--r-- | lib/Kconfig | 4 | ||||
| -rw-r--r-- | lib/Kconfig.kasan | 5 | ||||
| -rw-r--r-- | lib/Makefile | 3 | ||||
| -rw-r--r-- | lib/stackdepot.c | 284 | ||||
| -rw-r--r-- | lib/test_kasan.c | 30 |
5 files changed, 324 insertions, 2 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index 133ebc0c1773..3cca1222578e 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
| @@ -536,4 +536,8 @@ config ARCH_HAS_PMEM_API | |||
| 536 | config ARCH_HAS_MMIO_FLUSH | 536 | config ARCH_HAS_MMIO_FLUSH |
| 537 | bool | 537 | bool |
| 538 | 538 | ||
| 539 | config STACKDEPOT | ||
| 540 | bool | ||
| 541 | select STACKTRACE | ||
| 542 | |||
| 539 | endmenu | 543 | endmenu |
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index 0fee5acd5aa0..67d8c6838ba9 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan | |||
| @@ -5,8 +5,9 @@ if HAVE_ARCH_KASAN | |||
| 5 | 5 | ||
| 6 | config KASAN | 6 | config KASAN |
| 7 | bool "KASan: runtime memory debugger" | 7 | bool "KASan: runtime memory debugger" |
| 8 | depends on SLUB_DEBUG | 8 | depends on SLUB_DEBUG || (SLAB && !DEBUG_SLAB) |
| 9 | select CONSTRUCTORS | 9 | select CONSTRUCTORS |
| 10 | select STACKDEPOT if SLAB | ||
| 10 | help | 11 | help |
| 11 | Enables kernel address sanitizer - runtime memory debugger, | 12 | Enables kernel address sanitizer - runtime memory debugger, |
| 12 | designed to find out-of-bounds accesses and use-after-free bugs. | 13 | designed to find out-of-bounds accesses and use-after-free bugs. |
| @@ -16,6 +17,8 @@ config KASAN | |||
| 16 | This feature consumes about 1/8 of available memory and brings about | 17 | This feature consumes about 1/8 of available memory and brings about |
| 17 | ~x3 performance slowdown. | 18 | ~x3 performance slowdown. |
| 18 | For better error detection enable CONFIG_STACKTRACE. | 19 | For better error detection enable CONFIG_STACKTRACE. |
| 20 | Currently CONFIG_KASAN doesn't work with CONFIG_DEBUG_SLAB | ||
| 21 | (the resulting kernel does not boot). | ||
| 19 | 22 | ||
| 20 | choice | 23 | choice |
| 21 | prompt "Instrumentation type" | 24 | prompt "Instrumentation type" |
diff --git a/lib/Makefile b/lib/Makefile index a1de5b61ff40..7bd6fd436c97 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
| @@ -181,6 +181,9 @@ obj-$(CONFIG_SG_SPLIT) += sg_split.o | |||
| 181 | obj-$(CONFIG_STMP_DEVICE) += stmp_device.o | 181 | obj-$(CONFIG_STMP_DEVICE) += stmp_device.o |
| 182 | obj-$(CONFIG_IRQ_POLL) += irq_poll.o | 182 | obj-$(CONFIG_IRQ_POLL) += irq_poll.o |
| 183 | 183 | ||
| 184 | obj-$(CONFIG_STACKDEPOT) += stackdepot.o | ||
| 185 | KASAN_SANITIZE_stackdepot.o := n | ||
| 186 | |||
| 184 | libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \ | 187 | libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \ |
| 185 | fdt_empty_tree.o | 188 | fdt_empty_tree.o |
| 186 | $(foreach file, $(libfdt_files), \ | 189 | $(foreach file, $(libfdt_files), \ |
diff --git a/lib/stackdepot.c b/lib/stackdepot.c new file mode 100644 index 000000000000..654c9d87e83a --- /dev/null +++ b/lib/stackdepot.c | |||
| @@ -0,0 +1,284 @@ | |||
| 1 | /* | ||
| 2 | * Generic stack depot for storing stack traces. | ||
| 3 | * | ||
| 4 | * Some debugging tools need to save stack traces of certain events which can | ||
| 5 | * be later presented to the user. For example, KASAN needs to save alloc and | ||
| 6 | * free stacks for each object, but storing two stack traces per object | ||
| 7 | * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for | ||
| 8 | * that). | ||
| 9 | * | ||
| 10 | * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc | ||
| 11 | * and free stacks repeat a lot, we save about 100x space. | ||
| 12 | * Stacks are never removed from depot, so we store them contiguously one after | ||
| 13 | * another in a contiguous memory allocation. | ||
| 14 | * | ||
| 15 | * Author: Alexander Potapenko <glider@google.com> | ||
| 16 | * Copyright (C) 2016 Google, Inc. | ||
| 17 | * | ||
| 18 | * Based on code by Dmitry Chernenkov. | ||
| 19 | * | ||
| 20 | * This program is free software; you can redistribute it and/or | ||
| 21 | * modify it under the terms of the GNU General Public License | ||
| 22 | * version 2 as published by the Free Software Foundation. | ||
| 23 | * | ||
| 24 | * This program is distributed in the hope that it will be useful, but | ||
| 25 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 26 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 27 | * General Public License for more details. | ||
| 28 | * | ||
| 29 | */ | ||
| 30 | |||
| 31 | #include <linux/gfp.h> | ||
| 32 | #include <linux/jhash.h> | ||
| 33 | #include <linux/kernel.h> | ||
| 34 | #include <linux/mm.h> | ||
| 35 | #include <linux/percpu.h> | ||
| 36 | #include <linux/printk.h> | ||
| 37 | #include <linux/slab.h> | ||
| 38 | #include <linux/stacktrace.h> | ||
| 39 | #include <linux/stackdepot.h> | ||
| 40 | #include <linux/string.h> | ||
| 41 | #include <linux/types.h> | ||
| 42 | |||
| 43 | #define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8) | ||
| 44 | |||
| 45 | #define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */ | ||
| 46 | #define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER)) | ||
| 47 | #define STACK_ALLOC_ALIGN 4 | ||
| 48 | #define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \ | ||
| 49 | STACK_ALLOC_ALIGN) | ||
| 50 | #define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - STACK_ALLOC_OFFSET_BITS) | ||
| 51 | #define STACK_ALLOC_SLABS_CAP 1024 | ||
| 52 | #define STACK_ALLOC_MAX_SLABS \ | ||
| 53 | (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \ | ||
| 54 | (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP) | ||
| 55 | |||
| 56 | /* The compact structure to store the reference to stacks. */ | ||
| 57 | union handle_parts { | ||
| 58 | depot_stack_handle_t handle; | ||
| 59 | struct { | ||
| 60 | u32 slabindex : STACK_ALLOC_INDEX_BITS; | ||
| 61 | u32 offset : STACK_ALLOC_OFFSET_BITS; | ||
| 62 | }; | ||
| 63 | }; | ||
| 64 | |||
| 65 | struct stack_record { | ||
| 66 | struct stack_record *next; /* Link in the hashtable */ | ||
| 67 | u32 hash; /* Hash in the hashtable */ | ||
| 68 | u32 size; /* Number of frames in the stack */ | ||
| 69 | union handle_parts handle; | ||
| 70 | unsigned long entries[1]; /* Variable-sized array of entries. */ | ||
| 71 | }; | ||
| 72 | |||
| 73 | static void *stack_slabs[STACK_ALLOC_MAX_SLABS]; | ||
| 74 | |||
| 75 | static int depot_index; | ||
| 76 | static int next_slab_inited; | ||
| 77 | static size_t depot_offset; | ||
| 78 | static DEFINE_SPINLOCK(depot_lock); | ||
| 79 | |||
| 80 | static bool init_stack_slab(void **prealloc) | ||
| 81 | { | ||
| 82 | if (!*prealloc) | ||
| 83 | return false; | ||
| 84 | /* | ||
| 85 | * This smp_load_acquire() pairs with smp_store_release() to | ||
| 86 | * |next_slab_inited| below and in depot_alloc_stack(). | ||
| 87 | */ | ||
| 88 | if (smp_load_acquire(&next_slab_inited)) | ||
| 89 | return true; | ||
| 90 | if (stack_slabs[depot_index] == NULL) { | ||
| 91 | stack_slabs[depot_index] = *prealloc; | ||
| 92 | } else { | ||
| 93 | stack_slabs[depot_index + 1] = *prealloc; | ||
| 94 | /* | ||
| 95 | * This smp_store_release pairs with smp_load_acquire() from | ||
| 96 | * |next_slab_inited| above and in depot_save_stack(). | ||
| 97 | */ | ||
| 98 | smp_store_release(&next_slab_inited, 1); | ||
| 99 | } | ||
| 100 | *prealloc = NULL; | ||
| 101 | return true; | ||
| 102 | } | ||
| 103 | |||
| 104 | /* Allocation of a new stack in raw storage */ | ||
| 105 | static struct stack_record *depot_alloc_stack(unsigned long *entries, int size, | ||
| 106 | u32 hash, void **prealloc, gfp_t alloc_flags) | ||
| 107 | { | ||
| 108 | int required_size = offsetof(struct stack_record, entries) + | ||
| 109 | sizeof(unsigned long) * size; | ||
| 110 | struct stack_record *stack; | ||
| 111 | |||
| 112 | required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN); | ||
| 113 | |||
| 114 | if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) { | ||
| 115 | if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) { | ||
| 116 | WARN_ONCE(1, "Stack depot reached limit capacity"); | ||
| 117 | return NULL; | ||
| 118 | } | ||
| 119 | depot_index++; | ||
| 120 | depot_offset = 0; | ||
| 121 | /* | ||
| 122 | * smp_store_release() here pairs with smp_load_acquire() from | ||
| 123 | * |next_slab_inited| in depot_save_stack() and | ||
| 124 | * init_stack_slab(). | ||
| 125 | */ | ||
| 126 | if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) | ||
| 127 | smp_store_release(&next_slab_inited, 0); | ||
| 128 | } | ||
| 129 | init_stack_slab(prealloc); | ||
| 130 | if (stack_slabs[depot_index] == NULL) | ||
| 131 | return NULL; | ||
| 132 | |||
| 133 | stack = stack_slabs[depot_index] + depot_offset; | ||
| 134 | |||
| 135 | stack->hash = hash; | ||
| 136 | stack->size = size; | ||
| 137 | stack->handle.slabindex = depot_index; | ||
| 138 | stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN; | ||
| 139 | memcpy(stack->entries, entries, size * sizeof(unsigned long)); | ||
| 140 | depot_offset += required_size; | ||
| 141 | |||
| 142 | return stack; | ||
| 143 | } | ||
| 144 | |||
| 145 | #define STACK_HASH_ORDER 20 | ||
| 146 | #define STACK_HASH_SIZE (1L << STACK_HASH_ORDER) | ||
| 147 | #define STACK_HASH_MASK (STACK_HASH_SIZE - 1) | ||
| 148 | #define STACK_HASH_SEED 0x9747b28c | ||
| 149 | |||
| 150 | static struct stack_record *stack_table[STACK_HASH_SIZE] = { | ||
| 151 | [0 ... STACK_HASH_SIZE - 1] = NULL | ||
| 152 | }; | ||
| 153 | |||
| 154 | /* Calculate hash for a stack */ | ||
| 155 | static inline u32 hash_stack(unsigned long *entries, unsigned int size) | ||
| 156 | { | ||
| 157 | return jhash2((u32 *)entries, | ||
| 158 | size * sizeof(unsigned long) / sizeof(u32), | ||
| 159 | STACK_HASH_SEED); | ||
| 160 | } | ||
| 161 | |||
| 162 | /* Find a stack that is equal to the one stored in entries in the hash */ | ||
| 163 | static inline struct stack_record *find_stack(struct stack_record *bucket, | ||
| 164 | unsigned long *entries, int size, | ||
| 165 | u32 hash) | ||
| 166 | { | ||
| 167 | struct stack_record *found; | ||
| 168 | |||
| 169 | for (found = bucket; found; found = found->next) { | ||
| 170 | if (found->hash == hash && | ||
| 171 | found->size == size && | ||
| 172 | !memcmp(entries, found->entries, | ||
| 173 | size * sizeof(unsigned long))) { | ||
| 174 | return found; | ||
| 175 | } | ||
| 176 | } | ||
| 177 | return NULL; | ||
| 178 | } | ||
| 179 | |||
| 180 | void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace) | ||
| 181 | { | ||
| 182 | union handle_parts parts = { .handle = handle }; | ||
| 183 | void *slab = stack_slabs[parts.slabindex]; | ||
| 184 | size_t offset = parts.offset << STACK_ALLOC_ALIGN; | ||
| 185 | struct stack_record *stack = slab + offset; | ||
| 186 | |||
| 187 | trace->nr_entries = trace->max_entries = stack->size; | ||
| 188 | trace->entries = stack->entries; | ||
| 189 | trace->skip = 0; | ||
| 190 | } | ||
| 191 | |||
| 192 | /** | ||
| 193 | * depot_save_stack - save stack in a stack depot. | ||
| 194 | * @trace - the stacktrace to save. | ||
| 195 | * @alloc_flags - flags for allocating additional memory if required. | ||
| 196 | * | ||
| 197 | * Returns the handle of the stack struct stored in depot. | ||
| 198 | */ | ||
| 199 | depot_stack_handle_t depot_save_stack(struct stack_trace *trace, | ||
| 200 | gfp_t alloc_flags) | ||
| 201 | { | ||
| 202 | u32 hash; | ||
| 203 | depot_stack_handle_t retval = 0; | ||
| 204 | struct stack_record *found = NULL, **bucket; | ||
| 205 | unsigned long flags; | ||
| 206 | struct page *page = NULL; | ||
| 207 | void *prealloc = NULL; | ||
| 208 | |||
| 209 | if (unlikely(trace->nr_entries == 0)) | ||
| 210 | goto fast_exit; | ||
| 211 | |||
| 212 | hash = hash_stack(trace->entries, trace->nr_entries); | ||
| 213 | /* Bad luck, we won't store this stack. */ | ||
| 214 | if (hash == 0) | ||
| 215 | goto exit; | ||
| 216 | |||
| 217 | bucket = &stack_table[hash & STACK_HASH_MASK]; | ||
| 218 | |||
| 219 | /* | ||
| 220 | * Fast path: look the stack trace up without locking. | ||
| 221 | * The smp_load_acquire() here pairs with smp_store_release() to | ||
| 222 | * |bucket| below. | ||
| 223 | */ | ||
| 224 | found = find_stack(smp_load_acquire(bucket), trace->entries, | ||
| 225 | trace->nr_entries, hash); | ||
| 226 | if (found) | ||
| 227 | goto exit; | ||
| 228 | |||
| 229 | /* | ||
| 230 | * Check if the current or the next stack slab need to be initialized. | ||
| 231 | * If so, allocate the memory - we won't be able to do that under the | ||
| 232 | * lock. | ||
| 233 | * | ||
| 234 | * The smp_load_acquire() here pairs with smp_store_release() to | ||
| 235 | * |next_slab_inited| in depot_alloc_stack() and init_stack_slab(). | ||
| 236 | */ | ||
| 237 | if (unlikely(!smp_load_acquire(&next_slab_inited))) { | ||
| 238 | /* | ||
| 239 | * Zero out zone modifiers, as we don't have specific zone | ||
| 240 | * requirements. Keep the flags related to allocation in atomic | ||
| 241 | * contexts and I/O. | ||
| 242 | */ | ||
| 243 | alloc_flags &= ~GFP_ZONEMASK; | ||
| 244 | alloc_flags &= (GFP_ATOMIC | GFP_KERNEL); | ||
| 245 | page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER); | ||
| 246 | if (page) | ||
| 247 | prealloc = page_address(page); | ||
| 248 | } | ||
| 249 | |||
| 250 | spin_lock_irqsave(&depot_lock, flags); | ||
| 251 | |||
| 252 | found = find_stack(*bucket, trace->entries, trace->nr_entries, hash); | ||
| 253 | if (!found) { | ||
| 254 | struct stack_record *new = | ||
| 255 | depot_alloc_stack(trace->entries, trace->nr_entries, | ||
| 256 | hash, &prealloc, alloc_flags); | ||
| 257 | if (new) { | ||
| 258 | new->next = *bucket; | ||
| 259 | /* | ||
| 260 | * This smp_store_release() pairs with | ||
| 261 | * smp_load_acquire() from |bucket| above. | ||
| 262 | */ | ||
| 263 | smp_store_release(bucket, new); | ||
| 264 | found = new; | ||
| 265 | } | ||
| 266 | } else if (prealloc) { | ||
| 267 | /* | ||
| 268 | * We didn't need to store this stack trace, but let's keep | ||
| 269 | * the preallocated memory for the future. | ||
| 270 | */ | ||
| 271 | WARN_ON(!init_stack_slab(&prealloc)); | ||
| 272 | } | ||
| 273 | |||
| 274 | spin_unlock_irqrestore(&depot_lock, flags); | ||
| 275 | exit: | ||
| 276 | if (prealloc) { | ||
| 277 | /* Nobody used this memory, ok to free it. */ | ||
| 278 | free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER); | ||
| 279 | } | ||
| 280 | if (found) | ||
| 281 | retval = found->handle.handle; | ||
| 282 | fast_exit: | ||
| 283 | return retval; | ||
| 284 | } | ||
diff --git a/lib/test_kasan.c b/lib/test_kasan.c index c32f3b0048dc..82169fbf2453 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c | |||
| @@ -65,11 +65,34 @@ static noinline void __init kmalloc_node_oob_right(void) | |||
| 65 | kfree(ptr); | 65 | kfree(ptr); |
| 66 | } | 66 | } |
| 67 | 67 | ||
| 68 | static noinline void __init kmalloc_large_oob_right(void) | 68 | #ifdef CONFIG_SLUB |
| 69 | static noinline void __init kmalloc_pagealloc_oob_right(void) | ||
| 69 | { | 70 | { |
| 70 | char *ptr; | 71 | char *ptr; |
| 71 | size_t size = KMALLOC_MAX_CACHE_SIZE + 10; | 72 | size_t size = KMALLOC_MAX_CACHE_SIZE + 10; |
| 72 | 73 | ||
| 74 | /* Allocate a chunk that does not fit into a SLUB cache to trigger | ||
| 75 | * the page allocator fallback. | ||
| 76 | */ | ||
| 77 | pr_info("kmalloc pagealloc allocation: out-of-bounds to right\n"); | ||
| 78 | ptr = kmalloc(size, GFP_KERNEL); | ||
| 79 | if (!ptr) { | ||
| 80 | pr_err("Allocation failed\n"); | ||
| 81 | return; | ||
| 82 | } | ||
| 83 | |||
| 84 | ptr[size] = 0; | ||
| 85 | kfree(ptr); | ||
| 86 | } | ||
| 87 | #endif | ||
| 88 | |||
| 89 | static noinline void __init kmalloc_large_oob_right(void) | ||
| 90 | { | ||
| 91 | char *ptr; | ||
| 92 | size_t size = KMALLOC_MAX_CACHE_SIZE - 256; | ||
| 93 | /* Allocate a chunk that is large enough, but still fits into a slab | ||
| 94 | * and does not trigger the page allocator fallback in SLUB. | ||
| 95 | */ | ||
| 73 | pr_info("kmalloc large allocation: out-of-bounds to right\n"); | 96 | pr_info("kmalloc large allocation: out-of-bounds to right\n"); |
| 74 | ptr = kmalloc(size, GFP_KERNEL); | 97 | ptr = kmalloc(size, GFP_KERNEL); |
| 75 | if (!ptr) { | 98 | if (!ptr) { |
| @@ -271,6 +294,8 @@ static noinline void __init kmalloc_uaf2(void) | |||
| 271 | } | 294 | } |
| 272 | 295 | ||
| 273 | ptr1[40] = 'x'; | 296 | ptr1[40] = 'x'; |
| 297 | if (ptr1 == ptr2) | ||
| 298 | pr_err("Could not detect use-after-free: ptr1 == ptr2\n"); | ||
| 274 | kfree(ptr2); | 299 | kfree(ptr2); |
| 275 | } | 300 | } |
| 276 | 301 | ||
| @@ -324,6 +349,9 @@ static int __init kmalloc_tests_init(void) | |||
| 324 | kmalloc_oob_right(); | 349 | kmalloc_oob_right(); |
| 325 | kmalloc_oob_left(); | 350 | kmalloc_oob_left(); |
| 326 | kmalloc_node_oob_right(); | 351 | kmalloc_node_oob_right(); |
| 352 | #ifdef CONFIG_SLUB | ||
| 353 | kmalloc_pagealloc_oob_right(); | ||
| 354 | #endif | ||
| 327 | kmalloc_large_oob_right(); | 355 | kmalloc_large_oob_right(); |
| 328 | kmalloc_oob_krealloc_more(); | 356 | kmalloc_oob_krealloc_more(); |
| 329 | kmalloc_oob_krealloc_less(); | 357 | kmalloc_oob_krealloc_less(); |
