-rw-r--r--   include/linux/slab.h      | 156
-rw-r--r--   include/linux/slab_def.h  | 106
-rw-r--r--   include/linux/slob_def.h  |  31
-rw-r--r--   include/linux/slub_def.h  |  97
-rw-r--r--   mm/slab_common.c          |  10
-rw-r--r--   mm/slob.c                 |  28
-rw-r--r--   mm/slub.c                 |   8
7 files changed, 158 insertions, 278 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 6c5cc0ea8713..386af639dcaa 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -4,6 +4,8 @@
  * (C) SGI 2006, Christoph Lameter
  * Cleaned up and restructured to ease the addition of alternative
  * implementations of SLAB allocators.
+ * (C) Linux Foundation 2008-2013
+ * Unified interface for all slab allocators
  */
 
 #ifndef _LINUX_SLAB_H
@@ -94,6 +96,7 @@
 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
 				(unsigned long)ZERO_SIZE_PTR)
 
+#include <linux/kmemleak.h>
 
 struct mem_cgroup;
 /*
@@ -289,6 +292,57 @@ static __always_inline int kmalloc_index(size_t size)
 }
 #endif /* !CONFIG_SLOB */
 
+void *__kmalloc(size_t size, gfp_t flags);
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+#else
+static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	return __kmalloc(size, flags);
+}
+
+static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
+{
+	return kmem_cache_alloc(s, flags);
+}
+#endif
+
+#ifdef CONFIG_TRACING
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+
+#ifdef CONFIG_NUMA
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
+					 gfp_t gfpflags,
+					 int node, size_t size);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+			    gfp_t gfpflags,
+			    int node, size_t size)
+{
+	return kmem_cache_alloc_trace(s, gfpflags, size);
+}
+#endif /* CONFIG_NUMA */
+
+#else /* CONFIG_TRACING */
+static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
+		gfp_t flags, size_t size)
+{
+	return kmem_cache_alloc(s, flags);
+}
+
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+			    gfp_t gfpflags,
+			    int node, size_t size)
+{
+	return kmem_cache_alloc_node(s, gfpflags, node);
+}
+#endif /* CONFIG_TRACING */
+
 #ifdef CONFIG_SLAB
 #include <linux/slab_def.h>
 #endif
@@ -297,9 +351,60 @@ static __always_inline int kmalloc_index(size_t size)
 #include <linux/slub_def.h>
 #endif
 
-#ifdef CONFIG_SLOB
-#include <linux/slob_def.h>
+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+	void *ret;
+
+	flags |= (__GFP_COMP | __GFP_KMEMCG);
+	ret = (void *) __get_free_pages(flags, order);
+	kmemleak_alloc(ret, size, 1, flags);
+	return ret;
+}
+
+#ifdef CONFIG_TRACING
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+#else
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+	return kmalloc_order(size, flags, order);
+}
+#endif
+
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+	unsigned int order = get_order(size);
+	return kmalloc_order_trace(size, flags, order);
+}
+
+/**
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kcalloc).
+ *
+ * kmalloc is the normal method of allocating memory
+ * for objects smaller than page size in the kernel.
+ */
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+	if (__builtin_constant_p(size)) {
+		if (size > KMALLOC_MAX_CACHE_SIZE)
+			return kmalloc_large(size, flags);
+#ifndef CONFIG_SLOB
+		if (!(flags & GFP_DMA)) {
+			int index = kmalloc_index(size);
+
+			if (!index)
+				return ZERO_SIZE_PTR;
+
+			return kmem_cache_alloc_trace(kmalloc_caches[index],
+					flags, size);
+		}
 #endif
+	}
+	return __kmalloc(size, flags);
+}
 
 /*
  * Determine size used for the nth kmalloc cache.
@@ -321,6 +426,23 @@ static __always_inline int kmalloc_size(int n)
 	return 0;
 }
 
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+#ifndef CONFIG_SLOB
+	if (__builtin_constant_p(size) &&
+		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & SLAB_CACHE_DMA)) {
+		int i = kmalloc_index(size);
+
+		if (!i)
+			return ZERO_SIZE_PTR;
+
+		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
+						flags, node, size);
+	}
+#endif
+	return __kmalloc_node(size, flags, node);
+}
+
 /*
  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
  * Intended for arches that get misalignment faults even for 64 bit integer
@@ -451,36 +573,6 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 	return kmalloc_array(n, size, flags | __GFP_ZERO);
 }
 
-#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
-/**
- * kmalloc_node - allocate memory from a specific node
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kmalloc).
- * @node: node to allocate from.
- *
- * kmalloc() for non-local nodes, used to allocate from a specific node
- * if available. Equivalent to kmalloc() in the non-NUMA single-node
- * case.
- */
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return kmalloc(size, flags);
-}
-
-static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return __kmalloc(size, flags);
-}
-
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-
-static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
-	gfp_t flags, int node)
-{
-	return kmem_cache_alloc(cachep, flags);
-}
-#endif /* !CONFIG_NUMA && !CONFIG_SLOB */
-
 /*
  * kmalloc_track_caller is a special version of kmalloc that records the
  * calling function of the routine calling it for slab leak tracking instead
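For illustration only (not part of the patch): with the kmalloc() fast path now shared in <linux/slab.h>, a compile-time-constant size folds to a kmalloc cache lookup, while a runtime size falls back to __kmalloc(). A minimal sketch of a hypothetical caller (struct foo and alloc_example() are made up for this example):

/* Hypothetical caller, for illustration only -- not part of this patch. */
struct foo {
	int id;
	char name[48];
};

static struct foo *alloc_example(size_t extra)
{
	/*
	 * sizeof(struct foo) is a compile-time constant no larger than
	 * KMALLOC_MAX_CACHE_SIZE, so on SLAB/SLUB this resolves to
	 * kmem_cache_alloc_trace(kmalloc_caches[kmalloc_index(sizeof(*f))], ...).
	 */
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

	/* 'extra' is not a constant, so this call always goes through __kmalloc(). */
	void *scratch = kmalloc(extra, GFP_KERNEL);

	kfree(scratch);
	return f;
}

On SLOB the same call sites still compile, but the #ifndef CONFIG_SLOB fast path is compiled out and both calls reduce to __kmalloc().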
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index cd401580bdd3..e9346b4f1ef4 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -3,20 +3,6 @@
 
 /*
  * Definitions unique to the original Linux SLAB allocator.
- *
- * What we provide here is a way to optimize the frequent kmalloc
- * calls in the kernel by selecting the appropriate general cache
- * if kmalloc was called with a size that can be established at
- * compile time.
- */
-
-#include <linux/init.h>
-#include <linux/compiler.h>
-
-/*
- * struct kmem_cache
- *
- * manages a cache.
  */
 
 struct kmem_cache {
@@ -102,96 +88,4 @@ struct kmem_cache {
  */
 };
 
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
-#else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
-{
-	return kmem_cache_alloc(cachep, flags);
-}
-#endif
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
-	struct kmem_cache *cachep;
-	void *ret;
-
-	if (__builtin_constant_p(size)) {
-		int i;
-
-		if (!size)
-			return ZERO_SIZE_PTR;
-
-		if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
-			return NULL;
-
-		i = kmalloc_index(size);
-
-#ifdef CONFIG_ZONE_DMA
-		if (flags & GFP_DMA)
-			cachep = kmalloc_dma_caches[i];
-		else
-#endif
-			cachep = kmalloc_caches[i];
-
-		ret = kmem_cache_alloc_trace(cachep, flags, size);
-
-		return ret;
-	}
-	return __kmalloc(size, flags);
-}
-
-#ifdef CONFIG_NUMA
-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
-extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
-					 gfp_t flags,
-					 int nodeid,
-					 size_t size);
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
-			    gfp_t flags,
-			    int nodeid,
-			    size_t size)
-{
-	return kmem_cache_alloc_node(cachep, flags, nodeid);
-}
-#endif
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	struct kmem_cache *cachep;
-
-	if (__builtin_constant_p(size)) {
-		int i;
-
-		if (!size)
-			return ZERO_SIZE_PTR;
-
-		if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
-			return NULL;
-
-		i = kmalloc_index(size);
-
-#ifdef CONFIG_ZONE_DMA
-		if (flags & GFP_DMA)
-			cachep = kmalloc_dma_caches[i];
-		else
-#endif
-			cachep = kmalloc_caches[i];
-
-		return kmem_cache_alloc_node_trace(cachep, flags, node, size);
-	}
-	return __kmalloc_node(size, flags, node);
-}
-
-#endif /* CONFIG_NUMA */
-
 #endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
deleted file mode 100644
index 095a5a4a8516..000000000000
--- a/include/linux/slob_def.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __LINUX_SLOB_DEF_H
-#define __LINUX_SLOB_DEF_H
-
-#include <linux/numa.h>
-
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
-					      gfp_t flags)
-{
-	return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
-}
-
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return __kmalloc_node(size, flags, node);
-}
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
-	return __kmalloc_node(size, flags, NUMA_NO_NODE);
-}
-
-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
-{
-	return kmalloc(size, flags);
-}
-
-#endif /* __LINUX_SLOB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 027276fa8713..901fb6eb7467 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -6,14 +6,8 @@
  *
  * (C) 2007 SGI, Christoph Lameter
  */
-#include <linux/types.h>
-#include <linux/gfp.h>
-#include <linux/bug.h>
-#include <linux/workqueue.h>
 #include <linux/kobject.h>
 
-#include <linux/kmemleak.h>
-
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
 	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
@@ -104,20 +98,6 @@ struct kmem_cache {
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
-
-static __always_inline void *
-kmalloc_order(size_t size, gfp_t flags, unsigned int order)
-{
-	void *ret;
-
-	flags |= (__GFP_COMP | __GFP_KMEMCG);
-	ret = (void *) __get_free_pages(flags, order);
-	kmemleak_alloc(ret, size, 1, flags);
-	return ret;
-}
-
 /**
  * Calling this on allocated memory will check that the memory
  * is expected to be in use, and print warnings if not.
@@ -131,81 +111,4 @@ static inline bool verify_mem_not_deleted(const void *x)
 }
 #endif
 
-#ifdef CONFIG_TRACING
-extern void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
-#else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
-{
-	return kmem_cache_alloc(s, gfpflags);
-}
-
-static __always_inline void *
-kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
-	return kmalloc_order(size, flags, order);
-}
-#endif
-
-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
-{
-	unsigned int order = get_order(size);
-	return kmalloc_order_trace(size, flags, order);
-}
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size)) {
-		if (size > KMALLOC_MAX_CACHE_SIZE)
-			return kmalloc_large(size, flags);
-
-		if (!(flags & GFP_DMA)) {
-			int index = kmalloc_index(size);
-
-			if (!index)
-				return ZERO_SIZE_PTR;
-
-			return kmem_cache_alloc_trace(kmalloc_caches[index],
-					flags, size);
-		}
-	}
-	return __kmalloc(size, flags);
-}
-
-#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
-					 gfp_t gfpflags,
-					 int node, size_t size);
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
-			    gfp_t gfpflags,
-			    int node, size_t size)
-{
-	return kmem_cache_alloc_node(s, gfpflags, node);
-}
-#endif
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	if (__builtin_constant_p(size) &&
-		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
-		int index = kmalloc_index(size);
-
-		if (!index)
-			return ZERO_SIZE_PTR;
-
-		return kmem_cache_alloc_node_trace(kmalloc_caches[index],
-				flags, node, size);
-	}
-	return __kmalloc_node(size, flags, node);
-}
-#endif
-
 #endif /* _LINUX_SLUB_DEF_H */
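For illustration only (not part of the patch): with the SLUB-local copies removed, NUMA-aware callers also go through the common kmalloc_node() in <linux/slab.h>. A small sketch of a hypothetical per-node allocation (alloc_on_node() is made up for this example):

/* Hypothetical per-node allocation, for illustration only. */
static void *alloc_on_node(int nid)
{
	/*
	 * 256 is a compile-time constant within KMALLOC_MAX_CACHE_SIZE, so
	 * the common kmalloc_node() resolves it to
	 * kmem_cache_alloc_node_trace(kmalloc_caches[kmalloc_index(256)],
	 *				GFP_KERNEL, nid, 256);
	 * non-constant sizes fall back to __kmalloc_node().
	 */
	return kmalloc_node(256, GFP_KERNEL, nid);
}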
diff --git a/mm/slab_common.c b/mm/slab_common.c
index f0410eb61741..a3443278ce3a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -19,6 +19,7 @@
 #include <asm/tlbflush.h>
 #include <asm/page.h>
 #include <linux/memcontrol.h>
+#include <trace/events/kmem.h>
 
 #include "slab.h"
 
@@ -495,6 +496,15 @@ void __init create_kmalloc_caches(unsigned long flags)
 }
 #endif /* !CONFIG_SLOB */
 
+#ifdef CONFIG_TRACING
+void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+	void *ret = kmalloc_order(size, flags, order);
+	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
+	return ret;
+}
+EXPORT_SYMBOL(kmalloc_order_trace);
+#endif
 
 #ifdef CONFIG_SLABINFO
 
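For illustration only (not part of the patch): kmalloc_order_trace() now lives in mm/slab_common.c for all allocators, and constant sizes above KMALLOC_MAX_CACHE_SIZE route through the common kmalloc_large() path. A rough sketch of the resulting call chain (the 64 KiB figure assumes SLUB with 4 KiB pages, where KMALLOC_MAX_CACHE_SIZE is two pages; large_alloc_example() is made up):

/* Hypothetical large allocation, for illustration only. */
static void *large_alloc_example(void)
{
	/*
	 * Expands roughly to:
	 *   kmalloc_large(64 * 1024, GFP_KERNEL)
	 *     -> kmalloc_order_trace(64 * 1024, GFP_KERNEL, get_order(64 * 1024))  (order 4)
	 *        -> kmalloc_order(): __get_free_pages(GFP_KERNEL | __GFP_COMP | __GFP_KMEMCG, 4)
	 *        -> trace_kmalloc() event when CONFIG_TRACING is enabled
	 */
	return kmalloc(64 * 1024, GFP_KERNEL);
}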
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -462,11 +462,11 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 	return ret;
 }
 
-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+void *__kmalloc(size_t size, gfp_t gfp)
 {
-	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
 }
-EXPORT_SYMBOL(__kmalloc_node);
+EXPORT_SYMBOL(__kmalloc);
 
 #ifdef CONFIG_TRACING
 void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
@@ -534,7 +534,7 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
 	return 0;
 }
 
-void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
+void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
 
@@ -560,7 +560,27 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
 	return b;
 }
+EXPORT_SYMBOL(slob_alloc_node);
+
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+	return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
+}
+EXPORT_SYMBOL(kmem_cache_alloc);
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+{
+	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+}
+EXPORT_SYMBOL(__kmalloc_node);
+
+void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
+{
+	return slob_alloc_node(cachep, gfp, node);
+}
 EXPORT_SYMBOL(kmem_cache_alloc_node);
+#endif
 
 static void __kmem_cache_free(void *b, int size)
 {
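For illustration only (not part of the patch): on SLOB the cache entry points are now out-of-line functions in mm/slob.c rather than header inlines, so existing kmem_cache users see no API change. A small sketch of a hypothetical caller (slob_cache_example() and its arguments are made up; the cache is assumed to come from kmem_cache_create() elsewhere):

/* Hypothetical kmem_cache user, for illustration only. */
static void slob_cache_example(struct kmem_cache *my_cache, int nid)
{
	/* Unchanged API: on SLOB this now calls slob_alloc_node(..., NUMA_NO_NODE). */
	void *obj = kmem_cache_alloc(my_cache, GFP_KERNEL);

	/* With CONFIG_NUMA, this maps to slob_alloc_node(..., nid). */
	void *nobj = kmem_cache_alloc_node(my_cache, GFP_KERNEL, nid);

	kmem_cache_free(my_cache, nobj);
	kmem_cache_free(my_cache, obj);
}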
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2450,14 +2450,6 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
-
-void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
-	void *ret = kmalloc_order(size, flags, order);
-	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
-	return ret;
-}
-EXPORT_SYMBOL(kmalloc_order_trace);
 #endif
 
 #ifdef CONFIG_NUMA
