Diffstat (limited to 'mm/slob.c')
 mm/slob.c | 33 +++++++++++++++++++++++++++------
 1 file changed, 27 insertions(+), 6 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index 45d4ca79933a..dd47d16d57b6 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -194,7 +194,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
 	void *page;
 
 #ifdef CONFIG_NUMA
-	if (node != -1)
+	if (node != NUMA_NO_NODE)
 		page = alloc_pages_exact_node(node, gfp, order);
 	else
 #endif
@@ -290,7 +290,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		 * If there's a node specification, search for a partial
 		 * page with a matching node id in the freelist.
 		 */
-		if (node != -1 && page_to_nid(sp) != node)
+		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
 			continue;
 #endif
 		/* Enough room on this page? */
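
Both hunks above replace the bare -1 with NUMA_NO_NODE, the named constant from <linux/numa.h> (itself defined as -1), so the node-preference checks read as intent rather than as a magic number. A minimal sketch of the idiom, mirroring slob_new_pages(); pick_node_pages is a hypothetical name, while alloc_pages_exact_node(), alloc_pages() and page_address() are the real page-allocator calls already used in this file:

	#include <linux/gfp.h>
	#include <linux/numa.h>

	static void *pick_node_pages(gfp_t gfp, int order, int node)
	{
		struct page *page;

	#ifdef CONFIG_NUMA
		if (node != NUMA_NO_NODE)	/* caller asked for a specific node */
			page = alloc_pages_exact_node(node, gfp, order);
		else
	#endif
			page = alloc_pages(gfp, order);	/* no preference: local node */

		return page ? page_address(page) : NULL;
	}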
@@ -425,7 +425,8 @@ out:
  * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
  */
 
-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+static __always_inline void *
+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 {
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
@@ -446,7 +447,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 		*m = size;
 		ret = (void *)m + align;
 
-		trace_kmalloc_node(_RET_IP_, ret,
+		trace_kmalloc_node(caller, ret,
 				   size, size + align, gfp, node);
 	} else {
 		unsigned int order = get_order(size);
@@ -460,15 +461,35 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 			page->private = size;
 		}
 
-		trace_kmalloc_node(_RET_IP_, ret,
+		trace_kmalloc_node(caller, ret,
 				   size, PAGE_SIZE << order, gfp, node);
 	}
 
 	kmemleak_alloc(ret, size, 1, gfp);
 	return ret;
 }
+
+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+{
+	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+}
 EXPORT_SYMBOL(__kmalloc_node);
 
+#ifdef CONFIG_TRACING
+void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
+{
+	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
+}
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
+				  int node, unsigned long caller)
+{
+	return __do_kmalloc_node(size, gfp, node, caller);
+}
+#endif
+#endif
+
 void kfree(const void *block)
 {
 	struct page *sp;
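
The hunk above is the standard caller-tracking refactor: the body of __kmalloc_node() moves into a static __always_inline worker that receives the call-site address explicitly, and thin wrappers pass either their own _RET_IP_ or an address forwarded from higher up the stack, so trace events credit the real caller instead of a common internal frame. Marking the worker __always_inline keeps the extra indirection free on the hot allocation path. A minimal sketch of the pattern under hypothetical names (do_alloc, backend_alloc and record_site are illustrative; __always_inline and _RET_IP_ are the real kernel primitives):

	/* Worker: takes the call-site address as an explicit argument
	 * instead of computing it itself. */
	static __always_inline void *do_alloc(size_t size, unsigned long caller)
	{
		void *ret = backend_alloc(size);	/* hypothetical backend */

		record_site(caller, ret, size);		/* hypothetical trace hook */
		return ret;
	}

	/* Plain entry point: credit whoever called us. */
	void *my_alloc(size_t size)
	{
		return do_alloc(size, _RET_IP_);
	}

	/* *_track_caller entry point: credit an address forwarded by a
	 * wrapper further up, e.g. a kmalloc_track_caller()-style macro. */
	void *my_alloc_track_caller(size_t size, unsigned long caller)
	{
		return do_alloc(size, caller);
	}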
@@ -514,7 +535,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	struct kmem_cache *c;
 
 	c = slob_alloc(sizeof(struct kmem_cache),
-			GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
+			GFP_KERNEL, ARCH_KMALLOC_MINALIGN, NUMA_NO_NODE);
 
 	if (c) {
 		c->name = name;
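
With __kmalloc_track_caller() and __kmalloc_node_track_caller() in place, SLOB can back the generic kmalloc_track_caller() macro the way SLAB and SLUB already do; include/linux/slab.h maps it roughly as follows (exact #ifdef guards omitted):

	extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
	#define kmalloc_track_caller(size, flags) \
		__kmalloc_track_caller(size, flags, _RET_IP_)

Helpers such as kstrdup() use this so the allocation is charged to their caller rather than to the helper itself, e.g.:

	buf = kmalloc_track_caller(len, GFP_KERNEL);	/* call site = the helper's caller */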