 mm/slob.c | 24 ++++++++++--------------
 1 file changed, 10 insertions(+), 14 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index a08e4681fd0d..06a5ec70e728 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -28,9 +28,8 @@
  * from kmalloc are prepended with a 4-byte header with the kmalloc size.
  * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
  * alloc_pages() directly, allocating compound pages so the page order
- * does not have to be separately tracked, and also stores the exact
- * allocation size in page->private so that it can be used to accurately
- * provide ksize(). These objects are detected in kfree() because slob_page()
+ * does not have to be separately tracked.
+ * These objects are detected in kfree() because PageSlab()
  * is false for them.
  *
  * SLAB is emulated on top of SLOB by simply calling constructors and
@@ -455,11 +454,6 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 		if (likely(order))
 			gfp |= __GFP_COMP;
 		ret = slob_new_pages(gfp, order, node);
-		if (ret) {
-			struct page *page;
-			page = virt_to_page(ret);
-			page->private = size;
-		}
 
 		trace_kmalloc_node(caller, ret,
 				   size, PAGE_SIZE << order, gfp, node);
@@ -514,18 +508,20 @@ EXPORT_SYMBOL(kfree);
 size_t ksize(const void *block)
 {
 	struct page *sp;
+	int align;
+	unsigned int *m;
 
 	BUG_ON(!block);
 	if (unlikely(block == ZERO_SIZE_PTR))
 		return 0;
 
 	sp = virt_to_page(block);
-	if (PageSlab(sp)) {
-		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
-		unsigned int *m = (unsigned int *)(block - align);
-		return SLOB_UNITS(*m) * SLOB_UNIT;
-	} else
-		return sp->private;
+	if (unlikely(!PageSlab(sp)))
+		return PAGE_SIZE << compound_order(sp);
+
+	align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	m = (unsigned int *)(block - align);
+	return SLOB_UNITS(*m) * SLOB_UNIT;
 }
 EXPORT_SYMBOL(ksize);
 
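
For illustration only (not part of the patch): a minimal user-space C sketch of what the new ksize() path reports for page-backed SLOB allocations. With page->private no longer storing the exact request, ksize() returns the full span of the compound page, PAGE_SIZE << compound_order(), which is at least as large as the original request and is all the interface guarantees. The PAGE_SHIFT value and the request sizes below are assumptions chosen for the demo, not values taken from the patch.

/*
 * Sketch of the rounding now visible through ksize() for large SLOB
 * allocations: the reported size is the whole compound-page span
 * (PAGE_SIZE << order), not the exact size passed to kmalloc().
 * PAGE_SHIFT of 12 and the request sizes are assumptions for the demo.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Smallest page order whose span covers size; simplified get_order(). */
static unsigned int order_for(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long sizes[] = { 4096, 5000, 10000, 70000 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned int order = order_for(sizes[i]);

		printf("request %6lu -> reported %7lu (order %u)\n",
		       sizes[i], PAGE_SIZE << order, order);
	}
	return 0;
}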