author    Ezequiel Garcia <elezegarcia@gmail.com>  2012-10-19 08:33:10 -0400
committer Pekka Enberg <penberg@kernel.org>        2012-10-31 02:50:43 -0400
commit    999d8795d438d396936811b185428d70b7b7de6d
tree      b63098f47faa341ec81cb9f07a47b986898d09d9 /mm/slob.c
parent    1b4f59e356cc94929305bd107b7f38eec62715ad
mm/slob: Drop usage of page->private for storing page-sized allocations
This field was being used to store the allocation size so it could be
retrieved by ksize(). However, it is bad practice to leave a page
unmarked as a slab page and then use its fields for special purposes.

There is no need to store the allocated size: ksize() can simply
return PAGE_SIZE << compound_order(page).

Cc: Pekka Enberg <penberg@kernel.org>
Cc: Matt Mackall <mpm@selenic.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Ezequiel Garcia <elezegarcia@gmail.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
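Since __do_kmalloc_node() already backs such requests with a compound page of get_order(size) pages (setting __GFP_COMP whenever the order is non-zero), the usable size is fully determined by the page order. Below is a minimal userspace sketch of that arithmetic, assuming 4K pages; toy_get_order() is a hypothetical stand-in for the kernel's get_order() rounding, not the kernel code itself.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* stand-in for the kernel's get_order(): smallest order whose
 * 2^order pages cover the request */
static int toy_get_order(unsigned long size)
{
	int order = 0;
	unsigned long n = (size - 1) >> PAGE_SHIFT;

	while (n) {
		order++;
		n >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long reqs[] = { 4096, 5000, 8192, 20000 };

	for (int i = 0; i < 4; i++) {
		int order = toy_get_order(reqs[i]);

		/* what ksize() now reports for such an allocation */
		printf("kmalloc(%5lu) -> order %d -> ksize %lu\n",
		       reqs[i], order, PAGE_SIZE << order);
	}
	return 0;
}

A 5000-byte request, for instance, lands on an order-1 compound page, so ksize() now reports 8192 rather than the 5000 previously stashed in page->private.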
Diffstat (limited to 'mm/slob.c')
-rw-r--r--	mm/slob.c	24
1 file changed, 10 insertions(+), 14 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index a08e4681fd0d..06a5ec70e728 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -28,9 +28,8 @@
  * from kmalloc are prepended with a 4-byte header with the kmalloc size.
  * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
  * alloc_pages() directly, allocating compound pages so the page order
- * does not have to be separately tracked, and also stores the exact
- * allocation size in page->private so that it can be used to accurately
- * provide ksize(). These objects are detected in kfree() because slob_page()
+ * does not have to be separately tracked.
+ * These objects are detected in kfree() because PageSlab()
  * is false for them.
  *
  * SLAB is emulated on top of SLOB by simply calling constructors and
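The 4-byte size header mentioned in this comment is what the slab-page branch of ksize() reads back. A toy userspace sketch of the scheme (hypothetical toy_kmalloc()/toy_ksize() helpers; MINALIGN stands in for max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN)):

#include <stdio.h>
#include <stdlib.h>

#define MINALIGN	8	/* illustrative alignment, not the kernel's value */

static void *toy_kmalloc(unsigned int size)
{
	char *base = malloc(size + MINALIGN);

	if (!base)
		return NULL;
	*(unsigned int *)base = size;	/* prepended size header */
	return base + MINALIGN;		/* caller sees the aligned payload */
}

static unsigned int toy_ksize(const void *block)
{
	/* step back over the header, as ksize() does with block - align */
	return *(const unsigned int *)((const char *)block - MINALIGN);
}

int main(void)
{
	void *p = toy_kmalloc(100);

	if (!p)
		return 1;
	printf("stored size: %u\n", toy_ksize(p));	/* prints 100 */
	free((char *)p - MINALIGN);
	return 0;
}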
@@ -455,11 +454,6 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 		if (likely(order))
 			gfp |= __GFP_COMP;
 		ret = slob_new_pages(gfp, order, node);
-		if (ret) {
-			struct page *page;
-			page = virt_to_page(ret);
-			page->private = size;
-		}
 
 		trace_kmalloc_node(caller, ret,
 				   size, PAGE_SIZE << order, gfp, node);
@@ -514,18 +508,20 @@ EXPORT_SYMBOL(kfree);
 size_t ksize(const void *block)
 {
 	struct page *sp;
+	int align;
+	unsigned int *m;
 
 	BUG_ON(!block);
 	if (unlikely(block == ZERO_SIZE_PTR))
 		return 0;
 
 	sp = virt_to_page(block);
-	if (PageSlab(sp)) {
-		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
-		unsigned int *m = (unsigned int *)(block - align);
-		return SLOB_UNITS(*m) * SLOB_UNIT;
-	} else
-		return sp->private;
+	if (unlikely(!PageSlab(sp)))
+		return PAGE_SIZE << compound_order(sp);
+
+	align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	m = (unsigned int *)(block - align);
+	return SLOB_UNITS(*m) * SLOB_UNIT;
 }
 EXPORT_SYMBOL(ksize);
 
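One behavioral note on the slab-page branch: ksize() reports the stored byte size rounded up to whole SLOB units, not the raw request. A quick check of that rounding, assuming the common 2-byte slob_block (s16 units with 4K pages):

#include <stdio.h>

#define SLOB_UNIT	2UL	/* sizeof(struct slob_block) on typical configs */
#define SLOB_UNITS(size)	(((size) + SLOB_UNIT - 1) / SLOB_UNIT)

int main(void)
{
	/* odd sizes round up to the next whole unit: 1->2, 3->4, 5->6 */
	for (unsigned long size = 1; size <= 5; size++)
		printf("stored %lu -> ksize %lu\n",
		       size, SLOB_UNITS(size) * SLOB_UNIT);
	return 0;
}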