Diffstat (limited to 'mm/slob.c')
-rw-r--r--  mm/slob.c | 46
1 file changed, 13 insertions(+), 33 deletions(-)
@@ -28,9 +28,8 @@
  * from kmalloc are prepended with a 4-byte header with the kmalloc size.
  * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
  * alloc_pages() directly, allocating compound pages so the page order
- * does not have to be separately tracked, and also stores the exact
- * allocation size in page->private so that it can be used to accurately
- * provide ksize(). These objects are detected in kfree() because slob_page()
+ * does not have to be separately tracked.
+ * These objects are detected in kfree() because PageSlab()
  * is false for them.
  *
  * SLAB is emulated on top of SLOB by simply calling constructors and
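The 4-byte header layout this comment describes can be illustrated with a small userspace sketch (a toy model, not kernel code: toy_kmalloc, toy_kfree and MINALIGN are made-up names, with MINALIGN standing in for max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN)):

#include <stdlib.h>

#define MINALIGN 8				/* assumed minimum alignment */

static void *toy_kmalloc(unsigned int size)
{
	char *base = malloc(MINALIGN + size);	/* room for header + object */

	if (!base)
		return NULL;
	*(unsigned int *)base = size;		/* 4-byte size header */
	return base + MINALIGN;			/* object starts past the header */
}

static void toy_kfree(void *block)
{
	unsigned int *m = (unsigned int *)((char *)block - MINALIGN);

	/* *m is the original request size, recovered from the header */
	free(m);
}

This mirrors what kfree() does further down: step back by the minimum alignment, read the stored size, and hand the block back to the allocator, with no external bookkeeping needed.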
@@ -124,7 +123,6 @@ static inline void clear_slob_page_free(struct page *sp)
 
 #define SLOB_UNIT sizeof(slob_t)
 #define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
-#define SLOB_ALIGN L1_CACHE_BYTES
 
 /*
  * struct slob_rcu is inserted at the tail of allocated slob blocks, which
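SLOB_UNITS() rounds a byte count up to whole slob_t units. As a worked example (assuming SLOB_UNIT == 2, as on common configurations where slob_t wraps a 16-bit index; the real value is sizeof(slob_t) and is configuration-dependent):

	SLOB_UNITS(30) == (30 + 2 - 1) / 2 == 15 units == 30 bytes
	SLOB_UNITS(31) == (31 + 2 - 1) / 2 == 16 units == 32 bytes

This rounding is why ksize(), further down, can legitimately report more usable space than was requested.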
@@ -455,11 +453,6 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 		if (likely(order))
 			gfp |= __GFP_COMP;
 		ret = slob_new_pages(gfp, order, node);
-		if (ret) {
-			struct page *page;
-			page = virt_to_page(ret);
-			page->private = size;
-		}
 
 		trace_kmalloc_node(caller, ret,
 				   size, PAGE_SIZE << order, gfp, node);
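The deleted block stored the request size in page->private purely so ksize() could report it later. Because the allocation just above sets __GFP_COMP for any order > 0, the result is a compound page whose order is recoverable afterwards, so the size can be recomputed instead of stored. A sketch of the idea (this helper is hypothetical, not part of the patch; it condenses what the new ksize() below does for non-slab pages):

static size_t large_kmalloc_size(const void *block)
{
	struct page *sp = virt_to_page(block);

	/* order-0 pages give PAGE_SIZE; compound pages scale by order */
	return PAGE_SIZE << compound_order(sp);
}

Note the small semantic shift this implies: ksize() now reports the full rounded-up page allocation rather than the exact requested size, which is consistent with ksize() meaning "usable size".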
@@ -506,7 +499,7 @@ void kfree(const void *block)
 		unsigned int *m = (unsigned int *)(block - align);
 		slob_free(m, *m + align);
 	} else
-		put_page(sp);
+		__free_pages(sp, compound_order(sp));
 }
 EXPORT_SYMBOL(kfree);
 
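With the size no longer stashed in page->private, the free side relies on page metadata alone: __free_pages(sp, compound_order(sp)) returns the whole allocation using the order recorded in the compound page, making the free path symmetric with the alloc_pages()-based allocation path above. A condensed sketch of the pairing (toy names; gfp-flag and NUMA handling omitted, kernel context assumed):

static void *toy_large_alloc(size_t size, gfp_t gfp)
{
	unsigned int order = get_order(size);
	struct page *page;

	if (order)
		gfp |= __GFP_COMP;	/* so the order is recoverable later */
	page = alloc_pages(gfp, order);
	return page ? page_address(page) : NULL;
}

static void toy_large_free(void *block)
{
	struct page *sp = virt_to_page(block);

	__free_pages(sp, compound_order(sp));	/* matches the alloc order */
}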
@@ -514,37 +507,30 @@ EXPORT_SYMBOL(kfree);
 size_t ksize(const void *block)
 {
 	struct page *sp;
+	int align;
+	unsigned int *m;
 
 	BUG_ON(!block);
 	if (unlikely(block == ZERO_SIZE_PTR))
 		return 0;
 
 	sp = virt_to_page(block);
-	if (PageSlab(sp)) {
-		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
-		unsigned int *m = (unsigned int *)(block - align);
-		return SLOB_UNITS(*m) * SLOB_UNIT;
-	} else
-		return sp->private;
+	if (unlikely(!PageSlab(sp)))
+		return PAGE_SIZE << compound_order(sp);
+
+	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	m = (unsigned int *)(block - align);
+	return SLOB_UNITS(*m) * SLOB_UNIT;
 }
 EXPORT_SYMBOL(ksize);
 
 int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
 {
-	size_t align = c->size;
-
 	if (flags & SLAB_DESTROY_BY_RCU) {
 		/* leave room for rcu footer at the end of object */
 		c->size += sizeof(struct slob_rcu);
 	}
 	c->flags = flags;
-	/* ignore alignment unless it's forced */
-	c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
-	if (c->align < ARCH_SLAB_MINALIGN)
-		c->align = ARCH_SLAB_MINALIGN;
-	if (c->align < align)
-		c->align = align;
-
 	return 0;
 }
 
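Two things change in this hunk. First, ksize() keeps its rounding behaviour for slab-managed objects; a quick usage sketch (standard kernel API only, kernel context assumed, values illustrative):

static void ksize_demo(void)
{
	void *p = kmalloc(30, GFP_KERNEL);

	if (p) {
		/* reports the usable size, rounded up to whole SLOB
		 * units, so it may exceed the 30 bytes requested */
		pr_info("usable size: %zu\n", ksize(p));
		kfree(p);
	}
}

Second, __kmem_cache_create() drops its private alignment computation (and with it the last user of SLOB_ALIGN, removed above); the diff implies c->align is now expected to arrive already computed from the common slab layer, since kmem_cache_alloc_node() below still consumes it unchanged.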
@@ -558,12 +544,12 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 
 	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node);
-		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
 					    SLOB_UNITS(c->size) * SLOB_UNIT,
 					    flags, node);
 	} else {
 		b = slob_new_pages(flags, get_order(c->size), node);
-		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
 					    PAGE_SIZE << get_order(c->size),
 					    flags, node);
 	}
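The tracepoints now report c->object_size (what the cache user asked for) instead of c->size (which, per __kmem_cache_create() above, may have grown by the RCU footer). An illustrative cache showing how the two fields diverge (hypothetical cache name; the field values follow from the code above):

static void trace_size_demo(void)
{
	struct kmem_cache *c = kmem_cache_create("demo_cache", 64, 0,
						 SLAB_DESTROY_BY_RCU, NULL);

	/* On SLOB: c->object_size == 64, while
	 * c->size == 64 + sizeof(struct slob_rcu) for the rcu footer,
	 * so tracing object_size reports the caller-visible size. */
	if (c)
		kmem_cache_destroy(c);
}

Note that the second size argument to trace_kmem_cache_alloc_node() is still derived from c->size, so the actually allocated size, footer included, remains visible in the trace.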
@@ -608,12 +594,6 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-unsigned int kmem_cache_size(struct kmem_cache *c)
-{
-	return c->size;
-}
-EXPORT_SYMBOL(kmem_cache_size);
-
 int __kmem_cache_shutdown(struct kmem_cache *c)
 {
 	/* No way to check for remaining objects */
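Removing kmem_cache_size() here is presumably part of consolidating identical per-allocator helpers into shared slab code; a single common definition along these lines (a sketch of the assumed replacement, not shown in this diff) would cover all allocators, and returning object_size rather than size keeps allocator metadata such as the SLOB RCU footer out of the reported value:

unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}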