author		Ingo Molnar <mingo@elte.hu>	2011-02-14 05:55:18 -0500
committer	Ingo Molnar <mingo@elte.hu>	2011-02-14 05:55:18 -0500
commit		d2137d5af4259f50c19addb8246a186c9ffac325 (patch)
tree		2f7e309f9cf8ef2f2698532c226edda38021fe69 /mm/slub.c
parent		f005fe12b90c5b9fe180a09209a893e09affa8aa (diff)
parent		795abaf1e4e188c4171e3cd3dbb11a9fcacaf505 (diff)
Merge branch 'linus' into x86/bootmem
Conflicts:
	arch/x86/mm/numa_64.c

Merge reason: fix the conflict, update to latest -rc and pick up this
dependent fix from Yinghai:

  e6d2e2b2b1e1: memblock: don't adjust size in memblock_find_base()

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	89
1 file changed, 31 insertions(+), 58 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 981fb730aa04..e15aa7f193c9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -28,6 +28,8 @@
 #include <linux/math64.h>
 #include <linux/fault-inject.h>
 
+#include <trace/events/kmem.h>
+
 /*
  * Lock order:
  *   1. slab_lock(page)
@@ -1774,11 +1776,21 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
+{
+	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+	return ret;
+}
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
+
+void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
 {
-	return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	void *ret = kmalloc_order(size, flags, order);
+	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmalloc_order_trace);
 #endif
 
 #ifdef CONFIG_NUMA
@@ -1794,13 +1806,17 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 				    gfp_t gfpflags,
-				    int node)
+				    int node, size_t size)
 {
-	return slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+	trace_kmalloc_node(_RET_IP_, ret,
+			   size, s->size, gfpflags, node);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 #endif
 
@@ -1917,17 +1933,6 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-/* Figure out on which slab page the object resides */
-static struct page *get_object_page(const void *x)
-{
-	struct page *page = virt_to_head_page(x);
-
-	if (!PageSlab(page))
-		return NULL;
-
-	return page;
-}
-
 /*
  * Object placement in a slab is made very easy because we always start at
  * offset 0. If we tune the size of the object to the alignment then we can
@@ -2386,35 +2391,6 @@ error:
 }
 
 /*
- * Check if a given pointer is valid
- */
-int kmem_ptr_validate(struct kmem_cache *s, const void *object)
-{
-	struct page *page;
-
-	if (!kern_ptr_validate(object, s->size))
-		return 0;
-
-	page = get_object_page(object);
-
-	if (!page || s != page->slab)
-		/* No slab or wrong slab */
-		return 0;
-
-	if (!check_valid_pointer(s, page, object))
-		return 0;
-
-	/*
-	 * We could also check if the object is on the slabs freelist.
-	 * But this would be too expensive and it seems that the main
-	 * purpose of kmem_ptr_valid() is to check if the object belongs
-	 * to a certain slab.
-	 */
-	return 1;
-}
-EXPORT_SYMBOL(kmem_ptr_validate);
-
-/*
  * Determine the size of a slab object
  */
 unsigned int kmem_cache_size(struct kmem_cache *s)
@@ -3401,13 +3377,13 @@ static int validate_slab(struct kmem_cache *s, struct page *page,
 
 	for_each_free_object(p, s, page->freelist) {
 		set_bit(slab_index(p, s, addr), map);
-		if (!check_object(s, page, p, 0))
+		if (!check_object(s, page, p, SLUB_RED_INACTIVE))
 			return 0;
 	}
 
 	for_each_object(p, s, addr, page->objects)
 		if (!test_bit(slab_index(p, s, addr), map))
-			if (!check_object(s, page, p, 1))
+			if (!check_object(s, page, p, SLUB_RED_ACTIVE))
 				return 0;
 	return 1;
 }
@@ -3660,7 +3636,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
 		len += sprintf(buf + len, "%7ld ", l->count);
 
 		if (l->addr)
-			len += sprint_symbol(buf + len, (unsigned long)l->addr);
+			len += sprintf(buf + len, "%pS", (void *)l->addr);
 		else
 			len += sprintf(buf + len, "<not-available>");
 
@@ -3821,7 +3797,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		}
 	}
 
-	down_read(&slub_lock);
+	lock_memory_hotplug();
 #ifdef CONFIG_SLUB_DEBUG
 	if (flags & SO_ALL) {
 		for_each_node_state(node, N_NORMAL_MEMORY) {
@@ -3862,7 +3838,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 			x += sprintf(buf + x, " N%d=%lu",
 					node, nodes[node]);
 #endif
-	up_read(&slub_lock);
+	unlock_memory_hotplug();
 	kfree(nodes);
 	return x + sprintf(buf + x, "\n");
 }
@@ -3970,12 +3946,9 @@ SLAB_ATTR(min_partial);
 
 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 {
-	if (s->ctor) {
-		int n = sprint_symbol(buf, (unsigned long)s->ctor);
-
-		return n + sprintf(buf + n, "\n");
-	}
-	return 0;
+	if (!s->ctor)
+		return 0;
+	return sprintf(buf, "%pS\n", s->ctor);
 }
 SLAB_ATTR_RO(ctor);
 
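
Note (not part of the patch): a minimal sketch of how a CONFIG_TRACING-aware caller might use the renamed kmem_cache_alloc_trace() hook shown in the diff above. Only the *_trace signatures come from this diff; the wrapper function, its name, and the GFP_KERNEL choice below are illustrative assumptions.

#include <linux/slab.h>

/*
 * Illustrative wrapper only; it is not part of mm/slub.c. With
 * CONFIG_TRACING enabled, kmem_cache_alloc_trace() allocates from the
 * cache and emits a kmalloc trace event carrying the caller-requested
 * size (per the diff above); without tracing, fall back to the plain
 * allocation entry point.
 */
static void *alloc_traced_object(struct kmem_cache *cache, size_t requested)
{
#ifdef CONFIG_TRACING
	return kmem_cache_alloc_trace(cache, GFP_KERNEL, requested);
#else
	return kmem_cache_alloc(cache, GFP_KERNEL);
#endif
}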