Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 77
1 file changed, 27 insertions(+), 50 deletions(-)
@@ -28,6 +28,8 @@
 #include <linux/math64.h>
 #include <linux/fault-inject.h>
 
+#include <trace/events/kmem.h>
+
 /*
  * Lock order:
  *   1. slab_lock(page)
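Note: the new include makes the kmem tracepoints visible to slub.c so the functions below can call them out of line. For orientation, the two tracepoints used in this patch take roughly the following arguments (paraphrased from include/trace/events/kmem.h of this era; illustrative, not authoritative):

    trace_kmalloc(call_site, ptr, bytes_req, bytes_alloc, gfp_flags);
    trace_kmalloc_node(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node);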
@@ -1774,11 +1776,21 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
+{
+        void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+        trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+        return ret;
+}
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
+
+void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
 {
-        return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+        void *ret = kmalloc_order(size, flags, order);
+        trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
+        return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmalloc_order_trace);
 #endif
 
 #ifdef CONFIG_NUMA
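Note: the rename from kmem_cache_alloc_notrace() to kmem_cache_alloc_trace() inverts the old convention: the tracepoint now fires inside these out-of-line helpers, so the always-inlined fast paths in the header no longer expand a trace call at every kmalloc() site. A minimal sketch of the header-side caller, assuming this era's slub_def.h names (SLUB_MAX_SIZE, SLUB_DMA, kmalloc_slab); simplified and illustrative only:

    static __always_inline void *kmalloc(size_t size, gfp_t flags)
    {
            if (__builtin_constant_p(size) && size <= SLUB_MAX_SIZE &&
                !(flags & SLUB_DMA)) {
                    struct kmem_cache *s = kmalloc_slab(size);

                    if (!s)
                            return ZERO_SIZE_PTR;
                    /* one out-of-line call; the tracepoint fires in slub.c */
                    return kmem_cache_alloc_trace(s, flags, size);
            }
            return __kmalloc(size, flags);
    }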
@@ -1794,13 +1806,17 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
                                     gfp_t gfpflags,
-                                    int node)
+                                    int node, size_t size)
 {
-        return slab_alloc(s, gfpflags, node, _RET_IP_);
+        void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+        trace_kmalloc_node(_RET_IP_, ret,
+                           size, s->size, gfpflags, node);
+        return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 #endif
 
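Note: the node-aware variant follows the same pattern. Callers pass the originally requested size so the tracepoint can report both the requested bytes (size) and the allocated bytes (s->size). A hypothetical call site, for illustration only:

    /* allocate from a specific NUMA node; tracing happens inside slub.c */
    void *obj = kmem_cache_alloc_node_trace(s, GFP_KERNEL, node, size);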
@@ -1917,17 +1933,6 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-/* Figure out on which slab page the object resides */
-static struct page *get_object_page(const void *x)
-{
-        struct page *page = virt_to_head_page(x);
-
-        if (!PageSlab(page))
-                return NULL;
-
-        return page;
-}
-
 /*
  * Object placement in a slab is made very easy because we always start at
  * offset 0. If we tune the size of the object to the alignment then we can
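Note: get_object_page() can go because its only remaining caller, kmem_ptr_validate() (removed in a later hunk), goes with it. For reference, the open-coded equivalent is just (illustrative fragment):

    struct page *page = virt_to_head_page(object);

    if (!PageSlab(page))
            page = NULL;    /* not backed by a slab page */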
@@ -2386,35 +2391,6 @@ error:
 }
 
 /*
- * Check if a given pointer is valid
- */
-int kmem_ptr_validate(struct kmem_cache *s, const void *object)
-{
-        struct page *page;
-
-        if (!kern_ptr_validate(object, s->size))
-                return 0;
-
-        page = get_object_page(object);
-
-        if (!page || s != page->slab)
-                /* No slab or wrong slab */
-                return 0;
-
-        if (!check_valid_pointer(s, page, object))
-                return 0;
-
-        /*
-         * We could also check if the object is on the slabs freelist.
-         * But this would be too expensive and it seems that the main
-         * purpose of kmem_ptr_valid() is to check if the object belongs
-         * to a certain slab.
-         */
-        return 1;
-}
-EXPORT_SYMBOL(kmem_ptr_validate);
-
-/*
  * Determine the size of a slab object
  */
 unsigned int kmem_cache_size(struct kmem_cache *s)
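Note: kmem_ptr_validate() is dropped because the check it performs is inherently racy: the object can be freed, or freed and reallocated, between the validation and the caller's use of the pointer, so a successful check proves nothing. A sketch of the race (use() is a hypothetical caller; illustrative only):

    if (kmem_ptr_validate(cache, obj)) {    /* CPU 0: obj looks valid   */
                                            /* CPU 1: kmem_cache_free() */
            use(obj);                       /* CPU 0: use-after-free    */
    }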
@@ -3273,9 +3249,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                         kfree(n);
                         kfree(s);
                 }
+err:
         up_write(&slub_lock);
 
-err:
         if (flags & SLAB_PANIC)
                 panic("Cannot create slabcache %s\n", name);
         else
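Note: before this change, a goto err taken while slub_lock was held write-locked jumped past the up_write() and leaked the semaphore. Moving the label above the unlock makes every exit path drop the lock. The corrected shape, as an illustrative fragment:

    down_write(&slub_lock);
    if (!ok)
            goto err;       /* error path still reaches up_write() */
    /* ... success path ... */
    err:
    up_write(&slub_lock);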
@@ -3401,13 +3377,13 @@ static int validate_slab(struct kmem_cache *s, struct page *page,
 
         for_each_free_object(p, s, page->freelist) {
                 set_bit(slab_index(p, s, addr), map);
-                if (!check_object(s, page, p, 0))
+                if (!check_object(s, page, p, SLUB_RED_INACTIVE))
                         return 0;
         }
 
         for_each_object(p, s, addr, page->objects)
                 if (!test_bit(slab_index(p, s, addr), map))
                         if (!check_object(s, page, p, SLUB_RED_ACTIVE))
                                 return 0;
         return 1;
 }
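Note: the last argument of check_object() now names the expected red-zone state directly instead of a bare 0/1 active flag. The named constants are the slab poison byte patterns, which to my recollection are defined in include/linux/poison.h roughly as (illustrative):

    #define SLUB_RED_INACTIVE       0xbb    /* object is free */
    #define SLUB_RED_ACTIVE         0xcc    /* object is in use */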
@@ -3862,6 +3838,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                 x += sprintf(buf + x, " N%d=%lu",
                              node, nodes[node]);
 #endif
+        up_read(&slub_lock);
         kfree(nodes);
         return x + sprintf(buf + x, "\n");
 }
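Note: show_slab_objects() takes slub_lock for reading earlier in the function (in this era), and this return path handed the buffer back without dropping it, leaving the rwsem read-held forever. The balanced shape, sketched as an illustrative fragment:

    down_read(&slub_lock);
    /* ... walk caches/nodes and format counts into buf ... */
    up_read(&slub_lock);    /* the line this hunk adds */
    kfree(nodes);
    return x + sprintf(buf + x, "\n");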