Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 164
1 file changed, 127 insertions(+), 37 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <trace/kmemtrace.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
@@ -374,14 +375,8 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 static void set_track(struct kmem_cache *s, void *object,
 			enum track_item alloc, unsigned long addr)
 {
-	struct track *p;
+	struct track *p = get_track(s, object, alloc);
 
-	if (s->offset)
-		p = object + s->offset + sizeof(void *);
-	else
-		p = object + s->inuse;
-
-	p += alloc;
 	if (addr) {
 		p->addr = addr;
 		p->cpu = smp_processor_id();
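The pointer arithmetic deleted here is exactly what get_track() already computes, so set_track() can simply reuse that helper. A userspace sketch of the lookup, reconstructed from the removed lines (the struct fields below are simplified stand-ins, not the real kmem_cache layout):

#include <stdio.h>

enum track_item { TRACK_ALLOC, TRACK_FREE };

struct track { unsigned long addr; int cpu, pid; };

/* Simplified stand-in for the two kmem_cache fields the lookup uses. */
struct cache_layout {
	unsigned long offset;	/* free-pointer offset inside the object, 0 if unused */
	unsigned long inuse;	/* bytes of the object in active use */
};

static struct track *get_track_sketch(struct cache_layout *s, void *object,
				      enum track_item alloc)
{
	char *p = object;

	if (s->offset)
		p += s->offset + sizeof(void *);	/* tracks follow the free pointer */
	else
		p += s->inuse;				/* tracks follow the used bytes */

	return (struct track *)p + alloc;		/* alloc slot, then free slot */
}

int main(void)
{
	char object[256];
	struct cache_layout s = { .offset = 0, .inuse = 128 };

	printf("free track lives at offset %td\n",
	       (char *)get_track_sketch(&s, object, TRACK_FREE) - object);
	return 0;
}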
@@ -1335,7 +1330,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 		n = get_node(s, zone_to_nid(zone));
 
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
-				n->nr_partial > n->min_partial) {
+				n->nr_partial > s->min_partial) {
 			page = get_partial_node(n);
 			if (page)
 				return page;
@@ -1387,7 +1382,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 			slab_unlock(page);
 		} else {
 			stat(c, DEACTIVATE_EMPTY);
-			if (n->nr_partial < n->min_partial) {
+			if (n->nr_partial < s->min_partial) {
 				/*
 				 * Adding an empty slab to the partial slabs in order
 				 * to avoid page allocator overhead. This slab needs
@@ -1596,6 +1591,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	unsigned long flags;
 	unsigned int objsize;
 
+	lockdep_trace_alloc(gfpflags);
 	might_sleep_if(gfpflags & __GFP_WAIT);
 
 	if (should_failslab(s->objsize, gfpflags))
@@ -1623,18 +1619,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     s->objsize, s->size, gfpflags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  s->objsize, s->size, gfpflags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+				    gfp_t gfpflags,
+				    int node)
+{
+	return slab_alloc(s, gfpflags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 /*
  * Slow patch handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
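The pattern of the hooks added above repeats throughout the rest of this patch: capture the return value of the fast path, report the requested size alongside the size the cache actually hands out, and keep a _notrace variant for callers that must not recurse into the tracer. The kmemtrace_mark_alloc* calls are assumed to compile to no-op stubs when CONFIG_KMEMTRACE is off. A userspace sketch of the wrapping pattern (malloc stands in for the slab fast path; the names here are illustrative, not kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for kmemtrace_mark_alloc(): record who asked, what they got back,
 * how much they asked for, and how much was really set aside. */
static void mark_alloc(unsigned long call_site, const void *ptr,
		       size_t bytes_req, size_t bytes_alloc)
{
	printf("alloc: site=%#lx ptr=%p req=%zu alloc=%zu\n",
	       call_site, ptr, bytes_req, bytes_alloc);
}

/* Wrapping pattern used by kmem_cache_alloc() above: allocate, mark, return. */
static void *traced_alloc(size_t size)
{
	void *ret = malloc(size);	/* fast-path stand-in for slab_alloc() */

	mark_alloc((unsigned long)__builtin_return_address(0), ret,
		   size, size);		/* the real hook reports s->objsize and s->size */
	return ret;
}

int main(void)
{
	void *p = traced_alloc(100);

	free(p);
	return 0;
}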
@@ -1724,7 +1748,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	c = get_cpu_slab(s, smp_processor_id());
 	debug_check_no_locks_freed(object, c->objsize);
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(object, s->objsize);
+		debug_check_no_obj_freed(object, c->objsize);
 	if (likely(page == c->page && c->node >= 0)) {
 		object[c->offset] = c->freelist;
 		c->freelist = object;
@@ -1742,6 +1766,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	page = virt_to_head_page(x);
 
 	slab_free(s, page, x, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -1844,6 +1870,7 @@ static inline int calculate_order(int size)
 	int order;
 	int min_objects;
 	int fraction;
+	int max_objects;
 
 	/*
 	 * Attempt to find best configuration for a slab. This
@@ -1856,6 +1883,9 @@ static inline int calculate_order(int size)
 	min_objects = slub_min_objects;
 	if (!min_objects)
 		min_objects = 4 * (fls(nr_cpu_ids) + 1);
+	max_objects = (PAGE_SIZE << slub_max_order)/size;
+	min_objects = min(min_objects, max_objects);
+
 	while (min_objects > 1) {
 		fraction = 16;
 		while (fraction >= 4) {
@@ -1865,7 +1895,7 @@ static inline int calculate_order(int size)
 				return order;
 			fraction /= 2;
 		}
-		min_objects /= 2;
+		min_objects --;
 	}
 
 	/*
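The two hunks above change how calculate_order() searches for a slab order: min_objects is first capped at the number of objects that can ever fit in a slub_max_order allocation, and the search then steps down one object at a time instead of halving, so intermediate counts are no longer skipped. A rough userspace model of the cap (an illustration only; the page size, slub_max_order and CPU count below are assumed values, not taken from this patch):

#include <stdio.h>

#define PAGE_SIZE	4096UL	/* assumed 4 KiB pages */
#define SLUB_MAX_ORDER	3	/* assumed default slub_max_order */

int main(void)
{
	unsigned long size = 8192;			/* object larger than a page */
	unsigned long min_objects = 4 * (4 + 1);	/* 4 * (fls(nr_cpu_ids) + 1) with 8 CPUs */
	unsigned long max_objects = (PAGE_SIZE << SLUB_MAX_ORDER) / size;

	/*
	 * Without the cap the search would insist on 20 objects per slab,
	 * which an order-3 allocation (4 such objects at most) can never satisfy.
	 */
	if (min_objects > max_objects)
		min_objects = max_objects;

	printf("min_objects capped from 20 to %lu\n", min_objects);
	return 0;
}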
@@ -1928,17 +1958,6 @@ static void
 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 {
 	n->nr_partial = 0;
-
-	/*
-	 * The larger the object size is, the more pages we want on the partial
-	 * list to avoid pounding the page allocator excessively.
-	 */
-	n->min_partial = ilog2(s->size);
-	if (n->min_partial < MIN_PARTIAL)
-		n->min_partial = MIN_PARTIAL;
-	else if (n->min_partial > MAX_PARTIAL)
-		n->min_partial = MAX_PARTIAL;
-
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
@@ -2181,6 +2200,15 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 }
 #endif
 
+static void set_min_partial(struct kmem_cache *s, unsigned long min)
+{
+	if (min < MIN_PARTIAL)
+		min = MIN_PARTIAL;
+	else if (min > MAX_PARTIAL)
+		min = MAX_PARTIAL;
+	s->min_partial = min;
+}
+
 /*
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
@@ -2319,6 +2347,11 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 	if (!calculate_sizes(s, -1))
 		goto error;
 
+	/*
+	 * The larger the object size is, the more pages we want on the partial
+	 * list to avoid pounding the page allocator excessively.
+	 */
+	set_min_partial(s, ilog2(s->size));
 	s->refcount = 1;
 #ifdef CONFIG_NUMA
 	s->remote_node_defrag_ratio = 1000;
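With this hunk the partial-list threshold becomes a per-cache property, set once at creation time as ilog2(s->size) and clamped by set_min_partial(), instead of being recomputed for every node. A small userspace model of the clamp (MIN_PARTIAL and MAX_PARTIAL are assumed to keep their usual slub.c values of 5 and 10):

#include <stdio.h>

#define MIN_PARTIAL 5	/* assumed, as in slub.c of this era */
#define MAX_PARTIAL 10	/* assumed, as in slub.c of this era */

/* Mirror of set_min_partial()'s clamping, returning the value instead. */
static unsigned long clamp_min_partial(unsigned long min)
{
	if (min < MIN_PARTIAL)
		min = MIN_PARTIAL;
	else if (min > MAX_PARTIAL)
		min = MAX_PARTIAL;
	return min;
}

int main(void)
{
	/* ilog2(32) = 5, ilog2(256) = 8, ilog2(4096) = 12 */
	printf("32B -> %lu, 256B -> %lu, 4KiB -> %lu\n",
	       clamp_min_partial(5), clamp_min_partial(8),
	       clamp_min_partial(12));	/* prints 5, 8, 10 */
	return 0;
}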
@@ -2475,7 +2508,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
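SLUB_PAGE_SHIFT and SLUB_MAX_SIZE replace the open-coded PAGE_SHIFT / PAGE_SIZE limits used in the rest of this patch; they are presumably defined in include/linux/slub_def.h roughly as below, letting kmalloc() serve requests up to two pages from slab caches before falling back to the page allocator. A sketch for context, not part of this diff (PAGE_SHIFT of 12 is an assumption for the sample output):

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Presumed definitions behind the new names, based on slub_def.h of this era. */
#define SLUB_MAX_SIZE	(2 * PAGE_SIZE)		/* largest kmalloc served by slab caches */
#define SLUB_PAGE_SHIFT	(PAGE_SHIFT + 2)	/* size of the kmalloc_caches[] array */

int main(void)
{
	printf("SLUB_MAX_SIZE = %lu, SLUB_PAGE_SHIFT = %d\n",
	       SLUB_MAX_SIZE, SLUB_PAGE_SHIFT);	/* 8192 and 14 */
	return 0;
}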
@@ -2537,7 +2570,7 @@ panic:
 }
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2657,8 +2690,9 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
+	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -2666,7 +2700,12 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, -1, _RET_IP_);
+	ret = slab_alloc(s, flags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+			     size, s->size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2685,16 +2724,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
+	void *ret;
+
+	if (unlikely(size > SLUB_MAX_SIZE)) {
+		ret = kmalloc_large_node(size, flags, node);
 
-	if (unlikely(size > PAGE_SIZE))
-		return kmalloc_large_node(size, flags, node);
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << get_order(size),
+					  flags, node);
+
+		return ret;
+	}
 
 	s = get_slab(size, flags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, node, _RET_IP_);
+	ret = slab_alloc(s, flags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+				  size, s->size, flags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2753,6 +2806,8 @@ void kfree(const void *x)
 		return;
 	}
 	slab_free(page->slab, page, object, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -2986,7 +3041,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3023,7 +3078,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3222,8 +3277,9 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3231,15 +3287,22 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, -1, caller);
+	ret = slab_alloc(s, gfpflags, -1, caller);
+
+	/* Honor the call site pointer we recieved. */
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
+			     s->size, gfpflags);
+
+	return ret;
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 					int node, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
 
 	s = get_slab(size, gfpflags);
@@ -3247,7 +3310,13 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, node, caller);
+	ret = slab_alloc(s, gfpflags, node, caller);
+
+	/* Honor the call site pointer we recieved. */
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
+				  size, s->size, gfpflags, node);
+
+	return ret;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
@@ -3836,6 +3905,26 @@ static ssize_t order_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR(order);
 
+static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
+{
+	return sprintf(buf, "%lu\n", s->min_partial);
+}
+
+static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
+				 size_t length)
+{
+	unsigned long min;
+	int err;
+
+	err = strict_strtoul(buf, 10, &min);
+	if (err)
+		return err;
+
+	set_min_partial(s, min);
+	return length;
+}
+SLAB_ATTR(min_partial);
+
 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 {
 	if (s->ctor) {
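SLAB_ATTR(min_partial) presumably expands along the lines of the sketch below (based on the macro's usual definition in slub.c), producing the min_partial_attr object that the next hunk wires into slab_attrs[]. Once registered, the threshold can be inspected and tuned per cache under /sys/kernel/slab/<cache>/min_partial.

/* Sketch of the assumed SLAB_ATTR(min_partial) expansion, shown for context: */
static struct slab_attribute min_partial_attr =
	__ATTR(min_partial, 0644, min_partial_show, min_partial_store);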
@@ -4151,6 +4240,7 @@ static struct attribute *slab_attrs[] = {
 	&object_size_attr.attr,
 	&objs_per_slab_attr.attr,
 	&order_attr.attr,
+	&min_partial_attr.attr,
 	&objects_attr.attr,
 	&objects_partial_attr.attr,
 	&total_objects_attr.attr,
