Diffstat (limited to 'mm/slub.c')

-rw-r--r--  mm/slub.c | 83
1 file changed, 53 insertions(+), 30 deletions(-)
@@ -374,14 +374,8 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 static void set_track(struct kmem_cache *s, void *object,
			enum track_item alloc, unsigned long addr)
 {
-	struct track *p;
-
-	if (s->offset)
-		p = object + s->offset + sizeof(void *);
-	else
-		p = object + s->inuse;
+	struct track *p = get_track(s, object, alloc);
 
-	p += alloc;
 	if (addr) {
 		p->addr = addr;
 		p->cpu = smp_processor_id();
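
The open-coded offset arithmetic duplicated get_track(), the helper visible in the hunk header, so set_track() now simply calls it. For reference, a sketch of that helper, reconstructed from the deleted lines above (its body sits just before this hunk and is not part of this diff):

	static struct track *get_track(struct kmem_cache *s, void *object,
				enum track_item alloc)
	{
		struct track *p;

		if (s->offset)
			p = object + s->offset + sizeof(void *);
		else
			p = object + s->inuse;

		return p + alloc;
	}
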
@@ -1335,7 +1329,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 		n = get_node(s, zone_to_nid(zone));
 
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
-				n->nr_partial > n->min_partial) {
+				n->nr_partial > s->min_partial) {
 			page = get_partial_node(n);
 			if (page)
 				return page;
@@ -1387,7 +1381,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 		slab_unlock(page);
 	} else {
 		stat(c, DEACTIVATE_EMPTY);
-		if (n->nr_partial < n->min_partial) {
+		if (n->nr_partial < s->min_partial) {
 			/*
 			 * Adding an empty slab to the partial slabs in order
 			 * to avoid page allocator overhead. This slab needs
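
Both this check and the one in get_any_partial() above now read s->min_partial: the minimum partial-list length moves from struct kmem_cache_node to struct kmem_cache, so it is stored once per cache rather than duplicated across nodes, and it becomes tunable through the sysfs attribute added at the end of this diff (see the init_kmem_cache_node() and set_min_partial() hunks below).
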
@@ -1596,6 +1590,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	unsigned long flags;
 	unsigned int objsize;
 
+	lockdep_trace_alloc(gfpflags);
 	might_sleep_if(gfpflags & __GFP_WAIT);
 
 	if (should_failslab(s->objsize, gfpflags))
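
lockdep_trace_alloc(gfpflags) is placed ahead of the other entry checks so that lockdep sees the gfp mask of every slab_alloc() call. This appears to be part of the lockdep reclaim-context annotations, which warn when, for example, a GFP_FS allocation is made while holding a lock that the reclaim path can also take.
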
@@ -1724,7 +1719,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	c = get_cpu_slab(s, smp_processor_id());
 	debug_check_no_locks_freed(object, c->objsize);
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(object, s->objsize);
+		debug_check_no_obj_freed(object, c->objsize);
 	if (likely(page == c->page && c->node >= 0)) {
 		object[c->offset] = c->freelist;
 		c->freelist = object;
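
The object-debug check now uses c->objsize, matching the debug_check_no_locks_freed() call two lines above it. The per-cpu structure has already been fetched and is cache-hot, so the size is read from it rather than dereferencing struct kmem_cache again.
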
@@ -1844,6 +1839,7 @@ static inline int calculate_order(int size)
 	int order;
 	int min_objects;
 	int fraction;
+	int max_objects;
 
 	/*
 	 * Attempt to find best configuration for a slab. This
@@ -1856,6 +1852,9 @@ static inline int calculate_order(int size)
 	min_objects = slub_min_objects;
 	if (!min_objects)
 		min_objects = 4 * (fls(nr_cpu_ids) + 1);
+	max_objects = (PAGE_SIZE << slub_max_order)/size;
+	min_objects = min(min_objects, max_objects);
+
 	while (min_objects > 1) {
 		fraction = 16;
 		while (fraction >= 4) {
@@ -1865,7 +1864,7 @@ static inline int calculate_order(int size)
 				return order;
 			fraction /= 2;
 		}
-		min_objects /= 2;
+		min_objects --;
 	}
 
 	/*
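
Two fixes to the order search. First, min_objects is now capped by how many objects can physically fit in the largest allowed slab. Second, the fallback lowers the target one object at a time instead of halving it, so intermediate counts are tried before an order is given up on. A minimal user-space sketch of the new cap, assuming PAGE_SIZE = 4096 and slub_max_order = 3 (the in-kernel values depend on configuration):

	#include <stdio.h>

	#define PAGE_SIZE 4096

	int main(void)
	{
		int slub_max_order = 3;
		int sizes[] = { 64, 512, 8192 };

		for (int i = 0; i < 3; i++) {
			int size = sizes[i];
			/* e.g. 4 * (fls(nr_cpu_ids) + 1) on a 4-CPU box */
			int min_objects = 16;
			int max_objects = (PAGE_SIZE << slub_max_order) / size;

			if (min_objects > max_objects)
				min_objects = max_objects;
			printf("size %5d: max_objects %4d, search starts at %d objects\n",
			       size, max_objects, min_objects);
		}
		return 0;
	}

For an 8192-byte object this starts the search at 4 objects instead of the old 16, a target the previous code could never satisfy within slub_max_order.
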
@@ -1928,17 +1927,6 @@ static void
 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 {
 	n->nr_partial = 0;
-
-	/*
-	 * The larger the object size is, the more pages we want on the partial
-	 * list to avoid pounding the page allocator excessively.
-	 */
-	n->min_partial = ilog2(s->size);
-	if (n->min_partial < MIN_PARTIAL)
-		n->min_partial = MIN_PARTIAL;
-	else if (n->min_partial > MAX_PARTIAL)
-		n->min_partial = MAX_PARTIAL;
-
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
@@ -2181,6 +2169,15 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 }
 #endif
 
+static void set_min_partial(struct kmem_cache *s, unsigned long min)
+{
+	if (min < MIN_PARTIAL)
+		min = MIN_PARTIAL;
+	else if (min > MAX_PARTIAL)
+		min = MAX_PARTIAL;
+	s->min_partial = min;
+}
+
 /*
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
@@ -2319,6 +2316,11 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 	if (!calculate_sizes(s, -1))
 		goto error;
 
+	/*
+	 * The larger the object size is, the more pages we want on the partial
+	 * list to avoid pounding the page allocator excessively.
+	 */
+	set_min_partial(s, ilog2(s->size));
 	s->refcount = 1;
 #ifdef CONFIG_NUMA
 	s->remote_node_defrag_ratio = 1000;
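
With the comment and the clamping relocated here, every cache derives its threshold from its object size once, at creation time. As a worked example, assuming MIN_PARTIAL is 5 and MAX_PARTIAL is 10 (their values elsewhere in mm/slub.c at the time): a 64-byte cache gets ilog2(64) = 6 and keeps it; a 16-byte cache gets ilog2(16) = 4, raised to 5; a 4096-byte cache gets ilog2(4096) = 12, clamped to 10.
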
@@ -2475,7 +2477,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 *		Kmalloc subsystem
 *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
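
SLUB_PAGE_SHIFT and SLUB_MAX_SIZE are not defined in this file; they come from include/linux/slub_def.h in the same series. Assuming the definitions introduced there:

	#define SLUB_MAX_SIZE (2 * PAGE_SIZE)
	#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)

That is, kmalloc() requests up to two pages are now served from slab caches rather than handed to the page allocator, which is why the array gains a slot and the loops below switch from i <= PAGE_SHIFT to i < SLUB_PAGE_SHIFT.
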
@@ -2537,7 +2539,7 @@ panic:
 }
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2658,7 +2660,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -2686,7 +2688,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, flags, node);
 
 	s = get_slab(size, flags);
@@ -2986,7 +2988,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3023,7 +3025,7 @@
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3223,7 +3225,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3239,7 +3241,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
 
 	s = get_slab(size, gfpflags);
@@ -3836,6 +3838,26 @@ static ssize_t order_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR(order);
 
+static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
+{
+	return sprintf(buf, "%lu\n", s->min_partial);
+}
+
+static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
+				 size_t length)
+{
+	unsigned long min;
+	int err;
+
+	err = strict_strtoul(buf, 10, &min);
+	if (err)
+		return err;
+
+	set_min_partial(s, min);
+	return length;
+}
+SLAB_ATTR(min_partial);
+
 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 {
 	if (s->ctor) {
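
Once the attribute is wired into slab_attrs[] in the final hunk, min_partial becomes readable and writable per cache. A plausible session (the cache name is only an example):

	# cat /sys/kernel/slab/kmalloc-64/min_partial
	6
	# echo 8 > /sys/kernel/slab/kmalloc-64/min_partial

Writes still pass through set_min_partial(), so out-of-range values are clamped to the [MIN_PARTIAL, MAX_PARTIAL] range, and strict_strtoul() rejects non-numeric input.
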
@@ -4151,6 +4173,7 @@ static struct attribute *slab_attrs[] = {
 	&object_size_attr.attr,
 	&objs_per_slab_attr.attr,
 	&order_attr.attr,
+	&min_partial_attr.attr,
 	&objects_attr.attr,
 	&objects_partial_attr.attr,
 	&total_objects_attr.attr,
