| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-03-26 19:18:17 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-03-26 19:18:17 -0400 |
| commit | be0ea69674ed95e1e98cb3687a241badc756d228 (patch) | |
| tree | 36d0db8fe389d65bbc8c7aa5be0e61b066f9536a | /mm |
| parent | 4496d937a518fde0d0e1980e4ab470cedb4b50cd (diff) | |
| parent | 15a5b0a4912d98a9615ef457c7bde8d08195a771 (diff) | |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
slob: fix lockup in slob_free()
slub: use get_track()
slub: rename calculate_min_partial() to set_min_partial()
slub: add min_partial sysfs tunable
slub: move min_partial to struct kmem_cache
SLUB: Fix default slab order for big object sizes
SLUB: Do not pass 8k objects through to the page allocator
SLUB: Introduce and use SLUB_MAX_SIZE and SLUB_PAGE_SHIFT constants
slob: clean up the code
SLUB: Use ->objsize from struct kmem_cache_cpu in slab_free()
Diffstat (limited to 'mm')
| -rw-r--r-- | mm/slob.c | 43 |
| -rw-r--r-- | mm/slub.c | 82 |

2 files changed, 79 insertions(+), 46 deletions(-)
```diff
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -126,9 +126,9 @@ static LIST_HEAD(free_slob_medium);
 static LIST_HEAD(free_slob_large);
 
 /*
- * slob_page: True for all slob pages (false for bigblock pages)
+ * is_slob_page: True for all slob pages (false for bigblock pages)
  */
-static inline int slob_page(struct slob_page *sp)
+static inline int is_slob_page(struct slob_page *sp)
 {
 	return PageSlobPage((struct page *)sp);
 }
@@ -143,6 +143,11 @@ static inline void clear_slob_page(struct slob_page *sp)
 	__ClearPageSlobPage((struct page *)sp);
 }
 
+static inline struct slob_page *slob_page(const void *addr)
+{
+	return (struct slob_page *)virt_to_page(addr);
+}
+
 /*
  * slob_page_free: true for pages on free_slob_pages list.
  */
```
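For readers without the full file at hand: `struct slob_page` is a union overlay of `struct page`, which is why the new `slob_page()` helper can simply cast the result of `virt_to_page()`. A sketch of the layout as mm/slob.c defined it around this release (field details quoted from memory, so treat as illustrative rather than authoritative):

```c
struct slob_page {
	union {
		struct {
			unsigned long flags;	/* mandatory */
			atomic_t _count;	/* mandatory */
			slobidx_t units;	/* free units left in page */
			unsigned long pad[2];
			slob_t *free;		/* first free slob_t in page */
			struct list_head list;	/* linked list of free pages */
		};
		struct page page;
	};
};
```

The helper centralizes a cast that was previously open-coded at every call site, as the later hunks show.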
```diff
@@ -230,7 +235,7 @@ static int slob_last(slob_t *s)
 	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
 }
 
-static void *slob_new_page(gfp_t gfp, int order, int node)
+static void *slob_new_pages(gfp_t gfp, int order, int node)
 {
 	void *page;
 
@@ -247,12 +252,17 @@ static void *slob_new_page(gfp_t gfp, int order, int node)
 	return page_address(page);
 }
 
+static void slob_free_pages(void *b, int order)
+{
+	free_pages((unsigned long)b, order);
+}
+
 /*
  * Allocate a slob block within a given slob_page sp.
  */
 static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
 {
-	slob_t *prev, *cur, *aligned = 0;
+	slob_t *prev, *cur, *aligned = NULL;
 	int delta = 0, units = SLOB_UNITS(size);
 
 	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
@@ -349,10 +359,10 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 
 	/* Not enough space: must allocate a new page */
 	if (!b) {
-		b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
+		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
 		if (!b)
-			return 0;
-		sp = (struct slob_page *)virt_to_page(b);
+			return NULL;
+		sp = slob_page(b);
 		set_slob_page(sp);
 
 		spin_lock_irqsave(&slob_lock, flags);
@@ -384,7 +394,7 @@ static void slob_free(void *block, int size)
 		return;
 	BUG_ON(!size);
 
-	sp = (struct slob_page *)virt_to_page(block);
+	sp = slob_page(block);
 	units = SLOB_UNITS(size);
 
 	spin_lock_irqsave(&slob_lock, flags);
@@ -393,10 +403,11 @@ static void slob_free(void *block, int size)
 		/* Go directly to page allocator. Do not pass slob allocator */
 		if (slob_page_free(sp))
 			clear_slob_page_free(sp);
+		spin_unlock_irqrestore(&slob_lock, flags);
 		clear_slob_page(sp);
 		free_slob_page(sp);
 		free_page((unsigned long)b);
-		goto out;
+		return;
 	}
 
 	if (!slob_page_free(sp)) {
```
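This hunk is the "slob: fix lockup in slob_free()" item from the shortlog. The point of the fix is that `slob_lock` is now dropped *before* the page is handed back, so `free_page()` is never called with the SLOB lock held; nothing after the unlock touches the free lists, so releasing early is safe. Reconstructed from the hunk plus its surrounding context (the guard condition is from memory of the unchanged code, so treat it as a sketch), the whole-page-free branch now reads:

```c
	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);	/* drop the lock first */
		clear_slob_page(sp);
		free_slob_page(sp);
		free_page((unsigned long)b);
		return;
	}
```

The `goto out` becomes a plain `return` because this path no longer has an unlock left to perform at the end of the function.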
```diff
@@ -476,7 +487,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 	} else {
 		void *ret;
 
-		ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+		ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
 		if (ret) {
 			struct page *page;
 			page = virt_to_page(ret);
@@ -494,8 +505,8 @@ void kfree(const void *block)
 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
 
-	sp = (struct slob_page *)virt_to_page(block);
-	if (slob_page(sp)) {
+	sp = slob_page(block);
+	if (is_slob_page(sp)) {
 		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		slob_free(m, *m + align);
@@ -513,8 +524,8 @@ size_t ksize(const void *block)
 	if (unlikely(block == ZERO_SIZE_PTR))
 		return 0;
 
-	sp = (struct slob_page *)virt_to_page(block);
-	if (slob_page(sp)) {
+	sp = slob_page(block);
+	if (is_slob_page(sp)) {
 		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		return SLOB_UNITS(*m) * SLOB_UNIT;
@@ -573,7 +584,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 	if (c->size < PAGE_SIZE)
 		b = slob_alloc(c->size, flags, c->align, node);
 	else
-		b = slob_new_page(flags, get_order(c->size), node);
+		b = slob_new_pages(flags, get_order(c->size), node);
 
 	if (c->ctor)
 		c->ctor(b);
@@ -587,7 +598,7 @@ static void __kmem_cache_free(void *b, int size)
 	if (size < PAGE_SIZE)
 		slob_free(b, size);
 	else
-		free_pages((unsigned long)b, get_order(size));
+		slob_free_pages(b, get_order(size));
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
```
```diff
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -374,14 +374,8 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 static void set_track(struct kmem_cache *s, void *object,
 			enum track_item alloc, unsigned long addr)
 {
-	struct track *p;
-
-	if (s->offset)
-		p = object + s->offset + sizeof(void *);
-	else
-		p = object + s->inuse;
+	struct track *p = get_track(s, object, alloc);
 
-	p += alloc;
 	if (addr) {
 		p->addr = addr;
 		p->cpu = smp_processor_id();
```
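The deleted pointer arithmetic in `set_track()` duplicated `get_track()`, the helper visible in the hunk's context line. Mirroring the removed lines (this is a reconstruction for reference, not part of the diff), `get_track()` computes the same address and returns it:

```c
static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

	/* Track data sits after the free pointer (if any) or after the object. */
	if (s->offset)
		p = object + s->offset + sizeof(void *);
	else
		p = object + s->inuse;

	return p + alloc;	/* select the TRACK_ALLOC or TRACK_FREE slot */
}
```

Using the helper keeps the two paths from drifting apart.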
```diff
@@ -1335,7 +1329,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 		n = get_node(s, zone_to_nid(zone));
 
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
-				n->nr_partial > n->min_partial) {
+				n->nr_partial > s->min_partial) {
 			page = get_partial_node(n);
 			if (page)
 				return page;
@@ -1387,7 +1381,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 		slab_unlock(page);
 	} else {
 		stat(c, DEACTIVATE_EMPTY);
-		if (n->nr_partial < n->min_partial) {
+		if (n->nr_partial < s->min_partial) {
 			/*
 			 * Adding an empty slab to the partial slabs in order
 			 * to avoid page allocator overhead. This slab needs
@@ -1724,7 +1718,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	c = get_cpu_slab(s, smp_processor_id());
 	debug_check_no_locks_freed(object, c->objsize);
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(object, s->objsize);
+		debug_check_no_obj_freed(object, c->objsize);
 	if (likely(page == c->page && c->node >= 0)) {
 		object[c->offset] = c->freelist;
 		c->freelist = object;
```
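The `s->objsize` to `c->objsize` switch matches the `debug_check_no_locks_freed()` call two lines up. Both fields hold the same value here; the likely motivation (hedging, since the mm-limited diff does not carry the patch description) is that `c` points at the per-CPU `struct kmem_cache_cpu` the free fast path has just fetched, so reading the size from it avoids pulling in another `struct kmem_cache` cacheline.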
```diff
@@ -1844,6 +1838,7 @@ static inline int calculate_order(int size)
 	int order;
 	int min_objects;
 	int fraction;
+	int max_objects;
 
 	/*
 	 * Attempt to find best configuration for a slab. This
@@ -1856,6 +1851,9 @@ static inline int calculate_order(int size)
 	min_objects = slub_min_objects;
 	if (!min_objects)
 		min_objects = 4 * (fls(nr_cpu_ids) + 1);
+	max_objects = (PAGE_SIZE << slub_max_order)/size;
+	min_objects = min(min_objects, max_objects);
+
 	while (min_objects > 1) {
 		fraction = 16;
 		while (fraction >= 4) {
@@ -1865,7 +1863,7 @@ static inline int calculate_order(int size)
 				return order;
 			fraction /= 2;
 		}
-		min_objects /= 2;
+		min_objects --;
 	}
 
 	/*
```
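The two `calculate_order()` changes implement the "Fix default slab order for big object sizes" item: `min_objects` is first capped at what can actually fit in a maximum-order slab, and the search then steps down one object at a time instead of halving, so viable counts are no longer skipped. An illustrative calculation with assumed values: on 4 KiB pages with `slub_max_order = 3` and `nr_cpu_ids = 16`, an 8 KiB object starts from `min_objects = 4 * (fls(16) + 1) = 24`, but `max_objects = (4096 << 3) / 8192 = 4`, so the loop now begins at 4 objects, the most that can fit, rather than burning iterations on impossible targets.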
```diff
@@ -1928,17 +1926,6 @@ static void
 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 {
 	n->nr_partial = 0;
-
-	/*
-	 * The larger the object size is, the more pages we want on the partial
-	 * list to avoid pounding the page allocator excessively.
-	 */
-	n->min_partial = ilog2(s->size);
-	if (n->min_partial < MIN_PARTIAL)
-		n->min_partial = MIN_PARTIAL;
-	else if (n->min_partial > MAX_PARTIAL)
-		n->min_partial = MAX_PARTIAL;
-
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
@@ -2181,6 +2168,15 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 }
 #endif
 
+static void set_min_partial(struct kmem_cache *s, unsigned long min)
+{
+	if (min < MIN_PARTIAL)
+		min = MIN_PARTIAL;
+	else if (min > MAX_PARTIAL)
+		min = MAX_PARTIAL;
+	s->min_partial = min;
+}
+
 /*
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
```
```diff
@@ -2319,6 +2315,11 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 	if (!calculate_sizes(s, -1))
 		goto error;
 
+	/*
+	 * The larger the object size is, the more pages we want on the partial
+	 * list to avoid pounding the page allocator excessively.
+	 */
+	set_min_partial(s, ilog2(s->size));
 	s->refcount = 1;
 #ifdef CONFIG_NUMA
 	s->remote_node_defrag_ratio = 1000;
```
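`set_min_partial(s, ilog2(s->size))` reproduces the heuristic deleted from `init_kmem_cache_node()` above, just once per cache instead of once per node. A worked example with the bounds slub.c used at the time (MIN_PARTIAL = 5 and MAX_PARTIAL = 10, values assumed from memory): a 64-byte cache gets `ilog2(64) = 6`; a 4 KiB cache computes `ilog2(4096) = 12` and is clamped down to 10; a very small cache is pulled up to 5.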
```diff
@@ -2475,7 +2476,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
```
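The diffstat is limited to mm/, so the hunks here only *use* `SLUB_MAX_SIZE` and `SLUB_PAGE_SHIFT`; their definitions land in include/linux/slub_def.h in the same series. They should look roughly like this (quoted from memory, treat as a sketch):

```c
/*
 * Maximum kmalloc object size handled by SLUB. Larger object allocations
 * are passed through to the page allocator.
 */
#define SLUB_MAX_SIZE (2 * PAGE_SIZE)

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
```

On 4 KiB pages this raises the kmalloc-cache ceiling from 4 KiB to 8 KiB, which is the "do not pass 8k objects through to the page allocator" item in the shortlog; the `i < SLUB_PAGE_SHIFT` loop bounds in kmem_cache_init() below then create caches up to `1 << (PAGE_SHIFT + 1)`.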
```diff
@@ -2537,7 +2538,7 @@ panic:
 }
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2658,7 +2659,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -2686,7 +2687,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, flags, node);
 
 	s = get_slab(size, flags);
@@ -2986,7 +2987,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3023,7 +3024,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3223,7 +3224,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3239,7 +3240,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
 
 	s = get_slab(size, gfpflags);
```
```diff
@@ -3836,6 +3837,26 @@ static ssize_t order_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR(order);
 
+static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
+{
+	return sprintf(buf, "%lu\n", s->min_partial);
+}
+
+static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
+				 size_t length)
+{
+	unsigned long min;
+	int err;
+
+	err = strict_strtoul(buf, 10, &min);
+	if (err)
+		return err;
+
+	set_min_partial(s, min);
+	return length;
+}
+SLAB_ATTR(min_partial);
+
 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 {
 	if (s->ctor) {
```
```diff
@@ -4151,6 +4172,7 @@ static struct attribute *slab_attrs[] = {
 	&object_size_attr.attr,
 	&objs_per_slab_attr.attr,
 	&order_attr.attr,
+	&min_partial_attr.attr,
 	&objects_attr.attr,
 	&objects_partial_attr.attr,
 	&total_objects_attr.attr,
```
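With `min_partial_attr` registered in `slab_attrs[]`, the tunable shows up in sysfs alongside `order` and friends. Assuming the usual SLUB layout, `cat /sys/kernel/slab/kmalloc-64/min_partial` reads the current value and `echo 8 > /sys/kernel/slab/kmalloc-64/min_partial` stores a new one, clamped by `set_min_partial()` to the [MIN_PARTIAL, MAX_PARTIAL] range. The store path uses `strict_strtoul()`, the strict string-to-ulong parser of the day (later superseded by `kstrtoul()`), so non-numeric input is rejected with an error.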
