author     Linus Torvalds <torvalds@woody.linux-foundation.org>   2008-02-15 00:24:02 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2008-02-15 00:24:02 -0500
commit     f527cf405017e60ceb28f84e2d60ab16fc34f209 (patch)
tree       eadf0bfa385dad2e76a27d9a01cdcb22bad0efc1 /mm
parent     cead99dcf48eeaaac0a1ececff9c979756b79294 (diff)
parent     331dc558fa020451ff773973cee855fd721aa88e (diff)
Merge branch 'slab-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm
* 'slab-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm:
  slub: Support 4k kmallocs again to compensate for page allocator slowness
  slub: Fallback to kmalloc_large for failing higher order allocs
  slub: Determine gfpflags once and not every time a slab is allocated
  make slub.c:slab_address() static
  slub: kmalloc page allocator pass-through cleanup
  slab: avoid double initialization & do initialization in 1 place
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c   3
-rw-r--r--  mm/slub.c  94
2 files changed, 65 insertions, 32 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 40c00dacbe4b..473e6c2eaefb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2630,6 +2630,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 	slabp->colouroff = colour_off;
 	slabp->s_mem = objp + colour_off;
 	slabp->nodeid = nodeid;
+	slabp->free = 0;
 	return slabp;
 }
 
@@ -2683,7 +2684,6 @@ static void cache_init_objs(struct kmem_cache *cachep,
 		slab_bufctl(slabp)[i] = i + 1;
 	}
 	slab_bufctl(slabp)[i - 1] = BUFCTL_END;
-	slabp->free = 0;
 }
 
 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
@@ -2816,7 +2816,6 @@ static int cache_grow(struct kmem_cache *cachep,
 	if (!slabp)
 		goto opps1;
 
-	slabp->nodeid = nodeid;
 	slab_map_pages(cachep, slabp, objp);
 
 	cache_init_objs(cachep, slabp);
diff --git a/mm/slub.c b/mm/slub.c
index e2989ae243b5..4b3895cb90ee 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -211,6 +211,8 @@ static inline void ClearSlabDebug(struct page *page)
 /* Internal SLUB flags */
 #define __OBJECT_POISON		0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
+#define __KMALLOC_CACHE		0x20000000 /* objects freed using kfree */
+#define __PAGE_ALLOC_FALLBACK	0x10000000 /* Allow fallback to page alloc */
 
 /* Not all arches define cache_line_size */
 #ifndef cache_line_size
@@ -308,7 +310,7 @@ static inline int is_end(void *addr)
 	return (unsigned long)addr & PAGE_MAPPING_ANON;
 }
 
-void *slab_address(struct page *page)
+static void *slab_address(struct page *page)
 {
 	return page->end - PAGE_MAPPING_ANON;
 }
@@ -1078,14 +1080,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	struct page *page;
 	int pages = 1 << s->order;
 
-	if (s->order)
-		flags |= __GFP_COMP;
-
-	if (s->flags & SLAB_CACHE_DMA)
-		flags |= SLUB_DMA;
-
-	if (s->flags & SLAB_RECLAIM_ACCOUNT)
-		flags |= __GFP_RECLAIMABLE;
+	flags |= s->allocflags;
 
 	if (node == -1)
 		page = alloc_pages(flags, s->order);
@@ -1546,7 +1541,6 @@ load_freelist:
 unlock_out:
 	slab_unlock(c->page);
 	stat(c, ALLOC_SLOWPATH);
-out:
 #ifdef SLUB_FASTPATH
 	local_irq_restore(flags);
 #endif
@@ -1581,8 +1575,24 @@ new_slab:
 		c->page = new;
 		goto load_freelist;
 	}
-	object = NULL;
-	goto out;
+#ifdef SLUB_FASTPATH
+	local_irq_restore(flags);
+#endif
+	/*
+	 * No memory available.
+	 *
+	 * If the slab uses higher order allocs but the object is
+	 * smaller than a page size then we can fallback in emergencies
+	 * to the page allocator via kmalloc_large. The page allocator may
+	 * have failed to obtain a higher order page and we can try to
+	 * allocate a single page if the object fits into a single page.
+	 * That is only possible if certain conditions are met that are being
+	 * checked when a slab is created.
+	 */
+	if (!(gfpflags & __GFP_NORETRY) && (s->flags & __PAGE_ALLOC_FALLBACK))
+		return kmalloc_large(s->objsize, gfpflags);
+
+	return NULL;
 debug:
 	object = c->page->freelist;
 	if (!alloc_debug_processing(s, c->page, object, addr))
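
The comment in the hunk above describes the new emergency path: when a higher order slab allocation fails, a cache created with __PAGE_ALLOC_FALLBACK can still satisfy the request from a single page via kmalloc_large(). Below is a minimal userspace sketch of that decision only; try_higher_order_alloc(), page_alloc_single(), cache_alloc() and the flag values are hypothetical stand-ins for alloc_pages(), kmalloc_large() and the real gfp/slab flags.

/*
 * Hedged userspace sketch, not kernel code: it only mirrors the shape of
 * the fallback decision added in the hunk above.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE            4096u
#define GFP_NORETRY          0x1u  /* caller forbids emergency fallback */
#define PAGE_ALLOC_FALLBACK  0x2u  /* cache was created with the fallback bit */

struct cache {
	unsigned int objsize;  /* object size in bytes */
	unsigned int order;    /* preferred slab order (2^order pages) */
	unsigned int flags;
};

/* Pretend the higher order slab allocation failed (memory fragmented). */
static void *try_higher_order_alloc(const struct cache *c)
{
	(void)c;
	return NULL;
}

/* Stand-in for kmalloc_large(): one order-0 "page" from the page allocator. */
static void *page_alloc_single(size_t size)
{
	return size <= PAGE_SIZE ? malloc(PAGE_SIZE) : NULL;
}

static void *cache_alloc(const struct cache *c, unsigned int gfpflags)
{
	void *obj = try_higher_order_alloc(c);

	if (obj)
		return obj;

	/*
	 * No memory available. Fall back to a single page only if the
	 * caller tolerates it and the cache was flagged at creation time,
	 * which guarantees the object fits into one page.
	 */
	if (!(gfpflags & GFP_NORETRY) && (c->flags & PAGE_ALLOC_FALLBACK))
		return page_alloc_single(c->objsize);

	return NULL;
}

int main(void)
{
	struct cache kmalloc_4k = { .objsize = 4096, .order = 3,
				    .flags = PAGE_ALLOC_FALLBACK };
	void *p = cache_alloc(&kmalloc_4k, 0);

	printf("allocation %s\n", p ? "succeeded via fallback" : "failed");
	free(p);
	return 0;
}
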
@@ -2329,10 +2339,33 @@ static int calculate_sizes(struct kmem_cache *s)
 	size = ALIGN(size, align);
 	s->size = size;
 
-	s->order = calculate_order(size);
+	if ((flags & __KMALLOC_CACHE) &&
+			PAGE_SIZE / size < slub_min_objects) {
+		/*
+		 * Kmalloc cache that would not have enough objects in
+		 * an order 0 page. Kmalloc slabs can fallback to
+		 * page allocator order 0 allocs so take a reasonably large
+		 * order that will allows us a good number of objects.
+		 */
+		s->order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER);
+		s->flags |= __PAGE_ALLOC_FALLBACK;
+		s->allocflags |= __GFP_NOWARN;
+	} else
+		s->order = calculate_order(size);
+
 	if (s->order < 0)
 		return 0;
 
+	s->allocflags = 0;
+	if (s->order)
+		s->allocflags |= __GFP_COMP;
+
+	if (s->flags & SLAB_CACHE_DMA)
+		s->allocflags |= SLUB_DMA;
+
+	if (s->flags & SLAB_RECLAIM_ACCOUNT)
+		s->allocflags |= __GFP_RECLAIMABLE;
+
 	/*
 	 * Determine the number of objects per slab
 	 */
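
This hunk also makes calculate_sizes() decide the per-cache allocation flags a single time, so allocate_slab() earlier in the diff only has to OR in s->allocflags on the hot path. A hedged sketch of the same idea outside the kernel follows; the flag values and the names cache_init_allocflags() and alloc_gfp() are made up for illustration.

/*
 * Hedged sketch of "determine gfpflags once": the bits that never change
 * for a cache are computed at creation time and OR-ed in on every
 * allocation. Flag values and names are illustrative, not the kernel's.
 */
#include <stdio.h>

#define GFP_COMP         0x01u  /* compound page needed for order > 0 */
#define GFP_DMA          0x02u
#define GFP_RECLAIMABLE  0x04u

#define CACHE_DMA              0x10u
#define CACHE_RECLAIM_ACCOUNT  0x20u

struct cache {
	unsigned int order;
	unsigned int flags;      /* cache properties */
	unsigned int allocflags; /* precomputed per-cache gfp bits */
};

/* Done once when the cache is set up. */
static void cache_init_allocflags(struct cache *c)
{
	c->allocflags = 0;
	if (c->order)
		c->allocflags |= GFP_COMP;
	if (c->flags & CACHE_DMA)
		c->allocflags |= GFP_DMA;
	if (c->flags & CACHE_RECLAIM_ACCOUNT)
		c->allocflags |= GFP_RECLAIMABLE;
}

/* The allocation path then needs a single OR instead of three branches. */
static unsigned int alloc_gfp(const struct cache *c, unsigned int callerflags)
{
	return callerflags | c->allocflags;
}

int main(void)
{
	struct cache c = { .order = 2, .flags = CACHE_RECLAIM_ACCOUNT };

	cache_init_allocflags(&c);
	printf("gfp mask for this cache: 0x%x\n", alloc_gfp(&c, 0));
	return 0;
}
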
@@ -2484,11 +2517,11 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT];
+static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
 #endif
 
 static int __init setup_slub_min_order(char *str)
@@ -2536,7 +2569,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 
 	down_write(&slub_lock);
 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-			flags, NULL))
+			flags | __KMALLOC_CACHE, NULL))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
@@ -2670,9 +2703,8 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(flags | __GFP_COMP,
-						get_order(size));
+	if (unlikely(size > PAGE_SIZE))
+		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
 
@@ -2688,9 +2720,8 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(flags | __GFP_COMP,
-						get_order(size));
+	if (unlikely(size > PAGE_SIZE))
+		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
 
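
Both __kmalloc() and __kmalloc_node() now route requests larger than PAGE_SIZE straight to the page allocator through kmalloc_large(), raising the old PAGE_SIZE / 2 cutoff so that 4k requests are once again served by a kmalloc cache. An illustrative userspace sketch of the threshold check follows; kmalloc_sketch(), slab_alloc_stub() and large_alloc_stub() are invented names standing in for __kmalloc(), the kmalloc caches and kmalloc_large().

/*
 * Illustrative sketch of the pass-through threshold; not the kernel's code.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u

static void *slab_alloc_stub(size_t size)  { return malloc(size); }
static void *large_alloc_stub(size_t size) { return malloc(size); }

static void *kmalloc_sketch(size_t size)
{
	/*
	 * Before this merge the cutoff was PAGE_SIZE / 2; with the new
	 * threshold a 4k request is still served by a kmalloc cache and
	 * only larger requests pass through to the page allocator.
	 */
	if (size > PAGE_SIZE)
		return large_alloc_stub(size);
	return slab_alloc_stub(size);
}

int main(void)
{
	void *a = kmalloc_sketch(4096); /* served by the new 4k kmalloc cache */
	void *b = kmalloc_sketch(8192); /* passed through to the page allocator */

	printf("%p %p\n", a, b);
	free(a);
	free(b);
	return 0;
}
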
@@ -3001,7 +3032,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
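
With kmalloc_caches[] sized PAGE_SHIFT + 1, the loop above can create a cache for 1 << PAGE_SHIFT bytes (4k on most architectures). The sketch below shows why index PAGE_SHIFT must be valid for a full-page request; it only approximates the kernel's size-to-cache mapping with a loop, and KMALLOC_SHIFT_LOW == 3 (an 8-byte minimum) is an assumption for illustration.

/*
 * Hedged sketch: cache i serves requests up to 1 << i bytes, so covering
 * a 4096-byte request with PAGE_SHIFT == 12 needs index 12 to exist.
 */
#include <stdio.h>

#define PAGE_SHIFT        12
#define KMALLOC_SHIFT_LOW 3

/* Smallest power-of-two cache index whose size can hold `size` bytes. */
static int kmalloc_index_sketch(unsigned int size)
{
	int i;

	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
		if (size <= (1u << i))
			return i;
	return -1; /* larger than a page: passed through to the page allocator */
}

int main(void)
{
	printf("4096-byte request -> kmalloc_caches[%d]\n",
	       kmalloc_index_sketch(4096));
	printf("4097-byte request -> index %d (pass-through)\n",
	       kmalloc_index_sketch(4097));
	return 0;
}
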
@@ -3028,7 +3059,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3057,6 +3088,9 @@ static int slab_unmergeable(struct kmem_cache *s)
 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
 		return 1;
 
+	if ((s->flags & __PAGE_ALLOC_FALLBACK))
+		return 1;
+
 	if (s->ctor)
 		return 1;
 
@@ -3218,9 +3252,9 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
-						get_order(size));
+	if (unlikely(size > PAGE_SIZE))
+		return kmalloc_large(size, gfpflags);
+
 	s = get_slab(size, gfpflags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
@@ -3234,9 +3268,9 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
-						get_order(size));
+	if (unlikely(size > PAGE_SIZE))
+		return kmalloc_large(size, gfpflags);
+
 	s = get_slab(size, gfpflags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))