-rw-r--r--   include/linux/slub_def.h | 15
-rw-r--r--   mm/slab.c                |  3
-rw-r--r--   mm/slub.c                | 94
3 files changed, 75 insertions(+), 37 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 5e6d3d634d5b..57deecc79d52 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -71,6 +71,7 @@ struct kmem_cache {
 
 	/* Allocation and freeing of slabs */
 	int objects;		/* Number of objects in slab */
+	gfp_t allocflags;	/* gfp flags to use on each alloc */
 	int refcount;		/* Refcount for slab cache destroy */
 	void (*ctor)(struct kmem_cache *, void *);
 	int inuse;		/* Offset to metadata */
@@ -110,7 +111,7 @@ struct kmem_cache {
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -188,12 +189,16 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+	return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
+}
+
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
-		if (size > PAGE_SIZE / 2)
-			return (void *)__get_free_pages(flags | __GFP_COMP,
-							get_order(size));
+		if (size > PAGE_SIZE)
+			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
@@ -214,7 +219,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) &&
-		size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) {
+		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
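The hunks above raise the inline kmalloc() cutoff from PAGE_SIZE / 2 to PAGE_SIZE and route oversized constant-size requests through the new kmalloc_large() helper. As a rough caller-side sketch of what that means (illustrative only, not part of the patch; the struct and function names here are hypothetical), assuming a 4K-page system:

#include <linux/slab.h>

/* Hypothetical example: this is larger than PAGE_SIZE on a 4K-page system,
 * so the constant-folded branch in kmalloc() above compiles straight to
 * kmalloc_large(), i.e. __get_free_pages() with __GFP_COMP. */
struct big_buffer {
	char data[2 * 4096];
};

static void *example_big_alloc(gfp_t flags)
{
	return kmalloc(sizeof(struct big_buffer), flags);
}

/* A 3000-byte constant request, by contrast, is now below the PAGE_SIZE
 * cutoff and is served from a kmalloc slab cache, whereas with the old
 * PAGE_SIZE / 2 cutoff it went straight to the page allocator. */
static void *example_medium_alloc(gfp_t flags)
{
	return kmalloc(3000, flags);
}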
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2630,6 +2630,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 	slabp->colouroff = colour_off;
 	slabp->s_mem = objp + colour_off;
 	slabp->nodeid = nodeid;
+	slabp->free = 0;
 	return slabp;
 }
 
@@ -2683,7 +2684,6 @@ static void cache_init_objs(struct kmem_cache *cachep,
 		slab_bufctl(slabp)[i] = i + 1;
 	}
 	slab_bufctl(slabp)[i - 1] = BUFCTL_END;
-	slabp->free = 0;
 }
 
 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
@@ -2816,7 +2816,6 @@ static int cache_grow(struct kmem_cache *cachep,
 	if (!slabp)
 		goto opps1;
 
-	slabp->nodeid = nodeid;
 	slab_map_pages(cachep, slabp, objp);
 
 	cache_init_objs(cachep, slabp);
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -211,6 +211,8 @@ static inline void ClearSlabDebug(struct page *page)
 /* Internal SLUB flags */
 #define __OBJECT_POISON		0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
+#define __KMALLOC_CACHE		0x20000000 /* objects freed using kfree */
+#define __PAGE_ALLOC_FALLBACK	0x10000000 /* Allow fallback to page alloc */
 
 /* Not all arches define cache_line_size */
 #ifndef cache_line_size
@@ -308,7 +310,7 @@ static inline int is_end(void *addr)
 	return (unsigned long)addr & PAGE_MAPPING_ANON;
 }
 
-void *slab_address(struct page *page)
+static void *slab_address(struct page *page)
 {
 	return page->end - PAGE_MAPPING_ANON;
 }
@@ -1078,14 +1080,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	struct page *page;
 	int pages = 1 << s->order;
 
-	if (s->order)
-		flags |= __GFP_COMP;
-
-	if (s->flags & SLAB_CACHE_DMA)
-		flags |= SLUB_DMA;
-
-	if (s->flags & SLAB_RECLAIM_ACCOUNT)
-		flags |= __GFP_RECLAIMABLE;
+	flags |= s->allocflags;
 
 	if (node == -1)
 		page = alloc_pages(flags, s->order);
@@ -1546,7 +1541,6 @@ load_freelist:
 unlock_out:
 	slab_unlock(c->page);
 	stat(c, ALLOC_SLOWPATH);
-out:
 #ifdef SLUB_FASTPATH
 	local_irq_restore(flags);
 #endif
@@ -1581,8 +1575,24 @@ new_slab:
 		c->page = new;
 		goto load_freelist;
 	}
-	object = NULL;
-	goto out;
+#ifdef SLUB_FASTPATH
+	local_irq_restore(flags);
+#endif
+	/*
+	 * No memory available.
+	 *
+	 * If the slab uses higher order allocs but the object is
+	 * smaller than a page size then we can fallback in emergencies
+	 * to the page allocator via kmalloc_large. The page allocator may
+	 * have failed to obtain a higher order page and we can try to
+	 * allocate a single page if the object fits into a single page.
+	 * That is only possible if certain conditions are met that are being
+	 * checked when a slab is created.
+	 */
+	if (!(gfpflags & __GFP_NORETRY) && (s->flags & __PAGE_ALLOC_FALLBACK))
+		return kmalloc_large(s->objsize, gfpflags);
+
+	return NULL;
 debug:
 	object = c->page->freelist;
 	if (!alloc_debug_processing(s, c->page, object, addr))
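The comment block added above is the heart of the change: when a higher-order slab page cannot be obtained, a cache flagged __PAGE_ALLOC_FALLBACK punts to the page allocator instead of failing the allocation. Stripped of the SLUB_FASTPATH/irq bookkeeping, the new tail of the slow path behaves like this sketch (not a verbatim copy of the patch; the field and flag names follow it):

/* Sketch of the out-of-memory tail of __slab_alloc() after this patch. */
static void *slab_alloc_oom_fallback(struct kmem_cache *s, gfp_t gfpflags)
{
	/*
	 * Only caches that opted in at creation time (a single object fits
	 * in an order-0 page) may fall back, and only if the caller did not
	 * ask for a quick failure with __GFP_NORETRY.
	 */
	if (!(gfpflags & __GFP_NORETRY) && (s->flags & __PAGE_ALLOC_FALLBACK))
		return kmalloc_large(s->objsize, gfpflags);

	return NULL;
}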
@@ -2329,10 +2339,33 @@ static int calculate_sizes(struct kmem_cache *s)
 	size = ALIGN(size, align);
 	s->size = size;
 
-	s->order = calculate_order(size);
+	if ((flags & __KMALLOC_CACHE) &&
+			PAGE_SIZE / size < slub_min_objects) {
+		/*
+		 * Kmalloc cache that would not have enough objects in
+		 * an order 0 page. Kmalloc slabs can fallback to
+		 * page allocator order 0 allocs so take a reasonably large
+		 * order that will allows us a good number of objects.
+		 */
+		s->order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER);
+		s->flags |= __PAGE_ALLOC_FALLBACK;
+		s->allocflags |= __GFP_NOWARN;
+	} else
+		s->order = calculate_order(size);
+
 	if (s->order < 0)
 		return 0;
 
+	s->allocflags = 0;
+	if (s->order)
+		s->allocflags |= __GFP_COMP;
+
+	if (s->flags & SLAB_CACHE_DMA)
+		s->allocflags |= SLUB_DMA;
+
+	if (s->flags & SLAB_RECLAIM_ACCOUNT)
+		s->allocflags |= __GFP_RECLAIMABLE;
+
 	/*
 	 * Determine the number of objects per slab
 	 */
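Two things happen in the hunk above: kmalloc caches whose objects would not reach slub_min_objects in an order-0 page get a large order plus the __PAGE_ALLOC_FALLBACK flag, and the per-allocation gfp bits are precomputed into s->allocflags so allocate_slab() (earlier hunk) only has to OR them in. A minimal sketch of that precomputation, treating SLUB_DMA and __GFP_RECLAIMABLE as the internal/arch-conditional names used in mm/slub.c at the time (an assumption, not guaranteed on every config):

/* Illustrative helper, not in the patch: derive the per-slab-page gfp mask
 * once at cache-creation time, mirroring the added lines above. */
static gfp_t example_allocflags(unsigned long cache_flags, int order)
{
	gfp_t allocflags = 0;

	if (order)				/* order > 0 needs a compound page */
		allocflags |= __GFP_COMP;
	if (cache_flags & SLAB_CACHE_DMA)	/* DMA caches allocate from the DMA zone */
		allocflags |= SLUB_DMA;
	if (cache_flags & SLAB_RECLAIM_ACCOUNT)	/* accounted as reclaimable */
		allocflags |= __GFP_RECLAIMABLE;

	return allocflags;
}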
@@ -2484,11 +2517,11 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT];
+static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
 #endif
 
 static int __init setup_slub_min_order(char *str)
@@ -2536,7 +2569,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 
 	down_write(&slub_lock);
 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-			flags, NULL))
+			flags | __KMALLOC_CACHE, NULL))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
@@ -2670,9 +2703,8 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(flags | __GFP_COMP,
-							get_order(size));
+	if (unlikely(size > PAGE_SIZE))
+		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
 
@@ -2688,9 +2720,8 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(flags | __GFP_COMP,
-							get_order(size));
+	if (unlikely(size > PAGE_SIZE))
+		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
 
@@ -3001,7 +3032,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3028,7 +3059,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
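With the array extended to PAGE_SHIFT + 1 entries and both loops above running up to and including PAGE_SHIFT, SLUB now has a general cache for every power of two up to a full page: on a 4K-page system (PAGE_SHIFT == 12) the largest slab-backed cache becomes kmalloc-4096 rather than kmalloc-2048, matching the new size > PAGE_SIZE cutoff in the header. A rough model of the size-to-cache mapping this implies (not the kernel's actual kmalloc_slab()/kmalloc_index() code, which also has the 96- and 192-byte caches; the low bound of 3 and the 4K page size are assumptions):

/* Rough model of which general cache a small request lands in after this
 * patch; returns the power-of-two shift, or -1 for the kmalloc_large() path. */
static int example_kmalloc_shift(unsigned long size)
{
	int shift;

	for (shift = 3; shift <= 12 /* PAGE_SHIFT with 4K pages */; shift++)
		if (size <= (1UL << shift))
			return shift;	/* e.g. 3000 -> 12, i.e. kmalloc-4096 */

	return -1;	/* size > PAGE_SIZE: handled by kmalloc_large() */
}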
@@ -3057,6 +3088,9 @@ static int slab_unmergeable(struct kmem_cache *s)
 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
 		return 1;
 
+	if ((s->flags & __PAGE_ALLOC_FALLBACK))
+		return 1;
+
 	if (s->ctor)
 		return 1;
 
@@ -3218,9 +3252,9 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
-							get_order(size));
+	if (unlikely(size > PAGE_SIZE))
+		return kmalloc_large(size, gfpflags);
+
 	s = get_slab(size, gfpflags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
@@ -3234,9 +3268,9 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
-							get_order(size));
+	if (unlikely(size > PAGE_SIZE))
+		return kmalloc_large(size, gfpflags);
+
 	s = get_slab(size, gfpflags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
