Diffstat (limited to 'mm')

-rw-r--r--  mm/slub.c  43

1 file changed, 38 insertions(+), 5 deletions(-)
@@ -211,6 +211,8 @@ static inline void ClearSlabDebug(struct page *page)
 /* Internal SLUB flags */
 #define __OBJECT_POISON		0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
+#define __KMALLOC_CACHE		0x20000000 /* objects freed using kfree */
+#define __PAGE_ALLOC_FALLBACK	0x10000000 /* Allow fallback to page alloc */
 
 /* Not all arches define cache_line_size */
 #ifndef cache_line_size
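Note on the hunk above: the two new bits sit in the same internal high-bit flag space as the existing ones and are tested with plain bitwise AND on the cache's flags word. Below is a minimal userspace sketch of that flag handling, not SLUB code; struct cache_model and may_fall_back() are illustrative names only.

#include <stdio.h>

/* Internal SLUB flag bits, copied from the hunk above. */
#define __OBJECT_POISON		0x80000000u /* Poison object */
#define __SYSFS_ADD_DEFERRED	0x40000000u /* Not yet visible via sysfs */
#define __KMALLOC_CACHE		0x20000000u /* objects freed using kfree */
#define __PAGE_ALLOC_FALLBACK	0x10000000u /* Allow fallback to page alloc */

/* Illustrative stand-in for the flags word of a cache. */
struct cache_model {
	unsigned int flags;
};

static int may_fall_back(const struct cache_model *c)
{
	/* The slow path checks only __PAGE_ALLOC_FALLBACK; the bit is
	 * only ever set on __KMALLOC_CACHE caches (see the later hunks). */
	return (c->flags & __PAGE_ALLOC_FALLBACK) != 0;
}

int main(void)
{
	struct cache_model c = {
		.flags = __KMALLOC_CACHE | __PAGE_ALLOC_FALLBACK
	};

	printf("fallback allowed: %d\n", may_fall_back(&c));
	return 0;
}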
@@ -1539,7 +1541,6 @@ load_freelist:
 unlock_out:
 	slab_unlock(c->page);
 	stat(c, ALLOC_SLOWPATH);
-out:
 #ifdef SLUB_FASTPATH
 	local_irq_restore(flags);
 #endif
@@ -1574,8 +1575,24 @@ new_slab:
 		c->page = new;
 		goto load_freelist;
 	}
-	object = NULL;
-	goto out;
+#ifdef SLUB_FASTPATH
+	local_irq_restore(flags);
+#endif
+	/*
+	 * No memory available.
+	 *
+	 * If the slab uses higher order allocs but the object is
+	 * smaller than a page size then we can fallback in emergencies
+	 * to the page allocator via kmalloc_large. The page allocator may
+	 * have failed to obtain a higher order page and we can try to
+	 * allocate a single page if the object fits into a single page.
+	 * That is only possible if certain conditions are met that are being
+	 * checked when a slab is created.
+	 */
+	if (!(gfpflags & __GFP_NORETRY) && (s->flags & __PAGE_ALLOC_FALLBACK))
+		return kmalloc_large(s->objsize, gfpflags);
+
+	return NULL;
 debug:
 	object = c->page->freelist;
 	if (!alloc_debug_processing(s, c->page, object, addr))
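The comment in this hunk describes the new emergency path: when the higher-order slab allocation fails, a kmalloc cache marked __PAGE_ALLOC_FALLBACK can satisfy the request from a single page via kmalloc_large() instead of returning NULL. Below is a minimal userspace model of that decision, under assumptions: MODEL_PAGE_SIZE, struct cache_model and slow_path_no_memory() are made up for illustration, and malloc() stands in for kmalloc_large()/the page allocator.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define MODEL_PAGE_SIZE 4096	/* assumed page size for this model */

/* Illustrative cache descriptor: only the fields this hunk needs. */
struct cache_model {
	size_t objsize;			/* object payload size */
	bool page_alloc_fallback;	/* __PAGE_ALLOC_FALLBACK set? */
};

/*
 * Model of the tail of the allocation slow path: the higher-order slab
 * allocation has already failed.  If the caller did not pass
 * __GFP_NORETRY and the cache carries the fallback bit, hand out a
 * plain allocation that fits in one page instead of returning NULL.
 */
static void *slow_path_no_memory(const struct cache_model *s, bool gfp_noretry)
{
	/* The patch only sets the fallback bit when the object fits in
	 * one page; that precondition is mirrored here. */
	if (s->objsize > MODEL_PAGE_SIZE)
		return NULL;

	if (!gfp_noretry && s->page_alloc_fallback)
		return malloc(s->objsize);	/* stand-in for kmalloc_large() */

	return NULL;
}

int main(void)
{
	struct cache_model s = { .objsize = 192, .page_alloc_fallback = true };
	void *p = slow_path_no_memory(&s, false);

	printf("fallback object: %p\n", p);
	free(p);
	return 0;
}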
@@ -2322,7 +2339,20 @@ static int calculate_sizes(struct kmem_cache *s)
 	size = ALIGN(size, align);
 	s->size = size;
 
-	s->order = calculate_order(size);
+	if ((flags & __KMALLOC_CACHE) &&
+			PAGE_SIZE / size < slub_min_objects) {
+		/*
+		 * Kmalloc cache that would not have enough objects in
+		 * an order 0 page. Kmalloc slabs can fallback to
+		 * page allocator order 0 allocs so take a reasonably large
+		 * order that will allow us a good number of objects.
+		 */
+		s->order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER);
+		s->flags |= __PAGE_ALLOC_FALLBACK;
+		s->allocflags |= __GFP_NOWARN;
+	} else
+		s->order = calculate_order(size);
+
 	if (s->order < 0)
 		return 0;
 
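This hunk picks the slab order at cache-creation time: a kmalloc cache whose objects would not reach slub_min_objects in an order-0 page gets max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER) plus the fallback flag; every other cache keeps calculate_order(). A small userspace model of that branch follows; the MODEL_* constants and pick_order() are illustrative stand-ins, not kernel values.

#include <stddef.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE		4096	/* assumed page size */
#define MODEL_MIN_OBJECTS	4	/* stand-in for slub_min_objects */
#define MODEL_MAX_ORDER		1	/* stand-in for slub_max_order */
#define MODEL_COSTLY_ORDER	3	/* stand-in for PAGE_ALLOC_COSTLY_ORDER */

static int max_int(int a, int b)
{
	return a > b ? a : b;
}

/*
 * Model of the order decision above: a kmalloc cache whose objects are
 * so large that an order-0 page holds fewer than the minimum object
 * count gets a generously large order (it can always fall back to
 * order-0 page allocations); everything else would use the normal
 * calculate_order() path, stubbed out here.
 */
static int pick_order(size_t size, int is_kmalloc_cache, int *fallback)
{
	*fallback = 0;

	if (is_kmalloc_cache && MODEL_PAGE_SIZE / size < MODEL_MIN_OBJECTS) {
		*fallback = 1;	/* would also set __PAGE_ALLOC_FALLBACK */
		return max_int(MODEL_MAX_ORDER, MODEL_COSTLY_ORDER);
	}

	return 0;	/* stand-in for calculate_order(size) */
}

int main(void)
{
	int fallback;
	int order = pick_order(2048, 1, &fallback);

	printf("order=%d fallback=%d\n", order, fallback);
	return 0;
}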
@@ -2539,7 +2569,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 
 	down_write(&slub_lock);
 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-								flags, NULL))
+						flags | __KMALLOC_CACHE, NULL))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
@@ -3058,6 +3088,9 @@ static int slab_unmergeable(struct kmem_cache *s)
 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
 		return 1;
 
+	if ((s->flags & __PAGE_ALLOC_FALLBACK))
+		return 1;
+
 	if (s->ctor)
 		return 1;
 
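Finally, caches created with the page-allocator fallback are excluded from slab merging. A compact userspace model of the unmergeable check after this patch follows; it covers only the two checks visible in the hunk above, and struct cache_model and unmergeable() are illustrative names only.

#include <stdbool.h>
#include <stdio.h>

/* Flag bit from the first hunk. */
#define __PAGE_ALLOC_FALLBACK	0x10000000u

/* Illustrative subset of the merge-relevant cache state. */
struct cache_model {
	unsigned int flags;
	void (*ctor)(void *);
};

/*
 * Simplified model of slab_unmergeable() after this patch: a cache is
 * kept separate if it was created with the page-allocator fallback bit
 * or if it has a constructor (the slub_nomerge and SLUB_NEVER_MERGE
 * checks are omitted here).
 */
static bool unmergeable(const struct cache_model *s)
{
	if (s->flags & __PAGE_ALLOC_FALLBACK)
		return true;

	if (s->ctor)
		return true;

	return false;
}

int main(void)
{
	struct cache_model s = { .flags = __PAGE_ALLOC_FALLBACK };

	printf("unmergeable: %d\n", unmergeable(&s));
	return 0;
}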