Diffstat (limited to 'mm/slub.c'):
 mm/slub.c | 17 +++++++++++++----
 1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 96d63eb3ab17..84ed734b96b3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1470,6 +1470,9 @@ static void *__slab_alloc(struct kmem_cache *s,
 	void **object;
 	struct page *new;
 
+	/* We handle __GFP_ZERO in the caller */
+	gfpflags &= ~__GFP_ZERO;
+
 	if (!c->page)
 		goto new_slab;
 
@@ -1536,9 +1539,15 @@ new_slab:
 	 * That is only possible if certain conditions are met that are being
 	 * checked when a slab is created.
 	 */
-	if (!(gfpflags & __GFP_NORETRY) && (s->flags & __PAGE_ALLOC_FALLBACK))
-		return kmalloc_large(s->objsize, gfpflags);
-
+	if (!(gfpflags & __GFP_NORETRY) &&
+			(s->flags & __PAGE_ALLOC_FALLBACK)) {
+		if (gfpflags & __GFP_WAIT)
+			local_irq_enable();
+		object = kmalloc_large(s->objsize, gfpflags);
+		if (gfpflags & __GFP_WAIT)
+			local_irq_disable();
+		return object;
+	}
 	return NULL;
 debug:
 	if (!alloc_debug_processing(s, c->page, object, addr))
@@ -2679,6 +2688,7 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
+#if defined(SLUB_DEBUG) || defined(CONFIG_SLABINFO)
 static unsigned long count_partial(struct kmem_cache_node *n)
 {
 	unsigned long flags;
@@ -2691,6 +2701,7 @@ static unsigned long count_partial(struct kmem_cache_node *n)
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
+#endif
 
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
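
For orientation, the two __slab_alloc() changes above work together as follows: __GFP_ZERO is masked off because the caller (slab_alloc() in slub.c zeroes the object itself when __GFP_ZERO is set), and the page-allocator fallback re-enables interrupts around kmalloc_large(), which may sleep when __GFP_WAIT is set, while __slab_alloc() itself runs with interrupts disabled. A minimal sketch of that pattern, assuming the slub.c context of this patch; the helper name fallback_alloc_sketch is made up for illustration and is not part of the patch:

/*
 * Sketch only, not part of the patch: the fallback path that the
 * hunks above introduce into __slab_alloc().  Assumes slub.c context
 * (struct kmem_cache with ->objsize, kmalloc_large(), __GFP_WAIT).
 */
static void *fallback_alloc_sketch(struct kmem_cache *s, gfp_t gfpflags)
{
	void *object;

	/* Zeroing is handled by the caller (slab_alloc), so mask it here. */
	gfpflags &= ~__GFP_ZERO;

	/*
	 * kmalloc_large() goes to the page allocator and may sleep when
	 * __GFP_WAIT is set; this function is entered with IRQs off, so
	 * open an interrupt-enabled window around the call and restore
	 * the IRQ-off state the caller expects before returning.
	 */
	if (gfpflags & __GFP_WAIT)
		local_irq_enable();
	object = kmalloc_large(s->objsize, gfpflags);
	if (gfpflags & __GFP_WAIT)
		local_irq_disable();
	return object;
}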