author     Christoph Lameter <cl@linux.com>  2015-11-20 18:57:38 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-11-20 19:17:32 -0500
commit     87098373e244840e00bd1c93884c1d917411597e
tree       3edf3bc77482a1424c11732060bc86fc589df242 /mm
parent     a380a3c75529a5c42b78c0d64a46404f8cb0c0d1
slub: avoid irqoff/on in bulk allocation
Use the new function that can do allocation while interrupts are disabled.
Avoids irq on/off sequences.

Signed-off-by: Christoph Lameter <cl@linux.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Alexander Duyck <alexander.h.duyck@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
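The gist: ___slab_alloc() tolerates being called with interrupts already disabled, so the bulk-allocation loop's slow-path refill no longer has to bracket every freelist miss with local_irq_enable()/local_irq_disable(). A condensed sketch of that branch before and after this patch, taken from the diff below (error unwinding moves to the shared error: label):

	/* Before: slow path demanded interrupts on, toggled per refill */
	if (unlikely(!object)) {
		local_irq_enable();
		p[i] = __slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_, c);
		if (unlikely(!p[i])) {
			__kmem_cache_free_bulk(s, i, p);
			return false;
		}
		local_irq_disable();
		c = this_cpu_ptr(s->cpu_slab);
		continue;
	}

	/* After: ___slab_alloc() runs with interrupts still off */
	if (unlikely(!object)) {
		p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_, c);
		if (unlikely(!p[i]))
			goto error;	/* unwind partial allocation, re-enable irqs */
		c = this_cpu_ptr(s->cpu_slab);
		continue;
	}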
Diffstat (limited to 'mm')
-rw-r--r--  mm/slub.c  24
1 file changed, 11 insertions(+), 13 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 2a952751bb50..23f9d8d26422 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2818,30 +2818,23 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 		void *object = c->freelist;
 
 		if (unlikely(!object)) {
-			local_irq_enable();
 			/*
 			 * Invoking slow path likely have side-effect
 			 * of re-populating per CPU c->freelist
 			 */
-			p[i] = __slab_alloc(s, flags, NUMA_NO_NODE,
+			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
 					    _RET_IP_, c);
-			if (unlikely(!p[i])) {
-				__kmem_cache_free_bulk(s, i, p);
-				return false;
-			}
-			local_irq_disable();
+			if (unlikely(!p[i]))
+				goto error;
+
 			c = this_cpu_ptr(s->cpu_slab);
 			continue; /* goto for-loop */
 		}
 
 		/* kmem_cache debug support */
 		s = slab_pre_alloc_hook(s, flags);
-		if (unlikely(!s)) {
-			__kmem_cache_free_bulk(s, i, p);
-			c->tid = next_tid(c->tid);
-			local_irq_enable();
-			return false;
-		}
+		if (unlikely(!s))
+			goto error;
 
 		c->freelist = get_freepointer(s, object);
 		p[i] = object;
@@ -2861,6 +2854,11 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	}
 
 	return true;
+
+error:
+	__kmem_cache_free_bulk(s, i, p);
+	local_irq_enable();
+	return false;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
 
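For context, a hypothetical caller of the bulk API (my_cache, objs and the count are made up for illustration; per the signature in the hunk header, kmem_cache_alloc_bulk() returns bool here and succeeds only if every requested object was allocated):

	void *objs[16];

	if (kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs)) {
		/* ... use the objects ... */
		kmem_cache_free_bulk(my_cache, ARRAY_SIZE(objs), objs);
	}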