commit 865762a8119e74b5f0e236d2d8eaaf8be9292a06
tree   71d18041df3089d9baea367701f6eb503b98ebbc
parent 033745189b1bae3fc931beeaf48604ee7c259309
author    Jesper Dangaard Brouer <brouer@redhat.com>  2015-11-20 18:57:58 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-11-22 14:58:44 -0500
slab/slub: adjust kmem_cache_alloc_bulk API
Adjust the kmem_cache_alloc_bulk API before we have any real users: change
the return type from 'bool' to 'int' to allow future extension of the bulk
alloc API.

A future extension could allow SLUB to stop at a slab-page boundary, when
specified by a flag, and return the number of objects allocated up to that
point.  The advantage of this approach is that it would make it easier to
run bulk alloc without local IRQs disabled, using cmpxchg to "steal" the
entire c->freelist or page->freelist; to avoid overshooting, we would stop
processing at a slab-page boundary, else we always end up returning some
objects at the cost of another cmpxchg.

To stay compatible with future users of this API that use the new flag
while linking against an older kernel, we need to return the number of
allocated objects with this API change.

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
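A minimal caller sketch (illustration only, not part of this commit; the
function name and NR_OBJS are invented for the example): with this patch,
kmem_cache_alloc_bulk() returns the number of allocated objects, which is
equal to 'size' on full success and 0 on failure, so callers check a count
rather than a bool.

	#include <linux/errno.h>
	#include <linux/slab.h>

	#define NR_OBJS 16	/* arbitrary batch size for the example */

	static int example_bulk_use(struct kmem_cache *cachep)
	{
		void *objs[NR_OBJS];
		int nr;

		nr = kmem_cache_alloc_bulk(cachep, GFP_KERNEL, NR_OBJS, objs);
		if (!nr)
			return -ENOMEM;	/* nothing was allocated */

		/* ... use objs[0] through objs[nr - 1] ... */

		kmem_cache_free_bulk(cachep, nr, objs);	/* give them all back */
		return 0;
	}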
 include/linux/slab.h |  2 +-
 mm/slab.c            |  2 +-
 mm/slab.h            |  2 +-
 mm/slab_common.c     |  6 +++---
 mm/slob.c            |  2 +-
 mm/slub.c            |  8 ++++----
 6 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 96940772bb92..2037a861e367 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -316,7 +316,7 @@ void kmem_cache_free(struct kmem_cache *, void *);
  * Note that interrupts must be enabled when calling these functions.
  */
 void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
 
 #ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
diff --git a/mm/slab.c b/mm/slab.c
index e0819fa96559..4765c97ce690 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3419,7 +3419,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
-bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 								void **p)
 {
 	return __kmem_cache_alloc_bulk(s, flags, size, p);
diff --git a/mm/slab.h b/mm/slab.h
index 27492eb678f7..7b6087197997 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -170,7 +170,7 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
  * may be allocated or freed using these operations.
  */
 void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-bool __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
 
 #ifdef CONFIG_MEMCG_KMEM
 /*
diff --git a/mm/slab_common.c b/mm/slab_common.c
index d88e97c10a2e..3c6a86b4ec25 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -112,7 +112,7 @@ void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
 		kmem_cache_free(s, p[i]);
 }
 
-bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
+int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 								void **p)
 {
 	size_t i;
@@ -121,10 +121,10 @@ bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 		void *x = p[i] = kmem_cache_alloc(s, flags);
 		if (!x) {
 			__kmem_cache_free_bulk(s, i, p);
-			return false;
+			return 0;
 		}
 	}
-	return true;
+	return i;
 }
 
 #ifdef CONFIG_MEMCG_KMEM
diff --git a/mm/slob.c b/mm/slob.c
index 0d7e5df74d1f..17e8f8cc7c53 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -617,7 +617,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
-bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 								void **p)
 {
 	return __kmem_cache_alloc_bulk(s, flags, size, p);
diff --git a/mm/slub.c b/mm/slub.c
index 34847044dfe5..46997517406e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2909,8 +2909,8 @@ void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
 /* Note that interrupts must be enabled when calling this function. */
-bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
-			   void **p)
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+			  void **p)
 {
 	struct kmem_cache_cpu *c;
 	int i;
@@ -2959,12 +2959,12 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 
 	/* memcg and kmem_cache debug support */
 	slab_post_alloc_hook(s, flags, size, p);
-	return true;
+	return i;
 error:
 	local_irq_enable();
 	slab_post_alloc_hook(s, flags, i, p);
 	__kmem_cache_free_bulk(s, i, p);
-	return false;
+	return 0;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
 
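The compatibility argument in the changelog can be made concrete with a
forward-looking caller sketch (again not part of the commit; fill_objs is
an invented name, and the retry behavior anticipates the hypothetical
page-boundary flag, not current semantics). On this kernel the return is
always 0 or the full 'size', so the loop body runs once; on a future
kernel that may return a partial count, the same code simply iterates.

	#include <linux/errno.h>
	#include <linux/slab.h>

	/* Trust only the returned count; never assume all-or-nothing. */
	static int fill_objs(struct kmem_cache *cachep, size_t want, void **objs)
	{
		size_t got = 0;

		while (got < want) {
			int nr = kmem_cache_alloc_bulk(cachep, GFP_KERNEL,
						       want - got, objs + got);
			if (!nr) {
				/* total failure: release what we already hold */
				kmem_cache_free_bulk(cachep, got, objs);
				return -ENOMEM;
			}
			got += nr;	/* partial counts are handled for free */
		}
		return 0;
	}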