Diffstat (limited to 'mm/slab.h')
 mm/slab.h | 43 ++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 42 insertions(+), 1 deletion(-)
diff --git a/mm/slab.h b/mm/slab.h
index 34a98d642196..f96b49e4704e 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -16,7 +16,7 @@ enum slab_state {
 	DOWN,			/* No slab functionality yet */
 	PARTIAL,		/* SLUB: kmem_cache_node available */
 	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
-	PARTIAL_L3,		/* SLAB: kmalloc size for l3 struct available */
+	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
 	UP,			/* Slab caches usable but not all extras yet */
 	FULL			/* Everything is working */
 };
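
The hunk above renames the bootstrap state: SLAB's per-node bookkeeping, formerly the "l3 struct", is becoming the common struct kmem_cache_node, and PARTIAL_NODE marks the point in boot where a kmalloc cache large enough to hold it exists. A minimal userspace sketch of how boot code might gate on this state; the helper name is hypothetical and only the enum values mirror the patch:

#include <stdbool.h>

enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

static enum slab_state slab_state = DOWN;

/* Hypothetical helper, not part of this patch: per-node structures
 * can only be kmalloc'ed once their backing cache is available. */
static bool can_alloc_node_struct(void)
{
	return slab_state >= PARTIAL_NODE;
}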
@@ -35,6 +35,15 @@ extern struct kmem_cache *kmem_cache;
 unsigned long calculate_alignment(unsigned long flags,
 		unsigned long align, unsigned long size);
 
+#ifndef CONFIG_SLOB
+/* Kmalloc array related functions */
+void create_kmalloc_caches(unsigned long);
+
+/* Find the kmalloc slab corresponding for a certain size */
+struct kmem_cache *kmalloc_slab(size_t, gfp_t);
+#endif
+
+
 /* Functions provided by the slab allocators */
 extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
 
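
These two declarations move kmalloc array handling into common code: create_kmalloc_caches() sets the array up at boot, and kmalloc_slab() maps a request size (plus gfp flags) to the matching cache. A userspace sketch of the size-to-cache rounding; the bounds, the cache table, and the minimum cache size are illustrative assumptions, and gfp flags (which in the kernel can select e.g. DMA caches) are ignored:

#include <stddef.h>

#define KMALLOC_SHIFT_HIGH 13	/* assumption: largest kmalloc cache is 8 KiB */

struct kmem_cache;		/* opaque for this sketch */
static struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];

static struct kmem_cache *kmalloc_slab_sketch(size_t size)
{
	unsigned int index = 3;	/* assumption: smallest cache is 1 << 3 = 8 bytes */

	if (size == 0 || size > (1UL << KMALLOC_SHIFT_HIGH))
		return NULL;	/* out of range: caller must handle it */

	while ((1UL << index) < size)
		index++;	/* round up to the next power-of-two cache */

	return kmalloc_caches[index];
}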
@@ -230,3 +239,35 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 	return s;
 }
 #endif
+
+
+/*
+ * The slab lists for all objects.
+ */
+struct kmem_cache_node {
+	spinlock_t list_lock;
+
+#ifdef CONFIG_SLAB
+	struct list_head slabs_partial;	/* partial list first, better asm code */
+	struct list_head slabs_full;
+	struct list_head slabs_free;
+	unsigned long free_objects;
+	unsigned int free_limit;
+	unsigned int colour_next;	/* Per-node cache coloring */
+	struct array_cache *shared;	/* shared per node */
+	struct array_cache **alien;	/* on other nodes */
+	unsigned long next_reap;	/* updated without locking */
+	int free_touched;		/* updated without locking */
+#endif
+
+#ifdef CONFIG_SLUB
+	unsigned long nr_partial;
+	struct list_head partial;
+#ifdef CONFIG_SLUB_DEBUG
+	atomic_long_t nr_slabs;
+	atomic_long_t total_objects;
+	struct list_head full;
+#endif
+#endif
+
+};
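
The struct added above gives SLAB (whose per-node bookkeeping was the "l3 struct" renamed in the first hunk) and SLUB a single shared kmem_cache_node definition, with each allocator's fields under its own config guard. Keeping the lists and list_lock per NUMA node lets each node manage its slabs without contending on a global lock. A userspace sketch of how an allocation path might consult the node's partial list, trimmed to the SLUB-side fields; the struct name and helper here are illustrative, and the real allocator holds list_lock around any list access:

#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

struct kmem_cache_node_sketch {
	unsigned long nr_partial;	/* number of slabs on the partial list */
	struct list_head partial;	/* slabs with both used and free objects */
};

static struct list_head *first_partial(struct kmem_cache_node_sketch *n)
{
	if (n->nr_partial == 0)
		return NULL;		/* nothing partial: allocate a fresh slab */
	return n->partial.next;		/* head of this node's partial list */
}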