author    Linus Torvalds <torvalds@linux-foundation.org>  2008-04-28 17:08:56 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-04-28 17:08:56 -0400
commit    e97e386b126c2d60b8da61ce1e4964b41b3d1514
tree      7e04b7f735004330777200c6742568fc130ff893 /include
parent    d9dedc13851f9cbd568fbc631a17b0be83404957
parent    c124f5b54f879e5870befcc076addbd5d614663f
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  slub: pack objects denser
  slub: Calculate min_objects based on number of processors.
  slub: Drop DEFAULT_MAX_ORDER / DEFAULT_MIN_OBJECTS
  slub: Simplify any_slab_object checks
  slub: Make the order configurable for each slab cache
  slub: Drop fallback to page allocator method
  slub: Fallback to minimal order during slab page allocation
  slub: Update statistics handling for variable order slabs
  slub: Add kmem_cache_order_objects struct
  slub: for_each_object must be passed the number of objects in a slab
  slub: Store max number of objects in the page struct.
  slub: Dump list of objects not freed on kmem_cache_close()
  slub: free_list() cleanup
  slub: improve kmem_cache_destroy() error message
  slob: fix bug - when slob allocates "struct kmem_cache", it does not force alignment.
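Several of the merged patches concern how many objects a slab page should hold. As an illustration of "slub: Calculate min_objects based on number of processors.", below is a minimal userspace sketch of one plausible form of that heuristic; the 4 * (fls(nr_cpu_ids) + 1) formula, the local fls(), and the sample CPU counts are assumptions for illustration, not quoted from the patch:

#include <stdio.h>

/* Stand-in for the kernel's fls(): 1-based index of the most
 * significant set bit, 0 for an all-zero word. */
static int fls(unsigned int v)
{
	int r = 0;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	/* Hypothetical nr_cpu_ids values for a few machine sizes. */
	unsigned int cpus[] = { 1, 4, 16, 64 };

	for (int i = 0; i < 4; i++)
		printf("%3u cpus -> min_objects = %d\n",
		       cpus[i], 4 * (fls(cpus[i]) + 1));
	return 0;
}

Larger machines thus target denser slabs (and hence higher page orders), which is also why the series makes the order configurable per cache and adds a fallback to a minimal order when high-order pages are unavailable.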
Diffstat (limited to 'include')
-rw-r--r--	include/linux/mm_types.h	5
-rw-r--r--	include/linux/slub_def.h	16
2 files changed, 18 insertions(+), 3 deletions(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 29adaa781cb6..e2bae8dde35a 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -42,7 +42,10 @@ struct page {
 					 * to show when page is mapped
 					 * & limit reverse map searches.
 					 */
-		unsigned int inuse;	/* SLUB: Nr of objects */
+		struct {		/* SLUB */
+			u16 inuse;
+			u16 objects;
+		};
 	};
 	union {
 		struct {
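For context on the hunk above: the single 32-bit inuse counter becomes two 16-bit fields in the same four bytes, so each slab page now records its total object capacity without growing struct page. A minimal sketch of that overlay, with the surrounding union simplified from the real struct page (in the kernel, the slot is shared with an atomic_t _mapcount):

#include <stdint.h>
#include <stdio.h>

struct page_counters {
	union {
		int _mapcount;            /* mapped-page use of the slot */
		struct {                  /* SLUB's use of the same bytes */
			uint16_t inuse;   /* objects currently allocated */
			uint16_t objects; /* total objects in the slab */
		};
	};
};

int main(void)
{
	struct page_counters pc = { .inuse = 3, .objects = 16 };

	/* Still 4 bytes, and the free count is now derivable per page. */
	printf("size=%zu free=%d\n", sizeof(pc), pc.objects - pc.inuse);
	return 0;
}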
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 79d59c937fac..71e43a12ebbb 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -29,6 +29,7 @@ enum stat_item {
 	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
 	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
 	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+	ORDER_FALLBACK,		/* Number of times fallback was necessary */
 	NR_SLUB_STAT_ITEMS };
 
 struct kmem_cache_cpu {
@@ -48,11 +49,21 @@ struct kmem_cache_node {
 	struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_t nr_slabs;
+	atomic_long_t total_objects;
 	struct list_head full;
 #endif
 };
 
 /*
+ * Word size structure that can be atomically updated or read and that
+ * contains both the order and the number of objects that a slab of the
+ * given order would contain.
+ */
+struct kmem_cache_order_objects {
+	unsigned long x;
+};
+
+/*
  * Slab cache management.
  */
 struct kmem_cache {
@@ -61,7 +72,7 @@ struct kmem_cache {
 	int size;		/* The size of an object including meta data */
 	int objsize;		/* The size of an object without meta data */
 	int offset;		/* Free pointer offset. */
-	int order;		/* Current preferred allocation order */
+	struct kmem_cache_order_objects oo;
 
 	/*
 	 * Avoid an extra cache line for UP, SMP and for the node local to
@@ -70,7 +81,8 @@ struct kmem_cache {
 	struct kmem_cache_node local_node;
 
 	/* Allocation and freeing of slabs */
-	int objects;		/* Number of objects in slab */
+	struct kmem_cache_order_objects max;
+	struct kmem_cache_order_objects min;
 	gfp_t allocflags;	/* gfp flags to use on each alloc */
 	int refcount;		/* Refcount for slab cache destroy */
 	void (*ctor)(struct kmem_cache *, void *);
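The new oo, min and max fields all use the word-size kmem_cache_order_objects container declared earlier in this diff. The decoding helpers live in mm/slub.c and are not shown here; the sketch below assumes the packing this series uses, page order in the bits above 16 and object count in the low 16 bits, with helper names (oo_make, oo_order, oo_objects) modeled on that code; treat the exact bit layout as illustrative:

#include <stdio.h>

/* Same shape as the struct added to slub_def.h above. */
struct kmem_cache_order_objects {
	unsigned long x;
};

/* Assumed packing: order in the upper bits, object count in the low
 * 16 bits, so one word-size read yields a consistent pair. */
static inline struct kmem_cache_order_objects oo_make(int order, int objects)
{
	struct kmem_cache_order_objects x = {
		((unsigned long)order << 16) + objects
	};

	return x;
}

static inline int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> 16;
}

static inline int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & ((1 << 16) - 1);
}

int main(void)
{
	/* e.g. an order-1 slab holding 128 objects */
	struct kmem_cache_order_objects oo = oo_make(1, 128);

	printf("order=%d objects=%d\n", oo_order(oo), oo_objects(oo));
	return 0;
}

Keeping both values in one unsigned long means a reader never sees an order paired with a stale object count, which matters once the order can vary at allocation time (the "Fallback to minimal order" patch, counted by the new ORDER_FALLBACK statistic).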