aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-12-18 13:56:07 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2012-12-18 13:56:07 -0500
commitae664dba2724e59ddd66291b895f7370e28b9a7a (patch)
treed6e214bdc9999bcb8b0a067053aa6934cfd9d60e /include
parenta2faf2fc534f57ba26bc4d613795236ed4f5fb1c (diff)
parent08afe22c68d8c07e8e31ee6491c37f36199ba14b (diff)
Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull SLAB changes from Pekka Enberg: "This contains preparational work from Christoph Lameter and Glauber Costa for SLAB memcg and cleanups and improvements from Ezequiel Garcia and Joonsoo Kim. Please note that the SLOB cleanup commit from Arnd Bergmann already appears in your tree but I had also merged it myself which is why it shows up in the shortlog." * 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux: mm/sl[aou]b: Common alignment code slab: Use the new create_boot_cache function to simplify bootstrap slub: Use statically allocated kmem_cache boot structure for bootstrap mm, sl[au]b: create common functions for boot slab creation slab: Simplify bootstrap slub: Use correct cpu_slab on dead cpu mm: fix slab.c kernel-doc warnings mm/slob: use min_t() to compare ARCH_SLAB_MINALIGN slab: Ignore internal flags in cache creation mm/slob: Use free_page instead of put_page for page-size kmalloc allocations mm/sl[aou]b: Move common kmem_cache_size() to slab.h mm/slob: Use object_size field in kmem_cache_size() mm/slob: Drop usage of page->private for storing page-sized allocations slub: Commonize slab_cache field in struct page sl[au]b: Process slabinfo_show in common code mm/sl[au]b: Move print_slabinfo_header to slab_common.c mm/sl[au]b: Move slabinfo processing to slab_common.c slub: remove one code path and reduce lock contention in __slab_free()
Diffstat (limited to 'include')
-rw-r--r--include/linux/mm_types.h7
-rw-r--r--include/linux/slab.h9
-rw-r--r--include/linux/slab_def.h6
3 files changed, 15 insertions, 7 deletions
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 7d9ebb7cc982..f8f5162a3571 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -128,10 +128,7 @@ struct page {
128 }; 128 };
129 129
130 struct list_head list; /* slobs list of pages */ 130 struct list_head list; /* slobs list of pages */
131 struct { /* slab fields */ 131 struct slab *slab_page; /* slab fields */
132 struct kmem_cache *slab_cache;
133 struct slab *slab_page;
134 };
135 }; 132 };
136 133
137 /* Remainder is not double word aligned */ 134 /* Remainder is not double word aligned */
@@ -146,7 +143,7 @@ struct page {
146#if USE_SPLIT_PTLOCKS 143#if USE_SPLIT_PTLOCKS
147 spinlock_t ptl; 144 spinlock_t ptl;
148#endif 145#endif
149 struct kmem_cache *slab; /* SLUB: Pointer to slab */ 146 struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */
150 struct page *first_page; /* Compound tail pages */ 147 struct page *first_page; /* Compound tail pages */
151 }; 148 };
152 149
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 83d1a1454b7e..743a10415122 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -128,7 +128,6 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
128void kmem_cache_destroy(struct kmem_cache *); 128void kmem_cache_destroy(struct kmem_cache *);
129int kmem_cache_shrink(struct kmem_cache *); 129int kmem_cache_shrink(struct kmem_cache *);
130void kmem_cache_free(struct kmem_cache *, void *); 130void kmem_cache_free(struct kmem_cache *, void *);
131unsigned int kmem_cache_size(struct kmem_cache *);
132 131
133/* 132/*
134 * Please use this macro to create slab caches. Simply specify the 133 * Please use this macro to create slab caches. Simply specify the
@@ -388,6 +387,14 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
388 return kmalloc_node(size, flags | __GFP_ZERO, node); 387 return kmalloc_node(size, flags | __GFP_ZERO, node);
389} 388}
390 389
390/*
391 * Determine the size of a slab object
392 */
393static inline unsigned int kmem_cache_size(struct kmem_cache *s)
394{
395 return s->object_size;
396}
397
391void __init kmem_cache_init_late(void); 398void __init kmem_cache_init_late(void);
392 399
393#endif /* _LINUX_SLAB_H */ 400#endif /* _LINUX_SLAB_H */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index cc290f0bdb34..45c0356fdc8c 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -89,9 +89,13 @@ struct kmem_cache {
89 * (see kmem_cache_init()) 89 * (see kmem_cache_init())
90 * We still use [NR_CPUS] and not [1] or [0] because cache_cache 90 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
91 * is statically defined, so we reserve the max number of cpus. 91 * is statically defined, so we reserve the max number of cpus.
92 *
93 * We also need to guarantee that the list is able to accommodate a
94 * pointer for each node since "nodelists" uses the remainder of
95 * available pointers.
92 */ 96 */
93 struct kmem_list3 **nodelists; 97 struct kmem_list3 **nodelists;
94 struct array_cache *array[NR_CPUS]; 98 struct array_cache *array[NR_CPUS + MAX_NUMNODES];
95 /* 99 /*
96 * Do not add fields after array[] 100 * Do not add fields after array[]
97 */ 101 */