author     Linus Torvalds <torvalds@linux-foundation.org>   2013-11-22 11:10:34 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-11-22 11:10:34 -0500
commit     24f971abbda045c24d5d6f2438a7785567d2fde9 (patch)
tree       a4df2b80eafa1199625b53464bcf34e786a03a28 /include/linux
parent     3bab0bf045e1cc4880e2cfc9351e52cf7ec8e35e (diff)
parent     721ae22ae1a51c25871b7a0b543877aa94ff2a20 (diff)
Merge branch 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull SLAB changes from Pekka Enberg:
 "The patches from Joonsoo Kim switch mm/slab.c to use 'struct page' for
  slab internals similar to mm/slub.c.  This reduces memory usage and
  improves performance:

    https://lkml.org/lkml/2013/10/16/155

  Rest of the changes are bug fixes from various people"

* 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux: (21 commits)
  mm, slub: fix the typo in mm/slub.c
  mm, slub: fix the typo in include/linux/slub_def.h
  slub: Handle NULL parameter in kmem_cache_flags
  slab: replace non-existing 'struct freelist *' with 'void *'
  slab: fix to calm down kmemleak warning
  slub: proper kmemleak tracking if CONFIG_SLUB_DEBUG disabled
  slab: rename slab_bufctl to slab_freelist
  slab: remove useless statement for checking pfmemalloc
  slab: use struct page for slab management
  slab: replace free and inuse in struct slab with newly introduced active
  slab: remove SLAB_LIMIT
  slab: remove kmem_bufctl_t
  slab: change the management method of free objects of the slab
  slab: use __GFP_COMP flag for allocating slab pages
  slab: use well-defined macro, virt_to_slab()
  slab: overloading the RCU head over the LRU for RCU free
  slab: remove cachep in struct slab_rcu
  slab: remove nodeid in struct slab
  slab: remove colouroff in struct slab
  slab: change return type of kmem_getpages() to struct page
  ...
Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/mm_types.h   | 24
-rw-r--r--   include/linux/slab.h       |  9
-rw-r--r--   include/linux/slab_def.h   |  4
-rw-r--r--   include/linux/slub_def.h   |  2
4 files changed, 27 insertions(+), 12 deletions(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 011eb85d7b0f..bd299418a934 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -44,18 +44,22 @@ struct page {
 	/* First double word block */
 	unsigned long flags;		/* Atomic flags, some possibly
 					 * updated asynchronously */
-	struct address_space *mapping;	/* If low bit clear, points to
-					 * inode address_space, or NULL.
-					 * If page mapped as anonymous
-					 * memory, low bit is set, and
-					 * it points to anon_vma object:
-					 * see PAGE_MAPPING_ANON below.
-					 */
+	union {
+		struct address_space *mapping;	/* If low bit clear, points to
+						 * inode address_space, or NULL.
+						 * If page mapped as anonymous
+						 * memory, low bit is set, and
+						 * it points to anon_vma object:
+						 * see PAGE_MAPPING_ANON below.
+						 */
+		void *s_mem;			/* slab first object */
+	};
+
 	/* Second double word */
 	struct {
 		union {
 			pgoff_t index;		/* Our offset within mapping. */
-			void *freelist;		/* slub/slob first free object */
+			void *freelist;		/* sl[aou]b first free object */
 			bool pfmemalloc;	/* If set by the page allocator,
 						 * ALLOC_NO_WATERMARKS was set
 						 * and the low watermark was not
@@ -111,6 +115,7 @@ struct page {
 				};
 				atomic_t _count;		/* Usage count, see below. */
 			};
+			unsigned int active;	/* SLAB */
 		};
 	};
 
@@ -132,6 +137,9 @@ struct page {
 
 	struct list_head list;	/* slobs list of pages */
 	struct slab *slab_page; /* slab fields */
+	struct rcu_head rcu_head;	/* Used by SLAB
+					 * when destroying via RCU
+					 */
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
 	pgtable_t pmd_huge_pte; /* protected by page->ptl */
 #endif
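
For orientation, here is a minimal sketch of how the fields added above are meant to cooperate once mm/slab.c keeps its metadata in struct page: s_mem points at the first object in the page, freelist at the per-page array of free object indices, and active counts the objects handed out. The cache type and helper names below are hypothetical, not code from this series.

#include <linux/mm_types.h>

/*
 * Illustrative only: a toy per-page object manager in the style the
 * series moves mm/slab.c towards.  'toy_cache', 'toy_obj' and
 * 'toy_alloc_from_page' are made-up names.
 */
struct toy_cache {
	size_t size;		/* object size in bytes */
	unsigned int num;	/* objects that fit in one slab page */
};

static void *toy_obj(struct toy_cache *c, struct page *page, unsigned int idx)
{
	/* object 'idx' lives at a fixed offset from the first object */
	return (char *)page->s_mem + idx * c->size;
}

static void *toy_alloc_from_page(struct toy_cache *c, struct page *page)
{
	unsigned int *freelist = page->freelist;

	if (page->active == c->num)
		return NULL;	/* every object in this page is in use */

	/* consume the next free index and account for it in 'active' */
	return toy_obj(c, page, freelist[page->active++]);
}
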
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 74f105847d13..c2bba248fa63 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -53,7 +53,14 @@
  *	}
  * rcu_read_unlock();
  *
- * See also the comment on struct slab_rcu in mm/slab.c.
+ * This is useful if we need to approach a kernel structure obliquely,
+ * from its address obtained without the usual locking. We can lock
+ * the structure to stabilize it and check it's still at the given address,
+ * only if we can be sure that the memory has not been meanwhile reused
+ * for some other kind of object (which our subsystem's lock might corrupt).
+ *
+ * rcu_read_lock before reading the address, then rcu_read_unlock after
+ * taking the spinlock within the structure expected at that address.
  */
 #define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
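
The replacement comment spells out the SLAB_DESTROY_BY_RCU discipline: while a reader is inside an RCU read-side section, memory from such a cache may be recycled for another object of the same cache but not returned to the page allocator, so the reader must take the object's own lock and then re-check its identity. A minimal sketch of that pattern, with a hypothetical 'struct obj' and lookup helper:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct obj {
	spinlock_t lock;
	unsigned long key;
};

/* hypothetical lockless lookup into some hash table; stubbed for the sketch */
static struct obj *my_table_lookup(unsigned long key)
{
	return NULL;
}

/* returns the object locked, or NULL if it vanished or was reused */
static struct obj *obj_lookup_stable(unsigned long key)
{
	struct obj *o;

	rcu_read_lock();
	o = my_table_lookup(key);	/* address obtained without locks */
	if (o) {
		spin_lock(&o->lock);	/* stabilize the candidate object ... */
		if (o->key != key) {	/* ... then re-check it is still ours */
			spin_unlock(&o->lock);
			o = NULL;	/* freed and reused meanwhile */
		}
	}
	rcu_read_unlock();		/* after the spinlock, per the comment */

	return o;
}
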
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index e9346b4f1ef4..09bfffb08a56 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -27,8 +27,8 @@ struct kmem_cache {
 
 	size_t colour;			/* cache colouring range */
 	unsigned int colour_off;	/* colour offset */
-	struct kmem_cache *slabp_cache;
-	unsigned int slab_size;
+	struct kmem_cache *freelist_cache;
+	unsigned int freelist_size;
 
 	/* constructor func */
 	void (*ctor)(void *obj);
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index cc0b67eada42..f56bfa9e4526 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -11,7 +11,7 @@
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
 	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
-	FREE_FASTPATH,		/* Free to cpu slub */
+	FREE_FASTPATH,		/* Free to cpu slab */
 	FREE_SLOWPATH,		/* Freeing not to cpu slab */
 	FREE_FROZEN,		/* Freeing to frozen slab */
 	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */