author     Linus Torvalds <torvalds@linux-foundation.org>  2014-04-13 16:28:13 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-04-13 16:28:13 -0400
commit     bf3a340738bc78008e496257c04fb5a7fc8281e6 (patch)
tree       3e84d21261ff0c437f0ea2507df8c30844150769 /include
parent     321d03c86732e45f5f33ad0db5b68e2e1364acb9 (diff)
parent     34bf6ef94a835a8f1d8abd3e7d38c6c08d205867 (diff)
Merge branch 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull slab changes from Pekka Enberg:
"The biggest change is byte-sized freelist indices which reduces slab
freelist memory usage:
https://lkml.org/lkml/2013/12/2/64"
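
The mechanism behind that saving, in a minimal user-space C sketch (PAGE_SIZE_SKETCH, OBJ_SIZE_SKETCH and OBJS_PER_SLAB are illustrative constants, not the kernel's): SLAB keeps a per-slab freelist of object indices, and as long as a slab never holds more than 2^8 objects, a one-byte index suffices, shrinking the freelist to a quarter of its former 4-bytes-per-entry size.

#include <stdio.h>

#define PAGE_SIZE_SKETCH  4096  /* assumed page size: 2^12 bytes */
#define OBJ_SIZE_SKETCH     64  /* assumed object size */
#define OBJS_PER_SLAB  (PAGE_SIZE_SKETCH / OBJ_SIZE_SKETCH)

int main(void)
{
	/* old freelist: one unsigned int per object in the slab */
	size_t old_cost = OBJS_PER_SLAB * sizeof(unsigned int);
	/* new freelist: one byte per object, valid while
	 * OBJS_PER_SLAB <= 256 (the range of an unsigned char) */
	size_t new_cost = OBJS_PER_SLAB * sizeof(unsigned char);

	printf("objects per slab:     %d\n", OBJS_PER_SLAB);
	printf("freelist, int index:  %zu bytes\n", old_cost);
	printf("freelist, byte index: %zu bytes\n", new_cost);
	return 0;
}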
* 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
mm: slab/slub: use page->lru consistently instead of page->list
mm/slab.c: cleanup outdated comments and unify variables naming
slab: fix wrongly used macro
slub: fix high order page allocation problem with __GFP_NOFAIL
slab: Make allocations with GFP_ZERO slightly more efficient
slab: make more slab management structure off the slab
slab: introduce byte sized index for the freelist of a slab
slab: restrict the number of objects in a slab
slab: introduce helper functions to get/set free object
slab: factor out calculate nr objects in cache_estimate
Diffstat (limited to 'include')
-rw-r--r--  include/linux/mm_types.h   3  ++-
-rw-r--r--  include/linux/slab.h      11  +++++++++++
2 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 2b58d192ea24..8967e20cbe57 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -124,6 +124,8 @@ struct page {
 	union {
 		struct list_head lru;	/* Pageout list, eg. active_list
 					 * protected by zone->lru_lock !
+					 * Can be used as a generic list
+					 * by the page owner.
 					 */
 		struct {		/* slub per cpu partial pages */
 			struct page *next;	/* Next partial slab */
@@ -136,7 +138,6 @@ struct page {
 #endif
 	};
 
-	struct list_head list;	/* slobs list of pages */
 	struct slab *slab_page; /* slab fields */
 	struct rcu_head rcu_head;	/* Used by SLAB
 					 * when destroying via RCU
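
For illustration, a user-space sketch of the union layout this hunk leaves behind (struct page_sketch and the two-pointer list_head are hypothetical stand-ins for the kernel definitions): with the slob-only 'list' member removed, a page owner that wants to chain pages reuses 'lru' as a generic list head, which is exactly what the widened comment permits.

#include <stdio.h>

/* Hypothetical stand-ins: a two-pointer list head, and only the
 * union members this hunk touches from struct page. */
struct list_head {
	struct list_head *next, *prev;
};

struct page_sketch {
	union {
		struct list_head lru;	/* pageout list, or a generic
					 * list run by the page owner */
		struct {		/* slub per-cpu partial pages */
			struct page_sketch *next;	/* next partial slab */
			int pages;			/* nr of partials left */
		};
	};
};

int main(void)
{
	struct page_sketch p;

	/* the union members alias the same storage, so dropping the
	 * slob-only 'list' costs nothing: 'lru' already provides a
	 * list head at the same offset */
	printf("&p.lru  = %p\n", (void *)&p.lru);
	printf("&p.next = %p\n", (void *)&p.next);
	return 0;
}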
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 3dd389aa91c7..307bfbe62387 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -242,6 +242,17 @@ struct kmem_cache {
 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
 #endif
 
+/*
+ * This restriction comes from byte sized index implementation.
+ * Page size is normally 2^12 bytes and, in this case, if we want to use
+ * byte sized index which can represent 2^8 entries, the size of the object
+ * should be equal or greater to 2^12 / 2^8 = 2^4 = 16.
+ * If minimum size of kmalloc is less than 16, we use it as minimum object
+ * size and give up to use byte sized index.
+ */
+#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
+				(KMALLOC_MIN_SIZE) : 16)
+
 #ifndef CONFIG_SLOB
 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
 #ifdef CONFIG_ZONE_DMA
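
A compilable sketch of the arithmetic in the new comment (PAGE_SIZE_SKETCH and BYTE_INDEX_ENTRIES are assumed values for illustration, not kernel macros): a byte-sized index can name at most 2^8 objects, so with a 2^12-byte page any object smaller than 2^12 / 2^8 = 16 bytes could leave some objects unaddressable.

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE_SKETCH    4096  /* assumed: 2^12-byte page */
#define BYTE_INDEX_ENTRIES   256  /* 2^8 values in an unsigned char */
#define OBJ_MIN_SKETCH  (PAGE_SIZE_SKETCH / BYTE_INDEX_ENTRIES)

int main(void)
{
	/* 2^12 / 2^8 = 2^4 = 16: the smallest object size at which a
	 * byte index can still name every object in the slab */
	printf("minimum object size: %d bytes\n", OBJ_MIN_SKETCH);

	/* one byte smaller and the slab holds more objects than a
	 * byte index can distinguish: 4096 / 15 = 273 > 256 */
	int objs = PAGE_SIZE_SKETCH / (OBJ_MIN_SKETCH - 1);
	assert(objs > BYTE_INDEX_ENTRIES);
	printf("%d-byte objects -> %d per slab (> %d)\n",
	       OBJ_MIN_SKETCH - 1, objs, BYTE_INDEX_ENTRIES);
	return 0;
}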