Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--	include/linux/slab.h	19
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1e2f4fe12773..b5b2df60299e 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -205,8 +205,8 @@ struct kmem_cache {
 
 #ifdef CONFIG_SLUB
 /*
- * SLUB allocates up to order 2 pages directly and otherwise
- * passes the request to the page allocator.
+ * SLUB directly allocates requests fitting in to an order-1 page
+ * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
  */
 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
 #define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
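
Elsewhere in this header (outside the hunk shown) the two shifts become the
byte limits callers actually see; a minimal sketch of that relationship,
assuming the common PAGE_SHIFT == 12:

    /* With PAGE_SHIFT == 12:
     *   KMALLOC_SHIFT_HIGH = 13 -> KMALLOC_MAX_CACHE_SIZE = 8 KiB,
     * i.e. one order-1 page (PAGE_SIZE * 2). Requests above that take
     * the kmalloc_large() path into the page allocator.
     */
    #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
    #define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)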
@@ -217,12 +217,12 @@ struct kmem_cache {
 
 #ifdef CONFIG_SLOB
 /*
- * SLOB passes all page size and larger requests to the page allocator.
+ * SLOB passes all requests larger than one page to the page allocator.
  * No kmalloc array is necessary since objects of different sizes can
  * be allocated from the same page.
  */
-#define KMALLOC_SHIFT_MAX	30
 #define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
+#define KMALLOC_SHIFT_MAX	30
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	3
 #endif
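
Under SLOB the cutoff is one page rather than two; a simplified sketch of
the dispatch the corrected comment describes (illustrative only -- the real
code in mm/slob.c also accounts for alignment, and slob_alloc_from_page()
is a hypothetical stand-in):

    static void *kmalloc_sketch(size_t size, gfp_t flags)
    {
    	if (size <= PAGE_SIZE)	/* served from a shared slob page */
    		return slob_alloc_from_page(size, flags); /* hypothetical */
    	/* larger than one page: straight to the page allocator */
    	return (void *)__get_free_pages(flags, get_order(size));
    }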
@@ -410,7 +410,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
  *
  * %GFP_NOWAIT - Allocation will not sleep.
  *
- * %GFP_THISNODE - Allocate node-local memory only.
+ * %__GFP_THISNODE - Allocate node-local memory only.
  *
  * %GFP_DMA - Allocation suitable for DMA.
  *   Should only be used for kmalloc() caches. Otherwise, use a
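
With the flag name corrected, a hedged usage example (illustrative snippet,
not part of this patch; dev and len stand in for a real caller's context):

    /* Allocate on the device's NUMA node; fail instead of falling
     * back to a remote node. */
    buf = kmalloc_node(len, GFP_KERNEL | __GFP_THISNODE, dev_to_node(dev));
    if (!buf)
    	return -ENOMEM;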
@@ -513,7 +513,9 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  *
  * Both the root cache and the child caches will have it. For the root cache,
  * this will hold a dynamically allocated array large enough to hold
- * information about the currently limited memcgs in the system.
+ * information about the currently limited memcgs in the system. To allow the
+ * array to be accessed without taking any locks, on relocation we free the old
+ * version only after a grace period.
  *
  * Child caches will hold extra metadata needed for its operation. Fields are:
  *
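
The grace-period rule above pairs with the rcu_head added to the union in
the next hunk; a simplified sketch of the relocation pattern, assumed from
the description here (the real update lives in mm/slab_common.c and differs
in naming and error handling):

    static int memcg_grow_array_sketch(struct kmem_cache *s,
    				   int old_size, int new_size)
    {
    	struct memcg_cache_params *old, *new;
    
    	new = kzalloc(sizeof(*new) +
    		      new_size * sizeof(struct kmem_cache *), GFP_KERNEL);
    	if (!new)
    		return -ENOMEM;
    
    	old = s->memcg_params;
    	if (old)
    		memcpy(new->memcg_caches, old->memcg_caches,
    		       old_size * sizeof(struct kmem_cache *));
    
    	/* Publish the new array for lockless readers. */
    	rcu_assign_pointer(s->memcg_params, new);
    
    	/* Free the old array only after every current RCU reader is
    	 * done with it -- this is what the embedded rcu_head is for. */
    	if (old)
    		kfree_rcu(old, rcu_head);
    	return 0;
    }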
@@ -528,7 +530,10 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 struct memcg_cache_params {
 	bool is_root_cache;
 	union {
-		struct kmem_cache *memcg_caches[0];
+		struct {
+			struct rcu_head rcu_head;
+			struct kmem_cache *memcg_caches[0];
+		};
 		struct {
 			struct mem_cgroup *memcg;
 			struct list_head list;