diff options
| author | Pekka Enberg <penberg@kernel.org> | 2012-12-18 05:46:20 -0500 |
|---|---|---|
| committer | Pekka Enberg <penberg@kernel.org> | 2012-12-18 05:46:20 -0500 |
| commit | 08afe22c68d8c07e8e31ee6491c37f36199ba14b (patch) | |
| tree | 875d203149b74fddb50522fd5df3d6b154f5fe1e /include/linux | |
| parent | a304f836a2e6d257c1f918b3431f97ef6b33e02e (diff) | |
| parent | 4590685546a374fb0f60682ce0e3a6fd48911d46 (diff) | |
Merge branch 'slab/next' into slab/for-linus
Fix up a trivial merge conflict with commit baaf1dd ("mm/slob: use
min_t() to compare ARCH_SLAB_MINALIGN") that did not go through the slab
tree.
Conflicts:
mm/slob.c
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/mm_types.h | 7 | ||||
| -rw-r--r-- | include/linux/slab.h | 9 | ||||
| -rw-r--r-- | include/linux/slab_def.h | 6 |
3 files changed, 15 insertions, 7 deletions
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 31f8a3af7d9..2fef4e720e7 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
| @@ -128,10 +128,7 @@ struct page { | |||
| 128 | }; | 128 | }; |
| 129 | 129 | ||
| 130 | struct list_head list; /* slobs list of pages */ | 130 | struct list_head list; /* slobs list of pages */ |
| 131 | struct { /* slab fields */ | 131 | struct slab *slab_page; /* slab fields */ |
| 132 | struct kmem_cache *slab_cache; | ||
| 133 | struct slab *slab_page; | ||
| 134 | }; | ||
| 135 | }; | 132 | }; |
| 136 | 133 | ||
| 137 | /* Remainder is not double word aligned */ | 134 | /* Remainder is not double word aligned */ |
| @@ -146,7 +143,7 @@ struct page { | |||
| 146 | #if USE_SPLIT_PTLOCKS | 143 | #if USE_SPLIT_PTLOCKS |
| 147 | spinlock_t ptl; | 144 | spinlock_t ptl; |
| 148 | #endif | 145 | #endif |
| 149 | struct kmem_cache *slab; /* SLUB: Pointer to slab */ | 146 | struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */ |
| 150 | struct page *first_page; /* Compound tail pages */ | 147 | struct page *first_page; /* Compound tail pages */ |
| 151 | }; | 148 | }; |
| 152 | 149 | ||
diff --git a/include/linux/slab.h b/include/linux/slab.h index 83d1a1454b7..743a1041512 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
| @@ -128,7 +128,6 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, | |||
| 128 | void kmem_cache_destroy(struct kmem_cache *); | 128 | void kmem_cache_destroy(struct kmem_cache *); |
| 129 | int kmem_cache_shrink(struct kmem_cache *); | 129 | int kmem_cache_shrink(struct kmem_cache *); |
| 130 | void kmem_cache_free(struct kmem_cache *, void *); | 130 | void kmem_cache_free(struct kmem_cache *, void *); |
| 131 | unsigned int kmem_cache_size(struct kmem_cache *); | ||
| 132 | 131 | ||
| 133 | /* | 132 | /* |
| 134 | * Please use this macro to create slab caches. Simply specify the | 133 | * Please use this macro to create slab caches. Simply specify the |
| @@ -388,6 +387,14 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node) | |||
| 388 | return kmalloc_node(size, flags | __GFP_ZERO, node); | 387 | return kmalloc_node(size, flags | __GFP_ZERO, node); |
| 389 | } | 388 | } |
| 390 | 389 | ||
| 390 | /* | ||
| 391 | * Determine the size of a slab object | ||
| 392 | */ | ||
| 393 | static inline unsigned int kmem_cache_size(struct kmem_cache *s) | ||
| 394 | { | ||
| 395 | return s->object_size; | ||
| 396 | } | ||
| 397 | |||
| 391 | void __init kmem_cache_init_late(void); | 398 | void __init kmem_cache_init_late(void); |
| 392 | 399 | ||
| 393 | #endif /* _LINUX_SLAB_H */ | 400 | #endif /* _LINUX_SLAB_H */ |
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index cc290f0bdb3..45c0356fdc8 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h | |||
| @@ -89,9 +89,13 @@ struct kmem_cache { | |||
| 89 | * (see kmem_cache_init()) | 89 | * (see kmem_cache_init()) |
| 90 | * We still use [NR_CPUS] and not [1] or [0] because cache_cache | 90 | * We still use [NR_CPUS] and not [1] or [0] because cache_cache |
| 91 | * is statically defined, so we reserve the max number of cpus. | 91 | * is statically defined, so we reserve the max number of cpus. |
| 92 | * | ||
| 93 | * We also need to guarantee that the list is able to accommodate a | ||
| 94 | * pointer for each node since "nodelists" uses the remainder of | ||
| 95 | * available pointers. | ||
| 92 | */ | 96 | */ |
| 93 | struct kmem_list3 **nodelists; | 97 | struct kmem_list3 **nodelists; |
| 94 | struct array_cache *array[NR_CPUS]; | 98 | struct array_cache *array[NR_CPUS + MAX_NUMNODES]; |
| 95 | /* | 99 | /* |
| 96 | * Do not add fields after array[] | 100 | * Do not add fields after array[] |
| 97 | */ | 101 | */ |
