Diffstat (limited to 'mm/slab.c')

-rw-r--r--	mm/slab.c	57
1 file changed, 26 insertions, 31 deletions
@@ -191,22 +191,6 @@ typedef unsigned int kmem_bufctl_t;
 #define SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
 
 /*
- * struct slab
- *
- * Manages the objs in a slab. Placed either at the beginning of mem allocated
- * for a slab, or allocated from an general cache.
- * Slabs are chained into three list: fully used, partial, fully free slabs.
- */
-struct slab {
-	struct list_head list;
-	unsigned long colouroff;
-	void *s_mem;		/* including colour offset */
-	unsigned int inuse;	/* num of objs active in slab */
-	kmem_bufctl_t free;
-	unsigned short nodeid;
-};
-
-/*
  * struct slab_rcu
  *
  * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
@@ -219,8 +203,6 @@ struct slab {
  *
  * rcu_read_lock before reading the address, then rcu_read_unlock after
  * taking the spinlock within the structure expected at that address.
- *
- * We assume struct slab_rcu can overlay struct slab when destroying.
  */
 struct slab_rcu {
 	struct rcu_head head;
@@ -229,6 +211,27 @@ struct slab_rcu {
 };
 
 /*
+ * struct slab
+ *
+ * Manages the objs in a slab. Placed either at the beginning of mem allocated
+ * for a slab, or allocated from an general cache.
+ * Slabs are chained into three list: fully used, partial, fully free slabs.
+ */
+struct slab {
+	union {
+		struct {
+			struct list_head list;
+			unsigned long colouroff;
+			void *s_mem;		/* including colour offset */
+			unsigned int inuse;	/* num of objs active in slab */
+			kmem_bufctl_t free;
+			unsigned short nodeid;
+		};
+		struct slab_rcu __slab_cover_slab_rcu;
+	};
+};
+
+/*
  * struct array_cache
  *
  * Purpose:
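
Note: this hunk is why the "We assume struct slab_rcu can overlay struct slab" comment could be dropped above. The anonymous union makes the compiler size struct slab to hold whichever view is larger, so the overlay holds by construction. A minimal standalone sketch of the idea, with simplified userspace stand-ins for the kernel types (requires C11 or GNU C for the anonymous members):

	#include <stdio.h>

	/* simplified stand-ins for the kernel types */
	struct rcu_head_sketch { void *next; void (*func)(void *); };

	struct slab_rcu_sketch {
		struct rcu_head_sketch head;
		void *cachep;
		void *addr;
	};

	struct slab_sketch {
		union {
			struct {		/* the normal bookkeeping view */
				void *prev, *next;	/* stands in for struct list_head */
				unsigned long colouroff;
				void *s_mem;
				unsigned int inuse;
				unsigned int free;
				unsigned short nodeid;
			};
			/* the RCU view: same storage, fits by construction */
			struct slab_rcu_sketch __slab_cover_slab_rcu;
		};
	};

	int main(void)
	{
		/* a union is at least as large as its largest member, so
		 * the overlay the removed comment used to promise is now
		 * enforced by the compiler */
		printf("sizeof slab=%zu, sizeof slab_rcu=%zu\n",
		       sizeof(struct slab_sketch),
		       sizeof(struct slab_rcu_sketch));
		return 0;
	}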
@@ -1387,7 +1390,7 @@ static int __meminit slab_memory_callback(struct notifier_block *self,
 		break;
 	}
 out:
-	return ret ? notifier_from_errno(ret) : NOTIFY_OK;
+	return notifier_from_errno(ret);
 }
 #endif	/* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
 
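
This simplification works because notifier_from_errno() already maps 0 to NOTIFY_OK, making the old ternary redundant. For reference, the helper as defined in include/linux/notifier.h around the time of this change:

	static inline int notifier_from_errno(int err)
	{
		if (err)
			return NOTIFY_STOP_MASK | (NOTIFY_BAD - err);

		return NOTIFY_OK;
	}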
@@ -2147,8 +2150,6 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  *
  * @name must be valid until the cache is destroyed. This implies that
  * the module calling this has to destroy the cache before getting unloaded.
- * Note that kmem_cache_name() is not guaranteed to return the same pointer,
- * therefore applications must manage it themselves.
  *
  * The flags are
  *
@@ -2288,8 +2289,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	if (ralign < align) {
 		ralign = align;
 	}
-	/* disable debug if not aligning with REDZONE_ALIGN */
-	if (ralign & (__alignof__(unsigned long long) - 1))
+	/* disable debug if necessary */
+	if (ralign > __alignof__(unsigned long long))
 		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 	/*
 	 * 4) Store it.
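
Note the semantic shift here: the removed test fires when ralign is not a multiple of the word alignment, the new one when it exceeds it. A quick userspace sketch (assuming __alignof__(unsigned long long) is 8, as on most architectures) showing where the two predicates disagree:

	#include <stdio.h>

	int main(void)
	{
		unsigned long w = __alignof__(unsigned long long);	/* typically 8 */
		unsigned long samples[] = { 4, 8, 16, 32 };

		for (int i = 0; i < 4; i++) {
			unsigned long ralign = samples[i];
			printf("ralign=%2lu  old(misaligned)=%d  new(larger)=%d\n",
			       ralign,
			       (ralign & (w - 1)) != 0,	/* old: not a multiple of w */
			       (int)(ralign > w));	/* new: exceeds w */
		}
		return 0;
	}

With w == 8, only ralign=4 trips the old test, while ralign=16 and 32 trip the new one.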
@@ -2315,8 +2316,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 */
 	if (flags & SLAB_RED_ZONE) {
 		/* add space for red zone words */
-		cachep->obj_offset += align;
-		size += align + sizeof(unsigned long long);
+		cachep->obj_offset += sizeof(unsigned long long);
+		size += 2 * sizeof(unsigned long long);
 	}
 	if (flags & SLAB_STORE_USER) {
 		/* user store requires one word storage behind the end of
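
The new arithmetic reserves exactly one guard word on each side of the object, rather than a full alignment unit before it. Roughly, the debug layout becomes the following (illustrative only, with a made-up OBJ_SIZE; the kernel tracks this with offsets, not a struct):

	#define OBJ_SIZE 64				/* example object size */

	struct redzoned_object {
		unsigned long long redzone1;		/* obj_offset skips this word */
		unsigned char object[OBJ_SIZE];		/* what the caller sees */
		unsigned long long redzone2;		/* guard word after the object */
	};
	/* hence: obj_offset += sizeof(unsigned long long);
	 *        size      += 2 * sizeof(unsigned long long); */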
@@ -3840,12 +3841,6 @@ unsigned int kmem_cache_size(struct kmem_cache *cachep)
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
-const char *kmem_cache_name(struct kmem_cache *cachep)
-{
-	return cachep->name;
-}
-EXPORT_SYMBOL_GPL(kmem_cache_name);
-
 /*
  * This initializes kmem_list3 or resizes various caches for all nodes.
  */
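
Since kmem_cache_create() already requires the caller to keep @name valid for the cache's lifetime, a caller that wants the name back can simply retain its own pointer instead of calling the removed kmem_cache_name(). A hypothetical module-side sketch (my_cache and my_cache_name are invented names, not from this patch):

	static const char my_cache_name[] = "my_cache";	/* must outlive the cache */
	static struct kmem_cache *my_cache;

	static int __init my_module_init(void)
	{
		my_cache = kmem_cache_create(my_cache_name, 128, 0, 0, NULL);
		if (!my_cache)
			return -ENOMEM;
		pr_info("created cache: %s\n", my_cache_name);	/* no kmem_cache_name() */
		return 0;
	}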