author     Linus Torvalds <torvalds@linux-foundation.org>    2011-06-13 16:00:53 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2011-06-13 16:00:53 -0400
commit     40779859de0f73b40390c6401a024d06cf024290
tree       b799c66c3886a8be5c40c3c382f1a5feda7dabef
parent     ffdb8f1bfbd9cef1394f5d3c4a774015d4ac0f97
parent     a947eb95ea03199da7408a64baa97fbb613e9b84
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
SLAB: Record actual last user of freed objects.
slub: always align cpu_slab to honor cmpxchg_double requirement
-rw-r--r--  include/linux/percpu.h |  3
-rw-r--r--  mm/slab.c              |  9
-rw-r--r--  mm/slub.c              | 12
3 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 8b97308e65df..9ca008f0c542 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -259,6 +259,9 @@ extern void __bad_size_call_parameter(void);
  * Special handling for cmpxchg_double. cmpxchg_double is passed two
  * percpu variables. The first has to be aligned to a double word
  * boundary and the second has to follow directly thereafter.
+ * We enforce this on all architectures even if they don't support
+ * a double cmpxchg instruction, since it's a cheap requirement, and it
+ * avoids breaking the requirement for architectures with the instruction.
  */
 #define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)	\
 ({								\
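The comment added above states the layout contract for this_cpu_cmpxchg_double(): the first percpu variable must sit on a double-word boundary and the second must follow it directly. Below is a minimal, hypothetical sketch of a percpu pair laid out to satisfy that contract; the struct and function names are illustrative only and not part of this commit.

#include <linux/percpu.h>
#include <linux/types.h>

/*
 * Hypothetical percpu pair laid out per the rule above: 'count' is
 * aligned to 2 * sizeof(void *) and 'seq' follows it directly, so the
 * pair can be updated atomically with this_cpu_cmpxchg_double() (or
 * via the generic fallback on architectures without the instruction).
 */
struct counter_pair {
        unsigned long count;    /* pcp1: double-word aligned */
        unsigned long seq;      /* pcp2: directly after pcp1 */
} __aligned(2 * sizeof(void *));

static DEFINE_PER_CPU(struct counter_pair, pair);

static bool bump_pair(void)
{
        unsigned long old_count = this_cpu_read(pair.count);
        unsigned long old_seq = this_cpu_read(pair.seq);

        /* Succeeds only if neither word changed since the reads above. */
        return this_cpu_cmpxchg_double(pair.count, pair.seq,
                                       old_count, old_seq,
                                       old_count + 1, old_seq + 1);
}

This is the same layout rule that SLUB relies on for the pair of words at the start of struct kmem_cache_cpu, which is why the mm/slub.c hunk below always requests double-word alignment.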
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3604,13 +3604,14 @@ free_done:
  * Release an obj back to its cache. If the obj has a constructed state, it must
  * be in this state _before_ it is released. Called with disabled ints.
  */
-static inline void __cache_free(struct kmem_cache *cachep, void *objp)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp,
+    void *caller)
 {
 	struct array_cache *ac = cpu_cache_get(cachep);
 
 	check_irq_off();
 	kmemleak_free_recursive(objp, cachep->flags);
-	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
+	objp = cache_free_debugcheck(cachep, objp, caller);
 
 	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
 
@@ -3801,7 +3802,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	debug_check_no_locks_freed(objp, obj_size(cachep));
 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(objp, obj_size(cachep));
-	__cache_free(cachep, objp);
+	__cache_free(cachep, objp, __builtin_return_address(0));
 	local_irq_restore(flags);
 
 	trace_kmem_cache_free(_RET_IP_, objp);
@@ -3831,7 +3832,7 @@ void kfree(const void *objp)
 	c = virt_to_cache(objp);
 	debug_check_no_locks_freed(objp, obj_size(c));
 	debug_check_no_obj_freed(objp, obj_size(c));
-	__cache_free(c, (void *)objp);
+	__cache_free(c, (void *)objp, __builtin_return_address(0));
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);
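The mm/slab.c hunks change which address is recorded as the last user of a freed object: instead of __cache_free() taking __builtin_return_address(0) itself (which ends up naming kmem_cache_free() or kfree() rather than their caller), the exported entry points capture the return address and pass it down. A rough sketch of that pattern follows; the helper names here are hypothetical and not kernel APIs.

#include <linux/printk.h>

/* Hypothetical debug hook standing in for cache_free_debugcheck(). */
static void record_last_user(void *obj, void *caller)
{
        pr_debug("%p last freed by %pS\n", obj, caller);
}

/* Internal helper: uses the caller handed to it, as __cache_free() now does. */
static inline void helper_free(void *obj, void *caller)
{
        record_last_user(obj, caller);
}

/* Exported entry point: the return address here is the real last user. */
void exported_free(void *obj)
{
        helper_free(obj, __builtin_return_address(0));
}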
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2320,16 +2320,12 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
 			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 	/*
-	 * Must align to double word boundary for the double cmpxchg instructions
-	 * to work.
+	 * Must align to double word boundary for the double cmpxchg
+	 * instructions to work; see __pcpu_double_call_return_bool().
 	 */
-	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 2 * sizeof(void *));
-#else
-	/* Regular alignment is sufficient */
-	s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
-#endif
+	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
+				     2 * sizeof(void *));
 
 	if (!s->cpu_slab)
 		return 0;
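The mm/slub.c hunk drops the CONFIG_CMPXCHG_LOCAL split and always allocates cpu_slab with explicit double-word alignment. alloc_percpu() only guarantees the natural alignment of the type, while __alloc_percpu(size, align) lets the caller request a stricter one. A hedged sketch of the difference is below; the demo names are illustrative, not from this commit.

#include <linux/percpu.h>

/* Hypothetical stand-in for the word pair at the start of struct kmem_cache_cpu. */
struct demo_pair {
        void *head;
        unsigned long tid;
};

static struct demo_pair __percpu *demo_alloc(void)
{
        /*
         * Request 2 * sizeof(void *) alignment unconditionally, as the hunk
         * above does, so the pair meets the cmpxchg_double layout rule even
         * on architectures that only use the generic fallback.
         */
        return __alloc_percpu(sizeof(struct demo_pair), 2 * sizeof(void *));
}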