| field | value | date |
|---|---|---|
| author | Christoph Lameter <clameter@sgi.com> | 2007-09-11 18:24:11 -0400 |
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-09-11 20:21:27 -0400 |
| commit | ba0268a8b056f2ad846f1f8837a764c21bb6425e | |
| tree | 93ab7d2555a98628850dbf917a43ceddef8c980c | |
| parent | 4150d3f549fe2355625017b2a6ff72aec98bcef0 | |
SLUB: accurately compare debug flags during slab cache merge
This was posted on Aug 28 and fixes an issue that could cause trouble
when slab caches >= 128k are created.
http://marc.info/?l=linux-mm&m=118798149918424&w=2
Currently we simply add the debug flags unconditionally when checking for a
matching slab. This creates issues for sysfs processing when slabs exist
that are exempt from debugging, either because of their huge size or because
only a subset of slabs was selected for debugging.
We need to add the flags only if kmem_cache_open() would also add them.
Create a function that calculates the flags that would be set
if the cache were opened, and use that function to determine
the flags before looking for a compatible slab (a standalone sketch of
the difference follows the sign-offs below).
[akpm@linux-foundation.org: fixlets]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Chuck Ebbert <cebbert@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
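
To see why the old comparison misfired, here is a minimal user-space sketch. The flag values and the trimmed-down kmem_cache_flags() below are simplified stand-ins, not the kernel's actual definitions; the point is only to contrast the old and new merge checks for a cache large enough to be exempt from debugging:

```c
#include <stdio.h>

/* Illustrative flag bits; the real values live elsewhere in the kernel. */
#define SLAB_RED_ZONE   0x1UL
#define SLAB_POISON     0x2UL
#define SLUB_MERGE_SAME (SLAB_RED_ZONE | SLAB_POISON)

static unsigned long slub_debug = SLAB_POISON; /* e.g. booted with slub_debug=P */

/* Trimmed-down model of the new helper: debug flags are added only when
 * kmem_cache_open() would add them, i.e. not for huge caches. */
static unsigned long kmem_cache_flags(unsigned long objsize, unsigned long flags)
{
        if (objsize >= 65535 * sizeof(void *))
                return flags;              /* exempt: too large for debugging */
        return flags | slub_debug;
}

int main(void)
{
        unsigned long objsize = 65535 * sizeof(void *);        /* a huge cache */
        unsigned long existing = kmem_cache_flags(objsize, 0); /* flags it really got */
        unsigned long candidate = 0;

        /* Old check: slub_debug was OR-ed in unconditionally, so the predicted
         * flags never match the debug-exempt cache's real flags. */
        int old_mismatch = ((candidate | slub_debug) & SLUB_MERGE_SAME) !=
                           (existing & SLUB_MERGE_SAME);

        /* New check: predict exactly what kmem_cache_open() would set. */
        int new_mismatch = (kmem_cache_flags(objsize, candidate) & SLUB_MERGE_SAME) !=
                           (existing & SLUB_MERGE_SAME);

        printf("old check mismatch: %d\n", old_mismatch); /* 1: merge refused */
        printf("new check mismatch: %d\n", new_mismatch); /* 0: caches can merge */
        return 0;
}
```

Under the old check, such a cache failed to merge even though kmem_cache_open() would have produced identical flags; the new helper makes the merge decision and the open path agree.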
| mode | path | changes |
|---|---|---|
| -rw-r--r-- | mm/slub.c | 38 |

1 file changed, 23 insertions(+), 15 deletions(-)
```diff
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -986,7 +986,9 @@ out:
 
 __setup("slub_debug", setup_slub_debug);
 
-static void kmem_cache_open_debug_check(struct kmem_cache *s)
+static unsigned long kmem_cache_flags(unsigned long objsize,
+	unsigned long flags, const char *name,
+	void (*ctor)(void *, struct kmem_cache *, unsigned long))
 {
 	/*
 	 * The page->offset field is only 16 bit wide. This is an offset
@@ -1000,19 +1002,21 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s)
 	 * Debugging or ctor may create a need to move the free
 	 * pointer. Fail if this happens.
 	 */
-	if (s->objsize >= 65535 * sizeof(void *)) {
-		BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON |
+	if (objsize >= 65535 * sizeof(void *)) {
+		BUG_ON(flags & (SLAB_RED_ZONE | SLAB_POISON |
 				SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
-		BUG_ON(s->ctor);
-	}
-	else
+		BUG_ON(ctor);
+	} else {
 		/*
 		 * Enable debugging if selected on the kernel commandline.
 		 */
 		if (slub_debug && (!slub_debug_slabs ||
-			strncmp(slub_debug_slabs, s->name,
+			strncmp(slub_debug_slabs, name,
 				strlen(slub_debug_slabs)) == 0))
-				s->flags |= slub_debug;
+				flags |= slub_debug;
+	}
+
+	return flags;
 }
 #else
 static inline void setup_object_debug(struct kmem_cache *s,
@@ -1029,7 +1033,12 @@ static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 static inline int check_object(struct kmem_cache *s, struct page *page,
 			void *object, int active) { return 1; }
 static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
-static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {}
+static inline unsigned long kmem_cache_flags(unsigned long objsize,
+	unsigned long flags, const char *name,
+	void (*ctor)(void *, struct kmem_cache *, unsigned long))
+{
+	return flags;
+}
 #define slub_debug 0
 #endif
 /*
@@ -2088,9 +2097,8 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 	s->name = name;
 	s->ctor = ctor;
 	s->objsize = size;
-	s->flags = flags;
 	s->align = align;
-	kmem_cache_open_debug_check(s);
+	s->flags = kmem_cache_flags(size, flags, name, ctor);
 
 	if (!calculate_sizes(s))
 		goto error;
@@ -2660,7 +2668,7 @@ static int slab_unmergeable(struct kmem_cache *s)
 }
 
 static struct kmem_cache *find_mergeable(size_t size,
-		size_t align, unsigned long flags,
+		size_t align, unsigned long flags, const char *name,
 		void (*ctor)(void *, struct kmem_cache *, unsigned long))
 {
 	struct kmem_cache *s;
@@ -2674,6 +2682,7 @@ static struct kmem_cache *find_mergeable(size_t size,
 	size = ALIGN(size, sizeof(void *));
 	align = calculate_alignment(flags, align, size);
 	size = ALIGN(size, align);
+	flags = kmem_cache_flags(size, flags, name, NULL);
 
 	list_for_each_entry(s, &slab_caches, list) {
 		if (slab_unmergeable(s))
@@ -2682,8 +2691,7 @@ static struct kmem_cache *find_mergeable(size_t size,
 		if (size > s->size)
 			continue;
 
-		if (((flags | slub_debug) & SLUB_MERGE_SAME) !=
-				(s->flags & SLUB_MERGE_SAME))
+		if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
 			continue;
 		/*
 		 * Check if alignment is compatible.
@@ -2707,7 +2715,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	struct kmem_cache *s;
 
 	down_write(&slub_lock);
-	s = find_mergeable(size, align, flags, ctor);
+	s = find_mergeable(size, align, flags, name, ctor);
 	if (s) {
 		s->refcount++;
 		/*
```
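
For reference, the call flow after the patch, paraphrased from the hunks above:

```c
/*
 * kmem_cache_create(name, size, align, flags, ctor)
 *   -> find_mergeable(size, align, flags, name, ctor)
 *        flags = kmem_cache_flags(size, flags, name, NULL);
 *        compare (flags & SLUB_MERGE_SAME) with each existing s->flags
 *   -> no compatible slab found: kmem_cache_open(s, ...)
 *        s->flags = kmem_cache_flags(size, flags, name, ctor);
 *
 * Both paths now derive the effective flags from the same helper, so the
 * merge decision always agrees with the flags a new cache would receive.
 */
```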
