author    | Christoph Lameter <cl@linux.com> | 2010-09-29 08:15:01 -0400
committer | Pekka Enberg <penberg@kernel.org> | 2010-10-02 03:44:10 -0400
commit    | f7cb1933621bce66a77f690776a16fe3ebbc4d58
tree      | 7d78e7bc0f985abdf8e9fe281b0c0b4b0bcc7ced
parent    | 7340cc84141d5236c5dd003359ee921513cd9b84
SLUB: Pass active and inactive redzone flags instead of boolean to debug functions
Pass the actual byte values used for inactive and active redzoning to the
functions that initialize and check the objects. This avoids a lot of the
? : conditionals that previously looked up the values inside those functions.
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
-rw-r--r-- | mm/slub.c | 33
1 file changed, 14 insertions(+), 19 deletions(-)
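The idea of the patch, as a minimal standalone sketch before the diff itself (fill_redzone_old and fill_redzone_new are hypothetical names for illustration, not code from mm/slub.c): instead of handing the debug helpers a boolean and letting each of them re-derive the redzone byte with a ternary, the caller passes the byte value itself.

```c
#include <string.h>

/* Redzone byte values; in the kernel these come from include/linux/poison.h. */
#define SLUB_RED_INACTIVE	0xbb	/* object is free */
#define SLUB_RED_ACTIVE		0xcc	/* object is allocated */

/* Before: every helper repeats the active ? : lookup. */
static void fill_redzone_old(unsigned char *zone, size_t len, int active)
{
	memset(zone, active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE, len);
}

/* After: the caller passes the byte value directly; the ternary disappears. */
static void fill_redzone_new(unsigned char *zone, size_t len, unsigned char val)
{
	memset(zone, val, len);
}
```

State tests follow the same pattern: where a helper previously branched on the boolean, it now compares against the value, so !active becomes val != SLUB_RED_ACTIVE, as in check_object() below.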
@@ -490,7 +490,7 @@ static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
 	dump_stack();
 }
 
-static void init_object(struct kmem_cache *s, void *object, int active)
+static void init_object(struct kmem_cache *s, void *object, u8 val)
 {
 	u8 *p = object;
 
@@ -500,9 +500,7 @@ static void init_object(struct kmem_cache *s, void *object, int active)
 	}
 
 	if (s->flags & SLAB_RED_ZONE)
-		memset(p + s->objsize,
-			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
-			s->inuse - s->objsize);
+		memset(p + s->objsize, val, s->inuse - s->objsize);
 }
 
 static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
@@ -637,17 +635,14 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 }
 
 static int check_object(struct kmem_cache *s, struct page *page,
-					void *object, int active)
+					void *object, u8 val)
 {
 	u8 *p = object;
 	u8 *endobject = object + s->objsize;
 
 	if (s->flags & SLAB_RED_ZONE) {
-		unsigned int red =
-			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
-
 		if (!check_bytes_and_report(s, page, object, "Redzone",
-			endobject, red, s->inuse - s->objsize))
+			endobject, val, s->inuse - s->objsize))
 			return 0;
 	} else {
 		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
@@ -657,7 +652,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
 	}
 
 	if (s->flags & SLAB_POISON) {
-		if (!active && (s->flags & __OBJECT_POISON) &&
+		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
 			(!check_bytes_and_report(s, page, p, "Poison", p,
 					POISON_FREE, s->objsize - 1) ||
 			!check_bytes_and_report(s, page, p, "Poison",
@@ -669,7 +664,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
 		check_pad_bytes(s, page, p);
 	}
 
-	if (!s->offset && active)
+	if (!s->offset && val == SLUB_RED_ACTIVE)
 		/*
 		 * Object and freepointer overlap. Cannot check
 		 * freepointer while object is allocated.
@@ -887,7 +882,7 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
 		return;
 
-	init_object(s, object, 0);
+	init_object(s, object, SLUB_RED_INACTIVE);
 	init_tracking(s, object);
 }
 
@@ -907,14 +902,14 @@ static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *pa
 		goto bad;
 	}
 
-	if (!check_object(s, page, object, 0))
+	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
 		goto bad;
 
 	/* Success perform special debug activities for allocs */
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, object, TRACK_ALLOC, addr);
 	trace(s, page, object, 1);
-	init_object(s, object, 1);
+	init_object(s, object, SLUB_RED_ACTIVE);
 	return 1;
 
 bad:
@@ -947,7 +942,7 @@ static noinline int free_debug_processing(struct kmem_cache *s,
 		goto fail;
 	}
 
-	if (!check_object(s, page, object, 1))
+	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
 		return 0;
 
 	if (unlikely(s != page->slab)) {
@@ -971,7 +966,7 @@ static noinline int free_debug_processing(struct kmem_cache *s,
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, object, TRACK_FREE, addr);
 	trace(s, page, object, 0);
-	init_object(s, object, 0);
+	init_object(s, object, SLUB_RED_INACTIVE);
 	return 1;
 
 fail:
@@ -1075,7 +1070,7 @@ static inline int free_debug_processing(struct kmem_cache *s,
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 			{ return 1; }
 static inline int check_object(struct kmem_cache *s, struct page *page,
-			void *object, int active) { return 1; }
+			void *object, u8 val) { return 1; }
 static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long objsize,
 	unsigned long flags, const char *name,
@@ -1235,7 +1230,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		slab_pad_check(s, page);
 		for_each_object(p, s, page_address(page),
 						page->objects)
-			check_object(s, page, p, 0);
+			check_object(s, page, p, SLUB_RED_INACTIVE);
 	}
 
 	kmemcheck_free_shadow(page, compound_order(page));
@@ -2143,7 +2138,7 @@ static void early_kmem_cache_node_alloc(int node)
 	page->inuse++;
 	kmem_cache_node->node[node] = n;
 #ifdef CONFIG_SLUB_DEBUG
-	init_object(kmem_cache_node, n, 1);
+	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
 	init_tracking(kmem_cache_node, n);
 #endif
 	init_kmem_cache_node(n, kmem_cache_node);
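For reference, the byte patterns used throughout this patch are defined in include/linux/poison.h (values as found in kernels of this era):

```c
/* include/linux/poison.h (excerpt) */
#define SLUB_RED_INACTIVE	0xbb	/* redzone fill while the object is free */
#define SLUB_RED_ACTIVE		0xcc	/* redzone fill while the object is allocated */
#define POISON_FREE		0x6b	/* payload fill of free objects, catches use-after-free */
#define POISON_END		0xa5	/* final byte of the poison pattern */
```

Because the two redzone bytes are distinct, a single u8 argument can serve both as the memset() fill pattern in init_object() and as the expected pattern that check_object() verifies; that is what makes the old boolean parameter redundant.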