-rw-r--r--	init/Kconfig	|   9
-rw-r--r--	mm/slub.c	| 189
2 files changed, 123 insertions, 75 deletions
diff --git a/init/Kconfig b/init/Kconfig
index d0edf42f4dba..da6a91c4a051 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -504,6 +504,15 @@ config VM_EVENT_COUNTERS
 	  on EMBEDDED systems. /proc/vmstat will only show page counts
 	  if VM event counters are disabled.
 
+config SLUB_DEBUG
+	default y
+	bool "Enable SLUB debugging support" if EMBEDDED
+	help
+	  SLUB has extensive debug support features. Disabling these can
+	  result in significant savings in code size. This also disables
+	  SLUB sysfs support. /sys/slab will not exist and there will be
+	  no support for cache validation etc.
+
 choice
 	prompt "Choose SLAB allocator"
 	default SLAB
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -89,17 +89,25 @@
 
 static inline int SlabDebug(struct page *page)
 {
+#ifdef CONFIG_SLUB_DEBUG
 	return PageError(page);
+#else
+	return 0;
+#endif
 }
 
 static inline void SetSlabDebug(struct page *page)
 {
+#ifdef CONFIG_SLUB_DEBUG
 	SetPageError(page);
+#endif
 }
 
 static inline void ClearSlabDebug(struct page *page)
 {
+#ifdef CONFIG_SLUB_DEBUG
 	ClearPageError(page);
+#endif
 }
 
 /*
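With CONFIG_SLUB_DEBUG disabled, SlabDebug() collapses to a constant 0 and the Set/Clear variants become empty, so the compiler can drop the debug branches in callers entirely. A minimal stand-alone sketch of that effect (illustrative user-space C, not part of the patch; the struct page and alloc_path() below are stand-ins):

#include <stdio.h>

struct page { unsigned long flags; };	/* illustrative stand-in */

/* The CONFIG_SLUB_DEBUG=n variants from the hunk above, reduced to stubs. */
static inline int SlabDebug(struct page *page) { (void)page; return 0; }
static inline void SetSlabDebug(struct page *page) { (void)page; }
static inline void ClearSlabDebug(struct page *page) { (void)page; }

static void alloc_path(struct page *page)
{
	if (SlabDebug(page))				/* folds to if (0) ... */
		printf("running debug checks\n");	/* ... so this branch generates no code */
}

int main(void)
{
	struct page p = { 0 };

	SetSlabDebug(&p);	/* no-op when debugging is compiled out */
	alloc_path(&p);
	ClearSlabDebug(&p);	/* likewise a no-op */
	return 0;
}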
@@ -207,7 +215,7 @@ struct track {
 
 enum track_item { TRACK_ALLOC, TRACK_FREE };
 
-#ifdef CONFIG_SYSFS
+#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
 static int sysfs_slab_add(struct kmem_cache *);
 static int sysfs_slab_alias(struct kmem_cache *, const char *);
 static void sysfs_slab_remove(struct kmem_cache *);
@@ -284,6 +292,14 @@ static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
 	return (p - addr) / s->size;
 }
 
+#ifdef CONFIG_SLUB_DEBUG
+/*
+ * Debug settings:
+ */
+static int slub_debug;
+
+static char *slub_debug_slabs;
+
 /*
  * Object debugging
  */
@@ -821,6 +837,97 @@ static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
 	}
 }
 
+static int __init setup_slub_debug(char *str)
+{
+	if (!str || *str != '=')
+		slub_debug = DEBUG_DEFAULT_FLAGS;
+	else {
+		str++;
+		if (*str == 0 || *str == ',')
+			slub_debug = DEBUG_DEFAULT_FLAGS;
+		else
+		for( ;*str && *str != ','; str++)
+			switch (*str) {
+			case 'f' : case 'F' :
+				slub_debug |= SLAB_DEBUG_FREE;
+				break;
+			case 'z' : case 'Z' :
+				slub_debug |= SLAB_RED_ZONE;
+				break;
+			case 'p' : case 'P' :
+				slub_debug |= SLAB_POISON;
+				break;
+			case 'u' : case 'U' :
+				slub_debug |= SLAB_STORE_USER;
+				break;
+			case 't' : case 'T' :
+				slub_debug |= SLAB_TRACE;
+				break;
+			default:
+				printk(KERN_ERR "slub_debug option '%c' "
+					"unknown. skipped\n",*str);
+			}
+	}
+
+	if (*str == ',')
+		slub_debug_slabs = str + 1;
+	return 1;
+}
+
+__setup("slub_debug", setup_slub_debug);
+
+static void kmem_cache_open_debug_check(struct kmem_cache *s)
+{
+	/*
+	 * The page->offset field is only 16 bit wide. This is an offset
+	 * in units of words from the beginning of an object. If the slab
+	 * size is bigger then we cannot move the free pointer behind the
+	 * object anymore.
+	 *
+	 * On 32 bit platforms the limit is 256k. On 64bit platforms
+	 * the limit is 512k.
+	 *
+	 * Debugging or ctor/dtors may create a need to move the free
+	 * pointer. Fail if this happens.
+	 */
+	if (s->size >= 65535 * sizeof(void *)) {
+		BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON |
+				SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
+		BUG_ON(s->ctor || s->dtor);
+	}
+	else
+		/*
+		 * Enable debugging if selected on the kernel commandline.
+		 */
+		if (slub_debug && (!slub_debug_slabs ||
+			strncmp(slub_debug_slabs, s->name,
+				strlen(slub_debug_slabs)) == 0))
+				s->flags |= slub_debug;
+}
+#else
+
+static inline int alloc_object_checks(struct kmem_cache *s,
+			struct page *page, void *object) { return 0; }
+
+static inline int free_object_checks(struct kmem_cache *s,
+			struct page *page, void *object) { return 0; }
+
+static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct page *page) {}
+static inline void trace(struct kmem_cache *s, struct page *page,
+			void *object, int alloc) {}
+static inline void init_object(struct kmem_cache *s,
+			void *object, int active) {}
+static inline void init_tracking(struct kmem_cache *s, void *object) {}
+static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
+			{ return 1; }
+static inline int check_object(struct kmem_cache *s, struct page *page,
+			void *object, int active) { return 1; }
+static inline void set_track(struct kmem_cache *s, void *object,
+			enum track_item alloc, void *addr) {}
+static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {}
+#define slub_debug 0
+#endif
 /*
  * Slab allocation and freeing
  */
@@ -1446,13 +1553,6 @@ static int slub_min_objects = DEFAULT_MIN_OBJECTS;
 static int slub_nomerge;
 
 /*
- * Debug settings:
- */
-static int slub_debug;
-
-static char *slub_debug_slabs;
-
-/*
  * Calculate the order of allocation given an slab object size.
  *
  * The order of allocation has significant impact on performance and other
@@ -1660,6 +1760,7 @@ static int calculate_sizes(struct kmem_cache *s)
 	 */
 	size = ALIGN(size, sizeof(void *));
 
+#ifdef CONFIG_SLUB_DEBUG
 	/*
 	 * If we are Redzoning then check if there is some space between the
 	 * end of the object and the free pointer. If not then add an
@@ -1667,6 +1768,7 @@ static int calculate_sizes(struct kmem_cache *s)
 	 */
 	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
 		size += sizeof(void *);
+#endif
 
 	/*
 	 * With that we have determined the number of bytes in actual use
@@ -1674,6 +1776,7 @@ static int calculate_sizes(struct kmem_cache *s)
 	 */
 	s->inuse = size;
 
+#ifdef CONFIG_SLUB_DEBUG
 	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
 		s->ctor || s->dtor)) {
 		/*
@@ -1704,6 +1807,7 @@ static int calculate_sizes(struct kmem_cache *s)
 	 * of the object.
 	 */
 	size += sizeof(void *);
+#endif
 
 	/*
 	 * Determine the alignment based on various parameters that the
@@ -1753,32 +1857,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 	s->objsize = size;
 	s->flags = flags;
 	s->align = align;
-
-	/*
-	 * The page->offset field is only 16 bit wide. This is an offset
-	 * in units of words from the beginning of an object. If the slab
-	 * size is bigger then we cannot move the free pointer behind the
-	 * object anymore.
-	 *
-	 * On 32 bit platforms the limit is 256k. On 64bit platforms
-	 * the limit is 512k.
-	 *
-	 * Debugging or ctor/dtors may create a need to move the free
-	 * pointer. Fail if this happens.
-	 */
-	if (s->size >= 65535 * sizeof(void *)) {
-		BUG_ON(flags & (SLAB_RED_ZONE | SLAB_POISON |
-				SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
-		BUG_ON(ctor || dtor);
-	}
-	else
-		/*
-		 * Enable debugging if selected on the kernel commandline.
-		 */
-		if (slub_debug && (!slub_debug_slabs ||
-			strncmp(slub_debug_slabs, name,
-				strlen(slub_debug_slabs)) == 0))
-				s->flags |= slub_debug;
+	kmem_cache_open_debug_check(s);
 
 	if (!calculate_sizes(s))
 		goto error;
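kmem_cache_open() now simply calls kmem_cache_open_debug_check(); the 256k/512k limits cited in the moved comment follow directly from page->offset being a 16-bit offset counted in words, i.e. at most 65535 words of 4 bytes (32-bit) or 8 bytes (64-bit). A small stand-alone check of that arithmetic (illustrative, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long max_words = 65535;	/* largest value of the 16-bit page->offset */

	printf("32-bit limit: %lu bytes (~256 KiB)\n", max_words * 4);
	printf("64-bit limit: %lu bytes (~512 KiB)\n", max_words * 8);
	return 0;
}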
@@ -1949,45 +2028,6 @@ static int __init setup_slub_nomerge(char *str)
 
 __setup("slub_nomerge", setup_slub_nomerge);
 
-static int __init setup_slub_debug(char *str)
-{
-	if (!str || *str != '=')
-		slub_debug = DEBUG_DEFAULT_FLAGS;
-	else {
-		str++;
-		if (*str == 0 || *str == ',')
-			slub_debug = DEBUG_DEFAULT_FLAGS;
-		else
-		for( ;*str && *str != ','; str++)
-			switch (*str) {
-			case 'f' : case 'F' :
-				slub_debug |= SLAB_DEBUG_FREE;
-				break;
-			case 'z' : case 'Z' :
-				slub_debug |= SLAB_RED_ZONE;
-				break;
-			case 'p' : case 'P' :
-				slub_debug |= SLAB_POISON;
-				break;
-			case 'u' : case 'U' :
-				slub_debug |= SLAB_STORE_USER;
-				break;
-			case 't' : case 'T' :
-				slub_debug |= SLAB_TRACE;
-				break;
-			default:
-				printk(KERN_ERR "slub_debug option '%c' "
-					"unknown. skipped\n",*str);
-			}
-	}
-
-	if (*str == ',')
-		slub_debug_slabs = str + 1;
-	return 1;
-}
-
-__setup("slub_debug", setup_slub_debug);
-
 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 				const char *name, int size, gfp_t gfp_flags)
 {
@@ -2554,8 +2594,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	return slab_alloc(s, gfpflags, node, caller);
 }
 
-#ifdef CONFIG_SYSFS
-
+#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
 static int validate_slab(struct kmem_cache *s, struct page *page)
 {
 	void *p;