-rw-r--r--  include/linux/slub_def.h |  2 +-
-rw-r--r--  init/Kconfig             |  2 +-
-rw-r--r--  mm/slub.c                | 51 ++++++++++++++++++++++++++++++++++++++++-----------
3 files changed, 42 insertions(+), 13 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index b00c1c73eb0a..79d59c937fac 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -45,9 +45,9 @@ struct kmem_cache_cpu {
 struct kmem_cache_node {
        spinlock_t list_lock;   /* Protect partial list and nr_partial */
        unsigned long nr_partial;
-       atomic_long_t nr_slabs;
        struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
+       atomic_long_t nr_slabs;
        struct list_head full;
 #endif
 };
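For reference, the post-patch layout as derived from this hunk: when CONFIG_SLUB_DEBUG is disabled, each kmem_cache_node now drops both the nr_slabs counter and the full list.

    struct kmem_cache_node {
            spinlock_t list_lock;           /* Protect partial list and nr_partial */
            unsigned long nr_partial;
            struct list_head partial;
    #ifdef CONFIG_SLUB_DEBUG
            atomic_long_t nr_slabs;         /* slab count kept only for debugging */
            struct list_head full;
    #endif
    };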
diff --git a/init/Kconfig b/init/Kconfig
index a97924bc5b8d..7fccf09bb95a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -763,7 +763,7 @@ endmenu # General setup
 config SLABINFO
        bool
        depends on PROC_FS
-       depends on SLAB || SLUB
+       depends on SLAB || SLUB_DEBUG
        default y
 
 config RT_MUTEXES
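SLABINFO's dependency tightens from SLUB to SLUB_DEBUG, presumably because /proc/slabinfo reports per-node slab counts: with nr_slabs compiled out of non-debug builds, there is nothing for it to read, so it is only offered when the debug counters exist.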
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -837,6 +837,35 @@ static void remove_full(struct kmem_cache *s, struct page *page)
        spin_unlock(&n->list_lock);
 }
 
+/* Tracking of the number of slabs for debugging purposes */
+static inline unsigned long slabs_node(struct kmem_cache *s, int node)
+{
+       struct kmem_cache_node *n = get_node(s, node);
+
+       return atomic_long_read(&n->nr_slabs);
+}
+
+static inline void inc_slabs_node(struct kmem_cache *s, int node)
+{
+       struct kmem_cache_node *n = get_node(s, node);
+
+       /*
+        * May be called early in order to allocate a slab for the
+        * kmem_cache_node structure. Solve the chicken-egg
+        * dilemma by deferring the increment of the count during
+        * bootstrap (see early_kmem_cache_node_alloc).
+        */
+       if (!NUMA_BUILD || n)
+               atomic_long_inc(&n->nr_slabs);
+}
+static inline void dec_slabs_node(struct kmem_cache *s, int node)
+{
+       struct kmem_cache_node *n = get_node(s, node);
+
+       atomic_long_dec(&n->nr_slabs);
+}
+
+/* Object debug checks for alloc/free paths */
 static void setup_object_debug(struct kmem_cache *s, struct page *page,
                                        void *object)
 {
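The comment in inc_slabs_node() describes a bootstrap ordering problem: the very first slab of a cache may hold the kmem_cache_node structure itself, so get_node() can still return NULL when the increment is attempted. Below is a minimal, self-contained userspace sketch of that guard (all names here are illustrative, not kernel APIs); the skipped increment is performed explicitly once the node structure exists, as early_kmem_cache_node_alloc() does later in this patch.

    /*
     * Userspace model of the deferred-increment guard in inc_slabs_node().
     * Hypothetical names; not kernel code.
     */
    #include <stdio.h>

    struct node_stats { long nr_slabs; };

    static struct node_stats *nodes[1];     /* NULL until "bootstrapped" */

    static void inc_slabs(int node)
    {
            struct node_stats *n = nodes[node];

            if (n)                          /* mirrors: if (!NUMA_BUILD || n) */
                    n->nr_slabs++;
            /* else: bootstrap path; the caller accounts for the slab later */
    }

    int main(void)
    {
            static struct node_stats n0;

            inc_slabs(0);                   /* early call: nodes[0] == NULL, skipped */
            nodes[0] = &n0;                 /* node structure now set up */
            inc_slabs(0);                   /* deferred accounting now happens */
            printf("node 0 slabs: %ld\n", n0.nr_slabs);
            return 0;
    }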
@@ -1028,6 +1057,11 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
        return flags;
 }
 #define slub_debug 0
+
+static inline unsigned long slabs_node(struct kmem_cache *s, int node)
+                                                       { return 0; }
+static inline void inc_slabs_node(struct kmem_cache *s, int node) {}
+static inline void dec_slabs_node(struct kmem_cache *s, int node) {}
 #endif
 /*
  * Slab allocation and freeing
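These empty stubs are what make the cleanups below possible: callers can invoke the helpers unconditionally, and on !CONFIG_SLUB_DEBUG builds the inlines compile away entirely, taking the get_node() lookup and the atomic operations with them.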
@@ -1066,7 +1100,6 @@ static void setup_object(struct kmem_cache *s, struct page *page,
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
        struct page *page;
-       struct kmem_cache_node *n;
        void *start;
        void *last;
        void *p;
@@ -1078,9 +1111,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
        if (!page)
                goto out;
 
-       n = get_node(s, page_to_nid(page));
-       if (n)
-               atomic_long_inc(&n->nr_slabs);
+       inc_slabs_node(s, page_to_nid(page));
        page->slab = s;
        page->flags |= 1 << PG_slab;
        if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
@@ -1153,9 +1184,7 @@ static void free_slab(struct kmem_cache *s, struct page *page)
 
 static void discard_slab(struct kmem_cache *s, struct page *page)
 {
-       struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-
-       atomic_long_dec(&n->nr_slabs);
+       dec_slabs_node(s, page_to_nid(page));
        free_slab(s, page);
 }
 
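Both hot paths now shrink to a single helper call: the open-coded get_node()/atomic pairs in new_slab() and discard_slab() are gone, and the NULL check that new_slab() used to carry has moved into inc_slabs_node() itself.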
@@ -1894,10 +1923,10 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 static void init_kmem_cache_node(struct kmem_cache_node *n)
 {
        n->nr_partial = 0;
-       atomic_long_set(&n->nr_slabs, 0);
        spin_lock_init(&n->list_lock);
        INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
+       atomic_long_set(&n->nr_slabs, 0);
        INIT_LIST_HEAD(&n->full);
 #endif
 }
@@ -2066,7 +2095,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
        init_tracking(kmalloc_caches, n);
 #endif
        init_kmem_cache_node(n);
-       atomic_long_inc(&n->nr_slabs);
+       inc_slabs_node(kmalloc_caches, node);
 
        /*
         * lockdep requires consistent irq usage for each lock
@@ -2379,7 +2408,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
                struct kmem_cache_node *n = get_node(s, node);
 
                n->nr_partial -= free_list(s, n, &n->partial);
-               if (atomic_long_read(&n->nr_slabs))
+               if (slabs_node(s, node))
                        return 1;
        }
        free_kmem_cache_nodes(s);
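One behavioral consequence worth noting: on !CONFIG_SLUB_DEBUG builds slabs_node() is the stub that returns 0, so kmem_cache_close() will no longer refuse to tear down a cache that still has slabs outstanding; that safety check now exists only in debug builds.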
@@ -2801,7 +2830,7 @@ static void slab_mem_offline_callback(void *arg)
         * and offline_pages() function shoudn't call this
         * callback. So, we must fail.
         */
-       BUG_ON(atomic_long_read(&n->nr_slabs));
+       BUG_ON(slabs_node(s, offline_node));
 
        s->node[offline_node] = NULL;
        kmem_cache_free(kmalloc_caches, n);
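The same caveat applies here: without CONFIG_SLUB_DEBUG this BUG_ON() reduces to BUG_ON(0) and can never fire, so offlining a memory node that still holds slabs would go undetected on non-debug kernels.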