author		Linus Torvalds <torvalds@linux-foundation.org>	2008-04-18 11:19:00 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-18 11:19:00 -0400
commit		7d939fbdfee49e5c06bd27214d25f726fb87a25a (patch)
tree		8366a5e7ec36b3b0162fd54ec4b434a2b840e7ba
parent		18c98b65279c00c3c983a4525161207f1aa6a04b (diff)
parent		0f389ec63077521166f071e1e970aed36147fd45 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  slub: No need for per node slab counters if !SLUB_DEBUG
  slub: Move map/flag clearing to __free_slab
  slub: Fixes to per cpu stat output in sysfs
  slub: Deal with config variable dependencies
  slub: Reduce #ifdef ZONE_DMA by moving kmalloc_caches_dma near dma logic
  slub: Initialize per-cpu stats
-rw-r--r--	include/linux/slub_def.h	2
-rw-r--r--	init/Kconfig			2
-rw-r--r--	lib/Kconfig.debug		2
-rw-r--r--	mm/slub.c			97
4 files changed, 67 insertions, 36 deletions
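
The common thread of this series shows in the first hunks below: the per-node nr_slabs counter is only used by debug code, so it moves under CONFIG_SLUB_DEBUG and gains slabs_node()/inc_slabs_node()/dec_slabs_node() accessors with empty inline fallbacks, keeping every call site free of #ifdefs. A minimal userspace sketch of that stub pattern — the names (node_stats, inc_objects, DEBUG_COUNTERS) are hypothetical stand-ins, not kernel identifiers:

	#include <stdio.h>

	struct node_stats {
	#ifdef DEBUG_COUNTERS
		long nr_objects;	/* tracked only in debug builds */
	#endif
		int id;
	};

	#ifdef DEBUG_COUNTERS
	static inline void inc_objects(struct node_stats *n)
	{
		n->nr_objects++;
	}
	static inline long read_objects(struct node_stats *n)
	{
		return n->nr_objects;
	}
	#else
	/* Non-debug fallbacks compile away; callers never see an #ifdef. */
	static inline void inc_objects(struct node_stats *n) {}
	static inline long read_objects(struct node_stats *n) { return 0; }
	#endif

	int main(void)
	{
		struct node_stats n = { .id = 0 };

		inc_objects(&n);	/* no-op unless built with -DDEBUG_COUNTERS */
		printf("node %d: %ld objects\n", n.id, read_objects(&n));
		return 0;
	}

Built without -DDEBUG_COUNTERS the fallbacks compile to nothing, which is how the !SLUB_DEBUG stubs let new_slab() and discard_slab() call the helpers unconditionally.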
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index b00c1c73eb0a..79d59c937fac 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -45,9 +45,9 @@ struct kmem_cache_cpu {
 struct kmem_cache_node {
 	spinlock_t list_lock;	/* Protect partial list and nr_partial */
 	unsigned long nr_partial;
-	atomic_long_t nr_slabs;
 	struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
+	atomic_long_t nr_slabs;
 	struct list_head full;
 #endif
 };
diff --git a/init/Kconfig b/init/Kconfig
index a97924bc5b8d..7fccf09bb95a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -763,7 +763,7 @@ endmenu # General setup
 config SLABINFO
 	bool
 	depends on PROC_FS
-	depends on SLAB || SLUB
+	depends on SLAB || SLUB_DEBUG
 	default y
 
 config RT_MUTEXES
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0796c1a090c0..eef557dc46c3 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -211,7 +211,7 @@ config SLUB_DEBUG_ON
 config SLUB_STATS
 	default n
 	bool "Enable SLUB performance statistics"
-	depends on SLUB
+	depends on SLUB && SLUB_DEBUG && SYSFS
 	help
 	  SLUB statistics are useful to debug SLUBs allocation behavior in
 	  order find ways to optimize the allocator. This should never be
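
The new SYSFS dependency reflects how these statistics are consumed: CONFIG_SLUB_STATS output is only reachable through the per-cache files under /sys/kernel/slab/. A minimal reader sketch, assuming a kernel built with CONFIG_SLUB_STATS=y and an existing kmalloc-64 cache (both assumptions, not guaranteed on every system):

	#include <stdio.h>

	int main(void)
	{
		/* alloc_fastpath is one of the per-cache stat files exported
		 * by SLUB; the path assumes CONFIG_SLUB_STATS=y and that a
		 * kmalloc-64 cache exists on the running kernel. */
		FILE *f = fopen("/sys/kernel/slab/kmalloc-64/alloc_fastpath", "r");
		char line[256];

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* "<sum> C0=<n> C1=<n> ..." on SMP */
		fclose(f);
		return 0;
	}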
diff --git a/mm/slub.c b/mm/slub.c
index acc975fcc8cc..7f8aaa291a4e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -837,6 +837,35 @@ static void remove_full(struct kmem_cache *s, struct page *page)
 	spin_unlock(&n->list_lock);
 }
 
+/* Tracking of the number of slabs for debugging purposes */
+static inline unsigned long slabs_node(struct kmem_cache *s, int node)
+{
+	struct kmem_cache_node *n = get_node(s, node);
+
+	return atomic_long_read(&n->nr_slabs);
+}
+
+static inline void inc_slabs_node(struct kmem_cache *s, int node)
+{
+	struct kmem_cache_node *n = get_node(s, node);
+
+	/*
+	 * May be called early in order to allocate a slab for the
+	 * kmem_cache_node structure. Solve the chicken-egg
+	 * dilemma by deferring the increment of the count during
+	 * bootstrap (see early_kmem_cache_node_alloc).
+	 */
+	if (!NUMA_BUILD || n)
+		atomic_long_inc(&n->nr_slabs);
+}
+static inline void dec_slabs_node(struct kmem_cache *s, int node)
+{
+	struct kmem_cache_node *n = get_node(s, node);
+
+	atomic_long_dec(&n->nr_slabs);
+}
+
+/* Object debug checks for alloc/free paths */
 static void setup_object_debug(struct kmem_cache *s, struct page *page,
 					void *object)
 {
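
The chicken-and-egg comment in inc_slabs_node() above is worth unpacking: on NUMA builds the first slab allocated for a node carries that node's struct kmem_cache_node itself, so the counter's home does not exist yet when the slab is created; early_kmem_cache_node_alloc() re-issues the increment once it does (see the hunk at line 2095 below). A userspace sketch of the same deferral, with hypothetical names (node_meta, inc_slabs):

	#include <stdio.h>
	#include <stdlib.h>

	struct node_meta {
		long nr_slabs;
	};

	static struct node_meta *nodes[4];	/* NULL until bootstrapped */

	static void inc_slabs(int node)
	{
		/* Skip the count while the metadata is not allocated yet;
		 * the bootstrap path re-issues the increment afterwards. */
		if (nodes[node])
			nodes[node]->nr_slabs++;
	}

	int main(void)
	{
		inc_slabs(0);			/* early call: silently skipped */
		nodes[0] = calloc(1, sizeof(*nodes[0]));
		if (!nodes[0])
			return 1;
		inc_slabs(0);			/* bootstrap re-counts the slab */
		printf("node 0: %ld slabs\n", nodes[0]->nr_slabs);
		return 0;
	}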
@@ -1028,6 +1057,11 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
 	return flags;
 }
 #define slub_debug 0
+
+static inline unsigned long slabs_node(struct kmem_cache *s, int node)
+							{ return 0; }
+static inline void inc_slabs_node(struct kmem_cache *s, int node) {}
+static inline void dec_slabs_node(struct kmem_cache *s, int node) {}
 #endif
 /*
  * Slab allocation and freeing
@@ -1066,7 +1100,6 @@ static void setup_object(struct kmem_cache *s, struct page *page,
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
-	struct kmem_cache_node *n;
 	void *start;
 	void *last;
 	void *p;
@@ -1078,9 +1111,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (!page)
 		goto out;
 
-	n = get_node(s, page_to_nid(page));
-	if (n)
-		atomic_long_inc(&n->nr_slabs);
+	inc_slabs_node(s, page_to_nid(page));
 	page->slab = s;
 	page->flags |= 1 << PG_slab;
 	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
@@ -1125,6 +1156,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
 		-pages);
 
+	__ClearPageSlab(page);
+	reset_page_mapcount(page);
 	__free_pages(page, s->order);
 }
 
@@ -1151,11 +1184,7 @@ static void free_slab(struct kmem_cache *s, struct page *page)
 
 static void discard_slab(struct kmem_cache *s, struct page *page)
 {
-	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-
-	atomic_long_dec(&n->nr_slabs);
-	reset_page_mapcount(page);
-	__ClearPageSlab(page);
+	dec_slabs_node(s, page_to_nid(page));
 	free_slab(s, page);
 }
 
@@ -1886,15 +1915,18 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 	c->node = 0;
 	c->offset = s->offset / sizeof(void *);
 	c->objsize = s->objsize;
+#ifdef CONFIG_SLUB_STATS
+	memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
+#endif
 }
 
 static void init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
-	atomic_long_set(&n->nr_slabs, 0);
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
+	atomic_long_set(&n->nr_slabs, 0);
 	INIT_LIST_HEAD(&n->full);
 #endif
 }
@@ -2063,7 +2095,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 	init_tracking(kmalloc_caches, n);
 #endif
 	init_kmem_cache_node(n);
-	atomic_long_inc(&n->nr_slabs);
+	inc_slabs_node(kmalloc_caches, node);
 
 	/*
 	 * lockdep requires consistent irq usage for each lock
@@ -2376,7 +2408,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 		struct kmem_cache_node *n = get_node(s, node);
 
 		n->nr_partial -= free_list(s, n, &n->partial);
-		if (atomic_long_read(&n->nr_slabs))
+		if (slabs_node(s, node))
 			return 1;
 	}
 	free_kmem_cache_nodes(s);
@@ -2409,10 +2441,6 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
-#ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
-#endif
-
 static int __init setup_slub_min_order(char *str)
 {
 	get_option(&str, &slub_min_order);
@@ -2472,6 +2500,7 @@ panic:
 }
 
 #ifdef CONFIG_ZONE_DMA
+static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2688,21 +2717,6 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
-#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SLABINFO)
-static unsigned long count_partial(struct kmem_cache_node *n)
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += page->inuse;
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-#endif
-
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
  * the remaining slabs by the number of items in use. The slabs with the
@@ -2816,7 +2830,7 @@ static void slab_mem_offline_callback(void *arg)
 			 * and offline_pages() function shoudn't call this
 			 * callback. So, we must fail.
 			 */
-			BUG_ON(atomic_long_read(&n->nr_slabs));
+			BUG_ON(slabs_node(s, offline_node));
 
 			s->node[offline_node] = NULL;
 			kmem_cache_free(kmalloc_caches, n);
@@ -3181,6 +3195,21 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	return slab_alloc(s, gfpflags, node, caller);
 }
 
+#if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO)
+static unsigned long count_partial(struct kmem_cache_node *n)
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += page->inuse;
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+#endif
+
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
 static int validate_slab(struct kmem_cache *s, struct page *page,
 					unsigned long *map)
@@ -3979,10 +4008,12 @@ static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
 
 	len = sprintf(buf, "%lu", sum);
 
+#ifdef CONFIG_SMP
 	for_each_online_cpu(cpu) {
 		if (data[cpu] && len < PAGE_SIZE - 20)
-			len += sprintf(buf + len, " c%d=%u", cpu, data[cpu]);
+			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
 	}
+#endif
 	kfree(data);
 	return len + sprintf(buf + len, "\n");
 }
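
With the fixes above, show_stat() emits the total followed by optional per-cpu fields, e.g. "1234 C0=600 C1=634" (values made up for illustration); the cpu prefix is now a capital C and the per-cpu loop is compiled out on !SMP kernels, where the line is just the sum. A small parser sketch for that format — a hypothetical userspace helper, not kernel code:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Parse a SLUB stat line of the form "<sum> C0=<n> C1=<n> ...". */
	static void parse_stat_line(const char *line)
	{
		char buf[256];
		char *tok;

		strncpy(buf, line, sizeof(buf) - 1);
		buf[sizeof(buf) - 1] = '\0';

		tok = strtok(buf, " \n");
		if (!tok)
			return;
		printf("total: %lu\n", strtoul(tok, NULL, 10));

		while ((tok = strtok(NULL, " \n"))) {
			unsigned int cpu, val;

			if (sscanf(tok, "C%u=%u", &cpu, &val) == 2)
				printf("  cpu %u: %u\n", cpu, val);
		}
	}

	int main(void)
	{
		parse_stat_line("1234 C0=600 C1=634\n");
		return 0;
	}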