aboutsummaryrefslogtreecommitdiffstats
path: root/mm/slub.c
diff options
context:
space:
mode:
authorAlex Shi <alex.shi@intel.com>2012-02-03 10:34:56 -0500
committerPekka Enberg <penberg@kernel.org>2012-02-18 04:00:09 -0500
commit8028dcea8abbbd51b5156e40ea214c20b559cd01 (patch)
treeb4f60cce7da33fa2942262043a3834724c07405b /mm/slub.c
parent4de900b4d6b2216b7443d32e263f5de9078697a3 (diff)
slub: per cpu partial statistics change
This patch splits cpu_partial_free into 2 parts: cpu_partial_node, PCP refilling times from node partial; and the same-named cpu_partial_free, PCP refilling times in the slab_free slow path. A new statistic 'cpu_partial_drain' is added to get PCP drain to node partial times. This info is useful when doing PCP tuning. The slabinfo.c code is unchanged, since cpu_partial_node is not on the slow path. Signed-off-by: Alex Shi <alex.shi@intel.com> Acked-by: Christoph Lameter <cl@linux.com> Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--mm/slub.c12
1 files changed, 9 insertions, 3 deletions
diff --git a/mm/slub.c b/mm/slub.c
index b6666eb3d9c4..24132edcfe33 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1566,6 +1566,7 @@ static void *get_partial_node(struct kmem_cache *s,
1566 } else { 1566 } else {
1567 page->freelist = t; 1567 page->freelist = t;
1568 available = put_cpu_partial(s, page, 0); 1568 available = put_cpu_partial(s, page, 0);
1569 stat(s, CPU_PARTIAL_NODE);
1569 } 1570 }
1570 if (kmem_cache_debug(s) || available > s->cpu_partial / 2) 1571 if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
1571 break; 1572 break;
@@ -1979,6 +1980,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
1979 local_irq_restore(flags); 1980 local_irq_restore(flags);
1980 pobjects = 0; 1981 pobjects = 0;
1981 pages = 0; 1982 pages = 0;
1983 stat(s, CPU_PARTIAL_DRAIN);
1982 } 1984 }
1983 } 1985 }
1984 1986
@@ -1990,7 +1992,6 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
1990 page->next = oldpage; 1992 page->next = oldpage;
1991 1993
1992 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage); 1994 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
1993 stat(s, CPU_PARTIAL_FREE);
1994 return pobjects; 1995 return pobjects;
1995} 1996}
1996 1997
@@ -2474,9 +2475,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
2474 * If we just froze the page then put it onto the 2475 * If we just froze the page then put it onto the
2475 * per cpu partial list. 2476 * per cpu partial list.
2476 */ 2477 */
2477 if (new.frozen && !was_frozen) 2478 if (new.frozen && !was_frozen) {
2478 put_cpu_partial(s, page, 1); 2479 put_cpu_partial(s, page, 1);
2479 2480 stat(s, CPU_PARTIAL_FREE);
2481 }
2480 /* 2482 /*
2481 * The list lock was not taken therefore no list 2483 * The list lock was not taken therefore no list
2482 * activity can be necessary. 2484 * activity can be necessary.
@@ -5069,6 +5071,8 @@ STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
5069STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail); 5071STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
5070STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc); 5072STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
5071STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free); 5073STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
5074STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
5075STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
5072#endif 5076#endif
5073 5077
5074static struct attribute *slab_attrs[] = { 5078static struct attribute *slab_attrs[] = {
@@ -5134,6 +5138,8 @@ static struct attribute *slab_attrs[] = {
5134 &cmpxchg_double_cpu_fail_attr.attr, 5138 &cmpxchg_double_cpu_fail_attr.attr,
5135 &cpu_partial_alloc_attr.attr, 5139 &cpu_partial_alloc_attr.attr,
5136 &cpu_partial_free_attr.attr, 5140 &cpu_partial_free_attr.attr,
5141 &cpu_partial_node_attr.attr,
5142 &cpu_partial_drain_attr.attr,
5137#endif 5143#endif
5138#ifdef CONFIG_FAILSLAB 5144#ifdef CONFIG_FAILSLAB
5139 &failslab_attr.attr, 5145 &failslab_attr.attr,