author	Christoph Lameter <cl@linux-foundation.org>	2009-12-18 17:26:23 -0500
committer	Pekka Enberg <penberg@cs.helsinki.fi>	2009-12-20 03:39:34 -0500
commit	84e554e6865c4f4ae84d38800cf270b9a67901cc (patch)
tree	9bf5ce0596a9ffeee9e6a307eadbce6086f636ba
parent	ff12059ed14b0773d7bbef86f98218ada6c20770 (diff)
SLUB: Make slub statistics use this_cpu_inc
this_cpu_inc() translates into a single instruction on x86 and does not
need any register. So use it in stat(). We also want to avoid the
calculation of the per cpu kmem_cache_cpu structure pointer. So pass a
kmem_cache pointer instead of a kmem_cache_cpu pointer.

Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
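For context, a minimal standalone sketch of the two counting patterns the patch contrasts follows. It is not part of the commit: the abridged enum, struct layouts and the stat_old()/stat_new() names are simplified assumptions for illustration; only __this_cpu_ptr()/__this_cpu_inc() are the real per cpu accessors involved.

/* Hypothetical, simplified sketch -- not the actual mm/slub.c definitions. */
enum stat_item { ALLOC_FASTPATH, FREE_FASTPATH, NR_SLUB_STAT_ITEMS };	/* abridged */

struct kmem_cache_cpu {
	unsigned stat[NR_SLUB_STAT_ITEMS];	/* per cpu event counters */
};

struct kmem_cache {
	struct kmem_cache_cpu *cpu_slab;	/* per cpu allocation state */
};

/* Old pattern: every caller first computes the per cpu pointer, e.g.
 * stat_old(__this_cpu_ptr(s->cpu_slab), si), then indexes into it.
 */
static inline void stat_old(struct kmem_cache_cpu *c, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	c->stat[si]++;
#endif
}

/* New pattern: hand the whole per cpu expression to __this_cpu_inc(),
 * which on x86 becomes a single segment-prefixed increment with no
 * pointer calculation or scratch register in the caller.
 */
static inline void stat_new(struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	__this_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

Passing the kmem_cache is enough because the per cpu offset is resolved inside __this_cpu_inc(); callers no longer need a kmem_cache_cpu pointer just to bump a counter, which is exactly what the diff below removes at each stat() call site.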
-rw-r--r--	mm/slub.c	43
1 file changed, 20 insertions(+), 23 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 30d2dde27563..bddae72f6f49 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -217,10 +217,10 @@ static inline void sysfs_slab_remove(struct kmem_cache *s)
 
 #endif
 
-static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
+static inline void stat(struct kmem_cache *s, enum stat_item si)
 {
 #ifdef CONFIG_SLUB_STATS
-	c->stat[si]++;
+	__this_cpu_inc(s->cpu_slab->stat[si]);
 #endif
 }
 
@@ -1108,7 +1108,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 		if (!page)
 			return NULL;
 
-		stat(this_cpu_ptr(s->cpu_slab), ORDER_FALLBACK);
+		stat(s, ORDER_FALLBACK);
 	}
 
 	if (kmemcheck_enabled
@@ -1406,23 +1406,22 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-	struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
 
 	__ClearPageSlubFrozen(page);
 	if (page->inuse) {
 
 		if (page->freelist) {
 			add_partial(n, page, tail);
-			stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
+			stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 		} else {
-			stat(c, DEACTIVATE_FULL);
+			stat(s, DEACTIVATE_FULL);
 			if (SLABDEBUG && PageSlubDebug(page) &&
 				(s->flags & SLAB_STORE_USER))
 				add_full(n, page);
 		}
 		slab_unlock(page);
 	} else {
-		stat(c, DEACTIVATE_EMPTY);
+		stat(s, DEACTIVATE_EMPTY);
 		if (n->nr_partial < s->min_partial) {
 			/*
 			 * Adding an empty slab to the partial slabs in order
@@ -1438,7 +1437,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 			slab_unlock(page);
 		} else {
 			slab_unlock(page);
-			stat(__this_cpu_ptr(s->cpu_slab), FREE_SLAB);
+			stat(s, FREE_SLAB);
 			discard_slab(s, page);
 		}
 	}
@@ -1453,7 +1452,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 	int tail = 1;
 
 	if (page->freelist)
-		stat(c, DEACTIVATE_REMOTE_FREES);
+		stat(s, DEACTIVATE_REMOTE_FREES);
 	/*
 	 * Merge cpu freelist into slab freelist. Typically we get here
 	 * because both freelists are empty. So this is unlikely
@@ -1479,7 +1478,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
-	stat(c, CPUSLAB_FLUSH);
+	stat(s, CPUSLAB_FLUSH);
 	slab_lock(c->page);
 	deactivate_slab(s, c);
 }
@@ -1619,7 +1618,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	if (unlikely(!node_match(c, node)))
 		goto another_slab;
 
-	stat(c, ALLOC_REFILL);
+	stat(s, ALLOC_REFILL);
 
 load_freelist:
 	object = c->page->freelist;
@@ -1634,7 +1633,7 @@ load_freelist:
 	c->node = page_to_nid(c->page);
 unlock_out:
 	slab_unlock(c->page);
-	stat(c, ALLOC_SLOWPATH);
+	stat(s, ALLOC_SLOWPATH);
 	return object;
 
 another_slab:
@@ -1644,7 +1643,7 @@ new_slab:
 	new = get_partial(s, gfpflags, node);
 	if (new) {
 		c->page = new;
-		stat(c, ALLOC_FROM_PARTIAL);
+		stat(s, ALLOC_FROM_PARTIAL);
 		goto load_freelist;
 	}
 
@@ -1658,7 +1657,7 @@ new_slab:
 
 	if (new) {
 		c = __this_cpu_ptr(s->cpu_slab);
-		stat(c, ALLOC_SLAB);
+		stat(s, ALLOC_SLAB);
 		if (c->page)
 			flush_slab(s, c);
 		slab_lock(new);
@@ -1713,7 +1712,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 	else {
 		c->freelist = get_freepointer(s, object);
-		stat(c, ALLOC_FASTPATH);
+		stat(s, ALLOC_FASTPATH);
 	}
 	local_irq_restore(flags);
 
@@ -1780,10 +1779,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 {
 	void *prior;
 	void **object = (void *)x;
-	struct kmem_cache_cpu *c;
 
-	c = __this_cpu_ptr(s->cpu_slab);
-	stat(c, FREE_SLOWPATH);
+	stat(s, FREE_SLOWPATH);
 	slab_lock(page);
 
 	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
@@ -1796,7 +1793,7 @@ checks_ok:
 	page->inuse--;
 
 	if (unlikely(PageSlubFrozen(page))) {
-		stat(c, FREE_FROZEN);
+		stat(s, FREE_FROZEN);
 		goto out_unlock;
 	}
 
@@ -1809,7 +1806,7 @@ checks_ok:
 	 */
 	if (unlikely(!prior)) {
 		add_partial(get_node(s, page_to_nid(page)), page, 1);
-		stat(c, FREE_ADD_PARTIAL);
+		stat(s, FREE_ADD_PARTIAL);
 	}
 
 out_unlock:
@@ -1822,10 +1819,10 @@ slab_empty:
 		 * Slab still on the partial list.
 		 */
 		remove_partial(s, page);
-		stat(c, FREE_REMOVE_PARTIAL);
+		stat(s, FREE_REMOVE_PARTIAL);
 	}
 	slab_unlock(page);
-	stat(c, FREE_SLAB);
+	stat(s, FREE_SLAB);
 	discard_slab(s, page);
 	return;
 
@@ -1863,7 +1860,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	if (likely(page == c->page && c->node >= 0)) {
 		set_freepointer(s, object, c->freelist);
 		c->freelist = object;
-		stat(c, FREE_FASTPATH);
+		stat(s, FREE_FASTPATH);
 	} else
 		__slab_free(s, page, x, addr);
 