author	Shaohua Li <shaohua.li@intel.com>	2011-08-23 20:57:52 -0400
committer	Pekka Enberg <penberg@kernel.org>	2011-08-27 04:59:00 -0400
commit	136333d104bd3a62d783b0ac3d0f32ac0108c5d0 (patch)
tree	dbac3ee89362a3707823bbd1185ad0423cb153de /mm/slub.c
parent	130655ef097940b627e8e04fa7c6f3b51cf24f85 (diff)
slub: explicitly document position of inserting slab to partial list
Adding a slab to the head or tail of the partial list is performance sensitive, so explicitly use DEACTIVATE_TO_TAIL/DEACTIVATE_TO_HEAD to document the insertion position and avoid getting it wrong.

Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Shaohua Li <shli@kernel.org>
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
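For context (not part of the patch): DEACTIVATE_TO_HEAD and DEACTIVATE_TO_TAIL are existing stat_item enumerators declared in include/linux/slub_def.h, which is what lets the same value name the insertion position and serve as the index passed to stat(). A minimal sketch of that relationship follows; unrelated enumerators are elided, the comments are paraphrased, and deactivate_slab_sketch() is a hypothetical condensation of the real deactivate_slab() path, not kernel code.

enum stat_item {
	/* ... */
	DEACTIVATE_TO_HEAD,	/* cpu slab moved to the head of the partial list */
	DEACTIVATE_TO_TAIL,	/* cpu slab moved to the tail of the partial list */
	/* ... */
	NR_SLUB_STAT_ITEMS
};

static void deactivate_slab_sketch(struct kmem_cache *s, struct page *page,
				   struct kmem_cache_node *n)
{
	int tail = DEACTIVATE_TO_HEAD;	/* default: front of the partial list */

	if (page->freelist)		/* objects were freed remotely */
		tail = DEACTIVATE_TO_TAIL;

	add_partial(n, page, tail);	/* position chosen by the same value... */
	stat(s, tail);			/* ...that indexes the statistics counter */
}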
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 7c54fe83a90c..91a120f185d1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1534,7 +1534,7 @@ static inline void add_partial(struct kmem_cache_node *n,
 				struct page *page, int tail)
 {
 	n->nr_partial++;
-	if (tail)
+	if (tail == DEACTIVATE_TO_TAIL)
 		list_add_tail(&page->lru, &n->partial);
 	else
 		list_add(&page->lru, &n->partial);
@@ -1781,13 +1781,13 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 	enum slab_modes l = M_NONE, m = M_NONE;
 	void *freelist;
 	void *nextfree;
-	int tail = 0;
+	int tail = DEACTIVATE_TO_HEAD;
 	struct page new;
 	struct page old;
 
 	if (page->freelist) {
 		stat(s, DEACTIVATE_REMOTE_FREES);
-		tail = 1;
+		tail = DEACTIVATE_TO_TAIL;
 	}
 
 	c->tid = next_tid(c->tid);
@@ -1893,7 +1893,7 @@ redo:
 	if (m == M_PARTIAL) {
 
 		add_partial(n, page, tail);
-		stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
+		stat(s, tail);
 
 	} else if (m == M_FULL) {
 
@@ -2377,7 +2377,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		 */
 		if (unlikely(!prior)) {
 			remove_full(s, page);
-			add_partial(n, page, 1);
+			add_partial(n, page, DEACTIVATE_TO_TAIL);
 			stat(s, FREE_ADD_PARTIAL);
 		}
 	}
@@ -2695,7 +2695,7 @@ static void early_kmem_cache_node_alloc(int node)
 	init_kmem_cache_node(n, kmem_cache_node);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
-	add_partial(n, page, 0);
+	add_partial(n, page, DEACTIVATE_TO_HEAD);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)