author		Pekka Enberg <penberg@kernel.org>	2011-09-19 10:46:07 -0400
committer	Pekka Enberg <penberg@kernel.org>	2011-09-19 10:46:07 -0400
commit		d20bbfab01802e195a50435940f7e4aa747c217c
tree		82b0007e33c083050a4e60a49dbb2f5477b4c99d /mm/slub.c
parent		a37933c37c14b64e81c7c9cc44a5d3f5e0c91412
parent		136333d104bd3a62d783b0ac3d0f32ac0108c5d0
Merge branch 'slab/urgent' into slab/next
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	22
1 file changed, 12 insertions, 10 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 2dc22160aff1..3b3f17bc0d17 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -675,7 +675,7 @@ static u8 *check_bytes(u8 *start, u8 value, unsigned int bytes)
 		return check_bytes8(start, value, bytes);
 
 	value64 = value | value << 8 | value << 16 | value << 24;
-	value64 = value64 | value64 << 32;
+	value64 = (value64 & 0xffffffff) | value64 << 32;
 	prefix = 8 - ((unsigned long)start) % 8;
 
 	if (prefix) {
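
The check_bytes() hunk above fixes a sign-extension bug: value is a u8, so the integer promotions evaluate value << 24 as a signed int, and for byte patterns >= 0x80 the resulting 32-bit word is negative, filling the upper half of value64 with ones on assignment. The old shift-and-or then left those ones in place. A minimal standalone demonstration (ordinary userspace C, not kernel code):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t value = 0xa5;	/* any pattern >= 0x80 triggers it */
		uint64_t value64;

		/* value << 24 is computed as signed int; the assignment
		 * sign-extends, so the top 32 bits are already all ones */
		value64 = value | value << 8 | value << 16 | value << 24;

		printf("buggy: %016llx\n",
		       (unsigned long long)(value64 | value64 << 32));
		printf("fixed: %016llx\n",
		       (unsigned long long)((value64 & 0xffffffff) |
					    value64 << 32));
		return 0;
	}

On a 64-bit build this prints ffffffffa5a5a5a5 for the buggy form and a5a5a5a5a5a5a5a5 for the fixed one: masking the low word before duplicating it keeps the replicated pattern intact.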
@@ -1508,7 +1508,7 @@ static inline void add_partial(struct kmem_cache_node *n,
 				struct page *page, int tail)
 {
 	n->nr_partial++;
-	if (tail)
+	if (tail == DEACTIVATE_TO_TAIL)
 		list_add_tail(&page->lru, &n->partial);
 	else
 		list_add(&page->lru, &n->partial);
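
DEACTIVATE_TO_HEAD and DEACTIVATE_TO_TAIL are existing members of SLUB's enum stat_item, so the same value now both documents the intended list position at each call site and, as the stat(s, tail) hunk further down shows, indexes the statistics counter directly. A simplified self-contained sketch of the idea (illustrative definitions, not the kernel's):

	#include <stdio.h>

	enum stat_item { DEACTIVATE_TO_HEAD, DEACTIVATE_TO_TAIL, NR_ITEMS };

	static unsigned long stats[NR_ITEMS];	/* per-cpu in real SLUB */

	static void stat(enum stat_item item)
	{
		stats[item]++;
	}

	static void add_partial(int tail)
	{
		if (tail == DEACTIVATE_TO_TAIL)
			printf("list_add_tail\n");	/* cold placement */
		else
			printf("list_add\n");		/* hot placement */
	}

	int main(void)
	{
		int tail = DEACTIVATE_TO_TAIL;

		add_partial(tail);
		stat(tail);	/* one value: placement and stat index */
		return 0;
	}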
@@ -1755,13 +1755,13 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 	enum slab_modes l = M_NONE, m = M_NONE;
 	void *freelist;
 	void *nextfree;
-	int tail = 0;
+	int tail = DEACTIVATE_TO_HEAD;
 	struct page new;
 	struct page old;
 
 	if (page->freelist) {
 		stat(s, DEACTIVATE_REMOTE_FREES);
-		tail = 1;
+		tail = DEACTIVATE_TO_TAIL;
 	}
 
 	c->tid = next_tid(c->tid);
@@ -1828,7 +1828,7 @@ redo:
 
 	new.frozen = 0;
 
-	if (!new.inuse && n->nr_partial < s->min_partial)
+	if (!new.inuse && n->nr_partial > s->min_partial)
 		m = M_FREE;
 	else if (new.freelist) {
 		m = M_PARTIAL;
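
The comparison flip above corrects the freeing policy during slab deactivation: an empty slab should be handed back to the page allocator only when the node already caches more than min_partial partial slabs, but with the old '<' it was freed precisely when the partial list was under-stocked. A hypothetical standalone rendering of the corrected test:

	#include <stdio.h>

	/* Returns 1 when an empty slab should be freed rather than kept
	 * cached on the node's partial list (illustrative, not kernel code) */
	static int should_free_empty_slab(unsigned int inuse,
					  unsigned long nr_partial,
					  unsigned long min_partial)
	{
		return !inuse && nr_partial > min_partial;
	}

	int main(void)
	{
		/* with min_partial = 5: */
		printf("%d\n", should_free_empty_slab(0, 3, 5)); /* 0: keep */
		printf("%d\n", should_free_empty_slab(0, 8, 5)); /* 1: free */
		printf("%d\n", should_free_empty_slab(4, 8, 5)); /* 0: in use */
		return 0;
	}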
@@ -1867,7 +1867,7 @@ redo:
 	if (m == M_PARTIAL) {
 
 		add_partial(n, page, tail);
-		stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
+		stat(s, tail);
 
 	} else if (m == M_FULL) {
 
@@ -2351,7 +2351,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		 */
 		if (unlikely(!prior)) {
 			remove_full(s, page);
-			add_partial(n, page, 0);
+			add_partial(n, page, DEACTIVATE_TO_TAIL);
 			stat(s, FREE_ADD_PARTIAL);
 		}
 	}
@@ -2361,11 +2361,13 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 slab_empty:
 	if (prior) {
 		/*
-		 * Slab still on the partial list.
+		 * Slab on the partial list.
 		 */
 		remove_partial(n, page);
 		stat(s, FREE_REMOVE_PARTIAL);
-	}
+	} else
+		/* Slab must be on the full list */
+		remove_full(s, page);
 
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
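
The slab_empty hunk matters when the last object of a slab that was not on the partial list is freed: with debugging enabled (SLAB_STORE_USER) such a slab sits on the node's full list, and discarding it without remove_full() left the freed page still linked there. A minimal sketch of the hazard with a bare doubly linked list (simplified types, not kernel code):

	#include <stdio.h>

	struct list_node { struct list_node *prev, *next; };

	static void list_del(struct list_node *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
	}

	int main(void)
	{
		struct list_node full_list, page;

		/* the fully allocated slab page sits on the full list */
		full_list.next = full_list.prev = &page;
		page.next = page.prev = &full_list;

		list_del(&page);	/* the remove_full() step added above */
		/* only now can the page be discarded without leaving the
		 * full list pointing at freed memory */
		printf("full list empty: %d\n", full_list.next == &full_list);
		return 0;
	}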
@@ -2667,7 +2669,7 @@ static void early_kmem_cache_node_alloc(int node)
 	init_kmem_cache_node(n, kmem_cache_node);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
-	add_partial(n, page, 0);
+	add_partial(n, page, DEACTIVATE_TO_HEAD);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)