author     Christoph Lameter <cl@linux.com>          2011-06-01 13:25:45 -0400
committer  Pekka Enberg <penberg@kernel.org>         2011-07-02 06:26:52 -0400
commit     50d5c41cd151b21ac1dfc98f048210456ccacc20 (patch)
tree       406983fe5aca70acad4df7c7f92286e4fc71d70b /mm/slub.c
parent     7e0528dadc9f8b04e4de0dba48a075100c2afe75 (diff)
slub: Do not use frozen page flag but a bit in the page counters
Do not use a page flag for the frozen bit. It needs to be part of the
state that is handled with cmpxchg_double(). So use a bit in the counter
struct in the page struct for that purpose.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
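For reference, the bit this patch reads and writes as page->frozen sits next to the other slab counters in struct page. The mm_types.h side of the change is not visible in this path-filtered diff; roughly, the layout the new code assumes looks like the sketch below (field widths inferred from the new MAX_OBJS_PER_PAGE of 32767 = 2^15 - 1):

/*
 * Sketch of the SLUB counters in struct page (include/linux/mm_types.h),
 * not part of the mm/slub.c diff shown here. Packing frozen into the
 * same word as inuse/objects lets a later cmpxchg_double() update the
 * freelist pointer and all of this state as one atomic unit.
 */
struct {
	unsigned inuse:16;	/* objects in use on this slab page */
	unsigned objects:15;	/* total objects, hence MAX_OBJS_PER_PAGE = 32767 */
	unsigned frozen:1;	/* slab is frozen to a cpu (was the PG_slub_frozen page flag) */
};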
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index add2ae74046c..82b2d048a278 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -166,7 +166,7 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 
 #define OO_SHIFT 16
 #define OO_MASK ((1 << OO_SHIFT) - 1)
-#define MAX_OBJS_PER_PAGE 65535 /* since page.objects is u16 */
+#define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
 
 /* Internal SLUB flags */
 #define __OBJECT_POISON 0x80000000UL /* Poison object */
@@ -1025,7 +1025,7 @@ static noinline int free_debug_processing(struct kmem_cache *s,
 	}
 
 	/* Special debug activities for freeing objects */
-	if (!PageSlubFrozen(page) && !page->freelist)
+	if (!page->frozen && !page->freelist)
 		remove_full(s, page);
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, object, TRACK_FREE, addr);
@@ -1424,7 +1424,7 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
 {
 	if (slab_trylock(page)) {
 		__remove_partial(n, page);
-		__SetPageSlubFrozen(page);
+		page->frozen = 1;
 		return 1;
 	}
 	return 0;
@@ -1538,7 +1538,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
-	__ClearPageSlubFrozen(page);
+	page->frozen = 0;
 	if (page->inuse) {
 
 		if (page->freelist) {
@@ -1868,7 +1868,7 @@ new_slab:
 		flush_slab(s, c);
 
 	slab_lock(page);
-	__SetPageSlubFrozen(page);
+	page->frozen = 1;
 	c->node = page_to_nid(page);
 	c->page = page;
 	goto load_freelist;
@@ -2048,7 +2048,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	page->freelist = object;
 	page->inuse--;
 
-	if (unlikely(PageSlubFrozen(page))) {
+	if (unlikely(page->frozen)) {
 		stat(s, FREE_FROZEN);
 		goto out_unlock;
 	}
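As the commit message notes, the point of moving frozen into the counters is cmpxchg_double(): later patches in this series compare and swap the freelist pointer together with the counters word, so frozen has to travel inside that word rather than in page->flags. A rough, illustrative sketch of that pattern, assuming the page->counters overlay introduced later in the series and the generic cmpxchg_double(p1, p2, o1, o2, n1, n2) helper; this function is not part of this patch:

/*
 * Illustrative only. Any state that must stay consistent with the
 * freelist (inuse, objects, frozen) has to live in the counters word,
 * because both words are replaced together or not at all.
 */
static inline bool slab_update_freelist_sketch(struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new)
{
	return cmpxchg_double(&page->freelist, &page->counters,
			      freelist_old, counters_old,
			      freelist_new, counters_new);
}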