summaryrefslogtreecommitdiffstats
path: root/mm/slab.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 1880d482a0cb..2a31ee3c5814 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1728,7 +1728,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct page *page)
 
 	freelist = page->freelist;
 	slab_destroy_debugcheck(cachep, page);
-	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
+	if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
 		call_rcu(&page->rcu_head, kmem_rcu_free);
 	else
 		kmem_freepages(cachep, page);
@@ -1924,7 +1924,7 @@ static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
 
 	cachep->num = 0;
 
-	if (cachep->ctor || flags & SLAB_DESTROY_BY_RCU)
+	if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
 		return false;
 
 	left = calculate_slab_order(cachep, size,
@@ -2030,7 +2030,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	if (size < 4096 || fls(size - 1) == fls(size - 1 + REDZONE_ALIGN +
 						2 * sizeof(unsigned long long)))
 		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
-	if (!(flags & SLAB_DESTROY_BY_RCU))
+	if (!(flags & SLAB_TYPESAFE_BY_RCU))
 		flags |= SLAB_POISON;
 #endif
 #endif