Diffstat (limited to 'mm/slub.c')

 mm/slub.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 8834563cdb4b..42c1b3af3c98 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -829,7 +829,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 		return 1;
 
 	start = page_address(page);
-	length = PAGE_SIZE << compound_order(page);
+	length = page_size(page);
 	end = start + length;
 	remainder = length % s->size;
 	if (!remainder)
@@ -1074,13 +1074,14 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 	init_tracking(s, object);
 }
 
-static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
+static void
+setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
 {
 	if (!(s->flags & SLAB_POISON))
 		return;
 
 	metadata_access_enable();
-	memset(addr, POISON_INUSE, PAGE_SIZE << order);
+	memset(addr, POISON_INUSE, page_size(page));
 	metadata_access_disable();
 }
 
@@ -1340,8 +1341,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
 #else /* !CONFIG_SLUB_DEBUG */
 static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}
-static inline void setup_page_debug(struct kmem_cache *s,
-			void *addr, int order) {}
+static inline
+void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
 	struct page *page, void *object, unsigned long addr) { return 0; }
@@ -1639,7 +1640,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
 	void *start, *p, *next;
-	int idx, order;
+	int idx;
 	bool shuffle;
 
 	flags &= gfp_allowed_mask;
@@ -1673,7 +1674,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	page->objects = oo_objects(oo);
 
-	order = compound_order(page);
 	page->slab_cache = s;
 	__SetPageSlab(page);
 	if (page_is_pfmemalloc(page))
@@ -1683,7 +1683,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	start = page_address(page);
 
-	setup_page_debug(s, start, order);
+	setup_page_debug(s, page, start);
 
 	shuffle = shuffle_freelist(s, page);
 
@@ -2004,6 +2004,7 @@ static inline unsigned long next_tid(unsigned long tid)
 	return tid + TID_STEP;
 }
 
+#ifdef SLUB_DEBUG_CMPXCHG
 static inline unsigned int tid_to_cpu(unsigned long tid)
 {
 	return tid % TID_STEP;
@@ -2013,6 +2014,7 @@ static inline unsigned long tid_to_event(unsigned long tid)
 {
 	return tid / TID_STEP;
 }
+#endif
 
 static inline unsigned int init_tid(int cpu)
 {
@@ -3930,7 +3932,7 @@ size_t __ksize(const void *object)
 
 	if (unlikely(!PageSlab(page))) {
 		WARN_ON(!PageCompound(page));
-		return PAGE_SIZE << compound_order(page);
+		return page_size(page);
 	}
 
 	return slab_ksize(page->slab_cache);
@@ -5298,7 +5300,7 @@ static ssize_t shrink_store(struct kmem_cache *s,
 		const char *buf, size_t length)
 {
 	if (buf[0] == '1')
-		kmem_cache_shrink(s);
+		kmem_cache_shrink_all(s);
 	else
 		return -EINVAL;
 	return length;
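
Note: the recurring change in this diff replaces open-coded PAGE_SIZE << compound_order(page) with the page_size() helper. For reference only (not part of this diff), the helper in include/linux/mm.h is essentially the following, so the conversions above are intended as behaviour-preserving cleanups:

	/* Returns the number of bytes in this potentially compound page. */
	static inline unsigned long page_size(struct page *page)
	{
		return PAGE_SIZE << compound_order(page);
	}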