diff options
author:    Matthew Wilcox (Oracle) <willy@infradead.org>  2019-09-23 18:34:25 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2019-09-24 18:54:08 -0400
commit:    a50b854e073cd3335bbbada8dcff83a857297dd7 (patch)
tree:      2ffc3a1e603860d6c0acc00154b47080c567c9c6 /mm/slub.c
parent:    1f18b296699c83d858ca8ebb8b77dbc641d87cae (diff)
mm: introduce page_size()
Patch series "Make working with compound pages easier", v2.
These three patches add three helpers and convert the appropriate
places to use them.
This patch (of 3):
It's unnecessarily hard to find out the size of a potentially huge page.
Replace 'PAGE_SIZE << compound_order(page)' with page_size(page).
Link: http://lkml.kernel.org/r/20190721104612.19120-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 18
1 file changed, 9 insertions(+), 9 deletions(-)
@@ -829,7 +829,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 		return 1;

 	start = page_address(page);
-	length = PAGE_SIZE << compound_order(page);
+	length = page_size(page);
 	end = start + length;
 	remainder = length % s->size;
 	if (!remainder)
@@ -1074,13 +1074,14 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 	init_tracking(s, object);
 }

-static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
+static
+void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
 {
 	if (!(s->flags & SLAB_POISON))
 		return;

 	metadata_access_enable();
-	memset(addr, POISON_INUSE, PAGE_SIZE << order);
+	memset(addr, POISON_INUSE, page_size(page));
 	metadata_access_disable();
 }

@@ -1340,8 +1341,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
 #else /* !CONFIG_SLUB_DEBUG */
 static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}
-static inline void setup_page_debug(struct kmem_cache *s,
-			void *addr, int order) {}
+static inline
+void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}

 static inline int alloc_debug_processing(struct kmem_cache *s,
 	struct page *page, void *object, unsigned long addr) { return 0; }
@@ -1639,7 +1640,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
 	void *start, *p, *next;
-	int idx, order;
+	int idx;
 	bool shuffle;

 	flags &= gfp_allowed_mask;
@@ -1673,7 +1674,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)

 	page->objects = oo_objects(oo);

-	order = compound_order(page);
 	page->slab_cache = s;
 	__SetPageSlab(page);
 	if (page_is_pfmemalloc(page))
@@ -1683,7 +1683,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)

 	start = page_address(page);

-	setup_page_debug(s, start, order);
+	setup_page_debug(s, page, start);

 	shuffle = shuffle_freelist(s, page);

@@ -3932,7 +3932,7 @@ size_t __ksize(const void *object)

 	if (unlikely(!PageSlab(page))) {
 		WARN_ON(!PageCompound(page));
-		return PAGE_SIZE << compound_order(page);
+		return page_size(page);
 	}

 	return slab_ksize(page->slab_cache);