author     Matthew Wilcox (Oracle) <willy@infradead.org>   2019-09-23 18:34:25 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-09-24 18:54:08 -0400
commit     a50b854e073cd3335bbbada8dcff83a857297dd7 (patch)
tree       2ffc3a1e603860d6c0acc00154b47080c567c9c6
parent     1f18b296699c83d858ca8ebb8b77dbc641d87cae (diff)
mm: introduce page_size()
Patch series "Make working with compound pages easier", v2.

These three patches add three helpers and convert the appropriate places
to use them.

This patch (of 3):

It's unnecessarily hard to find out the size of a potentially huge page.
Replace 'PAGE_SIZE << compound_order(page)' with page_size(page).

Link: http://lkml.kernel.org/r/20190721104612.19120-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
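For reference, the conversion pattern is straightforward; below is a minimal, self-contained C sketch of the helper and the before/after call sites. The one-field struct page and the PAGE_SHIFT value here are stand-ins so the example builds in userspace; only the page_size()/compound_order() names mirror the kernel.

/* Illustrative model only -- not the kernel's struct page. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

struct page {
	unsigned int order;		/* stand-in for compound metadata */
};

static inline unsigned int compound_order(struct page *page)
{
	return page->order;
}

/* The helper this patch adds: bytes in a potentially compound page. */
static inline unsigned long page_size(struct page *page)
{
	return PAGE_SIZE << compound_order(page);
}

int main(void)
{
	struct page thp = { .order = 9 };	/* e.g. a 2MB huge page */

	printf("old: %lu\n", PAGE_SIZE << compound_order(&thp));	/* open-coded */
	printf("new: %lu\n", page_size(&thp));				/* helper */
	return 0;
}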
-rw-r--r--  arch/arm/mm/flush.c                             3
-rw-r--r--  arch/arm64/mm/flush.c                           3
-rw-r--r--  arch/ia64/mm/init.c                             2
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_io.c         5
-rw-r--r--  drivers/staging/android/ion/ion_system_heap.c   4
-rw-r--r--  drivers/target/tcm_fc/tfc_io.c                  3
-rw-r--r--  fs/io_uring.c                                   2
-rw-r--r--  include/linux/hugetlb.h                         2
-rw-r--r--  include/linux/mm.h                              6
-rw-r--r--  lib/iov_iter.c                                  2
-rw-r--r--  mm/kasan/common.c                               8
-rw-r--r--  mm/nommu.c                                      2
-rw-r--r--  mm/page_vma_mapped.c                            3
-rw-r--r--  mm/rmap.c                                       6
-rw-r--r--  mm/slob.c                                       2
-rw-r--r--  mm/slub.c                                       18
-rw-r--r--  net/xdp/xsk.c                                   2
17 files changed, 35 insertions(+), 38 deletions(-)
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 6ecbda87ee46..4c7ebe094a83 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -204,8 +204,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * coherent with the kernels mapping.
 	 */
 	if (!PageHighMem(page)) {
-		size_t page_size = PAGE_SIZE << compound_order(page);
-		__cpuc_flush_dcache_area(page_address(page), page_size);
+		__cpuc_flush_dcache_area(page_address(page), page_size(page));
 	} else {
 		unsigned long i;
 		if (cache_is_vipt_nonaliasing()) {
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index dc19300309d2..ac485163a4a7 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -56,8 +56,7 @@ void __sync_icache_dcache(pte_t pte)
 	struct page *page = pte_page(pte);
 
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
-		sync_icache_aliases(page_address(page),
-				    PAGE_SIZE << compound_order(page));
+		sync_icache_aliases(page_address(page), page_size(page));
 }
 EXPORT_SYMBOL_GPL(__sync_icache_dcache);
 
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 678b98a09c85..bf9df2625bc8 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -64,7 +64,7 @@ __ia64_sync_icache_dcache (pte_t pte)
 	if (test_bit(PG_arch_1, &page->flags))
 		return;				/* i-cache is already coherent with d-cache */
 
-	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
+	flush_icache_range(addr, addr + page_size(page));
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index c70cb5f272cf..0891ab829b1b 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1078,7 +1078,7 @@ new_buf:
 			bool merge;
 
 			if (page)
-				pg_size <<= compound_order(page);
+				pg_size = page_size(page);
 			if (off < pg_size &&
 			    skb_can_coalesce(skb, i, page, off)) {
 				merge = 1;
@@ -1105,8 +1105,7 @@ new_buf:
 							   __GFP_NORETRY,
 							   order);
 					if (page)
-						pg_size <<=
-							compound_order(page);
+						pg_size <<= order;
 				}
 				if (!page) {
 					page = alloc_page(gfp);
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index aa8d8425be25..b83a1d16bd89 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -120,7 +120,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 		if (!page)
 			goto free_pages;
 		list_add_tail(&page->lru, &pages);
-		size_remaining -= PAGE_SIZE << compound_order(page);
+		size_remaining -= page_size(page);
 		max_order = compound_order(page);
 		i++;
 	}
@@ -133,7 +133,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 
 	sg = table->sgl;
 	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
-		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
+		sg_set_page(sg, page, page_size(page), 0);
 		sg = sg_next(sg);
 		list_del(&page->lru);
 	}
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index a254792d882c..1354a157e9af 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -136,8 +136,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 					   page, off_in_page, tlen);
 			fr_len(fp) += tlen;
 			fp_skb(fp)->data_len += tlen;
-			fp_skb(fp)->truesize +=
-					PAGE_SIZE << compound_order(page);
+			fp_skb(fp)->truesize += page_size(page);
 		} else {
 			BUG_ON(!page);
 			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 0dadbdbead0f..f83de4c6a826 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -3319,7 +3319,7 @@ static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
 	}
 
 	page = virt_to_head_page(ptr);
-	if (sz > (PAGE_SIZE << compound_order(page)))
+	if (sz > page_size(page))
 		return -EINVAL;
 
 	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index edfca4278319..53fc34f930d0 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -454,7 +454,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
 static inline struct hstate *page_hstate(struct page *page)
 {
 	VM_BUG_ON_PAGE(!PageHuge(page), page);
-	return size_to_hstate(PAGE_SIZE << compound_order(page));
+	return size_to_hstate(page_size(page));
 }
 
 static inline unsigned hstate_index_to_shift(unsigned index)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6e79b3df1582..d46d5585e2a2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -805,6 +805,12 @@ static inline void set_compound_order(struct page *page, unsigned int order)
 	page[1].compound_order = order;
 }
 
+/* Returns the number of bytes in this potentially compound page. */
+static inline unsigned long page_size(struct page *page)
+{
+	return PAGE_SIZE << compound_order(page);
+}
+
 void free_compound_page(struct page *page);
 
 #ifdef CONFIG_MMU
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f1e0569b4539..639d5e7014c1 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -878,7 +878,7 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
 	head = compound_head(page);
 	v += (page - head) << PAGE_SHIFT;
 
-	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
+	if (likely(n <= v && v <= (page_size(head))))
 		return true;
 	WARN_ON(1);
 	return false;
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 6b6f1198c72b..307631d9c62b 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -338,8 +338,7 @@ void kasan_poison_slab(struct page *page)
 
 	for (i = 0; i < (1 << compound_order(page)); i++)
 		page_kasan_tag_reset(page + i);
-	kasan_poison_shadow(page_address(page),
-			PAGE_SIZE << compound_order(page),
-			KASAN_KMALLOC_REDZONE);
+	kasan_poison_shadow(page_address(page), page_size(page),
+			KASAN_KMALLOC_REDZONE);
 }
 
@@ -542,7 +541,7 @@ void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
 	page = virt_to_page(ptr);
 	redzone_start = round_up((unsigned long)(ptr + size),
 				KASAN_SHADOW_SCALE_SIZE);
-	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+	redzone_end = (unsigned long)ptr + page_size(page);
 
 	kasan_unpoison_shadow(ptr, size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
@@ -578,8 +577,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
 			kasan_report_invalid_free(ptr, ip);
 			return;
 		}
-		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
-				KASAN_FREE_PAGE);
+		kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
 	} else {
 		__kasan_slab_free(page->slab_cache, ptr, ip, false);
 	}
diff --git a/mm/nommu.c b/mm/nommu.c
index fed1b6e9c89b..99b7ec318824 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -108,7 +108,7 @@ unsigned int kobjsize(const void *objp)
 	 * The ksize() function is only guaranteed to work for pointers
 	 * returned by kmalloc(). So handle arbitrary pointers here.
 	 */
-	return PAGE_SIZE << compound_order(page);
+	return page_size(page);
 }
 
 /**
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 11df03e71288..eff4b4520c8d 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -153,8 +153,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 
 	if (unlikely(PageHuge(pvmw->page))) {
 		/* when pud is not present, pte will be NULL */
-		pvmw->pte = huge_pte_offset(mm, pvmw->address,
-					    PAGE_SIZE << compound_order(page));
+		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
 		if (!pvmw->pte)
 			return false;
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 31352bba197d..f401732b20e8 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -898,8 +898,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 	 */
 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
 				0, vma, vma->vm_mm, address,
-				min(vma->vm_end, address +
-				    (PAGE_SIZE << compound_order(page))));
+				min(vma->vm_end, address + page_size(page)));
 	mmu_notifier_invalidate_range_start(&range);
 
 	while (page_vma_mapped_walk(&pvmw)) {
@@ -1372,8 +1371,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 */
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				address,
-				min(vma->vm_end, address +
-				    (PAGE_SIZE << compound_order(page))));
+				min(vma->vm_end, address + page_size(page)));
 	if (PageHuge(page)) {
 		/*
 		 * If sharing is possible, start and end will be adjusted
diff --git a/mm/slob.c b/mm/slob.c
index 7f421d0ca9ab..cf377beab962 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -539,7 +539,7 @@ size_t __ksize(const void *block)
 
 	sp = virt_to_page(block);
 	if (unlikely(!PageSlab(sp)))
-		return PAGE_SIZE << compound_order(sp);
+		return page_size(sp);
 
 	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 	m = (unsigned int *)(block - align);
diff --git a/mm/slub.c b/mm/slub.c
index 17fe1cac11fb..42c1b3af3c98 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -829,7 +829,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 		return 1;
 
 	start = page_address(page);
-	length = PAGE_SIZE << compound_order(page);
+	length = page_size(page);
 	end = start + length;
 	remainder = length % s->size;
 	if (!remainder)
@@ -1074,13 +1074,14 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 	init_tracking(s, object);
 }
 
-static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
+static
+void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
 {
 	if (!(s->flags & SLAB_POISON))
 		return;
 
 	metadata_access_enable();
-	memset(addr, POISON_INUSE, PAGE_SIZE << order);
+	memset(addr, POISON_INUSE, page_size(page));
 	metadata_access_disable();
 }
 
@@ -1340,8 +1341,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
 #else /* !CONFIG_SLUB_DEBUG */
 static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}
-static inline void setup_page_debug(struct kmem_cache *s,
-			void *addr, int order) {}
+static inline
+void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
 	struct page *page, void *object, unsigned long addr) { return 0; }
@@ -1639,7 +1640,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
 	void *start, *p, *next;
-	int idx, order;
+	int idx;
 	bool shuffle;
 
 	flags &= gfp_allowed_mask;
@@ -1673,7 +1674,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	page->objects = oo_objects(oo);
 
-	order = compound_order(page);
 	page->slab_cache = s;
 	__SetPageSlab(page);
 	if (page_is_pfmemalloc(page))
@@ -1683,7 +1683,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	start = page_address(page);
 
-	setup_page_debug(s, start, order);
+	setup_page_debug(s, page, start);
 
 	shuffle = shuffle_freelist(s, page);
 
@@ -3932,7 +3932,7 @@ size_t __ksize(const void *object)
 
 	if (unlikely(!PageSlab(page))) {
 		WARN_ON(!PageCompound(page));
-		return PAGE_SIZE << compound_order(page);
+		return page_size(page);
 	}
 
 	return slab_ksize(page->slab_cache);
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index c2f1af3b6a7c..fa8fbb8fa3c8 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -977,7 +977,7 @@ static int xsk_mmap(struct file *file, struct socket *sock,
 	/* Matches the smp_wmb() in xsk_init_queue */
 	smp_rmb();
 	qpg = virt_to_head_page(q->ring);
-	if (size > (PAGE_SIZE << compound_order(qpg)))
+	if (size > page_size(qpg))
 		return -EINVAL;
 
 	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;