Diffstat (limited to 'mm/internal.h')

 mm/internal.h | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 684f7aa9692a..29e1e761f9eb 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -27,8 +27,8 @@ static inline void set_page_count(struct page *page, int v)
  */
 static inline void set_page_refcounted(struct page *page)
 {
-	VM_BUG_ON(PageTail(page));
-	VM_BUG_ON(atomic_read(&page->_count));
+	VM_BUG_ON_PAGE(PageTail(page), page);
+	VM_BUG_ON_PAGE(atomic_read(&page->_count), page);
 	set_page_count(page, 1);
 }
 
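The hunks in this patch convert bare VM_BUG_ON() assertions to VM_BUG_ON_PAGE(), which dumps the state of the offending struct page before hitting BUG(), making the resulting backtrace actionable. A minimal sketch of the macro, assuming a CONFIG_DEBUG_VM build and the dump_page() helper; this is a sketch close to the include/linux/mmdebug.h definition, not the authoritative source:

	#ifdef CONFIG_DEBUG_VM
	/* On failure, dump the page's flags/mapcount/refcount, then BUG(). */
	#define VM_BUG_ON_PAGE(cond, page)				\
		do {							\
			if (unlikely(cond)) {				\
				dump_page(page);			\
				BUG();					\
			}						\
		} while (0)
	#else
	#define VM_BUG_ON_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
	#endif
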
@@ -46,12 +46,10 @@ static inline void __get_page_tail_foll(struct page *page,
 	 * speculative page access (like in
 	 * page_cache_get_speculative()) on tail pages.
 	 */
-	VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
-	VM_BUG_ON(atomic_read(&page->_count) != 0);
-	VM_BUG_ON(page_mapcount(page) < 0);
+	VM_BUG_ON_PAGE(atomic_read(&page->first_page->_count) <= 0, page);
 	if (get_page_head)
 		atomic_inc(&page->first_page->_count);
-	atomic_inc(&page->_mapcount);
+	get_huge_page_tail(page);
 }
 
 /*
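The two tail-page assertions deleted above are not lost: they are folded into get_huge_page_tail(), which performs the same sanity checks before taking the tail pin, so __get_page_tail_foll() and the other tail-refcounting paths share one helper. A sketch of the helper as defined in include/linux/mm.h around this series; the exact set of assertions is an assumption, as it has shifted between releases:

	static inline void get_huge_page_tail(struct page *page)
	{
		/*
		 * __split_huge_page_refcount() cannot run from under us.
		 */
		VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
		VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
		atomic_inc(&page->_mapcount);
	}
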
@@ -73,7 +71,7 @@ static inline void get_page_foll(struct page *page)
 		 * Getting a normal page or the head of a compound page
 		 * requires to already have an elevated page->_count.
 		 */
-		VM_BUG_ON(atomic_read(&page->_count) <= 0);
+		VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
 		atomic_inc(&page->_count);
 	}
 }
@@ -85,7 +83,6 @@ extern unsigned long highest_memmap_pfn;
  */
 extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
-extern unsigned long zone_reclaimable_pages(struct zone *zone);
 extern bool zone_reclaimable(struct zone *zone);
 
 /*
@@ -101,6 +98,7 @@ extern void prep_compound_page(struct page *page, unsigned long order);
 #ifdef CONFIG_MEMORY_FAILURE
 extern bool is_free_buddy_page(struct page *page);
 #endif
+extern int user_min_free_kbytes;
 
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
 
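The new user_min_free_kbytes declaration lets the page allocator remember a min_free_kbytes value that was set explicitly through sysctl, so a later automatic watermark recalculation does not silently shrink it. A sketch of how the sysctl handler in mm/page_alloc.c could record it; the handler name and surrounding flow are assumptions for illustration:

	int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
		void __user *buffer, size_t *length, loff_t *ppos)
	{
		proc_dointvec_minmax(table, write, buffer, length, ppos);
		if (write) {
			/* Remember the explicit user setting... */
			user_min_free_kbytes = min_free_kbytes;
			/* ...and recompute the per-zone watermarks from it. */
			setup_per_zone_wmarks();
		}
		return 0;
	}
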
@@ -144,9 +142,11 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 #endif
 
 /*
- * function for dealing with page's order in buddy system.
- * zone->lock is already acquired when we use these.
- * So, we don't need atomic page->flags operations here.
+ * This function returns the order of a free page in the buddy system. In
+ * general, page_zone(page)->lock must be held by the caller to prevent the
+ * page from being allocated in parallel and returning garbage as the order.
+ * If a caller does not hold page_zone(page)->lock, it must guarantee that the
+ * page cannot be allocated or merged in parallel.
  */
 static inline unsigned long page_order(struct page *page)
 {
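The rewritten comment makes the locking contract explicit: page_order() reads a free page's order out of page_private(), which is only stable while page_zone(page)->lock prevents the page from being allocated or merged. A minimal, illustrative caller under that contract (not part of this patch):

	struct zone *zone = page_zone(page);
	unsigned long flags;
	unsigned long order = 0;

	spin_lock_irqsave(&zone->lock, flags);
	/* Only a page still on the buddy free lists has a meaningful order. */
	if (PageBuddy(page))
		order = page_order(page);
	spin_unlock_irqrestore(&zone->lock, flags);
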
@@ -175,7 +175,7 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
 					struct page *page)
 {
-	VM_BUG_ON(PageLRU(page));
+	VM_BUG_ON_PAGE(PageLRU(page), page);
 
 	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
 		return 0;