summary | refs | log | tree | commit | diff | stats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
author:    Joonsoo Kim <iamjoonsoo.kim@lge.com>  2016-03-17 17:19:26 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2016-03-17 18:09:34 -0400
commit:    fe896d1878949ea92ba547587bc3075cc688fb8f (patch)
tree:      582ae505611bafae117c0de8498916485699ac78 /mm/page_alloc.c
parent:    444eb2a449ef36fe115431ed7b71467c4563c7f1 (diff)
mm: introduce page reference manipulation functions
The success of CMA allocation largely depends on the success of migration and key factor of it is page reference count. Until now, page reference is manipulated by direct calling atomic functions so we cannot follow up who and where manipulate it. Then, it is hard to find actual reason of CMA allocation failure. CMA allocation should be guaranteed to succeed so finding offending place is really important.

In this patch, call sites where page reference is manipulated are converted to introduced wrapper function. This is preparation step to add tracepoint to each page reference manipulation function. With this facility, we can easily find reason of CMA allocation failure. There is no functional change in this patch.

In addition, this patch also converts reference read sites. It will help a second step that renames page._count to something else and prevents later attempt to direct access to it (Suggested by Andrew).

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  12
1 file changed, 6 insertions, 6 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 096a00d98a45..30134a8f7cc8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -766,7 +766,7 @@ static inline int free_pages_check(struct page *page)
 		bad_reason = "nonzero mapcount";
 	if (unlikely(page->mapping != NULL))
 		bad_reason = "non-NULL mapping";
-	if (unlikely(atomic_read(&page->_count) != 0))
+	if (unlikely(page_ref_count(page) != 0))
 		bad_reason = "nonzero _count";
 	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
 		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
@@ -1462,7 +1462,7 @@ static inline int check_new_page(struct page *page)
 		bad_reason = "nonzero mapcount";
 	if (unlikely(page->mapping != NULL))
 		bad_reason = "non-NULL mapping";
-	if (unlikely(atomic_read(&page->_count) != 0))
+	if (unlikely(page_ref_count(page) != 0))
 		bad_reason = "nonzero _count";
 	if (unlikely(page->flags & __PG_HWPOISON)) {
 		bad_reason = "HWPoisoned (hardware-corrupted)";
@@ -3475,7 +3475,7 @@ refill:
 		/* Even if we own the page, we do not use atomic_set().
 		 * This would break get_page_unless_zero() users.
 		 */
-		atomic_add(size - 1, &page->_count);
+		page_ref_add(page, size - 1);

 		/* reset page count bias and offset to start of new frag */
 		nc->pfmemalloc = page_is_pfmemalloc(page);
@@ -3487,7 +3487,7 @@ refill:
 	if (unlikely(offset < 0)) {
 		page = virt_to_page(nc->va);

-		if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
+		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
 			goto refill;

 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
@@ -3495,7 +3495,7 @@ refill:
 		size = nc->size;
 #endif
 		/* OK, page count is 0, we can safely set it */
-		atomic_set(&page->_count, size);
+		set_page_count(page, size);

 		/* reset page count bias and offset to start of new frag */
 		nc->pagecnt_bias = size;
@@ -6852,7 +6852,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 		 * This check already skips compound tails of THP
 		 * because their page->_count is zero at all time.
 		 */
-		if (!atomic_read(&page->_count)) {
+		if (!page_ref_count(page)) {
 			if (PageBuddy(page))
 				iter += (1 << page_order(page)) - 1;
 			continue;