author	Joonsoo Kim <iamjoonsoo.kim@lge.com>	2016-03-17 17:19:26 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-17 18:09:34 -0400
commit	fe896d1878949ea92ba547587bc3075cc688fb8f (patch)
tree	582ae505611bafae117c0de8498916485699ac78 /mm/migrate.c
parent	444eb2a449ef36fe115431ed7b71467c4563c7f1 (diff)
mm: introduce page reference manipulation functions
The success of CMA allocation largely depends on the success of migration, and the key factor there is the page reference count. Until now, page references have been manipulated by calling atomic functions directly, so we cannot track who manipulates them and where, which makes it hard to find the actual reason for a CMA allocation failure. CMA allocation should be guaranteed to succeed, so finding the offending place is really important.

In this patch, call sites that manipulate page references are converted to the newly introduced wrapper functions. This is a preparation step for adding a tracepoint to each page reference manipulation function. With this facility, we can easily find the reason for a CMA allocation failure. There is no functional change in this patch.

In addition, this patch also converts reference read sites. This will help a second step that renames page._count to something else and prevents later attempts to access it directly (suggested by Andrew).

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
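For orientation, here is a minimal sketch of what the freeze/unfreeze wrappers used in the diff below amount to, assuming they live in a header such as include/linux/page_ref.h and operate on page->_count; the exact header location and any debugging asserts are assumptions, not part of this diff.

	/* Sketch only: freeze succeeds when the refcount equals the expected
	 * value and is atomically replaced with 0, blocking new references. */
	static inline int page_ref_freeze(struct page *page, int count)
	{
		return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
	}

	/* Sketch only: unfreeze restores the refcount; callers pass the value
	 * the page should end up with (hence expected_count - 1 below, since
	 * the old page drops the mapping's reference). */
	static inline void page_ref_unfreeze(struct page *page, int count)
	{
		atomic_set(&page->_count, count);
	}

Centralizing these operations in one place is what later allows a tracepoint to be dropped into each wrapper without touching every call site again.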
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--	mm/migrate.c	10
1 file changed, 5 insertions, 5 deletions
diff --git a/mm/migrate.c b/mm/migrate.c
index fdaf0818fb30..577c94b8e959 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -349,7 +349,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		return -EAGAIN;
 	}
 
-	if (!page_freeze_refs(page, expected_count)) {
+	if (!page_ref_freeze(page, expected_count)) {
 		spin_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
 	}
@@ -363,7 +363,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 */
 	if (mode == MIGRATE_ASYNC && head &&
 			!buffer_migrate_lock_buffers(head, mode)) {
-		page_unfreeze_refs(page, expected_count);
+		page_ref_unfreeze(page, expected_count);
 		spin_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
 	}
@@ -397,7 +397,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
-	page_unfreeze_refs(page, expected_count - 1);
+	page_ref_unfreeze(page, expected_count - 1);
 
 	spin_unlock(&mapping->tree_lock);
 	/* Leave irq disabled to prevent preemption while updating stats */
@@ -451,7 +451,7 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 		return -EAGAIN;
 	}
 
-	if (!page_freeze_refs(page, expected_count)) {
+	if (!page_ref_freeze(page, expected_count)) {
 		spin_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
 	}
@@ -463,7 +463,7 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 
 	radix_tree_replace_slot(pslot, newpage);
 
-	page_unfreeze_refs(page, expected_count - 1);
+	page_ref_unfreeze(page, expected_count - 1);
 
 	spin_unlock_irq(&mapping->tree_lock);
 
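The commit message frames this conversion as preparation for tracepoints on each reference manipulation. As a purely hypothetical sketch of what such a hook could look like (the event name, fields, and format string here are illustrative assumptions, not the event definitions added by any later patch):

	#include <linux/tracepoint.h>

	/* Hypothetical event; one such event per wrapper would let ftrace
	 * record every freeze/unfreeze with the page and refcount involved. */
	TRACE_EVENT(page_ref_freeze,

		TP_PROTO(struct page *page, int count, int ret),

		TP_ARGS(page, count, ret),

		TP_STRUCT__entry(
			__field(unsigned long, pfn)
			__field(int, count)
			__field(int, ret)
		),

		TP_fast_assign(
			__entry->pfn = page_to_pfn(page);
			__entry->count = count;
			__entry->ret = ret;
		),

		TP_printk("pfn=0x%lx count=%d ret=%d",
			  __entry->pfn, __entry->count, __entry->ret)
	);

With something like this in place, page_ref_freeze() could call trace_page_ref_freeze(page, count, ret) after the atomic operation, so a failed freeze during CMA migration could be traced back to the holder of the unexpected reference.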