author	Pingfan Liu <kernelfans@gmail.com>	2019-07-11 23:57:39 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-07-12 14:05:45 -0400
commit	aa712399c1e8245c375a5c44760de684ec2ebefb (patch)
tree	10c9359a002db7e0f01ff327ab1fbeda3da7b2fe /mm
parent	520b4a4496f12b117b94f3ac7c493651881c5fe3 (diff)
mm/gup: speed up check_and_migrate_cma_pages() on huge page
Both hugetlb and THP huge pages live in a pageblock of a single migration
type, since they are allocated from a free_list[].  Based on this fact, it
is enough to check a single subpage to decide the migration type of the
whole huge page.  This saves (2M/4K - 1) loop iterations for a pmd-mapped
huge page on x86, with similar savings on other architectures.
Furthermore, when calling isolate_huge_page(), it avoids taking the global
hugetlb_lock repeatedly, and avoids the needless remove/add cycle on the
local list cma_page_list.

[akpm@linux-foundation.org: make `i' and `step' unsigned]
Link: http://lkml.kernel.org/r/1561612545-28997-1-git-send-email-kernelfans@gmail.com
Signed-off-by: Pingfan Liu <kernelfans@gmail.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
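To make the step arithmetic concrete, here is a back-of-the-envelope
standalone sketch (not kernel code): the hard-coded numbers stand in for
1 << compound_order(head) and the (pages[i] - head) tail offset used by
the patch below.

/* Standalone sketch of the step computation for a 2MB huge page made of
 * 4KB subpages; values are illustrative stand-ins, not kernel state. */
#include <stdio.h>

int main(void)
{
	unsigned long nr_subpages = 512;  /* 2MB huge page / 4KB base page */
	unsigned long tail_offset = 3;    /* gup started at the 4th subpage */

	/* Mirrors: step = (1 << compound_order(head)) - (pages[i] - head); */
	unsigned long step = nr_subpages - tail_offset;

	/* One migratetype check now covers the remaining 509 entries,
	 * instead of 509 separate is_migrate_cma_page() calls. */
	printf("advance i by %lu entries\n", step);
	return 0;
}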
Diffstat (limited to 'mm')
-rw-r--r--	mm/gup.c	24
1 file changed, 16 insertions(+), 8 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
index 83d480e9b05f..f411bab037f5 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1449,25 +1449,31 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
 					struct vm_area_struct **vmas,
 					unsigned int gup_flags)
 {
-	long i;
+	unsigned long i;
+	unsigned long step;
 	bool drain_allow = true;
 	bool migrate_allow = true;
 	LIST_HEAD(cma_page_list);
 
 check_again:
-	for (i = 0; i < nr_pages; i++) {
+	for (i = 0; i < nr_pages;) {
+
+		struct page *head = compound_head(pages[i]);
+
+		/*
+		 * gup may start from a tail page. Advance step by the left
+		 * part.
+		 */
+		step = (1 << compound_order(head)) - (pages[i] - head);
 		/*
 		 * If we get a page from the CMA zone, since we are going to
 		 * be pinning these entries, we might as well move them out
 		 * of the CMA zone if possible.
 		 */
-		if (is_migrate_cma_page(pages[i])) {
-
-			struct page *head = compound_head(pages[i]);
-
-			if (PageHuge(head)) {
+		if (is_migrate_cma_page(head)) {
+			if (PageHuge(head))
 				isolate_huge_page(head, &cma_page_list);
-			} else {
+			else {
 				if (!PageLRU(head) && drain_allow) {
 					lru_add_drain_all();
 					drain_allow = false;
@@ -1482,6 +1488,8 @@ check_again:
 				}
 			}
 		}
+
+		i += step;
 	}
 
 	if (!list_empty(&cma_page_list)) {
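Abstracted from the kernel helpers, the diff above is the classic
skip-by-group-size loop. A minimal runnable sketch with plain integers
standing in for struct page; block_of(), needs_migration(), and BLOCK_SIZE
are illustrative stand-ins, not names from gup.c:

/* Sketch of the patch's loop shape: classify a whole block by its head
 * element and advance past it, instead of testing every element. */
#include <stdio.h>

#define NR_ENTRIES 16
#define BLOCK_SIZE 4	/* stand-in for 1 << compound_order(head) */

static unsigned long block_of(unsigned long i) { return i / BLOCK_SIZE; }
static int needs_migration(unsigned long blk) { return blk == 2; }

int main(void)
{
	unsigned long i = 0, checks = 0;

	while (i < NR_ENTRIES) {
		unsigned long head = block_of(i) * BLOCK_SIZE;
		/* remainder of the block, as gup may start at a "tail" entry */
		unsigned long step = BLOCK_SIZE - (i - head);

		checks++;
		if (needs_migration(block_of(i)))
			printf("would migrate block %lu\n", block_of(i));

		i += step;	/* skip the rest of the block */
	}
	printf("%lu checks instead of %d\n", checks, NR_ENTRIES);
	return 0;
}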