diff options
Diffstat (limited to 'mm/migrate.c')
-rw-r--r-- | mm/migrate.c | 37 |
1 file changed, 14 insertions, 23 deletions
diff --git a/mm/migrate.c b/mm/migrate.c index 85e042686031..a65ff72ab739 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -901,12 +901,23 @@ out: | |||
901 | } | 901 | } |
902 | 902 | ||
903 | /* | 903 | /* |
904 | * gcc 4.7 and 4.8 on arm get ICEs when inlining unmap_and_move(). Work | ||
905 | * around it. | ||
906 | */ | ||
907 | #if (GCC_VERSION >= 40700 && GCC_VERSION < 40900) && defined(CONFIG_ARM) | ||
908 | #define ICE_noinline noinline | ||
909 | #else | ||
910 | #define ICE_noinline | ||
911 | #endif | ||
912 | |||
913 | /* | ||
904 | * Obtain the lock on page, remove all ptes and migrate the page | 914 | * Obtain the lock on page, remove all ptes and migrate the page |
905 | * to the newly allocated page in newpage. | 915 | * to the newly allocated page in newpage. |
906 | */ | 916 | */ |
907 | static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page, | 917 | static ICE_noinline int unmap_and_move(new_page_t get_new_page, |
908 | unsigned long private, struct page *page, int force, | 918 | free_page_t put_new_page, |
909 | enum migrate_mode mode) | 919 | unsigned long private, struct page *page, |
920 | int force, enum migrate_mode mode) | ||
910 | { | 921 | { |
911 | int rc = 0; | 922 | int rc = 0; |
912 | int *result = NULL; | 923 | int *result = NULL; |
@@ -1554,30 +1565,10 @@ static struct page *alloc_misplaced_dst_page(struct page *page, | |||
1554 | * page migration rate limiting control. | 1565 | * page migration rate limiting control. |
1555 | * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs | 1566 | * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs |
1556 | * window of time. Default here says do not migrate more than 1280M per second. | 1567 | * window of time. Default here says do not migrate more than 1280M per second. |
1557 | * If a node is rate-limited then PTE NUMA updates are also rate-limited. However | ||
1558 | * as it is faults that reset the window, pte updates will happen unconditionally | ||
1559 | * if there has not been a fault since @pteupdate_interval_millisecs after the | ||
1560 | * throttle window closed. | ||
1561 | */ | 1568 | */ |
1562 | static unsigned int migrate_interval_millisecs __read_mostly = 100; | 1569 | static unsigned int migrate_interval_millisecs __read_mostly = 100; |
1563 | static unsigned int pteupdate_interval_millisecs __read_mostly = 1000; | ||
1564 | static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT); | 1570 | static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT); |
1565 | 1571 | ||
1566 | /* Returns true if NUMA migration is currently rate limited */ | ||
1567 | bool migrate_ratelimited(int node) | ||
1568 | { | ||
1569 | pg_data_t *pgdat = NODE_DATA(node); | ||
1570 | |||
1571 | if (time_after(jiffies, pgdat->numabalancing_migrate_next_window + | ||
1572 | msecs_to_jiffies(pteupdate_interval_millisecs))) | ||
1573 | return false; | ||
1574 | |||
1575 | if (pgdat->numabalancing_migrate_nr_pages < ratelimit_pages) | ||
1576 | return false; | ||
1577 | |||
1578 | return true; | ||
1579 | } | ||
1580 | |||
1581 | /* Returns true if the node is migrate rate-limited after the update */ | 1572 | /* Returns true if the node is migrate rate-limited after the update */ |
1582 | static bool numamigrate_update_ratelimit(pg_data_t *pgdat, | 1573 | static bool numamigrate_update_ratelimit(pg_data_t *pgdat, |
1583 | unsigned long nr_pages) | 1574 | unsigned long nr_pages) |