diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-14 19:49:17 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-14 19:49:17 -0400 |
commit | 1dcf58d6e6e6eb7ec10e9abc56887b040205b06f (patch) | |
tree | c03e7a25ef13eea62f1547914a76e5c68f3f4c28 /mm/migrate.c | |
parent | 80dcc31fbe55932ac9204daee5f2ebc0c49b6da3 (diff) | |
parent | e4b0db72be2487bae0e3251c22f82c104f7c1cfd (diff) |
Merge branch 'akpm' (patches from Andrew)
Merge first patchbomb from Andrew Morton:
- arch/sh updates
- ocfs2 updates
- kernel/watchdog feature
- about half of mm/
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (122 commits)
Documentation: update arch list in the 'memtest' entry
Kconfig: memtest: update number of test patterns up to 17
arm: add support for memtest
arm64: add support for memtest
memtest: use phys_addr_t for physical addresses
mm: move memtest under mm
mm, hugetlb: abort __get_user_pages if current has been oom killed
mm, mempool: do not allow atomic resizing
memcg: print cgroup information when system panics due to panic_on_oom
mm: numa: remove migrate_ratelimited
mm: fold arch_randomize_brk into ARCH_HAS_ELF_RANDOMIZE
mm: split ET_DYN ASLR from mmap ASLR
s390: redefine randomize_et_dyn for ELF_ET_DYN_BASE
mm: expose arch_mmap_rnd when available
s390: standardize mmap_rnd() usage
powerpc: standardize mmap_rnd() usage
mips: extract logic for mmap_rnd()
arm64: standardize mmap_rnd() usage
x86: standardize mmap_rnd() usage
arm: factor out mmap ASLR into mmap_rnd
...
Diffstat (limited to 'mm/migrate.c')
-rw-r--r-- | mm/migrate.c | 37 |
1 file changed, 14 insertions, 23 deletions
diff --git a/mm/migrate.c b/mm/migrate.c index 85e042686031..a65ff72ab739 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -901,12 +901,23 @@ out: | |||
901 | } | 901 | } |
902 | 902 | ||
903 | /* | 903 | /* |
904 | * gcc 4.7 and 4.8 on arm get an ICE when inlining unmap_and_move(). Work | ||
905 | * around it. | ||
906 | */ | ||
907 | #if (GCC_VERSION >= 40700 && GCC_VERSION < 40900) && defined(CONFIG_ARM) | ||
908 | #define ICE_noinline noinline | ||
909 | #else | ||
910 | #define ICE_noinline | ||
911 | #endif | ||
912 | |||
913 | /* | ||
904 | * Obtain the lock on page, remove all ptes and migrate the page | 914 | * Obtain the lock on page, remove all ptes and migrate the page |
905 | * to the newly allocated page in newpage. | 915 | * to the newly allocated page in newpage. |
906 | */ | 916 | */ |
907 | static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page, | 917 | static ICE_noinline int unmap_and_move(new_page_t get_new_page, |
908 | unsigned long private, struct page *page, int force, | 918 | free_page_t put_new_page, |
909 | enum migrate_mode mode) | 919 | unsigned long private, struct page *page, |
920 | int force, enum migrate_mode mode) | ||
910 | { | 921 | { |
911 | int rc = 0; | 922 | int rc = 0; |
912 | int *result = NULL; | 923 | int *result = NULL; |
@@ -1554,30 +1565,10 @@ static struct page *alloc_misplaced_dst_page(struct page *page, | |||
1554 | * page migration rate limiting control. | 1565 | * page migration rate limiting control. |
1555 | * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs | 1566 | * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs |
1556 | * window of time. Default here says do not migrate more than 1280M per second. | 1567 | * window of time. Default here says do not migrate more than 1280M per second. |
1557 | * If a node is rate-limited then PTE NUMA updates are also rate-limited. However | ||
1558 | * as it is faults that reset the window, pte updates will happen unconditionally | ||
1559 | * if there has not been a fault since @pteupdate_interval_millisecs after the | ||
1560 | * throttle window closed. | ||
1561 | */ | 1568 | */ |
1562 | static unsigned int migrate_interval_millisecs __read_mostly = 100; | 1569 | static unsigned int migrate_interval_millisecs __read_mostly = 100; |
1563 | static unsigned int pteupdate_interval_millisecs __read_mostly = 1000; | ||
1564 | static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT); | 1570 | static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT); |
1565 | 1571 | ||
1566 | /* Returns true if NUMA migration is currently rate limited */ | ||
1567 | bool migrate_ratelimited(int node) | ||
1568 | { | ||
1569 | pg_data_t *pgdat = NODE_DATA(node); | ||
1570 | |||
1571 | if (time_after(jiffies, pgdat->numabalancing_migrate_next_window + | ||
1572 | msecs_to_jiffies(pteupdate_interval_millisecs))) | ||
1573 | return false; | ||
1574 | |||
1575 | if (pgdat->numabalancing_migrate_nr_pages < ratelimit_pages) | ||
1576 | return false; | ||
1577 | |||
1578 | return true; | ||
1579 | } | ||
1580 | |||
1581 | /* Returns true if the node is migrate rate-limited after the update */ | 1572 | /* Returns true if the node is migrate rate-limited after the update */ |
1582 | static bool numamigrate_update_ratelimit(pg_data_t *pgdat, | 1573 | static bool numamigrate_update_ratelimit(pg_data_t *pgdat, |
1583 | unsigned long nr_pages) | 1574 | unsigned long nr_pages) |