author     Hugh Dickins <hughd@google.com>                 2013-02-22 19:35:05 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-02-23 20:50:19 -0500
commit     ee0ea59cf9ea95369d686bdc4b3d8c027e2b99cd (patch)
tree       31b446ae45861baf9532d94bffcc091933f7df95 /mm
parent     e850dcf530a470b6115344ee00acf766d824db53 (diff)
ksm: reorganize ksm_check_stable_tree
Memory hotremove's ksm_check_stable_tree() is pitifully inefficient (it
restarts the scan whenever it finds a stale node to remove), but rearrange
it so that at least it does not needlessly restart from nid 0 each time.
Also add a couple of comments explaining why we keep a pfn rather than a
page pointer.
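
For context, the second hunk below removes the old caller-side loop, in
which every stale node found forced a fresh scan of all nodes' stable
trees starting from nid 0 (excerpt of the old pattern, as removed below):

	while ((stable_node = ksm_check_stable_tree(mn->start_pfn,
				mn->start_pfn + mn->nr_pages)) != NULL)
		remove_node_from_stable_tree(stable_node);

With this patch the removal happens inside ksm_check_stable_tree() itself,
and the walk restarts only from rb_first() of the current nid's tree.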
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Petr Holasek <pholasek@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Izik Eidus <izik.eidus@ravellosystems.com>
Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/ksm.c | 38
1 file changed, 22 insertions(+), 16 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1830,31 +1830,36 @@ void ksm_migrate_page(struct page *newpage, struct page *oldpage)
 #endif /* CONFIG_MIGRATION */
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn,
-						 unsigned long end_pfn)
+static void ksm_check_stable_tree(unsigned long start_pfn,
+				  unsigned long end_pfn)
 {
+	struct stable_node *stable_node;
 	struct rb_node *node;
 	int nid;
 
-	for (nid = 0; nid < nr_node_ids; nid++)
-		for (node = rb_first(&root_stable_tree[nid]); node;
-				node = rb_next(node)) {
-			struct stable_node *stable_node;
-
+	for (nid = 0; nid < nr_node_ids; nid++) {
+		node = rb_first(&root_stable_tree[nid]);
+		while (node) {
 			stable_node = rb_entry(node, struct stable_node, node);
 			if (stable_node->kpfn >= start_pfn &&
-			    stable_node->kpfn < end_pfn)
-				return stable_node;
+			    stable_node->kpfn < end_pfn) {
+				/*
+				 * Don't get_ksm_page, page has already gone:
+				 * which is why we keep kpfn instead of page*
+				 */
+				remove_node_from_stable_tree(stable_node);
+				node = rb_first(&root_stable_tree[nid]);
+			} else
+				node = rb_next(node);
+			cond_resched();
 		}
-
-	return NULL;
+	}
 }
 
 static int ksm_memory_callback(struct notifier_block *self,
 			       unsigned long action, void *arg)
 {
 	struct memory_notify *mn = arg;
-	struct stable_node *stable_node;
 
 	switch (action) {
 	case MEM_GOING_OFFLINE:
@@ -1874,11 +1879,12 @@ static int ksm_memory_callback(struct notifier_block *self,
 	/*
 	 * Most of the work is done by page migration; but there might
	 * be a few stable_nodes left over, still pointing to struct
-	 * pages which have been offlined: prune those from the tree.
+	 * pages which have been offlined: prune those from the tree,
+	 * otherwise get_ksm_page() might later try to access a
+	 * non-existent struct page.
 	 */
-	while ((stable_node = ksm_check_stable_tree(mn->start_pfn,
-				mn->start_pfn + mn->nr_pages)) != NULL)
-		remove_node_from_stable_tree(stable_node);
+	ksm_check_stable_tree(mn->start_pfn,
+			      mn->start_pfn + mn->nr_pages);
 	/* fallthrough */
 
 	case MEM_CANCEL_OFFLINE:
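
The same remove-and-restart idiom applies to any rbtree walk that frees
nodes as it goes. A minimal sketch, assuming a hypothetical range_node
type keyed by pfn and kfree() ownership of the nodes (only the rbtree
helpers, cond_resched() and kfree() are real kernel APIs; the names
range_node and prune_pfn_range are made up for illustration):

#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* Hypothetical node type keyed by pfn, for illustration only */
struct range_node {
	unsigned long pfn;
	struct rb_node rb;
};

/*
 * Remove every node whose pfn falls in [start_pfn, end_pfn) from @root.
 * After each removal the walk restarts from rb_first(): the erased node
 * is freed and the tree may have been rebalanced, so calling rb_next()
 * on it would be unsafe.
 */
static void prune_pfn_range(struct rb_root *root,
			    unsigned long start_pfn, unsigned long end_pfn)
{
	struct rb_node *node = rb_first(root);

	while (node) {
		struct range_node *rn = rb_entry(node, struct range_node, rb);

		if (rn->pfn >= start_pfn && rn->pfn < end_pfn) {
			rb_erase(&rn->rb, root);
			kfree(rn);
			node = rb_first(root);
		} else
			node = rb_next(node);
		cond_resched();
	}
}

Restarting after each removal is what the commit message calls "pitifully
inefficient", but after this patch ksm_check_stable_tree() at least
confines the restart to the current nid's tree rather than rescanning
from nid 0.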