Diffstat (limited to 'mm/ksm.c')
-rw-r--r--  mm/ksm.c  |  84
1 file changed, 71 insertions(+), 13 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index dfdc292d3626..d4c228a9d278 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -29,6 +29,7 @@
 #include <linux/wait.h>
 #include <linux/slab.h>
 #include <linux/rbtree.h>
+#include <linux/memory.h>
 #include <linux/mmu_notifier.h>
 #include <linux/swap.h>
 #include <linux/ksm.h>
@@ -108,14 +109,14 @@ struct ksm_scan {
 
 /**
  * struct stable_node - node of the stable rbtree
- * @page: pointer to struct page of the ksm page
  * @node: rb node of this ksm page in the stable tree
  * @hlist: hlist head of rmap_items using this ksm page
+ * @kpfn: page frame number of this ksm page
  */
 struct stable_node {
-	struct page *page;
 	struct rb_node node;
 	struct hlist_head hlist;
+	unsigned long kpfn;
 };
 
 /**
@@ -515,7 +516,7 @@ static struct page *get_ksm_page(struct stable_node *stable_node)
 	struct page *page;
 	void *expected_mapping;
 
-	page = stable_node->page;
+	page = pfn_to_page(stable_node->kpfn);
 	expected_mapping = (void *)stable_node +
 			(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
 	rcu_read_lock();
@@ -973,7 +974,7 @@ static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
  * This function returns the stable tree node of identical content if found,
  * NULL otherwise.
  */
-static struct stable_node *stable_tree_search(struct page *page)
+static struct page *stable_tree_search(struct page *page)
 {
 	struct rb_node *node = root_stable_tree.rb_node;
 	struct stable_node *stable_node;
@@ -981,7 +982,7 @@ static struct stable_node *stable_tree_search(struct page *page)
 	stable_node = page_stable_node(page);
 	if (stable_node) {			/* ksm page forked */
 		get_page(page);
-		return stable_node;
+		return page;
 	}
 
 	while (node) {
@@ -1003,7 +1004,7 @@ static struct stable_node *stable_tree_search(struct page *page)
 			put_page(tree_page);
 			node = node->rb_right;
 		} else
-			return stable_node;
+			return tree_page;
 	}
 
 	return NULL;
@@ -1059,7 +1060,7 @@ static struct stable_node *stable_tree_insert(struct page *kpage)
 
 	INIT_HLIST_HEAD(&stable_node->hlist);
 
-	stable_node->page = kpage;
+	stable_node->kpfn = page_to_pfn(kpage);
 	set_page_stable_node(kpage, stable_node);
 
 	return stable_node;
@@ -1170,9 +1171,8 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
 	remove_rmap_item_from_tree(rmap_item);
 
 	/* We first start with searching the page inside the stable tree */
-	stable_node = stable_tree_search(page);
-	if (stable_node) {
-		kpage = stable_node->page;
+	kpage = stable_tree_search(page);
+	if (kpage) {
 		err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
 		if (!err) {
 			/*
@@ -1180,7 +1180,7 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
 			 * add its rmap_item to the stable tree.
 			 */
 			lock_page(kpage);
-			stable_tree_append(rmap_item, stable_node);
+			stable_tree_append(rmap_item, page_stable_node(kpage));
 			unlock_page(kpage);
 		}
 		put_page(kpage);
@@ -1715,12 +1715,63 @@ void ksm_migrate_page(struct page *newpage, struct page *oldpage)
 
 	stable_node = page_stable_node(newpage);
 	if (stable_node) {
-		VM_BUG_ON(stable_node->page != oldpage);
-		stable_node->page = newpage;
+		VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage));
+		stable_node->kpfn = page_to_pfn(newpage);
 	}
 }
 #endif /* CONFIG_MIGRATION */
 
+#ifdef CONFIG_MEMORY_HOTREMOVE
+static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn,
+						 unsigned long end_pfn)
+{
+	struct rb_node *node;
+
+	for (node = rb_first(&root_stable_tree); node; node = rb_next(node)) {
+		struct stable_node *stable_node;
+
+		stable_node = rb_entry(node, struct stable_node, node);
+		if (stable_node->kpfn >= start_pfn &&
+		    stable_node->kpfn < end_pfn)
+			return stable_node;
+	}
+	return NULL;
+}
+
+static int ksm_memory_callback(struct notifier_block *self,
+			       unsigned long action, void *arg)
+{
+	struct memory_notify *mn = arg;
+	struct stable_node *stable_node;
+
+	switch (action) {
+	case MEM_GOING_OFFLINE:
+		/*
+		 * Keep it very simple for now: just lock out ksmd and
+		 * MADV_UNMERGEABLE while any memory is going offline.
+		 */
+		mutex_lock(&ksm_thread_mutex);
+		break;
+
+	case MEM_OFFLINE:
+		/*
+		 * Most of the work is done by page migration; but there might
+		 * be a few stable_nodes left over, still pointing to struct
+		 * pages which have been offlined: prune those from the tree.
+		 */
+		while ((stable_node = ksm_check_stable_tree(mn->start_pfn,
+				mn->start_pfn + mn->nr_pages)) != NULL)
+			remove_node_from_stable_tree(stable_node);
+		/* fallthrough */
+
+	case MEM_CANCEL_OFFLINE:
+		mutex_unlock(&ksm_thread_mutex);
+		break;
+	}
+	return NOTIFY_OK;
+}
+#endif /* CONFIG_MEMORY_HOTREMOVE */
+
 #ifdef CONFIG_SYSFS
 /*
  * This all compiles without CONFIG_SYSFS, but is a waste of space.
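
The MEM_GOING_OFFLINE / MEM_OFFLINE / MEM_CANCEL_OFFLINE cases added above follow the standard memory hotplug notifier protocol from <linux/memory.h>: the callback is invoked with a struct memory_notify describing the pfn range being taken offline. As a rough illustration only (the module name, messages and priority value are invented and are not part of this patch), a minimal standalone module listening for the same notifications could look like this:

/*
 * Hypothetical demo module, not part of the patch: registers a memory
 * hotplug notifier and logs the same transitions ksm_memory_callback()
 * handles above.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/memory.h>
#include <linux/notifier.h>

static int demo_memory_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;

	switch (action) {
	case MEM_GOING_OFFLINE:
		pr_info("demo: pfns %lu..%lu going offline\n",
			mn->start_pfn, mn->start_pfn + mn->nr_pages);
		break;
	case MEM_OFFLINE:
		pr_info("demo: range is now offline\n");
		break;
	case MEM_CANCEL_OFFLINE:
		pr_info("demo: offline was cancelled\n");
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_memory_nb = {
	.notifier_call	= demo_memory_callback,
	.priority	= 0,	/* lower than KSM's 100, so called later */
};

static int __init demo_init(void)
{
	return register_memory_notifier(&demo_memory_nb);
}

static void __exit demo_exit(void)
{
	unregister_memory_notifier(&demo_memory_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

hotplug_memory_notifier(), used by ksm_init() in the next hunk, is a convenience wrapper that builds such a notifier_block from a callback and a priority and registers it on the same chain.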
@@ -1946,6 +1997,13 @@ static int __init ksm_init(void)
 
 #endif /* CONFIG_SYSFS */
 
+#ifdef CONFIG_MEMORY_HOTREMOVE
+	/*
+	 * Choose a high priority since the callback takes ksm_thread_mutex:
+	 * later callbacks could only be taking locks which nest within that.
+	 */
+	hotplug_memory_notifier(ksm_memory_callback, 100);
+#endif
 	return 0;
 
 out_free2:
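
The priority of 100 passed to hotplug_memory_notifier() matters because notifier chains are kept sorted by priority, with higher-priority callbacks invoked first; KSM therefore takes ksm_thread_mutex before lower-priority memory hotplug callbacks get to run. A self-contained sketch of that ordering on an ordinary blocking notifier chain (all names and values are hypothetical, not taken from this patch):

/*
 * Hypothetical demo, not part of the patch: two callbacks on one chain;
 * the one registered with the higher .priority runs first even though
 * it is registered second.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>

static BLOCKING_NOTIFIER_HEAD(demo_chain);

static int high_prio_cb(struct notifier_block *nb, unsigned long event, void *data)
{
	pr_info("demo: high-priority callback runs first (event %lu)\n", event);
	return NOTIFY_OK;
}

static int low_prio_cb(struct notifier_block *nb, unsigned long event, void *data)
{
	pr_info("demo: low-priority callback runs second (event %lu)\n", event);
	return NOTIFY_OK;
}

static struct notifier_block high_prio_nb = {
	.notifier_call	= high_prio_cb,
	.priority	= 100,
};

static struct notifier_block low_prio_nb = {
	.notifier_call	= low_prio_cb,
	.priority	= 0,
};

static int __init demo_order_init(void)
{
	/* Registration order does not matter: the chain sorts by .priority. */
	blocking_notifier_chain_register(&demo_chain, &low_prio_nb);
	blocking_notifier_chain_register(&demo_chain, &high_prio_nb);
	blocking_notifier_call_chain(&demo_chain, 1, NULL);
	return 0;
}

static void __exit demo_order_exit(void)
{
	blocking_notifier_chain_unregister(&demo_chain, &high_prio_nb);
	blocking_notifier_chain_unregister(&demo_chain, &low_prio_nb);
}

module_init(demo_order_init);
module_exit(demo_order_exit);
MODULE_LICENSE("GPL");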