author    Hugh Dickins <hughd@google.com>    2013-02-22 19:35:03 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-02-23 20:50:19 -0500
commit    e850dcf530a470b6115344ee00acf766d824db53 (patch)
tree      67a41971578dbb7241daf21d2f2c331c50a9dda0 /mm
parent    f00dc0ee5a8e7fd513b05a2446c26be203c05004 (diff)
ksm: trivial tidyups
Add NUMA() and DO_NUMA() macros to minimize blight of #ifdef CONFIG_NUMAs
(but indeed we don't want to expand struct rmap_item by nid when not NUMA).

Add comment, remove "unsigned" from rmap_item->nid, as "int nid" elsewhere.

Define ksm_merge_across_nodes 1U when #ifndef NUMA to help optimizing out.

Use ?: in get_kpfn_nid().

Adjust a few comments noticed in ongoing work.

Leave stable_tree_insert()'s rb_linkage until after the node has been set
up, as unstable_tree_search_insert() does: ksm_thread_mutex and page lock
make either way safe, but we're going to copy and I prefer this precedent.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Petr Holasek <pholasek@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Izik Eidus <izik.eidus@ravellosystems.com>
Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
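For illustration, here is a minimal userspace sketch of the NUMA()/DO_NUMA()
pattern the patch introduces. The macro definitions and the 1U fallback for
ksm_merge_across_nodes match the patch; the cut-down struct rmap_item and the
main() harness are hypothetical stand-ins. Compile once plain and once with
-DCONFIG_NUMA to see both expansions.

    #include <stdio.h>

    /*
     * The pattern from this patch: NUMA-only expressions compile away
     * when CONFIG_NUMA is not set, so call sites need no #ifdefs.
     */
    #ifdef CONFIG_NUMA
    #define NUMA(x)     (x)
    #define DO_NUMA(x)  do { (x); } while (0)
    #else
    #define NUMA(x)     (0)
    #define DO_NUMA(x)  do { } while (0)
    #endif

    /*
     * As in the patch: a plain 1U constant on !CONFIG_NUMA builds lets
     * the compiler optimize out the cross-node code paths entirely.
     */
    #ifdef CONFIG_NUMA
    static unsigned int ksm_merge_across_nodes = 1;
    #else
    #define ksm_merge_across_nodes  1U
    #endif

    /* Hypothetical cut-down stand-in for the kernel's struct rmap_item. */
    struct rmap_item {
        unsigned long address;
    #ifdef CONFIG_NUMA
        int nid;        /* only present on NUMA builds */
    #endif
    };

    int main(void)
    {
        struct rmap_item item = { .address = 0x1000 };

        /*
         * Expands to an empty statement on !CONFIG_NUMA: the tokens
         * "item.nid" are discarded by the preprocessor, so the missing
         * field is never seen by the compiler.
         */
        DO_NUMA(item.nid = 3);

        /* NUMA(x) likewise discards its argument, yielding 0. */
        printf("address: %#lx\n", item.address);
        printf("unstable tree index: %d\n", NUMA(item.nid));
        printf("merge across nodes:  %u\n", ksm_merge_across_nodes);
        return 0;
    }

The payoff over open-coded #ifdefs is visible in the hunks below: the
rb_erase() call and the nid assignments read as single unconditional
statements, yet may reference rmap_item->nid, which only exists under
CONFIG_NUMA.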
Diffstat (limited to 'mm')
-rw-r--r--    mm/ksm.c    48
1 file changed, 22 insertions(+), 26 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index 1602cc9e3d73..e10dc24508f4 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -41,6 +41,14 @@
 #include <asm/tlbflush.h>
 #include "internal.h"
 
+#ifdef CONFIG_NUMA
+#define NUMA(x)		(x)
+#define DO_NUMA(x)	do { (x); } while (0)
+#else
+#define NUMA(x)		(0)
+#define DO_NUMA(x)	do { } while (0)
+#endif
+
 /*
  * A few notes about the KSM scanning process,
  * to make it easier to understand the data structures below:
@@ -130,6 +138,7 @@ struct stable_node {
  * @mm: the memory structure this rmap_item is pointing into
  * @address: the virtual address this rmap_item tracks (+ flags in low bits)
  * @oldchecksum: previous checksum of the page at that virtual address
+ * @nid: NUMA node id of unstable tree in which linked (may not match page)
  * @node: rb node of this rmap_item in the unstable tree
  * @head: pointer to stable_node heading this list in the stable tree
  * @hlist: link into hlist of rmap_items hanging off that stable_node
@@ -141,7 +150,7 @@ struct rmap_item {
 	unsigned long address;		/* + low bits used for flags below */
 	unsigned int oldchecksum;	/* when unstable */
 #ifdef CONFIG_NUMA
-	unsigned int nid;
+	int nid;
 #endif
 	union {
 		struct rb_node node;	/* when node of unstable tree */
@@ -192,8 +201,12 @@ static unsigned int ksm_thread_pages_to_scan = 100;
 /* Milliseconds ksmd should sleep between batches */
 static unsigned int ksm_thread_sleep_millisecs = 20;
 
+#ifdef CONFIG_NUMA
 /* Zeroed when merging across nodes is not allowed */
 static unsigned int ksm_merge_across_nodes = 1;
+#else
+#define ksm_merge_across_nodes	1U
+#endif
 
 #define KSM_RUN_STOP	0
 #define KSM_RUN_MERGE	1
@@ -456,10 +469,7 @@ out: page = NULL;
  */
 static inline int get_kpfn_nid(unsigned long kpfn)
 {
-	if (ksm_merge_across_nodes)
-		return 0;
-	else
-		return pfn_to_nid(kpfn);
+	return ksm_merge_across_nodes ? 0 : pfn_to_nid(kpfn);
 }
 
 static void remove_node_from_stable_tree(struct stable_node *stable_node)
@@ -479,7 +489,6 @@ static void remove_node_from_stable_tree(struct stable_node *stable_node)
 	}
 
 	nid = get_kpfn_nid(stable_node->kpfn);
-
 	rb_erase(&stable_node->node, &root_stable_tree[nid]);
 	free_stable_node(stable_node);
 }
@@ -578,13 +587,8 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
 		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
 		BUG_ON(age > 1);
 		if (!age)
-#ifdef CONFIG_NUMA
 			rb_erase(&rmap_item->node,
-				 &root_unstable_tree[rmap_item->nid]);
-#else
-			rb_erase(&rmap_item->node, &root_unstable_tree[0]);
-#endif
-
+				 &root_unstable_tree[NUMA(rmap_item->nid)]);
 		ksm_pages_unshared--;
 		rmap_item->address &= PAGE_MASK;
 	}
@@ -604,7 +608,7 @@ static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
 }
 
 /*
- * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather
+ * Though it's very tempting to unmerge rmap_items from stable tree rather
  * than check every pte of a given vma, the locking doesn't quite work for
  * that - an rmap_item is assigned to the stable tree after inserting ksm
  * page and upping mmap_sem.  Nor does it fit with the way we skip dup'ing
@@ -1058,7 +1062,7 @@ static struct page *stable_tree_search(struct page *page)
 }
 
 /*
- * stable_tree_insert - insert rmap_item pointing to new ksm page
+ * stable_tree_insert - insert stable tree node pointing to new ksm page
  * into the stable tree.
  *
  * This function returns the stable tree node just allocated on success,
@@ -1108,13 +1112,11 @@ static struct stable_node *stable_tree_insert(struct page *kpage)
 	if (!stable_node)
 		return NULL;
 
-	rb_link_node(&stable_node->node, parent, new);
-	rb_insert_color(&stable_node->node, &root_stable_tree[nid]);
-
 	INIT_HLIST_HEAD(&stable_node->hlist);
-
 	stable_node->kpfn = kpfn;
 	set_page_stable_node(kpage, stable_node);
+	rb_link_node(&stable_node->node, parent, new);
+	rb_insert_color(&stable_node->node, &root_stable_tree[nid]);
 
 	return stable_node;
 }
@@ -1170,8 +1172,6 @@ struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
 		 * If tree_page has been migrated to another NUMA node, it
 		 * will be flushed out and put into the right unstable tree
 		 * next time: only merge with it if merge_across_nodes.
-		 * Just notice, we don't have similar problem for PageKsm
-		 * because their migration is disabled now. (62b61f611e)
 		 */
 		if (!ksm_merge_across_nodes && page_to_nid(tree_page) != nid) {
 			put_page(tree_page);
@@ -1195,9 +1195,7 @@ struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
 
 	rmap_item->address |= UNSTABLE_FLAG;
 	rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
-#ifdef CONFIG_NUMA
-	rmap_item->nid = nid;
-#endif
+	DO_NUMA(rmap_item->nid = nid);
 	rb_link_node(&rmap_item->node, parent, new);
 	rb_insert_color(&rmap_item->node, root);
 
@@ -1213,13 +1211,11 @@ struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
 static void stable_tree_append(struct rmap_item *rmap_item,
 			       struct stable_node *stable_node)
 {
-#ifdef CONFIG_NUMA
 	/*
 	 * Usually rmap_item->nid is already set correctly,
 	 * but it may be wrong after switching merge_across_nodes.
 	 */
-	rmap_item->nid = get_kpfn_nid(stable_node->kpfn);
-#endif
+	DO_NUMA(rmap_item->nid = get_kpfn_nid(stable_node->kpfn));
 	rmap_item->head = stable_node;
 	rmap_item->address |= STABLE_FLAG;
 	hlist_add_head(&rmap_item->hlist, &stable_node->hlist);