author	Hugh Dickins <hugh.dickins@tiscali.co.uk>	2009-09-21 20:02:11 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-22 10:17:32 -0400
commit	473b0ce4d13ee77925a7062e25dea0d16a91f654 (patch)
tree	965433f77af7ddce5d9911e353e13af6db9ea311 /mm/ksm.c
parent	e178dfde3952192cf44eeb0612882f01fc96c0a9 (diff)
ksm: pages_unshared and pages_volatile
The pages_shared and pages_sharing counts give a good picture of how
successful KSM is at sharing; but no clue to how much wasted work it's
doing to get there.  Add pages_unshared (count of unique pages waiting
in the unstable tree, hoping to find a mate) and pages_volatile.

pages_volatile is harder to define.  It includes those pages changing
too fast to get into the unstable tree, but also whatever other edge
conditions prevent a page getting into the trees: a high value may
deserve investigation.  Don't try to calculate it from the various
conditions: it's the total of rmap_items less those accounted for.

Also show full_scans: the number of completed scans of everything
registered in the mm list.

The locking for all these counts is simply ksm_thread_mutex.

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Acked-by: Izik Eidus <ieidus@redhat.com>
Acked-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
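[Editorial note, not part of the patch: the new counters are exposed next to the existing KSM attributes under sysfs, normally /sys/kernel/mm/ksm/.  A minimal userspace sketch follows; the helper name read_ksm_counter() is invented for this illustration, and the sysfs path is assumed rather than taken from the patch.]

/*
 * Illustration only: read the counters added by this patch from sysfs.
 * pages_volatile is derived in-kernel as
 *   rmap_items - pages_shared - pages_sharing - pages_unshared
 * and is clamped to zero if the unlocked read goes transiently negative.
 */
#include <stdio.h>

static long read_ksm_counter(const char *name)
{
	char path[128];
	FILE *f;
	long val = -1;

	/* assumed location of the KSM sysfs directory */
	snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	printf("pages_unshared: %ld\n", read_ksm_counter("pages_unshared"));
	printf("pages_volatile: %ld\n", read_ksm_counter("pages_volatile"));
	printf("full_scans:     %ld\n", read_ksm_counter("full_scans"));
	return 0;
}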
Diffstat (limited to 'mm/ksm.c')
-rw-r--r--	mm/ksm.c	52
1 file changed, 51 insertions(+), 1 deletion(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index ef89526fb862..9f8f0523562b 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -155,6 +155,12 @@ static unsigned long ksm_pages_shared;
 /* The number of page slots additionally sharing those nodes */
 static unsigned long ksm_pages_sharing;
 
+/* The number of nodes in the unstable tree */
+static unsigned long ksm_pages_unshared;
+
+/* The number of rmap_items in use: to calculate pages_volatile */
+static unsigned long ksm_rmap_items;
+
 /* Limit on the number of unswappable pages used */
 static unsigned long ksm_max_kernel_pages;
 
@@ -204,11 +210,17 @@ static void __init ksm_slab_free(void)
 
 static inline struct rmap_item *alloc_rmap_item(void)
 {
-	return kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
+	struct rmap_item *rmap_item;
+
+	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
+	if (rmap_item)
+		ksm_rmap_items++;
+	return rmap_item;
 }
 
 static inline void free_rmap_item(struct rmap_item *rmap_item)
 {
+	ksm_rmap_items--;
 	rmap_item->mm = NULL;	/* debug safety */
 	kmem_cache_free(rmap_item_cache, rmap_item);
 }
@@ -419,6 +431,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
 		BUG_ON(age > 2);
 		if (!age)
 			rb_erase(&rmap_item->node, &root_unstable_tree);
+		ksm_pages_unshared--;
 	}
 
 	rmap_item->address &= PAGE_MASK;
@@ -995,6 +1008,7 @@ static struct rmap_item *unstable_tree_search_insert(struct page *page,
 	rb_link_node(&rmap_item->node, parent, new);
 	rb_insert_color(&rmap_item->node, &root_unstable_tree);
 
+	ksm_pages_unshared++;
 	return NULL;
 }
 
@@ -1091,6 +1105,8 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
 		if (!err) {
 			rb_erase(&tree_rmap_item->node, &root_unstable_tree);
 			tree_rmap_item->address &= ~NODE_FLAG;
+			ksm_pages_unshared--;
+
 			/*
 			 * If we fail to insert the page into the stable tree,
 			 * we will have 2 virtual addresses that are pointing
@@ -1474,6 +1490,37 @@ static ssize_t pages_sharing_show(struct kobject *kobj,
 }
 KSM_ATTR_RO(pages_sharing);
 
+static ssize_t pages_unshared_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n", ksm_pages_unshared);
+}
+KSM_ATTR_RO(pages_unshared);
+
+static ssize_t pages_volatile_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *buf)
+{
+	long ksm_pages_volatile;
+
+	ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
+				- ksm_pages_sharing - ksm_pages_unshared;
+	/*
+	 * It was not worth any locking to calculate that statistic,
+	 * but it might therefore sometimes be negative: conceal that.
+	 */
+	if (ksm_pages_volatile < 0)
+		ksm_pages_volatile = 0;
+	return sprintf(buf, "%ld\n", ksm_pages_volatile);
+}
+KSM_ATTR_RO(pages_volatile);
+
+static ssize_t full_scans_show(struct kobject *kobj,
+			       struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n", ksm_scan.seqnr);
+}
+KSM_ATTR_RO(full_scans);
+
 static struct attribute *ksm_attrs[] = {
 	&sleep_millisecs_attr.attr,
 	&pages_to_scan_attr.attr,
@@ -1481,6 +1528,9 @@ static struct attribute *ksm_attrs[] = {
 	&max_kernel_pages_attr.attr,
 	&pages_shared_attr.attr,
 	&pages_sharing_attr.attr,
+	&pages_unshared_attr.attr,
+	&pages_volatile_attr.attr,
+	&full_scans_attr.attr,
 	NULL,
 };
 