author	Hugh Dickins <hugh.dickins@tiscali.co.uk>	2009-09-21 20:02:09 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-22 10:17:32 -0400
commit	b4028260334e1ecf63fb5e0a95d65bb2db02c1ec (patch)
tree	0fbfe22a83f4fcb0e47be45374b581f49967085e /mm/ksm.c
parent	339aa62469f65daf38a01d6c098b5f3ff8016653 (diff)
ksm: rename kernel_pages_allocated
We're not implementing swapping of KSM pages in its first release; but when
that follows, "kernel_pages_allocated" will be a very poor name for the sysfs
file showing number of nodes in the stable tree: rename that to "pages_shared"
throughout.

But we already have a "pages_shared", counting those page slots sharing the
shared pages: first rename that to... "pages_sharing".

What will become of "max_kernel_pages" when the pages shared can be swapped?
I guess it will just be removed, so keep that name.

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Acked-by: Izik Eidus <ieidus@redhat.com>
Acked-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
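To make the renamed counters concrete: after this patch, merging n identical
pages down to one KSM page reads back from sysfs as pages_shared = 1 and
pages_sharing = n - 1 (the show function subtracts the stable-tree node itself
from the internal count). A minimal userspace sketch of reading the renamed
files, assuming a kernel carrying this patch with KSM's sysfs directory at
/sys/kernel/mm/ksm; the read_counter helper is illustrative, not part of the
patch:

/* ksm_stats.c - userspace sketch, not part of this patch */
#include <stdio.h>

static long read_counter(const char *name)
{
	char path[128];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	long shared = read_counter("pages_shared");	/* stable tree nodes */
	long sharing = read_counter("pages_sharing");	/* extra slots saved */

	if (shared < 0 || sharing < 0) {
		fprintf(stderr, "KSM sysfs counters not available\n");
		return 1;
	}
	printf("pages_shared:  %ld (KSM pages backing shared content)\n", shared);
	printf("pages_sharing: %ld (page slots saved by sharing them)\n", sharing);
	return 0;
}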
Diffstat (limited to 'mm/ksm.c')
-rw-r--r--	mm/ksm.c	57
1 file changed, 28 insertions(+), 29 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index cf072c54df32..46b4c522ea4f 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -150,10 +150,10 @@ static struct kmem_cache *rmap_item_cache;
 static struct kmem_cache *mm_slot_cache;
 
 /* The number of nodes in the stable tree */
-static unsigned long ksm_kernel_pages_allocated;
+static unsigned long ksm_pages_shared;
 
 /* The number of page slots sharing those nodes */
-static unsigned long ksm_pages_shared;
+static unsigned long ksm_pages_sharing;
 
 /* Limit on the number of unswappable pages used */
 static unsigned long ksm_max_kernel_pages;
@@ -384,7 +384,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
 			next_item->address |= NODE_FLAG;
 		} else {
 			rb_erase(&rmap_item->node, &root_stable_tree);
-			ksm_kernel_pages_allocated--;
+			ksm_pages_shared--;
 		}
 	} else {
 		struct rmap_item *prev_item = rmap_item->prev;
@@ -398,7 +398,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
 		}
 
 		rmap_item->next = NULL;
-		ksm_pages_shared--;
+		ksm_pages_sharing--;
 
 	} else if (rmap_item->address & NODE_FLAG) {
 		unsigned char age;
@@ -748,7 +748,7 @@ static int try_to_merge_two_pages(struct mm_struct *mm1, unsigned long addr1,
 	 * is the number of kernel pages that we hold.
 	 */
 	if (ksm_max_kernel_pages &&
-	    ksm_max_kernel_pages <= ksm_kernel_pages_allocated)
+	    ksm_max_kernel_pages <= ksm_pages_shared)
 		return err;
 
 	kpage = alloc_page(GFP_HIGHUSER);
@@ -787,7 +787,7 @@ static int try_to_merge_two_pages(struct mm_struct *mm1, unsigned long addr1,
 		if (err)
 			break_cow(mm1, addr1);
 		else
-			ksm_pages_shared += 2;
+			ksm_pages_sharing += 2;
 	}
 
 	put_page(kpage);
@@ -817,7 +817,7 @@ static int try_to_merge_with_ksm_page(struct mm_struct *mm1,
 	up_read(&mm1->mmap_sem);
 
 	if (!err)
-		ksm_pages_shared++;
+		ksm_pages_sharing++;
 
 	return err;
 }
@@ -928,7 +928,7 @@ static struct rmap_item *stable_tree_insert(struct page *page,
 		}
 	}
 
-	ksm_kernel_pages_allocated++;
+	ksm_pages_shared++;
 
 	rmap_item->address |= NODE_FLAG | STABLE_FLAG;
 	rmap_item->next = NULL;
@@ -1044,7 +1044,7 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
 	tree_rmap_item = stable_tree_search(page, page2, rmap_item);
 	if (tree_rmap_item) {
 		if (page == page2[0]) {			/* forked */
-			ksm_pages_shared++;
+			ksm_pages_sharing++;
 			err = 0;
 		} else
 			err = try_to_merge_with_ksm_page(rmap_item->mm,
@@ -1107,7 +1107,7 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
 			break_cow(tree_rmap_item->mm,
 					tree_rmap_item->address);
 			break_cow(rmap_item->mm, rmap_item->address);
-			ksm_pages_shared -= 2;
+			ksm_pages_sharing -= 2;
 		}
 	}
 
@@ -1423,7 +1423,7 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
 	/*
 	 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
 	 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
-	 * breaking COW to free the kernel_pages_allocated (but leaves
+	 * breaking COW to free the unswappable pages_shared (but leaves
 	 * mm_slots on the list for when ksmd may be set running again).
 	 */
 
@@ -1442,22 +1442,6 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
 }
 KSM_ATTR(run);
 
-static ssize_t pages_shared_show(struct kobject *kobj,
-				 struct kobj_attribute *attr, char *buf)
-{
-	return sprintf(buf, "%lu\n",
-		       ksm_pages_shared - ksm_kernel_pages_allocated);
-}
-KSM_ATTR_RO(pages_shared);
-
-static ssize_t kernel_pages_allocated_show(struct kobject *kobj,
-					   struct kobj_attribute *attr,
-					   char *buf)
-{
-	return sprintf(buf, "%lu\n", ksm_kernel_pages_allocated);
-}
-KSM_ATTR_RO(kernel_pages_allocated);
-
 static ssize_t max_kernel_pages_store(struct kobject *kobj,
 				      struct kobj_attribute *attr,
 				      const char *buf, size_t count)
@@ -1481,13 +1465,28 @@ static ssize_t max_kernel_pages_show(struct kobject *kobj,
 }
 KSM_ATTR(max_kernel_pages);
 
+static ssize_t pages_shared_show(struct kobject *kobj,
+				 struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n", ksm_pages_shared);
+}
+KSM_ATTR_RO(pages_shared);
+
+static ssize_t pages_sharing_show(struct kobject *kobj,
+				  struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n",
+		       ksm_pages_sharing - ksm_pages_shared);
+}
+KSM_ATTR_RO(pages_sharing);
+
 static struct attribute *ksm_attrs[] = {
 	&sleep_millisecs_attr.attr,
 	&pages_to_scan_attr.attr,
 	&run_attr.attr,
-	&pages_shared_attr.attr,
-	&kernel_pages_allocated_attr.attr,
 	&max_kernel_pages_attr.attr,
+	&pages_shared_attr.attr,
+	&pages_sharing_attr.attr,
 	NULL,
 };
 
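For reference, the accounting the hunks above implement can be distilled into
a toy model (a sketch for illustration, not kernel code; the model_* names are
invented): ksm_pages_shared counts stable-tree nodes, ksm_pages_sharing
internally counts every slot including the node itself, and the sysfs file
reports the difference.

/* Toy model of the renamed accounting, distilled from the hunks above. */
static unsigned long ksm_pages_shared;	/* nodes in the stable tree */
static unsigned long ksm_pages_sharing;	/* all slots using those nodes */

/* First merge: two identical pages become one stable-tree node. */
static void model_merge_two_pages(void)
{
	ksm_pages_shared++;	/* as in stable_tree_insert() */
	ksm_pages_sharing += 2;	/* as in try_to_merge_two_pages() */
}

/* Later merges: another page joins an existing KSM page. */
static void model_merge_with_ksm_page(void)
{
	ksm_pages_sharing++;	/* as in try_to_merge_with_ksm_page() */
}

/* What pages_sharing_show() reports: slots beyond the node itself. */
static unsigned long model_sysfs_pages_sharing(void)
{
	return ksm_pages_sharing - ksm_pages_shared;
}

So merging five identical pages amounts to one model_merge_two_pages() call
and three model_merge_with_ksm_page() calls: pages_shared then reads 1 and
pages_sharing reads 4, i.e. four page frames saved.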