author		Hugh Dickins <hugh.dickins@tiscali.co.uk>	2009-12-14 20:59:19 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-15 11:53:19 -0500
commit		6514d511dbe5a77b4bdc0a7e26fd679585112e1e (patch)
tree		a3fcf75191b7900043172d147fc7ed8ea97f3b4a /mm/ksm.c
parent		8dd3557a52f0bc8c960307721da307370ccad6fd (diff)
ksm: singly-linked rmap_list
Free up a pointer in struct rmap_item, by making the mm_slot's rmap_list
a singly-linked list: we always traverse that list sequentially, and we
don't even lose any prefetches (but should consider adding a few later).
Name it rmap_list throughout.

Do we need to free up that pointer?  Not immediately, and in the end, we
could continue to avoid it with a union; but having done the conversion,
let's keep it this way, since there's no downside, and maybe we'll want
more in future (struct rmap_item is a cache-friendly 32 bytes on 32-bit
and 64 bytes on 64-bit, so we shall want to avoid expanding it).

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
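For readers unfamiliar with the idiom the conversion rests on: the scan
cursor becomes a struct rmap_item **, a pointer to whichever link
currently points at the item of interest, so unlinking needs no prev
pointer and no list_head. A minimal userspace sketch follows (the names
echo ksm.c, but this is illustrative code, not from the kernel):

    #include <stdio.h>
    #include <stdlib.h>

    struct rmap_item {
    	struct rmap_item *rmap_list;	/* next item; NULL terminates */
    	unsigned long address;
    };

    /* Mirrors the shape of the patch's remove_trailing_rmap_items()
     * loop: *rmap_list is always the link that points at the current
     * item, so unlinking it is a single store.
     */
    static void remove_trailing(struct rmap_item **rmap_list)
    {
    	while (*rmap_list) {
    		struct rmap_item *rmap_item = *rmap_list;
    		*rmap_list = rmap_item->rmap_list;	/* unlink */
    		free(rmap_item);
    	}
    }

    int main(void)
    {
    	struct rmap_item *head = NULL;
    	struct rmap_item **tail = &head;
    	unsigned long addr;

    	/* Build 0 -> 1 -> 2 -> 3, appending through the same cursor. */
    	for (addr = 0; addr < 4; addr++) {
    		struct rmap_item *item = calloc(1, sizeof(*item));
    		if (!item)
    			return 1;
    		item->address = addr;
    		*tail = item;
    		tail = &item->rmap_list;
    	}

    	remove_trailing(&head->rmap_list);	/* keep only item 0 */
    	printf("remaining head address: %lu\n", head->address);
    	free(head);
    	return 0;
    }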
Diffstat (limited to 'mm/ksm.c')
-rw-r--r--	mm/ksm.c	56
1 file changed, 26 insertions(+), 30 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index 54fb3feebb59..e8e9a2bca809 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -79,13 +79,13 @@
  * struct mm_slot - ksm information per mm that is being scanned
  * @link: link to the mm_slots hash list
  * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
- * @rmap_list: head for this mm_slot's list of rmap_items
+ * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
  * @mm: the mm that this information is valid for
  */
 struct mm_slot {
 	struct hlist_node link;
 	struct list_head mm_list;
-	struct list_head rmap_list;
+	struct rmap_item *rmap_list;
 	struct mm_struct *mm;
 };
 
@@ -93,7 +93,7 @@ struct mm_slot {
  * struct ksm_scan - cursor for scanning
  * @mm_slot: the current mm_slot we are scanning
  * @address: the next address inside that to be scanned
- * @rmap_item: the current rmap that we are scanning inside the rmap_list
+ * @rmap_list: link to the next rmap to be scanned in the rmap_list
  * @seqnr: count of completed full scans (needed when removing unstable node)
  *
  * There is only the one ksm_scan instance of this cursor structure.
@@ -101,13 +101,14 @@ struct mm_slot {
 struct ksm_scan {
 	struct mm_slot *mm_slot;
 	unsigned long address;
-	struct rmap_item *rmap_item;
+	struct rmap_item **rmap_list;
 	unsigned long seqnr;
 };
 
 /**
  * struct rmap_item - reverse mapping item for virtual addresses
- * @link: link into mm_slot's rmap_list (rmap_list is per mm)
+ * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
+ * @filler: unused space we're making available in this patch
  * @mm: the memory structure this rmap_item is pointing into
  * @address: the virtual address this rmap_item tracks (+ flags in low bits)
  * @oldchecksum: previous checksum of the page at that virtual address
@@ -116,7 +117,8 @@ struct ksm_scan {
  * @prev: previous rmap_item hanging off the same node of the stable tree
  */
 struct rmap_item {
-	struct list_head link;
+	struct rmap_item *rmap_list;
+	unsigned long filler;
 	struct mm_struct *mm;
 	unsigned long address;		/* + low bits used for flags below */
 	union {
@@ -275,7 +277,6 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
 	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
 						% MM_SLOTS_HASH_HEADS];
 	mm_slot->mm = mm;
-	INIT_LIST_HEAD(&mm_slot->rmap_list);
 	hlist_add_head(&mm_slot->link, bucket);
 }
 
@@ -479,15 +480,12 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
 }
 
 static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
-				       struct list_head *cur)
+				       struct rmap_item **rmap_list)
 {
-	struct rmap_item *rmap_item;
-
-	while (cur != &mm_slot->rmap_list) {
-		rmap_item = list_entry(cur, struct rmap_item, link);
-		cur = cur->next;
+	while (*rmap_list) {
+		struct rmap_item *rmap_item = *rmap_list;
+		*rmap_list = rmap_item->rmap_list;
 		remove_rmap_item_from_tree(rmap_item);
-		list_del(&rmap_item->link);
 		free_rmap_item(rmap_item);
 	}
 }
@@ -553,7 +551,7 @@ static int unmerge_and_remove_all_rmap_items(void)
 		goto error;
 	}
 
-	remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next);
+	remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
 
 	spin_lock(&ksm_mmlist_lock);
 	ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
@@ -1141,20 +1139,19 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
 }
 
 static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
-					    struct list_head *cur,
+					    struct rmap_item **rmap_list,
 					    unsigned long addr)
 {
 	struct rmap_item *rmap_item;
 
-	while (cur != &mm_slot->rmap_list) {
-		rmap_item = list_entry(cur, struct rmap_item, link);
+	while (*rmap_list) {
+		rmap_item = *rmap_list;
 		if ((rmap_item->address & PAGE_MASK) == addr)
 			return rmap_item;
 		if (rmap_item->address > addr)
 			break;
-		cur = cur->next;
+		*rmap_list = rmap_item->rmap_list;
 		remove_rmap_item_from_tree(rmap_item);
-		list_del(&rmap_item->link);
 		free_rmap_item(rmap_item);
 	}
 
@@ -1163,7 +1160,8 @@ static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
 		/* It has already been zeroed */
 		rmap_item->mm = mm_slot->mm;
 		rmap_item->address = addr;
-		list_add_tail(&rmap_item->link, cur);
+		rmap_item->rmap_list = *rmap_list;
+		*rmap_list = rmap_item;
 	}
 	return rmap_item;
 }
@@ -1188,8 +1186,7 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
 		spin_unlock(&ksm_mmlist_lock);
 next_mm:
 		ksm_scan.address = 0;
-		ksm_scan.rmap_item = list_entry(&slot->rmap_list,
-						struct rmap_item, link);
+		ksm_scan.rmap_list = &slot->rmap_list;
 	}
 
 	mm = slot->mm;
@@ -1215,10 +1212,10 @@ next_mm:
 				flush_anon_page(vma, *page, ksm_scan.address);
 				flush_dcache_page(*page);
 				rmap_item = get_next_rmap_item(slot,
-					ksm_scan.rmap_item->link.next,
-					ksm_scan.address);
+					ksm_scan.rmap_list, ksm_scan.address);
 				if (rmap_item) {
-					ksm_scan.rmap_item = rmap_item;
+					ksm_scan.rmap_list =
+							&rmap_item->rmap_list;
 					ksm_scan.address += PAGE_SIZE;
 				} else
 					put_page(*page);
@@ -1234,14 +1231,13 @@ next_mm:
 
 	if (ksm_test_exit(mm)) {
 		ksm_scan.address = 0;
-		ksm_scan.rmap_item = list_entry(&slot->rmap_list,
-						struct rmap_item, link);
+		ksm_scan.rmap_list = &slot->rmap_list;
 	}
 	/*
 	 * Nuke all the rmap_items that are above this current rmap:
 	 * because there were no VM_MERGEABLE vmas with such addresses.
 	 */
-	remove_trailing_rmap_items(slot, ksm_scan.rmap_item->link.next);
+	remove_trailing_rmap_items(slot, ksm_scan.rmap_list);
 
 	spin_lock(&ksm_mmlist_lock);
 	ksm_scan.mm_slot = list_entry(slot->mm_list.next,
@@ -1423,7 +1419,7 @@ void __ksm_exit(struct mm_struct *mm)
 	spin_lock(&ksm_mmlist_lock);
 	mm_slot = get_mm_slot(mm);
 	if (mm_slot && ksm_scan.mm_slot != mm_slot) {
-		if (list_empty(&mm_slot->rmap_list)) {
+		if (!mm_slot->rmap_list) {
 			hlist_del(&mm_slot->link);
 			list_del(&mm_slot->mm_list);
 			easy_to_free = 1;
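
The same double-pointer cursor is what lets get_next_rmap_item() above
splice a new rmap_item in at the scan position with two stores, whether
the cursor sits at the mm_slot's head pointer or at some item's
rmap_list field. An illustrative standalone rendering of that insertion
step (insert_at is a made-up name, not a kernel function):

    #include <stdlib.h>

    struct rmap_item {
    	struct rmap_item *rmap_list;	/* next item; NULL terminates */
    	unsigned long address;
    };

    /* Insert at the cursor: *rmap_list is the link that should point
     * at the new item, so there is no head/middle/tail special case --
     * the same shape as the tail of get_next_rmap_item() in the patch.
     */
    static struct rmap_item *insert_at(struct rmap_item **rmap_list,
    				   unsigned long addr)
    {
    	struct rmap_item *rmap_item = calloc(1, sizeof(*rmap_item));

    	if (rmap_item) {
    		rmap_item->address = addr;
    		rmap_item->rmap_list = *rmap_list;	/* chain successor */
    		*rmap_list = rmap_item;			/* link new item */
    	}
    	return rmap_item;
    }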