path: root/mm
author     Hugh Dickins <hugh.dickins@tiscali.co.uk>  2009-09-21 20:02:17 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-09-22 10:17:32 -0400
commit     cd551f97519d35855be5a8720a47cc802ee4fd06 (patch)
tree       8363f75f66efaf725de9ee7de781efbc2c502407 /mm
parent     d952b79136a6c32a3f97e0628ca78340f1d5c6f9 (diff)
ksm: distribute remove_mm_from_lists
Do some housekeeping in ksm.c, to help make the next patch easier to
understand: remove the function remove_mm_from_lists, distributing its
code to its callsites scan_get_next_rmap_item and __ksm_exit.

That turns out to be a win in scan_get_next_rmap_item: move its
remove_trailing_rmap_items and cursor advancement up, and it becomes
simpler than before.  __ksm_exit becomes messier, but will change
again; and moving its remove_trailing_rmap_items up lets us strengthen
the unstable tree item's age condition in remove_rmap_item_from_tree.

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Acked-by: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
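The "age condition" mentioned above is the wraparound-safe check in
remove_rmap_item_from_tree, tightened by this patch from BUG_ON(age > 2)
to BUG_ON(age > 1).  A minimal user-space sketch of that check; only the
cast-and-compare idiom is taken from the diff below, the values and
variable names here are invented:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Stand-ins for ksm_scan.seqnr and for the seqnr stamp that
	 * KSM keeps in the low bits of rmap_item->address when the
	 * item enters the unstable tree (values chosen to straddle
	 * a byte boundary and show the wraparound). */
	unsigned long seqnr = 0x100;	/* current scan number */
	unsigned long stamp = 0x0ff;	/* stamp at insertion  */

	/* The cast to unsigned char makes the subtraction wrap, so
	 * the age comes out right even when seqnr overflows the
	 * byte: 0x100 - 0x0ff yields 1, not a huge number. */
	unsigned char age = (unsigned char)(seqnr - stamp);

	/* With remove_trailing_rmap_items now run before the scan
	 * cursor moves on, an item can be at most one full scan
	 * behind, which is what allows the tighter BUG_ON. */
	assert(age <= 1);
	printf("age = %u\n", (unsigned)age);
	return 0;
}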
Diffstat (limited to 'mm')
-rw-r--r--	mm/ksm.c	97
1 file changed, 42 insertions(+), 55 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index d9e3cfcc150c..7e4d255dadc0 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -444,14 +444,9 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
 	 * But __ksm_exit has to be careful: do the rb_erase
 	 * if it's interrupting a scan, and this rmap_item was
 	 * inserted by this scan rather than left from before.
-	 *
-	 * Because of the case in which remove_mm_from_lists
-	 * increments seqnr before removing rmaps, unstable_nr
-	 * may even be 2 behind seqnr, but should never be
-	 * further behind. Yes, I did have trouble with this!
 	 */
 	age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
-	BUG_ON(age > 2);
+	BUG_ON(age > 1);
 	if (!age)
 		rb_erase(&rmap_item->node, &root_unstable_tree);
 	ksm_pages_unshared--;
@@ -546,37 +541,6 @@ out:
 	return err;
 }
 
-static void remove_mm_from_lists(struct mm_struct *mm)
-{
-	struct mm_slot *mm_slot;
-
-	spin_lock(&ksm_mmlist_lock);
-	mm_slot = get_mm_slot(mm);
-
-	/*
-	 * This mm_slot is always at the scanning cursor when we're
-	 * called from scan_get_next_rmap_item; but it's a special
-	 * case when we're called from __ksm_exit.
-	 */
-	if (ksm_scan.mm_slot == mm_slot) {
-		ksm_scan.mm_slot = list_entry(
-			mm_slot->mm_list.next, struct mm_slot, mm_list);
-		ksm_scan.address = 0;
-		ksm_scan.rmap_item = list_entry(
-			&ksm_scan.mm_slot->rmap_list, struct rmap_item, link);
-		if (ksm_scan.mm_slot == &ksm_mm_head)
-			ksm_scan.seqnr++;
-	}
-
-	hlist_del(&mm_slot->link);
-	list_del(&mm_slot->mm_list);
-	spin_unlock(&ksm_mmlist_lock);
-
-	remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next);
-	free_mm_slot(mm_slot);
-	clear_bit(MMF_VM_MERGEABLE, &mm->flags);
-}
-
 static u32 calc_checksum(struct page *page)
 {
 	u32 checksum;
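Both the removed function and the reworked __ksm_exit further down park
ksm_scan.rmap_item on the list head itself via
list_entry(&...->rmap_list, struct rmap_item, link).  A reduced sketch
of why that container_of arithmetic yields a usable sentinel cursor;
the structs here are simplified, not the kernel's definitions:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Simplified: the real rmap_item has more fields around 'link'. */
struct rmap_item {
	struct list_head link;
	unsigned long address;
};

/* The kernel's list_entry is container_of; this is its core. */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct list_head rmap_list = { &rmap_list, &rmap_list };

	/* Pointing list_entry at the head makes a sentinel whose
	 * ->link aliases the head; the scanner only ever follows
	 * cursor->link.next from it, so no other (nonexistent)
	 * field is dereferenced. */
	struct rmap_item *cursor =
			list_entry(&rmap_list, struct rmap_item, link);

	printf("sentinel aliases head: %d\n",
	       &cursor->link == &rmap_list);
	return 0;
}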
@@ -1241,33 +1205,31 @@ next_mm:
 		}
 	}
 
-	if (!ksm_scan.address) {
-		/*
-		 * We've completed a full scan of all vmas, holding mmap_sem
-		 * throughout, and found no VM_MERGEABLE: so do the same as
-		 * __ksm_exit does to remove this mm from all our lists now.
-		 */
-		remove_mm_from_lists(mm);
-		up_read(&mm->mmap_sem);
-		slot = ksm_scan.mm_slot;
-		if (slot != &ksm_mm_head)
-			goto next_mm;
-		return NULL;
-	}
-
 	/*
 	 * Nuke all the rmap_items that are above this current rmap:
 	 * because there were no VM_MERGEABLE vmas with such addresses.
 	 */
 	remove_trailing_rmap_items(slot, ksm_scan.rmap_item->link.next);
-	up_read(&mm->mmap_sem);
 
 	spin_lock(&ksm_mmlist_lock);
-	slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
-	ksm_scan.mm_slot = slot;
+	ksm_scan.mm_slot = list_entry(slot->mm_list.next,
+						struct mm_slot, mm_list);
+	if (ksm_scan.address == 0) {
+		/*
+		 * We've completed a full scan of all vmas, holding mmap_sem
+		 * throughout, and found no VM_MERGEABLE: so do the same as
+		 * __ksm_exit does to remove this mm from all our lists now.
+		 */
+		hlist_del(&slot->link);
+		list_del(&slot->mm_list);
+		free_mm_slot(slot);
+		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
+	}
 	spin_unlock(&ksm_mmlist_lock);
+	up_read(&mm->mmap_sem);
 
 	/* Repeat until we've completed scanning the whole list */
+	slot = ksm_scan.mm_slot;
 	if (slot != &ksm_mm_head)
 		goto next_mm;
 
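The hunk above pulls the cursor advance into the same ksm_mmlist_lock
section that may free the slot, and advances before unlinking and
freeing, so ksm_scan.mm_slot never points at freed memory.  A toy
doubly-linked-list version of that ordering; user-space stand-ins, not
the kernel's list.h:

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next, *prev; int id; };

static void unlink_node(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

int main(void)
{
	/* head <-> a <-> b, circular like the mm_list. */
	struct node head = { &head, &head, 0 };
	struct node *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));
	a->id = 1; b->id = 2;
	head.next = a; a->prev = &head; a->next = b;
	b->prev = a; b->next = &head; head.prev = b;

	struct node *cursor = a;

	/* Advance the cursor FIRST, then unlink and free the node
	 * it was on: the same order the patch uses for
	 * ksm_scan.mm_slot. */
	cursor = cursor->next;	/* like list_entry(slot->mm_list.next, ...) */
	unlink_node(a);		/* like list_del(&slot->mm_list)  */
	free(a);		/* like free_mm_slot(slot)        */

	printf("cursor now at id %d\n", cursor->id);	/* prints 2 */
	free(b);
	return 0;
}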
@@ -1408,13 +1370,38 @@ int __ksm_enter(struct mm_struct *mm)
 
 void __ksm_exit(struct mm_struct *mm)
 {
+	struct mm_slot *mm_slot;
+
 	/*
 	 * This process is exiting: doesn't hold and doesn't need mmap_sem;
 	 * but we do need to exclude ksmd and other exiters while we modify
 	 * the various lists and trees.
 	 */
 	mutex_lock(&ksm_thread_mutex);
-	remove_mm_from_lists(mm);
+	spin_lock(&ksm_mmlist_lock);
+	mm_slot = get_mm_slot(mm);
+	if (!list_empty(&mm_slot->rmap_list)) {
+		spin_unlock(&ksm_mmlist_lock);
+		remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next);
+		spin_lock(&ksm_mmlist_lock);
+	}
+
+	if (ksm_scan.mm_slot == mm_slot) {
+		ksm_scan.mm_slot = list_entry(
+			mm_slot->mm_list.next, struct mm_slot, mm_list);
+		ksm_scan.address = 0;
+		ksm_scan.rmap_item = list_entry(
+			&ksm_scan.mm_slot->rmap_list, struct rmap_item, link);
+		if (ksm_scan.mm_slot == &ksm_mm_head)
+			ksm_scan.seqnr++;
+	}
+
+	hlist_del(&mm_slot->link);
+	list_del(&mm_slot->mm_list);
+	spin_unlock(&ksm_mmlist_lock);
+
+	free_mm_slot(mm_slot);
+	clear_bit(MMF_VM_MERGEABLE, &mm->flags);
 	mutex_unlock(&ksm_thread_mutex);
 }
 
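In the new __ksm_exit, remove_trailing_rmap_items does tree and page
work that was never done under ksm_mmlist_lock (the old code called it
only after spin_unlock), so the spinlock is dropped around it and
retaken; the ksm_thread_mutex held across the whole function is what
makes the drop safe against ksmd and other exiters.  A rough pthread
analogue of that shape; the mutex stands in for the spinlock and all
names here are invented:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mmlist_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for remove_trailing_rmap_items(): too much work to do
 * while holding the list lock. */
static void remove_trailing_items(void)
{
	puts("removing trailing rmap_items outside the lock");
}

static int rmap_list_nonempty = 1;	/* illustrative state */

int main(void)
{
	pthread_mutex_lock(&mmlist_lock);
	if (rmap_list_nonempty) {
		/* Same shape as __ksm_exit after this patch: drop
		 * the list lock for the heavyweight call, then
		 * retake it before unlinking the mm_slot.  An outer
		 * mutex (ksm_thread_mutex in the kernel) keeps the
		 * slot from vanishing while the lock is dropped. */
		pthread_mutex_unlock(&mmlist_lock);
		remove_trailing_items();
		pthread_mutex_lock(&mmlist_lock);
	}
	/* ... hlist_del / list_del equivalents would go here ... */
	pthread_mutex_unlock(&mmlist_lock);
	return 0;
}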