about summary refs log tree commit diff stats
path: root/mm/ksm.c
diff options
context:
space:
mode:
authorHugh Dickins <hugh.dickins@tiscali.co.uk>2009-09-21 20:02:14 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2009-09-22 10:17:32 -0400
commit6e15838425ac855982f10419558649954a0684a3 (patch)
treef39e4a9b845e0bdf8f6b222ca718f155437e5b63 /mm/ksm.c
parent26465d3ea5a62d59efb3796b9e0e2b0656d02cb1 (diff)
ksm: keep quiet while list empty
ksm_scan_thread already sleeps in wait_event_interruptible until setting
ksm_run activates it; but if there's nothing on its list to look at, i.e.
nobody has yet said madvise MADV_MERGEABLE, it's a shame to be clocking
up system time and full_scans: ksmd_should_run added to check that too.

And move the mutex_lock out around it: the new counts showed that when
ksm_run is stopped, a little work often got done afterwards, because it
had been read before taking the mutex.

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Acked-by: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/ksm.c')
-rw-r--r--	mm/ksm.c	28
1 file changed, 22 insertions(+), 6 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index 81f692e836db..2849422448a3 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1280,21 +1280,27 @@ static void ksm_do_scan(unsigned int scan_npages)
 	}
 }
 
+static int ksmd_should_run(void)
+{
+	return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
+}
+
 static int ksm_scan_thread(void *nothing)
 {
 	set_user_nice(current, 5);
 
 	while (!kthread_should_stop()) {
-		if (ksm_run & KSM_RUN_MERGE) {
-			mutex_lock(&ksm_thread_mutex);
+		mutex_lock(&ksm_thread_mutex);
+		if (ksmd_should_run())
 			ksm_do_scan(ksm_thread_pages_to_scan);
-			mutex_unlock(&ksm_thread_mutex);
+		mutex_unlock(&ksm_thread_mutex);
+
+		if (ksmd_should_run()) {
 			schedule_timeout_interruptible(
 				msecs_to_jiffies(ksm_thread_sleep_millisecs));
 		} else {
 			wait_event_interruptible(ksm_thread_wait,
-				(ksm_run & KSM_RUN_MERGE) ||
-				kthread_should_stop());
+				ksmd_should_run() || kthread_should_stop());
 		}
 	}
 	return 0;
@@ -1339,10 +1345,16 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 
 int __ksm_enter(struct mm_struct *mm)
 {
-	struct mm_slot *mm_slot = alloc_mm_slot();
+	struct mm_slot *mm_slot;
+	int needs_wakeup;
+
+	mm_slot = alloc_mm_slot();
 	if (!mm_slot)
 		return -ENOMEM;
 
+	/* Check ksm_run too?  Would need tighter locking */
+	needs_wakeup = list_empty(&ksm_mm_head.mm_list);
+
 	spin_lock(&ksm_mmlist_lock);
 	insert_to_mm_slots_hash(mm, mm_slot);
 	/*
@@ -1354,6 +1366,10 @@ int __ksm_enter(struct mm_struct *mm)
 	spin_unlock(&ksm_mmlist_lock);
 
 	set_bit(MMF_VM_MERGEABLE, &mm->flags);
+
+	if (needs_wakeup)
+		wake_up_interruptible(&ksm_thread_wait);
+
 	return 0;
 }
 