path: root/mm
author	Andrea Arcangeli <aarcange@redhat.com>	2011-01-13 18:47:10 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 20:32:46 -0500
commit	878aee7d6b5504e01b9caffce080e792b6b8d090 (patch)
tree	c4a01a78885c25b6b3b1e0c74af7cb83c98a07c5 /mm
parent	8ee53820edfd1f3b6554c593f337148dd3d7fc91 (diff)
thp: freeze khugepaged and ksmd
It's unclear why scheduler-friendly kernel threads can't simply be parked by the scheduler itself during suspend. Still, it's safer to freeze them explicitly: they can trigger memory allocation, and if kswapd freezes itself to avoid generating I/O, these threads should too.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
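The protocol the patch makes both threads follow is the standard freezer handshake for kernel threads. A minimal sketch of that pattern, independent of this patch — my_thread_wait, my_has_work() and my_do_work() are hypothetical stand-ins for the khugepaged/ksmd specifics:

	#include <linux/kthread.h>
	#include <linux/wait.h>
	#include <linux/freezer.h>

	static DECLARE_WAIT_QUEUE_HEAD(my_thread_wait);	/* hypothetical */
	static bool my_has_work(void);				/* hypothetical */
	static void my_do_work(void);				/* hypothetical */

	static int my_thread_fn(void *unused)
	{
		/* kthreads are ignored by the freezer unless they opt in */
		set_freezable();

		while (!kthread_should_stop()) {
			/* enter the refrigerator here if a freeze is pending */
			try_to_freeze();

			/* long scans should bail out early once a freeze starts */
			while (my_has_work() && !freezing(current))
				my_do_work();

			/* sleep, but wake and freeze when the freezer asks */
			wait_event_freezable(my_thread_wait,
					     my_has_work() || kthread_should_stop());
		}
		return 0;
	}

The three pieces map directly onto the hunks below: set_freezable() in each thread function, try_to_freeze() (or a freezing(current) check) in the scan loops, and wait_event_freezable() replacing wait_event_interruptible() in the idle waits.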
Diffstat (limited to 'mm')
-rw-r--r--	mm/huge_memory.c	14
-rw-r--r--	mm/ksm.c	8
2 files changed, 18 insertions(+), 4 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 39d7df40c067..45b6d53bcfbc 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -15,6 +15,7 @@
 #include <linux/mm_inline.h>
 #include <linux/kthread.h>
 #include <linux/khugepaged.h>
+#include <linux/freezer.h>
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
 #include "internal.h"
@@ -2085,6 +2086,9 @@ static void khugepaged_do_scan(struct page **hpage)
 		break;
 #endif
 
+		if (unlikely(kthread_should_stop() || freezing(current)))
+			break;
+
 		spin_lock(&khugepaged_mm_lock);
 		if (!khugepaged_scan.mm_slot)
 			pass_through_head++;
@@ -2147,6 +2151,9 @@ static void khugepaged_loop(void)
 		if (hpage)
 			put_page(hpage);
 #endif
+		try_to_freeze();
+		if (unlikely(kthread_should_stop()))
+			break;
 		if (khugepaged_has_work()) {
 			DEFINE_WAIT(wait);
 			if (!khugepaged_scan_sleep_millisecs)
@@ -2157,8 +2164,8 @@ static void khugepaged_loop(void)
 					     khugepaged_scan_sleep_millisecs));
 			remove_wait_queue(&khugepaged_wait, &wait);
 		} else if (khugepaged_enabled())
-			wait_event_interruptible(khugepaged_wait,
-						 khugepaged_wait_event());
+			wait_event_freezable(khugepaged_wait,
+					     khugepaged_wait_event());
 	}
 }
 
@@ -2166,6 +2173,7 @@ static int khugepaged(void *none)
 {
 	struct mm_slot *mm_slot;
 
+	set_freezable();
 	set_user_nice(current, 19);
 
 	/* serialize with start_khugepaged() */
@@ -2180,6 +2188,8 @@ static int khugepaged(void *none)
 		mutex_lock(&khugepaged_mutex);
 		if (!khugepaged_enabled())
 			break;
+		if (unlikely(kthread_should_stop()))
+			break;
 	}
 
 	spin_lock(&khugepaged_mm_lock);
diff --git a/mm/ksm.c b/mm/ksm.c
index 5e7d5d35ea82..e2b0afd0a031 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -34,6 +34,7 @@
 #include <linux/swap.h>
 #include <linux/ksm.h>
 #include <linux/hash.h>
+#include <linux/freezer.h>
 
 #include <asm/tlbflush.h>
 #include "internal.h"
@@ -1365,7 +1366,7 @@ static void ksm_do_scan(unsigned int scan_npages)
 	struct rmap_item *rmap_item;
 	struct page *uninitialized_var(page);
 
-	while (scan_npages--) {
+	while (scan_npages-- && likely(!freezing(current))) {
 		cond_resched();
 		rmap_item = scan_get_next_rmap_item(&page);
 		if (!rmap_item)
@@ -1383,6 +1384,7 @@ static int ksmd_should_run(void)
 
 static int ksm_scan_thread(void *nothing)
 {
+	set_freezable();
 	set_user_nice(current, 5);
 
 	while (!kthread_should_stop()) {
@@ -1391,11 +1393,13 @@ static int ksm_scan_thread(void *nothing)
 			ksm_do_scan(ksm_thread_pages_to_scan);
 		mutex_unlock(&ksm_thread_mutex);
 
+		try_to_freeze();
+
 		if (ksmd_should_run()) {
 			schedule_timeout_interruptible(
 				msecs_to_jiffies(ksm_thread_sleep_millisecs));
 		} else {
-			wait_event_interruptible(ksm_thread_wait,
-				ksmd_should_run() || kthread_should_stop());
+			wait_event_freezable(ksm_thread_wait,
+				ksmd_should_run() || kthread_should_stop());
 		}
 	}