about summary refs log tree commit diff stats
path: root/mm/huge_memory.c
diff options
context:
space:
mode:
authorAndrea Arcangeli <aarcange@redhat.com>2011-01-13 18:47:10 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2011-01-13 20:32:46 -0500
commit878aee7d6b5504e01b9caffce080e792b6b8d090 (patch)
treec4a01a78885c25b6b3b1e0c74af7cb83c98a07c5 /mm/huge_memory.c
parent8ee53820edfd1f3b6554c593f337148dd3d7fc91 (diff)
thp: freeze khugepaged and ksmd
It's unclear why schedule-friendly kernel threads can't be taken away by the CPU through the scheduler itself. It's safer to stop them, as they can trigger memory allocation; since kswapd also freezes itself to avoid generating I/O, they should too.
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--mm/huge_memory.c14
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 39d7df40c067..45b6d53bcfbc 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -15,6 +15,7 @@
15#include <linux/mm_inline.h> 15#include <linux/mm_inline.h>
16#include <linux/kthread.h> 16#include <linux/kthread.h>
17#include <linux/khugepaged.h> 17#include <linux/khugepaged.h>
18#include <linux/freezer.h>
18#include <asm/tlb.h> 19#include <asm/tlb.h>
19#include <asm/pgalloc.h> 20#include <asm/pgalloc.h>
20#include "internal.h" 21#include "internal.h"
@@ -2085,6 +2086,9 @@ static void khugepaged_do_scan(struct page **hpage)
2085 break; 2086 break;
2086#endif 2087#endif
2087 2088
2089 if (unlikely(kthread_should_stop() || freezing(current)))
2090 break;
2091
2088 spin_lock(&khugepaged_mm_lock); 2092 spin_lock(&khugepaged_mm_lock);
2089 if (!khugepaged_scan.mm_slot) 2093 if (!khugepaged_scan.mm_slot)
2090 pass_through_head++; 2094 pass_through_head++;
@@ -2147,6 +2151,9 @@ static void khugepaged_loop(void)
2147 if (hpage) 2151 if (hpage)
2148 put_page(hpage); 2152 put_page(hpage);
2149#endif 2153#endif
2154 try_to_freeze();
2155 if (unlikely(kthread_should_stop()))
2156 break;
2150 if (khugepaged_has_work()) { 2157 if (khugepaged_has_work()) {
2151 DEFINE_WAIT(wait); 2158 DEFINE_WAIT(wait);
2152 if (!khugepaged_scan_sleep_millisecs) 2159 if (!khugepaged_scan_sleep_millisecs)
@@ -2157,8 +2164,8 @@ static void khugepaged_loop(void)
2157 khugepaged_scan_sleep_millisecs)); 2164 khugepaged_scan_sleep_millisecs));
2158 remove_wait_queue(&khugepaged_wait, &wait); 2165 remove_wait_queue(&khugepaged_wait, &wait);
2159 } else if (khugepaged_enabled()) 2166 } else if (khugepaged_enabled())
2160 wait_event_interruptible(khugepaged_wait, 2167 wait_event_freezable(khugepaged_wait,
2161 khugepaged_wait_event()); 2168 khugepaged_wait_event());
2162 } 2169 }
2163} 2170}
2164 2171
@@ -2166,6 +2173,7 @@ static int khugepaged(void *none)
2166{ 2173{
2167 struct mm_slot *mm_slot; 2174 struct mm_slot *mm_slot;
2168 2175
2176 set_freezable();
2169 set_user_nice(current, 19); 2177 set_user_nice(current, 19);
2170 2178
2171 /* serialize with start_khugepaged() */ 2179 /* serialize with start_khugepaged() */
@@ -2180,6 +2188,8 @@ static int khugepaged(void *none)
2180 mutex_lock(&khugepaged_mutex); 2188 mutex_lock(&khugepaged_mutex);
2181 if (!khugepaged_enabled()) 2189 if (!khugepaged_enabled())
2182 break; 2190 break;
2191 if (unlikely(kthread_should_stop()))
2192 break;
2183 } 2193 }
2184 2194
2185 spin_lock(&khugepaged_mm_lock); 2195 spin_lock(&khugepaged_mm_lock);