author	Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>	2012-10-08 19:29:48 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 03:22:26 -0400
commit	d516904bd239fe2c9f1bd46cf146bb4b8831321c (patch)
tree	de3ec1af9c309dba0faf2e662ba49f651a566e82
parent	9817626e722a5e5699cf38f5d3a4c9851e054436 (diff)
thp: merge page pre-alloc in khugepaged_loop into khugepaged_do_scan
There are two pre-alloc operations in these two functions; the difference is:

- in khugepaged_loop, it is allowed to sleep if the page allocation fails
- in khugepaged_do_scan, it exits immediately if the page allocation fails

Actually, in khugepaged_do_scan we can allow the pre-alloc to sleep on the
first failure; the operation in khugepaged_loop can then be removed.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
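The retry policy the patch introduces (sleep once on the first allocation
failure, give up on the next) can be exercised in isolation with the
user-space sketch below. try_alloc(), alloc_with_one_retry() and the
usleep() delay are hypothetical stand-ins for alloc_hugepage(),
khugepaged_alloc_hugepage() and khugepaged_alloc_sleep(); the real kernel
loop additionally rechecks khugepaged_enabled() and bumps the
THP_COLLAPSE_ALLOC* vmstat counters.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Hypothetical allocator; fails on the first call purely to
 * exercise the retry path. */
static void *try_alloc(void)
{
	static int calls;
	return (calls++ == 0) ? NULL : malloc(2 * 1024 * 1024);
}

/* The merged policy: on the first failure, sleep and retry once;
 * after *wait has been consumed, a further failure returns NULL. */
static void *alloc_with_one_retry(bool *wait)
{
	void *page;

	do {
		page = try_alloc();
		if (!page) {
			if (!*wait)
				return NULL;	/* already slept once: give up */
			*wait = false;		/* consume the single retry */
			usleep(60000);		/* stand-in for khugepaged_alloc_sleep() */
		}
	} while (!page);

	return page;
}

int main(void)
{
	bool wait = true;
	void *p = alloc_with_one_retry(&wait);

	printf("allocation %s\n", p ? "succeeded after one retry" : "failed");
	free(p);
	return 0;
}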
-rw-r--r--	mm/huge_memory.c	97
1 file changed, 45 insertions(+), 52 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1e21b4cf4c75..d5b5fcc73c44 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2222,10 +2222,40 @@ static int khugepaged_wait_event(void)
 		kthread_should_stop();
 }
 
-static void khugepaged_do_scan(struct page **hpage)
+static void khugepaged_alloc_sleep(void)
+{
+	wait_event_freezable_timeout(khugepaged_wait, false,
+			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
+}
+
+#ifndef CONFIG_NUMA
+static struct page *khugepaged_alloc_hugepage(bool *wait)
+{
+	struct page *hpage;
+
+	do {
+		hpage = alloc_hugepage(khugepaged_defrag());
+		if (!hpage) {
+			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+			if (!*wait)
+				return NULL;
+
+			*wait = false;
+			khugepaged_alloc_sleep();
+		} else
+			count_vm_event(THP_COLLAPSE_ALLOC);
+	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
+
+	return hpage;
+}
+#endif
+
+static void khugepaged_do_scan(void)
 {
+	struct page *hpage = NULL;
 	unsigned int progress = 0, pass_through_head = 0;
 	unsigned int pages = khugepaged_pages_to_scan;
+	bool wait = true;
 
 	barrier(); /* write khugepaged_pages_to_scan to local stack */
 
@@ -2233,17 +2263,18 @@ static void khugepaged_do_scan(struct page **hpage)
 		cond_resched();
 
 #ifndef CONFIG_NUMA
-		if (!*hpage) {
-			*hpage = alloc_hugepage(khugepaged_defrag());
-			if (unlikely(!*hpage)) {
-				count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+		if (!hpage)
+			hpage = khugepaged_alloc_hugepage(&wait);
+
+		if (unlikely(!hpage))
+			break;
+#else
+		if (IS_ERR(hpage)) {
+			if (!wait)
 				break;
-			}
-			count_vm_event(THP_COLLAPSE_ALLOC);
-		}
-#else
-		if (IS_ERR(*hpage))
-			break;
+			wait = false;
+			khugepaged_alloc_sleep();
+		}
 #endif
 
 		if (unlikely(kthread_should_stop() || freezing(current)))
@@ -2255,37 +2286,16 @@ static void khugepaged_do_scan(struct page **hpage)
 		if (khugepaged_has_work() &&
 		    pass_through_head < 2)
 			progress += khugepaged_scan_mm_slot(pages - progress,
-							    hpage);
+							    &hpage);
 		else
 			progress = pages;
 		spin_unlock(&khugepaged_mm_lock);
 	}
-}
 
-static void khugepaged_alloc_sleep(void)
-{
-	wait_event_freezable_timeout(khugepaged_wait, false,
-			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
+	if (!IS_ERR_OR_NULL(hpage))
+		put_page(hpage);
 }
 
-#ifndef CONFIG_NUMA
-static struct page *khugepaged_alloc_hugepage(void)
-{
-	struct page *hpage;
-
-	do {
-		hpage = alloc_hugepage(khugepaged_defrag());
-		if (!hpage) {
-			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
-			khugepaged_alloc_sleep();
-		} else
-			count_vm_event(THP_COLLAPSE_ALLOC);
-	} while (unlikely(!hpage) &&
-		 likely(khugepaged_enabled()));
-	return hpage;
-}
-#endif
-
 static void khugepaged_wait_work(void)
 {
 	try_to_freeze();
@@ -2306,25 +2316,8 @@ static void khugepaged_wait_work(void)
 
 static void khugepaged_loop(void)
 {
-	struct page *hpage = NULL;
-
 	while (likely(khugepaged_enabled())) {
-#ifndef CONFIG_NUMA
-		hpage = khugepaged_alloc_hugepage();
-		if (unlikely(!hpage))
-			break;
-#else
-		if (IS_ERR(hpage)) {
-			khugepaged_alloc_sleep();
-			hpage = NULL;
-		}
-#endif
-
-		khugepaged_do_scan(&hpage);
-
-		if (!IS_ERR_OR_NULL(hpage))
-			put_page(hpage);
-
+		khugepaged_do_scan();
 		khugepaged_wait_work();
 	}
 }