author    Ingo Molnar <mingo@kernel.org>  2017-09-04 05:01:18 -0400
committer Ingo Molnar <mingo@kernel.org>  2017-09-04 05:01:18 -0400
commit    edc2988c548db05e33b921fed15821010bc74895
tree      b35860428acea35e5866d4cf007519ed943a85de /mm/page_alloc.c
parent    d82fed75294229abc9d757f08a4817febae6c4f4
parent    81a84ad3cb5711cec79f4dd53a4ce026b092c432
Merge branch 'linus' into locking/core, to fix up conflicts

Conflicts:
	mm/page_alloc.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 29 ++++++++++++++++++++++++-----
 1 file changed, 24 insertions(+), 5 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 471b0526b876..9327a940e373 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -67,6 +67,7 @@
 #include <linux/memcontrol.h>
 #include <linux/ftrace.h>
 #include <linux/lockdep.h>
+#include <linux/nmi.h>
 
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -2536,9 +2537,14 @@ void drain_all_pages(struct zone *zone)
 
 #ifdef CONFIG_HIBERNATION
 
+/*
+ * Touch the watchdog for every WD_PAGE_COUNT pages.
+ */
+#define WD_PAGE_COUNT	(128*1024)
+
 void mark_free_pages(struct zone *zone)
 {
-	unsigned long pfn, max_zone_pfn;
+	unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
 	unsigned long flags;
 	unsigned int order, t;
 	struct page *page;
@@ -2553,6 +2559,11 @@ void mark_free_pages(struct zone *zone)
 		if (pfn_valid(pfn)) {
 			page = pfn_to_page(pfn);
 
+			if (!--page_count) {
+				touch_nmi_watchdog();
+				page_count = WD_PAGE_COUNT;
+			}
+
 			if (page_zone(page) != zone)
 				continue;
 
@@ -2566,8 +2577,13 @@ void mark_free_pages(struct zone *zone)
 			unsigned long i;
 
 			pfn = page_to_pfn(page);
-			for (i = 0; i < (1UL << order); i++)
+			for (i = 0; i < (1UL << order); i++) {
+				if (!--page_count) {
+					touch_nmi_watchdog();
+					page_count = WD_PAGE_COUNT;
+				}
 				swsusp_set_page_free(pfn_to_page(pfn + i));
+			}
 		}
 	}
 	spin_unlock_irqrestore(&zone->lock, flags);
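
The three hunks above address a watchdog false positive: mark_free_pages() walks every PFN in the zone while holding zone->lock with interrupts disabled, and on machines with very large zones that walk can run long enough to trigger the NMI watchdog. The fix threads one countdown through both loops and kicks the watchdog every WD_PAGE_COUNT (128K) pages. Below is a minimal standalone sketch of that countdown pattern; scan_pfn_range() and scan_one_pfn() are hypothetical names standing in for the real walk, not part of the patch.

#include <linux/nmi.h>	/* touch_nmi_watchdog() */

#define WD_PAGE_COUNT	(128*1024)

/* Hypothetical stand-in for the real per-page work done under zone->lock. */
static void scan_one_pfn(unsigned long pfn)
{
}

static void scan_pfn_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn, page_count = WD_PAGE_COUNT;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		/*
		 * Kick the watchdog once every WD_PAGE_COUNT iterations so
		 * a long but legitimate walk with IRQs off is not reported
		 * as a hard lockup.
		 */
		if (!--page_count) {
			touch_nmi_watchdog();
			page_count = WD_PAGE_COUNT;
		}
		scan_one_pfn(pfn);	/* the actual per-page work */
	}
}

Note that the patch uses a single page_count local rather than testing pfn % WD_PAGE_COUNT: one shared budget covers both the PFN walk and the nested free-list loop, so the watchdog is touched at a roughly even rate regardless of which loop is doing the work.
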
@@ -3276,10 +3292,13 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 	/*
 	 * Go through the zonelist yet one more time, keep very high watermark
 	 * here, this is only to catch a parallel oom killing, we must fail if
-	 * we're still under heavy pressure.
+	 * we're still under heavy pressure. But make sure that this reclaim
+	 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
+	 * allocation which will never fail due to oom_lock already held.
 	 */
-	page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
-					ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
+	page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
+				      ~__GFP_DIRECT_RECLAIM, order,
+				      ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
 	if (page)
 		goto out;
 
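
The final hunk hardens __alloc_pages_may_oom(), which runs with oom_lock held. As the updated comment explains, the "one more time" allocation must not itself enter direct reclaim: a __GFP_DIRECT_RECLAIM && !__GFP_NORETRY allocation never fails outright, and with oom_lock already held it could get stuck waiting on progress that only the lock holder can make. Clearing __GFP_DIRECT_RECLAIM turns the retry into a quick high-watermark check that either succeeds or fails immediately. A hedged sketch of just the mask adjustment, with oom_retry_gfp() as a hypothetical helper name (the patch open-codes this expression):

#include <linux/gfp.h>

/*
 * Hypothetical helper illustrating the gfp adjustment above: keep the
 * cpuset hardwall constraint (__GFP_HARDWALL) but forbid direct reclaim,
 * so the last-chance allocation made under oom_lock cannot block in
 * reclaim paths and instead fails fast if the high watermark is not met.
 */
static inline gfp_t oom_retry_gfp(gfp_t gfp_mask)
{
	return (gfp_mask | __GFP_HARDWALL) & ~__GFP_DIRECT_RECLAIM;
}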