author		Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>	2013-01-04 18:35:08 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>		2013-01-04 19:11:46 -0500
commit		a458431e176ddb27e8ef8b98c2a681b217337393 (patch)
tree		466ec91a25ebbe30870d12486071bb08a8c7cd5a /mm
parent		358e419f826b552c9d795bcd3820597217692461 (diff)
mm: fix zone_watermark_ok_safe() accounting of isolated pages
Commit 702d1a6e0766 ("memory-hotplug: fix kswapd looping forever
problem") added an isolated pageblocks counter (nr_pageblock_isolate in
struct zone) and used it to adjust the free pages counter in
zone_watermark_ok_safe() to prevent kswapd from looping forever.

Later, commit 2139cbe627b8 ("cma: fix counting of isolated pages")
fixed the accounting of isolated pages in the global free pages
counter. That made the earlier zone_watermark_ok_safe() fix unnecessary
and potentially harmful, because isolated pages may now be accounted
twice, making the free pages counter incorrect.

This patch removes the special isolated pageblocks counter altogether,
which fixes the free pages check in zone_watermark_ok_safe().
Reported-by: Tomasz Stanislawski <t.stanislaws@samsung.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Aaditya Kumar <aaditya.kumar.30@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c		27
-rw-r--r--	mm/page_isolation.c	26
2 files changed, 2 insertions, 51 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4ba5e37127fc..bc6cc0e913bd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -221,11 +221,6 @@ EXPORT_SYMBOL(nr_online_nodes);
 
 int page_group_by_mobility_disabled __read_mostly;
 
-/*
- * NOTE:
- * Don't use set_pageblock_migratetype(page, MIGRATE_ISOLATE) directly.
- * Instead, use {un}set_pageblock_isolate.
- */
 void set_pageblock_migratetype(struct page *page, int migratetype)
 {
 
@@ -1655,20 +1650,6 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 	return true;
 }
 
-#ifdef CONFIG_MEMORY_ISOLATION
-static inline unsigned long nr_zone_isolate_freepages(struct zone *zone)
-{
-	if (unlikely(zone->nr_pageblock_isolate))
-		return zone->nr_pageblock_isolate * pageblock_nr_pages;
-	return 0;
-}
-#else
-static inline unsigned long nr_zone_isolate_freepages(struct zone *zone)
-{
-	return 0;
-}
-#endif
-
 bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		      int classzone_idx, int alloc_flags)
 {
@@ -1684,14 +1665,6 @@ bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
 	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
 		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
 
-	/*
-	 * If the zone has MIGRATE_ISOLATE type free pages, we should consider
-	 * it. nr_zone_isolate_freepages is never accurate so kswapd might not
-	 * sleep although it could do so. But this is more desirable for memory
-	 * hotplug than sleeping which can cause a livelock in the direct
-	 * reclaim path.
-	 */
-	free_pages -= nr_zone_isolate_freepages(z);
 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
 								free_pages);
 }
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 9d2264ea4606..383bdbb98b04 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -8,28 +8,6 @@
 #include <linux/memory.h>
 #include "internal.h"
 
-/* called while holding zone->lock */
-static void set_pageblock_isolate(struct page *page)
-{
-	if (get_pageblock_migratetype(page) == MIGRATE_ISOLATE)
-		return;
-
-	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
-	page_zone(page)->nr_pageblock_isolate++;
-}
-
-/* called while holding zone->lock */
-static void restore_pageblock_isolate(struct page *page, int migratetype)
-{
-	struct zone *zone = page_zone(page);
-	if (WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE))
-		return;
-
-	BUG_ON(zone->nr_pageblock_isolate <= 0);
-	set_pageblock_migratetype(page, migratetype);
-	zone->nr_pageblock_isolate--;
-}
-
 int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
 {
 	struct zone *zone;
@@ -80,7 +58,7 @@ out:
 	unsigned long nr_pages;
 	int migratetype = get_pageblock_migratetype(page);
 
-	set_pageblock_isolate(page);
+	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
 	nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);
 
 	__mod_zone_freepage_state(zone, -nr_pages, migratetype);
@@ -103,7 +81,7 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 		goto out;
 	nr_pages = move_freepages_block(zone, page, migratetype);
 	__mod_zone_freepage_state(zone, nr_pages, migratetype);
-	restore_pageblock_isolate(page, migratetype);
+	set_pageblock_migratetype(page, migratetype);
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
 }