author		Joonsoo Kim <iamjoonsoo.kim@lge.com>	2018-04-10 19:30:11 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-11 13:28:32 -0400
commit		d3cda2337bbc9edd2a26b83cb00eaa8c048ff274
tree		96e2745e71e23f92a7ce96b6a6b1eec3b69cd779
parent		94723aafb9e76414fada7c1c198733a86f01ea8f
mm/page_alloc: don't reserve ZONE_HIGHMEM for ZONE_MOVABLE request
Free pages in ZONE_HIGHMEM cannot be used for kernel memory, so reserving
them is not that important. When ZONE_MOVABLE is used, this reservation
would theoretically decrease the memory usable for GFP_HIGHUSER_MOVABLE
allocation requests, which mainly serve page cache and anonymous pages.
So, fix it by setting sysctl_lowmem_reserve_ratio[ZONE_HIGHMEM] to 0.
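To put a rough number on the effect, here is a user-space sketch, not kernel
code; the zone layout and managed page counts below are hypothetical. The
reserve for a lower zone is the managed pages of the higher zones divided by
that zone's ratio, mirroring the setup_per_zone_lowmem_reserve() loop shown
in the diff below:

/* User-space model of the lowmem_reserve calculation -- not kernel code.
 * Zone layout and managed page counts (4 KiB pages) are hypothetical. */
#include <stdio.h>

enum zone { DMA, NORMAL, HIGHMEM, MOVABLE, NR_ZONES };

int main(void)
{
	unsigned long managed[NR_ZONES] = { 4096, 221184, 786432, 262144 };
	/* The old defaults reserved ZONE_HIGHMEM at ratio 32; this patch uses 0,
	 * and a ratio below 1 now means "reserve nothing in this zone". */
	unsigned long ratio[NR_ZONES] = { 256, 32, 0 /* was 32 */, 0 };
	unsigned long reserve[NR_ZONES][NR_ZONES] = { { 0 } };

	for (int j = 0; j < NR_ZONES; j++) {
		unsigned long pages = managed[j];	/* pages of the requesting class */

		for (int idx = j - 1; idx >= 0; idx--) {
			reserve[idx][j] = ratio[idx] ? pages / ratio[idx] : 0;
			pages += managed[idx];		/* accumulate like the kernel loop */
		}
	}

	/* 0 after this patch; 262144 / 32 = 8192 pages (32 MiB) before it. */
	printf("HighMem pages withheld from ZONE_MOVABLE requests: %lu\n",
	       reserve[HIGHMEM][MOVABLE]);
	return 0;
}

With the old ratio of 32, this hypothetical 1 GiB ZONE_MOVABLE keeps 8192
highmem pages (32 MiB) out of reach of page-cache and anon allocations; with
the new value of 0, nothing is withheld.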
In addition, defining the sysctl_lowmem_reserve_ratio array with
MAX_NR_ZONES - 1 entries makes the code needlessly confusing. For example,
on a highmem system the following reserve ratio actually applies to
*ZONE_NORMAL*, which can easily mislead people:
#ifdef CONFIG_HIGHMEM
32
#endif
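To spell the confusion out (an illustrative expansion, assuming a
hypothetical 32-bit configuration with CONFIG_ZONE_DMA=y, CONFIG_HIGHMEM=y
and no CONFIG_ZONE_DMA32), the old positional initializer lined its entries
up with zones like this:

/* Illustration only; the real initializer lives in mm/page_alloc.c.
 * Zone order on this hypothetical config: DMA, NORMAL, HIGHMEM, MOVABLE. */
int old_sysctl_lowmem_reserve_ratio[/* MAX_NR_ZONES - 1 */ 3] = {
	256,	/* [0] = ZONE_DMA     -- from the CONFIG_ZONE_DMA block          */
	32,	/* [1] = ZONE_NORMAL  -- but written inside #ifdef CONFIG_HIGHMEM */
	32,	/* [2] = ZONE_HIGHMEM -- the trailing, unguarded entry            */
};

The value written under #ifdef CONFIG_HIGHMEM ends up as the ZONE_NORMAL
ratio, while the ZONE_HIGHMEM ratio comes from the unguarded trailing entry.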
This patch also fixes that by defining the sysctl_lowmem_reserve_ratio
array with MAX_NR_ZONES entries and placing each "#ifdef" around the zone
it actually guards.
Link: http://lkml.kernel.org/r/1504672525-17915-1-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Tony Lindgren <tony@atomide.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Laura Abbott <lauraa@codeaurora.org>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Will Deacon <will.deacon@arm.com>
Cc: <linux-api@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	25
1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b04667848375..34a4c12d2675 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -205,17 +205,18 @@ static void __free_pages_ok(struct page *page, unsigned int order);
  * TBD: should special case ZONE_DMA32 machines here - in those we normally
  * don't need any ZONE_NORMAL reservation
  */
-int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
+int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
 #ifdef CONFIG_ZONE_DMA
-	 256,
+	[ZONE_DMA] = 256,
 #endif
 #ifdef CONFIG_ZONE_DMA32
-	 256,
+	[ZONE_DMA32] = 256,
 #endif
+	[ZONE_NORMAL] = 32,
 #ifdef CONFIG_HIGHMEM
-	 32,
+	[ZONE_HIGHMEM] = 0,
 #endif
-	 32,
+	[ZONE_MOVABLE] = 0,
 };
 
 EXPORT_SYMBOL(totalram_pages);
@@ -7132,13 +7133,15 @@ static void setup_per_zone_lowmem_reserve(void)
 			struct zone *lower_zone;
 
 			idx--;
-
-			if (sysctl_lowmem_reserve_ratio[idx] < 1)
-				sysctl_lowmem_reserve_ratio[idx] = 1;
-
 			lower_zone = pgdat->node_zones + idx;
-			lower_zone->lowmem_reserve[j] = managed_pages /
-					sysctl_lowmem_reserve_ratio[idx];
+
+			if (sysctl_lowmem_reserve_ratio[idx] < 1) {
+				sysctl_lowmem_reserve_ratio[idx] = 0;
+				lower_zone->lowmem_reserve[j] = 0;
+			} else {
+				lower_zone->lowmem_reserve[j] =
+					managed_pages / sysctl_lowmem_reserve_ratio[idx];
+			}
 			managed_pages += lower_zone->managed_pages;
 		}
 	}
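For context (this helper is not part of the patch): the reserve computed by
setup_per_zone_lowmem_reserve() is consumed in the zone watermark check. The
struct and function below are a simplified, hypothetical user-space model
loosely based on __zone_watermark_ok(); the real check also deals with
per-order free lists, ALLOC_HARDER, and similar details. It shows why a
nonzero lowmem_reserve[ZONE_MOVABLE] on a highmem zone would only make pages
unavailable to GFP_HIGHUSER_MOVABLE requests:

#include <stdio.h>

/* Hypothetical, simplified model -- not kernel code. */
struct zone_model {
	long free_pages;
	long watermark;			/* min watermark, in pages */
	long lowmem_reserve[5];		/* indexed by the request's highest usable zone */
};

/*
 * A request whose highest usable zone is 'classzone_idx' may take pages
 * from zone 'z' only while 'z' stays above its watermark plus the reserve
 * it holds against that request class. With this patch, a highmem zone
 * holds reserve 0 against ZONE_MOVABLE requests, so only the watermark
 * limits GFP_HIGHUSER_MOVABLE allocations there.
 */
static int zone_has_room(const struct zone_model *z, int classzone_idx)
{
	return z->free_pages > z->watermark + z->lowmem_reserve[classzone_idx];
}

int main(void)
{
	/* A highmem zone as seen by a ZONE_MOVABLE-capable request (index 3 here). */
	struct zone_model highmem = {
		.free_pages = 1024,
		.watermark = 128,
		.lowmem_reserve = { 0, 0, 0, 0, 0 },	/* 0: nothing withheld */
	};

	printf("movable request may use highmem: %d\n", zone_has_room(&highmem, 3));
	return 0;
}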