author    David Rientjes <rientjes@google.com>  2018-04-05 19:23:09 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-04-06 00:36:25 -0400
commit    a5c6d6509342785bef53bf9508e1842b303f1878
tree      c741ae2db50b36dae555549172246676f9323f5f  /mm/page_alloc.c
parent    31286a8484a85e8b4e91ddb0f5415aee8a416827
mm, page_alloc: extend kernelcore and movablecore for percent
Both kernelcore= and movablecore= can be used to define the amount of
ZONE_NORMAL and ZONE_MOVABLE on a system, respectively. This requires the
system memory capacity to be known when specifying the command line,
however.

This introduces the ability to define both kernelcore= and movablecore=
as a percentage of total system memory. This is convenient for systems
software that wants to define the amount of ZONE_MOVABLE, for example, as
a proportion of a system's memory rather than a hardcoded byte value.

To define the percentage, the final character of the parameter should be
a '%'.

mhocko: "why is anyone using these options nowadays?"

rientjes:
: Fragmentation of non-__GFP_MOVABLE pages due to low on memory
: situations can pollute most pageblocks on the system, as much as 1GB of
: slab being fragmented over 128GB of memory, for example. When the
: amount of kernel memory is well bounded for certain systems, it is
: better to aggressively reclaim from existing MIGRATE_UNMOVABLE
: pageblocks rather than eagerly fallback to others.
:
: We have additional patches that help with this fragmentation if you're
: interested, specifically kcompactd compaction of MIGRATE_UNMOVABLE
: pageblocks triggered by fallback of non-__GFP_MOVABLE allocations and
: draining of pcp lists back to the zone free area to prevent stranding.

[rientjes@google.com: updates]
Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1802131700160.71590@chino.kir.corp.google.com
Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1802121622470.179479@chino.kir.corp.google.com
Signed-off-by: David Rientjes <rientjes@google.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
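To make the new semantics concrete, below is a minimal userspace sketch of
the parsing rule and the percentage arithmetic. It is an illustration, not
the patch itself: strtoull() stands in for the kernel's simple_strtoull(),
the byte path is simplified because memparse() is kernel-internal, and
PAGE_SHIFT and the sample machine size are assumptions.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12			/* assumption: 4KB pages */

/* Mirrors cmdline_parse_core(): a trailing '%' selects the percent path */
static int parse_core(const char *p, unsigned long *core,
		      unsigned long *percent)
{
	char *endptr;
	unsigned long long coremem;

	if (!p)
		return -1;

	coremem = strtoull(p, &endptr, 0);
	if (*endptr == '%') {
		if (coremem > 100)	/* sketch rejects >100%; the kernel only WARNs */
			return -1;
		*percent = coremem;
		*core = 0;
	} else {
		/* the kernel uses memparse() here, so K/M/G suffixes also work */
		*core = coremem >> PAGE_SHIFT;
		*percent = 0;
	}
	return 0;
}

int main(void)
{
	/* assumption: 128GB machine with 4KB pages, 64-bit unsigned long */
	unsigned long totalpages = 32UL << 20;
	unsigned long core = 0, percent = 0;

	if (parse_core("10%", &core, &percent))
		return 1;
	if (percent)	/* same arithmetic the patch adds to find_zone_movable_pfns_for_nodes() */
		core = (totalpages * 100 * percent) / 10000UL;

	printf("kernelcore=10%% -> %lu pages (~%lu MB)\n",
	       core, core >> (20 - PAGE_SHIFT));
	return 0;
}

Run on a 64-bit host, this prints the page count that kernelcore=10% would
yield for 128GB of memory. Note that (totalpages * 100 * percent) / 10000UL
reduces to totalpages * percent / 100; the extra factor of 100 is harmless
given the 64-bit unsigned long the kernel uses on such systems.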
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 43
1 file changed, 35 insertions(+), 8 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a3e2ba4f76bb..766ffb5fa94b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -273,7 +273,9 @@ static unsigned long __meminitdata dma_reserve;
 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
 static unsigned long __initdata required_kernelcore;
+static unsigned long required_kernelcore_percent __initdata;
 static unsigned long __initdata required_movablecore;
+static unsigned long required_movablecore_percent __initdata;
 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
 static bool mirrored_kernelcore;
 
@@ -6571,7 +6573,18 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 	}
 
 	/*
-	 * If movablecore=nn[KMG] was specified, calculate what size of
+	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
+	 * amount of necessary memory.
+	 */
+	if (required_kernelcore_percent)
+		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
+				       10000UL;
+	if (required_movablecore_percent)
+		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
+				       10000UL;
+
+	/*
+	 * If movablecore= was specified, calculate what size of
 	 * kernelcore that corresponds so that memory usable for
 	 * any allocation type is evenly spread. If both kernelcore
 	 * and movablecore are specified, then the value of kernelcore
@@ -6811,18 +6824,30 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 	zero_resv_unavail();
 }
 
-static int __init cmdline_parse_core(char *p, unsigned long *core)
+static int __init cmdline_parse_core(char *p, unsigned long *core,
+				     unsigned long *percent)
 {
 	unsigned long long coremem;
+	char *endptr;
+
 	if (!p)
 		return -EINVAL;
 
-	coremem = memparse(p, &p);
-	*core = coremem >> PAGE_SHIFT;
+	/* Value may be a percentage of total memory, otherwise bytes */
+	coremem = simple_strtoull(p, &endptr, 0);
+	if (*endptr == '%') {
+		/* Paranoid check for percent values greater than 100 */
+		WARN_ON(coremem > 100);
 
-	/* Paranoid check that UL is enough for the coremem value */
-	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
+		*percent = coremem;
+	} else {
+		coremem = memparse(p, &p);
+		/* Paranoid check that UL is enough for the coremem value */
+		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
 
+		*core = coremem >> PAGE_SHIFT;
+		*percent = 0UL;
+	}
 	return 0;
 }
 
@@ -6838,7 +6863,8 @@ static int __init cmdline_parse_kernelcore(char *p)
 		return 0;
 	}
 
-	return cmdline_parse_core(p, &required_kernelcore);
+	return cmdline_parse_core(p, &required_kernelcore,
+				  &required_kernelcore_percent);
 }
 
 /*
@@ -6847,7 +6873,8 @@ static int __init cmdline_parse_kernelcore(char *p)
  */
 static int __init cmdline_parse_movablecore(char *p)
 {
-	return cmdline_parse_core(p, &required_movablecore);
+	return cmdline_parse_core(p, &required_movablecore,
+				  &required_movablecore_percent);
 }
 
 early_param("kernelcore", cmdline_parse_kernelcore);
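
With the early_param() hooks above registered, either spelling of the
parameter reaches cmdline_parse_core(). As purely illustrative boot
command-line entries (values are examples, not from the patch):

	kernelcore=512M		byte form, still parsed by memparse()
	kernelcore=10%		new percent form, 10% of total system memory
	movablecore=25%		new percent form for sizing ZONE_MOVABLE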