diff options
author | Yaowei Bai <bywxiaobai@163.com> | 2015-09-08 18:04:13 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-09-08 18:35:28 -0400 |
commit | 34b100605cb7e201d5c4e39f54d0e11caa950733 (patch) | |
tree | ddde0e0a3792a448ec5e3d21d4fc1b75ca1ba5a8 /mm | |
parent | 013110a73dcf970cb28c5b0a79f9eee577ea6aa2 (diff) |
mm/page_alloc.c: change sysctl_lower_zone_reserve_ratio to sysctl_lowmem_reserve_ratio in comments
We use sysctl_lowmem_reserve_ratio rather than
sysctl_lower_zone_reserve_ratio to determine how aggressive the kernel
is in defending lowmem from the possibility of being captured into
pinned user memory. To avoid misleading readers, correct these comments to use the real name.
Signed-off-by: Yaowei Bai <bywxiaobai@163.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/page_alloc.c | 4 |
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 59abb47b70ee..5e8e99dd595a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -6075,7 +6075,7 @@ void __init page_alloc_init(void) | |||
6075 | } | 6075 | } |
6076 | 6076 | ||
6077 | /* | 6077 | /* |
6078 | * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio | 6078 | * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio |
6079 | * or min_free_kbytes changes. | 6079 | * or min_free_kbytes changes. |
6080 | */ | 6080 | */ |
6081 | static void calculate_totalreserve_pages(void) | 6081 | static void calculate_totalreserve_pages(void) |
@@ -6119,7 +6119,7 @@ static void calculate_totalreserve_pages(void) | |||
6119 | 6119 | ||
6120 | /* | 6120 | /* |
6121 | * setup_per_zone_lowmem_reserve - called whenever | 6121 | * setup_per_zone_lowmem_reserve - called whenever |
6122 | * sysctl_lower_zone_reserve_ratio changes. Ensures that each zone | 6122 | * sysctl_lowmem_reserve_ratio changes. Ensures that each zone |
6123 | * has a correct pages reserved value, so an adequate number of | 6123 | * has a correct pages reserved value, so an adequate number of |
6124 | * pages are left in the zone after a successful __alloc_pages(). | 6124 | * pages are left in the zone after a successful __alloc_pages(). |
6125 | */ | 6125 | */ |