path: root/mm
author    Andrea Arcangeli <aarcange@redhat.com>  2011-01-13 18:47:04 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-01-13 20:32:44 -0500
commit    f000565adb770b14cebbafde0a4f3e61a3342a63 (patch)
tree      ff2884da257e1df1b8cb4817d09c892d4f84727c /mm
parent    cd7548ab360c462118568eebb8c6da3bc303b02e (diff)
thp: set recommended min free kbytes
If transparent hugepage is enabled, initialize min_free_kbytes to an
optimal value by default. This moves the hugeadm algorithm into the
kernel. (A worked example of the calculation follows the patch below.)

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/huge_memory.c  52
1 file changed, 52 insertions(+), 0 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7b55fe0e998b..4ed97a2a115f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -85,6 +85,47 @@ struct khugepaged_scan {
 	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
 };
 
+
+static int set_recommended_min_free_kbytes(void)
+{
+	struct zone *zone;
+	int nr_zones = 0;
+	unsigned long recommended_min;
+	extern int min_free_kbytes;
+
+	if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
+		      &transparent_hugepage_flags) &&
+	    !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+		      &transparent_hugepage_flags))
+		return 0;
+
+	for_each_populated_zone(zone)
+		nr_zones++;
+
+	/* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
+	recommended_min = pageblock_nr_pages * nr_zones * 2;
+
+	/*
+	 * Make sure that on average at least two pageblocks are almost free
+	 * of another type, one for a migratetype to fall back to and a
+	 * second to avoid subsequent fallbacks of other types. There are 3
+	 * MIGRATE_TYPES we care about.
+	 */
+	recommended_min += pageblock_nr_pages * nr_zones *
+			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
+
+	/* don't ever allow reserving more than 5% of the lowmem */
+	recommended_min = min(recommended_min,
+			      (unsigned long) nr_free_buffer_pages() / 20);
+	recommended_min <<= (PAGE_SHIFT-10);
+
+	if (recommended_min > min_free_kbytes)
+		min_free_kbytes = recommended_min;
+	setup_per_zone_wmarks();
+	return 0;
+}
+late_initcall(set_recommended_min_free_kbytes);
+
 static int start_khugepaged(void)
 {
 	int err = 0;
@@ -108,6 +149,8 @@ static int start_khugepaged(void)
 		mutex_unlock(&khugepaged_mutex);
 		if (wakeup)
 			wake_up_interruptible(&khugepaged_wait);
+
+		set_recommended_min_free_kbytes();
 	} else
 		/* wakeup to exit */
 		wake_up_interruptible(&khugepaged_wait);
@@ -177,6 +220,13 @@ static ssize_t enabled_store(struct kobject *kobj,
 			ret = err;
 	}
 
+	if (ret > 0 &&
+	    (test_bit(TRANSPARENT_HUGEPAGE_FLAG,
+		      &transparent_hugepage_flags) ||
+	     test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+		      &transparent_hugepage_flags)))
+		set_recommended_min_free_kbytes();
+
 	return ret;
 }
 static struct kobj_attribute enabled_attr =
@@ -464,6 +514,8 @@ static int __init hugepage_init(void)
 
 	start_khugepaged();
 
+	set_recommended_min_free_kbytes();
+
 out:
 	return err;
 }
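
To make the watermark arithmetic concrete, here is a minimal standalone C
sketch of the calculation the patch performs at boot. The pageblock size,
zone count, and lowmem size below are assumed example values (a typical
x86-64 box with 4 KiB base pages and 2 MiB hugepages), not anything the
patch reads; the kernel derives them from the populated zones and
nr_free_buffer_pages().

#include <stdio.h>

#define MIGRATE_PCPTYPES 3UL	/* unmovable, reclaimable, movable */
#define PAGE_SHIFT       12	/* 4 KiB base pages (assumed) */

int main(void)
{
	/* Assumed example values, not read from a live system. */
	unsigned long pageblock_nr_pages = 512;	/* 2 MiB / 4 KiB */
	unsigned long nr_zones = 4;		/* assumed populated-zone count */
	unsigned long lowmem_pages = 524288;	/* ~2 GiB of lowmem */
	unsigned long recommended_min;

	/* two free hugepages per zone for MIGRATE_RESERVE */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/* plus MIGRATE_PCPTYPES^2 pageblocks per zone for fallbacks */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* cap at 5% of lowmem, then convert pages to kilobytes */
	if (recommended_min > lowmem_pages / 20)
		recommended_min = lowmem_pages / 20;
	recommended_min <<= (PAGE_SHIFT - 10);

	printf("recommended min_free_kbytes: %lu\n", recommended_min);
	return 0;
}

With these assumed numbers the sketch prints 90112, i.e. min_free_kbytes
would be raised to roughly 88 MiB, the order of magnitude THP-enabled
systems commonly report in /proc/sys/vm/min_free_kbytes.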