path: root/mm/huge_memory.c
author     Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>  2012-10-08 19:29:41 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>     2012-10-09 03:22:25 -0400
commit     911891afe1c3104adf0f802189909868239ebbfd
tree       8583f33ded3ef14a85fe7cd2244c605ece673f37 /mm/huge_memory.c
parent     637e3a27ec2c84f7ecd083fa6943da2f19eb5e9f
thp: move khugepaged_mutex out of khugepaged
Currently, khugepaged_mutex is used in a complicated, hard-to-follow way. In fact it only serializes start_khugepaged() and khugepaged(), for two reasons:

- khugepaged_thread is shared between them

- the thp disable path (echo never > transparent_hugepage/enabled) is non-blocking, so khugepaged_thread must be protected to observe a stable running state

Both can be avoided by:

- using the lock only to serialize thread creation and cancellation

- making the thp disable path wait until the thread has exited

khugepaged_thread is then fully controlled by start_khugepaged(), and khugepaged() no longer needs the lock.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
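For illustration only, the following is a minimal, self-contained sketch of the pattern the patch converges on: one mutex serializes kthread_run()/kthread_stop(), the worker loop merely polls kthread_should_stop(), and the disable path blocks inside kthread_stop() until the thread has exited. The identifiers (worker_mutex, worker_thread, worker_start, ...) are hypothetical and are not the ones used in mm/huge_memory.c.

/*
 * Illustrative toy module, not part of the patch.  It reduces the
 * "serialize create/stop under a mutex, let the thread itself run
 * lock-free" scheme to its bare bones.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>

static struct task_struct *worker_thread;
static DEFINE_MUTEX(worker_mutex);	/* serializes start/stop only */

static int worker_fn(void *unused)
{
	/* No locking here: the thread pointer is owned by worker_start(). */
	while (!kthread_should_stop()) {
		/* ... do one scan pass ... */
		msleep_interruptible(1000);
	}
	return 0;
}

/* Called with worker_mutex held, like start_khugepaged() after the patch. */
static int worker_start(bool enable)
{
	int err = 0;

	if (enable) {
		if (!worker_thread)
			worker_thread = kthread_run(worker_fn, NULL, "worker");
		if (IS_ERR(worker_thread)) {
			err = PTR_ERR(worker_thread);
			worker_thread = NULL;
		}
	} else if (worker_thread) {
		/* Blocks until worker_fn() returns, so disabling is synchronous. */
		kthread_stop(worker_thread);
		worker_thread = NULL;
	}

	return err;
}

static int __init demo_init(void)
{
	int err;

	mutex_lock(&worker_mutex);
	err = worker_start(true);
	mutex_unlock(&worker_mutex);

	return err;
}

static void __exit demo_exit(void)
{
	mutex_lock(&worker_mutex);
	worker_start(false);
	mutex_unlock(&worker_mutex);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");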
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--   mm/huge_memory.c | 36
1 file changed, 13 insertions(+), 23 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9833d8ecf38f..0931b2b19c52 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -139,9 +139,6 @@ static int start_khugepaged(void)
 {
 	int err = 0;
 	if (khugepaged_enabled()) {
-		int wakeup;
-
-		mutex_lock(&khugepaged_mutex);
 		if (!khugepaged_thread)
 			khugepaged_thread = kthread_run(khugepaged, NULL,
 						       "khugepaged");
@@ -151,15 +148,17 @@ static int start_khugepaged(void)
 			err = PTR_ERR(khugepaged_thread);
 			khugepaged_thread = NULL;
 		}
-		wakeup = !list_empty(&khugepaged_scan.mm_head);
-		mutex_unlock(&khugepaged_mutex);
-		if (wakeup)
+
+		if (!list_empty(&khugepaged_scan.mm_head))
 			wake_up_interruptible(&khugepaged_wait);
 
 		set_recommended_min_free_kbytes();
-	} else
+	} else if (khugepaged_thread) {
 		/* wakeup to exit */
 		wake_up_interruptible(&khugepaged_wait);
+		kthread_stop(khugepaged_thread);
+		khugepaged_thread = NULL;
+	}
 
 	return err;
 }
@@ -221,7 +220,12 @@ static ssize_t enabled_store(struct kobject *kobj,
 				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
 
 	if (ret > 0) {
-		int err = start_khugepaged();
+		int err;
+
+		mutex_lock(&khugepaged_mutex);
+		err = start_khugepaged();
+		mutex_unlock(&khugepaged_mutex);
+
 		if (err)
 			ret = err;
 	}
@@ -2329,20 +2333,10 @@ static int khugepaged(void *none)
 	set_freezable();
 	set_user_nice(current, 19);
 
-	/* serialize with start_khugepaged() */
-	mutex_lock(&khugepaged_mutex);
-
-	for (;;) {
-		mutex_unlock(&khugepaged_mutex);
+	while (!kthread_should_stop()) {
 		VM_BUG_ON(khugepaged_thread != current);
 		khugepaged_loop();
 		VM_BUG_ON(khugepaged_thread != current);
-
-		mutex_lock(&khugepaged_mutex);
-		if (!khugepaged_enabled())
-			break;
-		if (unlikely(kthread_should_stop()))
-			break;
 	}
 
 	spin_lock(&khugepaged_mm_lock);
@@ -2351,10 +2345,6 @@ static int khugepaged(void *none)
 	if (mm_slot)
 		collect_mm_slot(mm_slot);
 	spin_unlock(&khugepaged_mm_lock);
-
-	khugepaged_thread = NULL;
-	mutex_unlock(&khugepaged_mutex);
-
 	return 0;
 }
 