diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-02-11 21:23:28 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-02-11 21:23:28 -0500 |
commit | 59d53737a8640482995fea13c6e2c0fd016115d6 (patch) | |
tree | 3423eb92315865d76cb8d488513bfef6ab9251d0 /kernel | |
parent | d3f180ea1a44aecba1b0dab2a253428e77f906bf (diff) | |
parent | 8138a67a5557ffea3a21dfd6f037842d4e748513 (diff) |
Merge branch 'akpm' (patches from Andrew)
Merge second set of updates from Andrew Morton:
"More of MM"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (83 commits)
mm/nommu.c: fix arithmetic overflow in __vm_enough_memory()
mm/mmap.c: fix arithmetic overflow in __vm_enough_memory()
vmstat: Reduce time interval to stat update on idle cpu
mm/page_owner.c: remove unnecessary stack_trace field
Documentation/filesystems/proc.txt: describe /proc/<pid>/map_files
mm: incorporate read-only pages into transparent huge pages
vmstat: do not use deferrable delayed work for vmstat_update
mm: more aggressive page stealing for UNMOVABLE allocations
mm: always steal split buddies in fallback allocations
mm: when stealing freepages, also take pages created by splitting buddy page
mincore: apply page table walker on do_mincore()
mm: /proc/pid/clear_refs: avoid split_huge_page()
mm: pagewalk: fix misbehavior of walk_page_range for vma(VM_PFNMAP)
mempolicy: apply page table walker on queue_pages_range()
arch/powerpc/mm/subpage-prot.c: use walk->vma and walk_page_vma()
memcg: cleanup preparation for page table walk
numa_maps: remove numa_maps->vma
numa_maps: fix typo in gather_hugetbl_stats
pagemap: use walk->vma instead of calling find_vma()
clear_refs: remove clear_refs_private->vma and introduce clear_refs_test_walk()
...
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/exit.c | 3 | ||||
-rw-r--r-- | kernel/fork.c | 11 | ||||
-rw-r--r-- | kernel/power/process.c | 75 |
3 files changed, 35 insertions, 54 deletions
diff --git a/kernel/exit.c b/kernel/exit.c index 6806c55475ee..feff10bbb307 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -435,7 +435,8 @@ static void exit_mm(struct task_struct *tsk) | |||
435 | task_unlock(tsk); | 435 | task_unlock(tsk); |
436 | mm_update_next_owner(mm); | 436 | mm_update_next_owner(mm); |
437 | mmput(mm); | 437 | mmput(mm); |
438 | clear_thread_flag(TIF_MEMDIE); | 438 | if (test_thread_flag(TIF_MEMDIE)) |
439 | unmark_oom_victim(); | ||
439 | } | 440 | } |
440 | 441 | ||
441 | static struct task_struct *find_alive_thread(struct task_struct *p) | 442 | static struct task_struct *find_alive_thread(struct task_struct *p) |
diff --git a/kernel/fork.c b/kernel/fork.c index b379d9abddc7..66e19c251581 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -555,6 +555,9 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p) | |||
555 | INIT_LIST_HEAD(&mm->mmlist); | 555 | INIT_LIST_HEAD(&mm->mmlist); |
556 | mm->core_state = NULL; | 556 | mm->core_state = NULL; |
557 | atomic_long_set(&mm->nr_ptes, 0); | 557 | atomic_long_set(&mm->nr_ptes, 0); |
558 | #ifndef __PAGETABLE_PMD_FOLDED | ||
559 | atomic_long_set(&mm->nr_pmds, 0); | ||
560 | #endif | ||
558 | mm->map_count = 0; | 561 | mm->map_count = 0; |
559 | mm->locked_vm = 0; | 562 | mm->locked_vm = 0; |
560 | mm->pinned_vm = 0; | 563 | mm->pinned_vm = 0; |
@@ -603,6 +606,14 @@ static void check_mm(struct mm_struct *mm) | |||
603 | printk(KERN_ALERT "BUG: Bad rss-counter state " | 606 | printk(KERN_ALERT "BUG: Bad rss-counter state " |
604 | "mm:%p idx:%d val:%ld\n", mm, i, x); | 607 | "mm:%p idx:%d val:%ld\n", mm, i, x); |
605 | } | 608 | } |
609 | |||
610 | if (atomic_long_read(&mm->nr_ptes)) | ||
611 | pr_alert("BUG: non-zero nr_ptes on freeing mm: %ld\n", | ||
612 | atomic_long_read(&mm->nr_ptes)); | ||
613 | if (mm_nr_pmds(mm)) | ||
614 | pr_alert("BUG: non-zero nr_pmds on freeing mm: %ld\n", | ||
615 | mm_nr_pmds(mm)); | ||
616 | |||
606 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS | 617 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS |
607 | VM_BUG_ON_MM(mm->pmd_huge_pte, mm); | 618 | VM_BUG_ON_MM(mm->pmd_huge_pte, mm); |
608 | #endif | 619 | #endif |
diff --git a/kernel/power/process.c b/kernel/power/process.c index 5a6ec8678b9a..564f786df470 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c | |||
@@ -84,8 +84,8 @@ static int try_to_freeze_tasks(bool user_only) | |||
84 | elapsed_msecs = elapsed_msecs64; | 84 | elapsed_msecs = elapsed_msecs64; |
85 | 85 | ||
86 | if (todo) { | 86 | if (todo) { |
87 | printk("\n"); | 87 | pr_cont("\n"); |
88 | printk(KERN_ERR "Freezing of tasks %s after %d.%03d seconds " | 88 | pr_err("Freezing of tasks %s after %d.%03d seconds " |
89 | "(%d tasks refusing to freeze, wq_busy=%d):\n", | 89 | "(%d tasks refusing to freeze, wq_busy=%d):\n", |
90 | wakeup ? "aborted" : "failed", | 90 | wakeup ? "aborted" : "failed", |
91 | elapsed_msecs / 1000, elapsed_msecs % 1000, | 91 | elapsed_msecs / 1000, elapsed_msecs % 1000, |
@@ -101,37 +101,13 @@ static int try_to_freeze_tasks(bool user_only) | |||
101 | read_unlock(&tasklist_lock); | 101 | read_unlock(&tasklist_lock); |
102 | } | 102 | } |
103 | } else { | 103 | } else { |
104 | printk("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000, | 104 | pr_cont("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000, |
105 | elapsed_msecs % 1000); | 105 | elapsed_msecs % 1000); |
106 | } | 106 | } |
107 | 107 | ||
108 | return todo ? -EBUSY : 0; | 108 | return todo ? -EBUSY : 0; |
109 | } | 109 | } |
110 | 110 | ||
111 | static bool __check_frozen_processes(void) | ||
112 | { | ||
113 | struct task_struct *g, *p; | ||
114 | |||
115 | for_each_process_thread(g, p) | ||
116 | if (p != current && !freezer_should_skip(p) && !frozen(p)) | ||
117 | return false; | ||
118 | |||
119 | return true; | ||
120 | } | ||
121 | |||
122 | /* | ||
123 | * Returns true if all freezable tasks (except for current) are frozen already | ||
124 | */ | ||
125 | static bool check_frozen_processes(void) | ||
126 | { | ||
127 | bool ret; | ||
128 | |||
129 | read_lock(&tasklist_lock); | ||
130 | ret = __check_frozen_processes(); | ||
131 | read_unlock(&tasklist_lock); | ||
132 | return ret; | ||
133 | } | ||
134 | |||
135 | /** | 111 | /** |
136 | * freeze_processes - Signal user space processes to enter the refrigerator. | 112 | * freeze_processes - Signal user space processes to enter the refrigerator. |
137 | * The current thread will not be frozen. The same process that calls | 113 | * The current thread will not be frozen. The same process that calls |
@@ -142,7 +118,6 @@ static bool check_frozen_processes(void) | |||
142 | int freeze_processes(void) | 118 | int freeze_processes(void) |
143 | { | 119 | { |
144 | int error; | 120 | int error; |
145 | int oom_kills_saved; | ||
146 | 121 | ||
147 | error = __usermodehelper_disable(UMH_FREEZING); | 122 | error = __usermodehelper_disable(UMH_FREEZING); |
148 | if (error) | 123 | if (error) |
@@ -155,31 +130,24 @@ int freeze_processes(void) | |||
155 | atomic_inc(&system_freezing_cnt); | 130 | atomic_inc(&system_freezing_cnt); |
156 | 131 | ||
157 | pm_wakeup_clear(); | 132 | pm_wakeup_clear(); |
158 | printk("Freezing user space processes ... "); | 133 | pr_info("Freezing user space processes ... "); |
159 | pm_freezing = true; | 134 | pm_freezing = true; |
160 | oom_kills_saved = oom_kills_count(); | ||
161 | error = try_to_freeze_tasks(true); | 135 | error = try_to_freeze_tasks(true); |
162 | if (!error) { | 136 | if (!error) { |
163 | __usermodehelper_set_disable_depth(UMH_DISABLED); | 137 | __usermodehelper_set_disable_depth(UMH_DISABLED); |
164 | oom_killer_disable(); | 138 | pr_cont("done."); |
165 | |||
166 | /* | ||
167 | * There might have been an OOM kill while we were | ||
168 | * freezing tasks and the killed task might be still | ||
169 | * on the way out so we have to double check for race. | ||
170 | */ | ||
171 | if (oom_kills_count() != oom_kills_saved && | ||
172 | !check_frozen_processes()) { | ||
173 | __usermodehelper_set_disable_depth(UMH_ENABLED); | ||
174 | printk("OOM in progress."); | ||
175 | error = -EBUSY; | ||
176 | } else { | ||
177 | printk("done."); | ||
178 | } | ||
179 | } | 139 | } |
180 | printk("\n"); | 140 | pr_cont("\n"); |
181 | BUG_ON(in_atomic()); | 141 | BUG_ON(in_atomic()); |
182 | 142 | ||
143 | /* | ||
144 | * Now that the whole userspace is frozen we need to disable | ||
145 | * the OOM killer to disallow any further interference with | ||
146 | * killable tasks. | ||
147 | */ | ||
148 | if (!error && !oom_killer_disable()) | ||
149 | error = -EBUSY; | ||
150 | |||
183 | if (error) | 151 | if (error) |
184 | thaw_processes(); | 152 | thaw_processes(); |
185 | return error; | 153 | return error; |
@@ -197,13 +165,14 @@ int freeze_kernel_threads(void) | |||
197 | { | 165 | { |
198 | int error; | 166 | int error; |
199 | 167 | ||
200 | printk("Freezing remaining freezable tasks ... "); | 168 | pr_info("Freezing remaining freezable tasks ... "); |
169 | |||
201 | pm_nosig_freezing = true; | 170 | pm_nosig_freezing = true; |
202 | error = try_to_freeze_tasks(false); | 171 | error = try_to_freeze_tasks(false); |
203 | if (!error) | 172 | if (!error) |
204 | printk("done."); | 173 | pr_cont("done."); |
205 | 174 | ||
206 | printk("\n"); | 175 | pr_cont("\n"); |
207 | BUG_ON(in_atomic()); | 176 | BUG_ON(in_atomic()); |
208 | 177 | ||
209 | if (error) | 178 | if (error) |
@@ -224,7 +193,7 @@ void thaw_processes(void) | |||
224 | 193 | ||
225 | oom_killer_enable(); | 194 | oom_killer_enable(); |
226 | 195 | ||
227 | printk("Restarting tasks ... "); | 196 | pr_info("Restarting tasks ... "); |
228 | 197 | ||
229 | __usermodehelper_set_disable_depth(UMH_FREEZING); | 198 | __usermodehelper_set_disable_depth(UMH_FREEZING); |
230 | thaw_workqueues(); | 199 | thaw_workqueues(); |
@@ -243,7 +212,7 @@ void thaw_processes(void) | |||
243 | usermodehelper_enable(); | 212 | usermodehelper_enable(); |
244 | 213 | ||
245 | schedule(); | 214 | schedule(); |
246 | printk("done.\n"); | 215 | pr_cont("done.\n"); |
247 | trace_suspend_resume(TPS("thaw_processes"), 0, false); | 216 | trace_suspend_resume(TPS("thaw_processes"), 0, false); |
248 | } | 217 | } |
249 | 218 | ||
@@ -252,7 +221,7 @@ void thaw_kernel_threads(void) | |||
252 | struct task_struct *g, *p; | 221 | struct task_struct *g, *p; |
253 | 222 | ||
254 | pm_nosig_freezing = false; | 223 | pm_nosig_freezing = false; |
255 | printk("Restarting kernel threads ... "); | 224 | pr_info("Restarting kernel threads ... "); |
256 | 225 | ||
257 | thaw_workqueues(); | 226 | thaw_workqueues(); |
258 | 227 | ||
@@ -264,5 +233,5 @@ void thaw_kernel_threads(void) | |||
264 | read_unlock(&tasklist_lock); | 233 | read_unlock(&tasklist_lock); |
265 | 234 | ||
266 | schedule(); | 235 | schedule(); |
267 | printk("done.\n"); | 236 | pr_cont("done.\n"); |
268 | } | 237 | } |