diff options
author | KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> | 2009-09-21 20:03:14 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-22 10:17:39 -0400 |
commit | 495789a51a91cb8c015d8d77fecbac1caf20b186 (patch) | |
tree | ac2a71ed40ed84f5673326aa6bf7f278b54d989a /mm | |
parent | 28b83c5193e7ab951e402252278f2cc79dc4d298 (diff) |
oom: make oom_score a per-process value
The oom-killer kills a process, not a task, so oom_score should be calculated
per-process as well. This makes the heuristic more consistent and speeds up
select_bad_process().
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Paul Menage <menage@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/oom_kill.c | 35 |
1 files changed, 29 insertions, 6 deletions
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 630b77fe862f..372692294844 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
@@ -34,6 +34,23 @@ int sysctl_oom_dump_tasks; | |||
34 | static DEFINE_SPINLOCK(zone_scan_lock); | 34 | static DEFINE_SPINLOCK(zone_scan_lock); |
35 | /* #define DEBUG */ | 35 | /* #define DEBUG */ |
36 | 36 | ||
37 | /* | ||
38 | * Does any thread of the target process have mems_allowed intersecting ours? | ||
39 | */ | ||
40 | static int has_intersects_mems_allowed(struct task_struct *tsk) | ||
41 | { | ||
42 | struct task_struct *t; | ||
43 | |||
44 | t = tsk; | ||
45 | do { | ||
46 | if (cpuset_mems_allowed_intersects(current, t)) | ||
47 | return 1; | ||
48 | t = next_thread(t); | ||
49 | } while (t != tsk); | ||
50 | |||
51 | return 0; | ||
52 | } | ||
53 | |||
37 | /** | 54 | /** |
38 | * badness - calculate a numeric value for how bad this task has been | 55 | * badness - calculate a numeric value for how bad this task has been |
39 | * @p: task struct of which task we should calculate | 56 | * @p: task struct of which task we should calculate |
@@ -59,6 +76,9 @@ unsigned long badness(struct task_struct *p, unsigned long uptime) | |||
59 | struct mm_struct *mm; | 76 | struct mm_struct *mm; |
60 | struct task_struct *child; | 77 | struct task_struct *child; |
61 | int oom_adj = p->signal->oom_adj; | 78 | int oom_adj = p->signal->oom_adj; |
79 | struct task_cputime task_time; | ||
80 | unsigned long utime; | ||
81 | unsigned long stime; | ||
62 | 82 | ||
63 | if (oom_adj == OOM_DISABLE) | 83 | if (oom_adj == OOM_DISABLE) |
64 | return 0; | 84 | return 0; |
@@ -106,8 +126,11 @@ unsigned long badness(struct task_struct *p, unsigned long uptime) | |||
106 | * of seconds. There is no particular reason for this other than | 126 | * of seconds. There is no particular reason for this other than |
107 | * that it turned out to work very well in practice. | 127 | * that it turned out to work very well in practice. |
108 | */ | 128 | */ |
109 | cpu_time = (cputime_to_jiffies(p->utime) + cputime_to_jiffies(p->stime)) | 129 | thread_group_cputime(p, &task_time); |
110 | >> (SHIFT_HZ + 3); | 130 | utime = cputime_to_jiffies(task_time.utime); |
131 | stime = cputime_to_jiffies(task_time.stime); | ||
132 | cpu_time = (utime + stime) >> (SHIFT_HZ + 3); | ||
133 | |||
111 | 134 | ||
112 | if (uptime >= p->start_time.tv_sec) | 135 | if (uptime >= p->start_time.tv_sec) |
113 | run_time = (uptime - p->start_time.tv_sec) >> 10; | 136 | run_time = (uptime - p->start_time.tv_sec) >> 10; |
@@ -148,7 +171,7 @@ unsigned long badness(struct task_struct *p, unsigned long uptime) | |||
148 | * because p may have allocated or otherwise mapped memory on | 171 | * because p may have allocated or otherwise mapped memory on |
149 | * this node before. However it will be less likely. | 172 | * this node before. However it will be less likely. |
150 | */ | 173 | */ |
151 | if (!cpuset_mems_allowed_intersects(current, p)) | 174 | if (!has_intersects_mems_allowed(p)) |
152 | points /= 8; | 175 | points /= 8; |
153 | 176 | ||
154 | /* | 177 | /* |
@@ -204,13 +227,13 @@ static inline enum oom_constraint constrained_alloc(struct zonelist *zonelist, | |||
204 | static struct task_struct *select_bad_process(unsigned long *ppoints, | 227 | static struct task_struct *select_bad_process(unsigned long *ppoints, |
205 | struct mem_cgroup *mem) | 228 | struct mem_cgroup *mem) |
206 | { | 229 | { |
207 | struct task_struct *g, *p; | 230 | struct task_struct *p; |
208 | struct task_struct *chosen = NULL; | 231 | struct task_struct *chosen = NULL; |
209 | struct timespec uptime; | 232 | struct timespec uptime; |
210 | *ppoints = 0; | 233 | *ppoints = 0; |
211 | 234 | ||
212 | do_posix_clock_monotonic_gettime(&uptime); | 235 | do_posix_clock_monotonic_gettime(&uptime); |
213 | do_each_thread(g, p) { | 236 | for_each_process(p) { |
214 | unsigned long points; | 237 | unsigned long points; |
215 | 238 | ||
216 | /* | 239 | /* |
@@ -263,7 +286,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints, | |||
263 | chosen = p; | 286 | chosen = p; |
264 | *ppoints = points; | 287 | *ppoints = points; |
265 | } | 288 | } |
266 | } while_each_thread(g, p); | 289 | } |
267 | 290 | ||
268 | return chosen; | 291 | return chosen; |
269 | } | 292 | } |