author    Eric W. Biederman <ebiederm@xmission.com>  2006-06-26 03:25:55 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>      2006-06-26 12:58:25 -0400
commit    99f895518368252ba862cc15ce4eb98ebbe1bec6 (patch)
tree      a9dcc01963221d1fd6a7e357b95d361ebfe91c6d /mm/mempolicy.c
parent    8578cea7509cbdec25b31d08b48a92fcc3b1a9e3 (diff)
[PATCH] proc: don't lock task_structs indefinitely
Every inode in /proc holds a reference to a struct task_struct. If a directory or file is opened and remains open after the task exits, this pinning continues. With 8K stacks on a 32-bit machine the amount pinned per file descriptor is about 10K. Normally I would figure a reasonable per-user process limit at about 100 processes. With 80 processes, each holding 1000 file descriptors open, I can trigger the OOM killer on a 32-bit kernel, because I have pinned about 800MB of useless data.

This patch replaces the struct task_struct pointer with a pointer to a struct task_ref, which in turn holds the struct task_struct pointer, so the pinning of dead tasks does not happen. The code now has to contend with the fact that the task may exit at any time, which is a little, but not much, more complicated.

With this change it takes about 1000 processes, each opening 1000 file descriptors, before I can trigger the OOM killer. Much better.

[mlp@google.com: task_mmu small fixes]

Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Cc: Paul Jackson <pj@sgi.com>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Albert Cahalan <acahalan@gmail.com>
Signed-off-by: Prasanna Meda <mlp@google.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
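For illustration, a minimal userspace sketch of the weak-reference pattern the message describes: callers hold a small task_ref rather than the task itself, and the ref is detached at exit, so only sizeof(struct task_ref) stays pinned and readers must cope with a NULL task. The struct layouts, locking, and function names below are assumptions made for the sketch, not the kernel's actual /proc implementation.

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct task {			/* stand-in for struct task_struct */
	int pid;
};

struct task_ref {		/* stand-in for the commit's struct task_ref */
	pthread_mutex_t lock;
	struct task *task;	/* NULL once the task has exited */
};

/* Resolve the ref at use time; returns NULL if the task is gone.
 * Real concurrent code would take a reference count under the lock
 * instead of returning the raw pointer. */
static struct task *ref_get_task(struct task_ref *ref)
{
	struct task *t;

	pthread_mutex_lock(&ref->lock);
	t = ref->task;
	pthread_mutex_unlock(&ref->lock);
	return t;
}

/* Called at task exit: detach so the task's memory can be freed
 * even while /proc file descriptors referencing it stay open. */
static void ref_detach(struct task_ref *ref)
{
	pthread_mutex_lock(&ref->lock);
	ref->task = NULL;
	pthread_mutex_unlock(&ref->lock);
}

int main(void)
{
	struct task *t = malloc(sizeof(*t));
	struct task_ref ref = { PTHREAD_MUTEX_INITIALIZER, t };

	t->pid = 42;
	printf("before exit: task %s\n",
	       ref_get_task(&ref) ? "reachable via ref" : "gone");

	ref_detach(&ref);	/* the task exits; only the small ref remains */
	free(t);

	printf("after exit:  task %s\n",
	       ref_get_task(&ref) ? "reachable via ref" : "gone");
	return 0;
}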
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--	mm/mempolicy.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 73e0f23b7f51..6b9740bbf4c0 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1821,7 +1821,7 @@ static inline void check_huge_range(struct vm_area_struct *vma,
 
 int show_numa_map(struct seq_file *m, void *v)
 {
-	struct task_struct *task = m->private;
+	struct proc_maps_private *priv = m->private;
 	struct vm_area_struct *vma = v;
 	struct numa_maps *md;
 	struct file *file = vma->vm_file;
@@ -1837,7 +1837,7 @@ int show_numa_map(struct seq_file *m, void *v)
 		return 0;
 
 	mpol_to_str(buffer, sizeof(buffer),
-			get_vma_policy(task, vma, vma->vm_start));
+			get_vma_policy(priv->task, vma, vma->vm_start));
 
 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
 
@@ -1891,7 +1891,7 @@ out:
 	kfree(md);
 
 	if (m->count < m->size)
-		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
+		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
 	return 0;
 }
 
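The hunks above dereference priv->task and priv->tail_vma, so m->private must now point at a proc_maps_private carrying at least those two members. A sketch of the shape those accesses imply; the field names come straight from the diff, while the ordering and any other members are assumptions, since the real definition lives in the /proc code:

/* Shape implied by the hunks above; illustrative only. */
struct proc_maps_private {
	struct task_struct *task;		/* resolved from the task_ref when read */
	struct vm_area_struct *tail_vma;	/* cached tail vma for the seq_file walk */
};

Caching the tail vma in priv is what lets the m->version check replace the get_gate_vma(task) call, presumably so this path no longer has to touch a task that may already have exited.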