diff options
author | Oleg Nesterov <oleg@redhat.com> | 2014-10-09 18:25:34 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-09 22:25:49 -0400 |
commit | ebb6cdde1a50c3cd2a0a4668dfb571ecb3213449 (patch) | |
tree | 766d8c75bf558af33354ffb5b304aa70db7dc7b5 /fs | |
parent | 0d5f5f45f9a4f1f6b694c37f5142ebea893f0a15 (diff) |
fs/proc/task_mmu.c: kill the suboptimal and confusing m->version logic
m_start() carefully documents, checks, and sets "m->version = -1" if
we are going to return NULL. The only problem is that we will never be
called again if m_start() returns NULL, so this is simply pointless
and misleading.
Otoh, the ->show() methods set m->version = 0 if vma == tail_vma, and
this is just wrong; we want -1 in this case. And in fact we also want
-1 if ->vm_next == NULL and ->tail_vma == NULL.
And it is not used consistently, the "scan vmas" loop in m_start()
should update last_addr too.
Finally, imo the whole "last_addr" logic in m_start() looks horrible.
find_vma(last_addr) is called unconditionally even if we are not going
to use the result. But the main problem is that this code participates
in tail_vma-or-NULL mess, and this looks simply unfixable.
Remove this optimization. We will add it back after some cleanups.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/proc/task_mmu.c | 35 |
1 files changed, 1 insertions, 34 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 30aa2dd3e6f5..e182fc51ec2b 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -141,20 +141,10 @@ static void vma_stop(struct proc_maps_private *priv) | |||
141 | static void *m_start(struct seq_file *m, loff_t *pos) | 141 | static void *m_start(struct seq_file *m, loff_t *pos) |
142 | { | 142 | { |
143 | struct proc_maps_private *priv = m->private; | 143 | struct proc_maps_private *priv = m->private; |
144 | unsigned long last_addr = m->version; | ||
145 | struct mm_struct *mm; | 144 | struct mm_struct *mm; |
146 | struct vm_area_struct *vma, *tail_vma = NULL; | 145 | struct vm_area_struct *vma, *tail_vma = NULL; |
147 | loff_t l = *pos; | 146 | loff_t l = *pos; |
148 | 147 | ||
149 | /* | ||
150 | * We remember last_addr rather than next_addr to hit with | ||
151 | * vmacache most of the time. We have zero last_addr at | ||
152 | * the beginning and also after lseek. We will have -1 last_addr | ||
153 | * after the end of the vmas. | ||
154 | */ | ||
155 | if (last_addr == -1UL) | ||
156 | return NULL; | ||
157 | |||
158 | priv->task = get_pid_task(priv->pid, PIDTYPE_PID); | 148 | priv->task = get_pid_task(priv->pid, PIDTYPE_PID); |
159 | if (!priv->task) | 149 | if (!priv->task) |
160 | return ERR_PTR(-ESRCH); | 150 | return ERR_PTR(-ESRCH); |
@@ -167,12 +157,6 @@ static void *m_start(struct seq_file *m, loff_t *pos) | |||
167 | tail_vma = get_gate_vma(mm); | 157 | tail_vma = get_gate_vma(mm); |
168 | priv->tail_vma = tail_vma; | 158 | priv->tail_vma = tail_vma; |
169 | hold_task_mempolicy(priv); | 159 | hold_task_mempolicy(priv); |
170 | /* Start with last addr hint */ | ||
171 | vma = find_vma(mm, last_addr); | ||
172 | if (last_addr && vma) { | ||
173 | vma = vma->vm_next; | ||
174 | goto out; | ||
175 | } | ||
176 | 160 | ||
177 | /* | 161 | /* |
178 | * Check the vma index is within the range and do | 162 | * Check the vma index is within the range and do |
@@ -193,8 +177,6 @@ out: | |||
193 | if (vma) | 177 | if (vma) |
194 | return vma; | 178 | return vma; |
195 | 179 | ||
196 | /* End of vmas has been reached */ | ||
197 | m->version = (tail_vma != NULL)? 0: -1UL; | ||
198 | if (tail_vma) | 180 | if (tail_vma) |
199 | return tail_vma; | 181 | return tail_vma; |
200 | 182 | ||
@@ -366,14 +348,7 @@ done: | |||
366 | 348 | ||
367 | static int show_map(struct seq_file *m, void *v, int is_pid) | 349 | static int show_map(struct seq_file *m, void *v, int is_pid) |
368 | { | 350 | { |
369 | struct vm_area_struct *vma = v; | 351 | show_map_vma(m, v, is_pid); |
370 | struct proc_maps_private *priv = m->private; | ||
371 | |||
372 | show_map_vma(m, vma, is_pid); | ||
373 | |||
374 | if (m->count < m->size) /* vma is copied successfully */ | ||
375 | m->version = (vma != priv->tail_vma) | ||
376 | ? vma->vm_start : 0; | ||
377 | return 0; | 352 | return 0; |
378 | } | 353 | } |
379 | 354 | ||
@@ -599,7 +574,6 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) | |||
599 | 574 | ||
600 | static int show_smap(struct seq_file *m, void *v, int is_pid) | 575 | static int show_smap(struct seq_file *m, void *v, int is_pid) |
601 | { | 576 | { |
602 | struct proc_maps_private *priv = m->private; | ||
603 | struct vm_area_struct *vma = v; | 577 | struct vm_area_struct *vma = v; |
604 | struct mem_size_stats mss; | 578 | struct mem_size_stats mss; |
605 | struct mm_walk smaps_walk = { | 579 | struct mm_walk smaps_walk = { |
@@ -652,10 +626,6 @@ static int show_smap(struct seq_file *m, void *v, int is_pid) | |||
652 | mss.nonlinear >> 10); | 626 | mss.nonlinear >> 10); |
653 | 627 | ||
654 | show_smap_vma_flags(m, vma); | 628 | show_smap_vma_flags(m, vma); |
655 | |||
656 | if (m->count < m->size) /* vma is copied successfully */ | ||
657 | m->version = (vma != priv->tail_vma) | ||
658 | ? vma->vm_start : 0; | ||
659 | return 0; | 629 | return 0; |
660 | } | 630 | } |
661 | 631 | ||
@@ -1510,9 +1480,6 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) | |||
1510 | seq_printf(m, " N%d=%lu", nid, md->node[nid]); | 1480 | seq_printf(m, " N%d=%lu", nid, md->node[nid]); |
1511 | out: | 1481 | out: |
1512 | seq_putc(m, '\n'); | 1482 | seq_putc(m, '\n'); |
1513 | |||
1514 | if (m->count < m->size) | ||
1515 | m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0; | ||
1516 | return 0; | 1483 | return 0; |
1517 | } | 1484 | } |
1518 | 1485 | ||