author	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>	2009-09-22 19:45:41 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-23 10:39:41 -0400
commit	2ef43ec772551e975a6ea7cf22b59c84955aadf9 (patch)
tree	9560c13bcc6deb3ee00a60f93730b248029e69bb /fs
parent	d899bf7b55f503ba7d3d07ed27c3a37e270fa7db (diff)
kcore: use usual list for kclist
This patchset is for /proc/kcore. With this,
 - many per-arch hooks are removed.
 - /proc/kcore will know the really valid physical memory areas.
 - /proc/kcore will be aware of memory hotplug.
 - /proc/kcore will be architecture independent, i.e. if an arch supports
   CONFIG_MMU, it can use /proc/kcore (if the arch uses the usual memory
   layout).

This patch:

/proc/kcore uses its own list handling code. It's better to use the generic
list code. No changes in logic; just a cleanup.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: WANG Cong <xiyou.wangcong@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
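The conversion relies on the kernel's intrusive list idiom from <linux/list.h>: a struct list_head is embedded in each struct kcore_list, entries are appended with list_add_tail(), and walks use list_for_each_entry(). The userspace sketch below mimics that pattern to show how the embedded node and the container lookup fit together; the miniature list type and struct kcore_entry are illustrative stand-ins, not the kernel's implementation.

/*
 * Minimal userspace sketch of the intrusive doubly linked list pattern
 * this patch switches /proc/kcore to.  The tiny list type below only
 * mimics the shape of the kernel's <linux/list.h> API; it is not the
 * kernel implementation, and struct kcore_entry is a made-up stand-in
 * for struct kcore_list.
 */
#include <stddef.h>
#include <stdio.h>

struct list_node {
	struct list_node *prev, *next;
};

/* A circular list: an empty list's head points at itself. */
static void list_init(struct list_node *head)
{
	head->prev = head->next = head;
}

/* Insert 'node' just before 'head', i.e. at the tail of the list. */
static void list_add_tail(struct list_node *node, struct list_node *head)
{
	node->prev = head->prev;
	node->next = head;
	head->prev->next = node;
	head->prev = node;
}

/* Recover the containing structure from the embedded node. */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in for struct kcore_list: a memory area plus an embedded node. */
struct kcore_entry {
	unsigned long addr;
	size_t size;
	struct list_node list;	/* embedded, like kcore_list.list */
};

int main(void)
{
	struct list_node head;
	struct kcore_entry a = { .addr = 0x1000, .size = 0x100 };
	struct kcore_entry b = { .addr = 0x2000, .size = 0x200 };
	struct list_node *pos;

	list_init(&head);

	/* Mirrors kclist_add(): append each area to the global list. */
	list_add_tail(&a.list, &head);
	list_add_tail(&b.list, &head);

	/*
	 * Mirrors the list_for_each_entry() walks in get_kcore_size()
	 * and read_kcore(): visit every registered area in order.
	 */
	for (pos = head.next; pos != &head; pos = pos->next) {
		struct kcore_entry *m =
			list_entry(pos, struct kcore_entry, list);
		printf("area at 0x%lx, size %zu\n", m->addr, m->size);
	}
	return 0;
}

Embedding the node in the object (rather than allocating separate link cells) is what lets the generic helpers replace the hand-rolled "new->next = kclist; kclist = new;" chaining with no change to the data the list carries.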
Diffstat (limited to 'fs')
-rw-r--r--	fs/proc/kcore.c	12
1 file changed, 6 insertions, 6 deletions
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 0cf8a24cf6c3..f9327e51ce99 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
+#include <linux/list.h>
 
 #define CORE_STR "CORE"
 
@@ -57,7 +58,7 @@ struct memelfnote
 	void *data;
 };
 
-static struct kcore_list *kclist;
+static LIST_HEAD(kclist_head);
 static DEFINE_RWLOCK(kclist_lock);
 
 void
@@ -67,8 +68,7 @@ kclist_add(struct kcore_list *new, void *addr, size_t size)
 	new->size = size;
 
 	write_lock(&kclist_lock);
-	new->next = kclist;
-	kclist = new;
+	list_add_tail(&new->list, &kclist_head);
 	write_unlock(&kclist_lock);
 }
 
@@ -80,7 +80,7 @@ static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
 	*nphdr = 1; /* PT_NOTE */
 	size = 0;
 
-	for (m=kclist; m; m=m->next) {
+	list_for_each_entry(m, &kclist_head, list) {
 		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
 		if (try > size)
 			size = try;
@@ -192,7 +192,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
 	nhdr->p_align	= 0;
 
 	/* setup ELF PT_LOAD program header for every area */
-	for (m=kclist; m; m=m->next) {
+	list_for_each_entry(m, &kclist_head, list) {
 		phdr = (struct elf_phdr *) bufp;
 		bufp += sizeof(struct elf_phdr);
 		offset += sizeof(struct elf_phdr);
@@ -317,7 +317,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 	struct kcore_list *m;
 
 	read_lock(&kclist_lock);
-	for (m=kclist; m; m=m->next) {
+	list_for_each_entry(m, &kclist_head, list) {
 		if (start >= m->addr && start < (m->addr+m->size))
 			break;
 	}