path: root/fs/proc/kcore.c
Diffstat (limited to 'fs/proc/kcore.c')
-rw-r--r--  fs/proc/kcore.c  179
1 file changed, 167 insertions, 12 deletions
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index fdde1cc78392..802de33d6341 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -17,10 +17,14 @@
 #include <linux/elfcore.h>
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
+#include <linux/bootmem.h>
 #include <linux/init.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <linux/list.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/memory.h>
 #include <asm/sections.h>
 
 #define CORE_STR "CORE"
@@ -31,17 +35,6 @@
 
 static struct proc_dir_entry *proc_root_kcore;
 
-static int open_kcore(struct inode * inode, struct file * filp)
-{
-	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
-}
-
-static ssize_t read_kcore(struct file *, char __user *, size_t, loff_t *);
-
-static const struct file_operations proc_kcore_operations = {
-	.read = read_kcore,
-	.open = open_kcore,
-};
 
 #ifndef kc_vaddr_to_offset
 #define kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
@@ -61,6 +54,7 @@ struct memelfnote
 
 static LIST_HEAD(kclist_head);
 static DEFINE_RWLOCK(kclist_lock);
+static int kcore_need_update = 1;
 
 void
 kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
@@ -99,6 +93,126 @@ static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
 	return size + *elf_buflen;
 }
 
+static void free_kclist_ents(struct list_head *head)
+{
+	struct kcore_list *tmp, *pos;
+
+	list_for_each_entry_safe(pos, tmp, head, list) {
+		list_del(&pos->list);
+		kfree(pos);
+	}
+}
+/*
+ * Replace all KCORE_RAM information with passed list.
+ */
+static void __kcore_update_ram(struct list_head *list)
+{
+	struct kcore_list *tmp, *pos;
+	LIST_HEAD(garbage);
+
+	write_lock(&kclist_lock);
+	if (kcore_need_update) {
+		list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
+			if (pos->type == KCORE_RAM)
+				list_move(&pos->list, &garbage);
+		}
+		list_splice_tail(list, &kclist_head);
+	} else
+		list_splice(list, &garbage);
+	kcore_need_update = 0;
+	write_unlock(&kclist_lock);
+
+	free_kclist_ents(&garbage);
+}
+
+
+#ifdef CONFIG_HIGHMEM
+/*
+ * If no highmem, we can assume [0...max_low_pfn) continuous range of memory
+ * because memory hole is not as big as !HIGHMEM case.
+ * (HIGHMEM is special because part of memory is _invisible_ from the kernel.)
+ */
+static int kcore_update_ram(void)
+{
+	LIST_HEAD(head);
+	struct kcore_list *ent;
+	int ret = 0;
+
+	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
+	if (!ent)
+		return -ENOMEM;
+	ent->addr = (unsigned long)__va(0);
+	ent->size = max_low_pfn << PAGE_SHIFT;
+	ent->type = KCORE_RAM;
+	list_add(&ent->list, &head);
+	__kcore_update_ram(&head);
+	return ret;
+}
+
+#else /* !CONFIG_HIGHMEM */
+
+static int
+kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
+{
+	struct list_head *head = (struct list_head *)arg;
+	struct kcore_list *ent;
+
+	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
+	if (!ent)
+		return -ENOMEM;
+	ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT));
+	ent->size = nr_pages << PAGE_SHIFT;
+
+	/* Sanity check: Can happen in 32bit arch...maybe */
+	if (ent->addr < (unsigned long) __va(0))
+		goto free_out;
+
+	/* cut not-mapped area. ....from ppc-32 code. */
+	if (ULONG_MAX - ent->addr < ent->size)
+		ent->size = ULONG_MAX - ent->addr;
+
+	/* cut when vmalloc() area is higher than direct-map area */
+	if (VMALLOC_START > (unsigned long)__va(0)) {
+		if (ent->addr > VMALLOC_START)
+			goto free_out;
+		if (VMALLOC_START - ent->addr < ent->size)
+			ent->size = VMALLOC_START - ent->addr;
+	}
+
+	ent->type = KCORE_RAM;
+	list_add_tail(&ent->list, head);
+	return 0;
+free_out:
+	kfree(ent);
+	return 1;
+}
+
+static int kcore_update_ram(void)
+{
+	int nid, ret;
+	unsigned long end_pfn;
+	LIST_HEAD(head);
+
+	/* Not initialized....update now */
+	/* find out "max pfn" */
+	end_pfn = 0;
+	for_each_node_state(nid, N_HIGH_MEMORY) {
+		unsigned long node_end;
+		node_end = NODE_DATA(nid)->node_start_pfn +
+			NODE_DATA(nid)->node_spanned_pages;
+		if (end_pfn < node_end)
+			end_pfn = node_end;
+	}
+	/* scan 0 to max_pfn */
+	ret = walk_system_ram_range(0, end_pfn, &head, kclist_add_private);
+	if (ret) {
+		free_kclist_ents(&head);
+		return -ENOMEM;
+	}
+	__kcore_update_ram(&head);
+	return ret;
+}
+#endif /* CONFIG_HIGHMEM */
 
 /*****************************************************************************/
 /*
@@ -373,6 +487,39 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 	return acc;
 }
 
+
+static int open_kcore(struct inode *inode, struct file *filp)
+{
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+	if (kcore_need_update)
+		kcore_update_ram();
+	return 0;
+}
+
+
+static const struct file_operations proc_kcore_operations = {
+	.read = read_kcore,
+	.open = open_kcore,
+};
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+/* just remember that we have to update kcore */
+static int __meminit kcore_callback(struct notifier_block *self,
+				    unsigned long action, void *arg)
+{
+	switch (action) {
+	case MEM_ONLINE:
+	case MEM_OFFLINE:
+		write_lock(&kclist_lock);
+		kcore_need_update = 1;
+		write_unlock(&kclist_lock);
+	}
+	return NOTIFY_OK;
+}
+#endif
+
+
 static struct kcore_list kcore_vmalloc;
 
 #ifdef CONFIG_ARCH_PROC_KCORE_TEXT
@@ -393,10 +540,18 @@ static void __init proc_kcore_text_init(void)
 
 static int __init proc_kcore_init(void)
 {
-	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &proc_kcore_operations);
+	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
+				      &proc_kcore_operations);
+	/* Store text area if it's special */
 	proc_kcore_text_init();
+	/* Store vmalloc area */
 	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
 		VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
+	/* Store direct-map area from physical memory map */
+	kcore_update_ram();
+	hotplug_memory_notifier(kcore_callback, 0);
+	/* Other special area, area-for-module etc is arch specific. */
+
 	return 0;
 }
 module_init(proc_kcore_init);