author:    Michael Holzheu <holzheu@linux.vnet.ibm.com>  2013-09-11 17:24:51 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2013-09-11 18:59:10 -0400
commit:    9cb218131de1c59dca9063b2efe876f053f316af
tree:      e01f110a4137e8e2d33bc28f1f77e3a6361c0ee4 /fs/proc/vmcore.c
parent:    97b0f6f9cd73ff8285835c5e295d3c4b0e2dbf78
vmcore: introduce remap_oldmem_pfn_range()
For zfcpdump we can't map the HSA storage because it is only available via a read interface. Therefore, for the new vmcore mmap feature we introduce a new mechanism to create mappings on demand.

This patch introduces a new architecture function remap_oldmem_pfn_range() that should be used to create mappings with remap_pfn_range() for oldmem areas that can be directly mapped. For zfcpdump this is everything except the HSA memory. For the areas that are not mapped by remap_oldmem_pfn_range(), a new generic vmcore fault handler, mmap_vmcore_fault(), is called.

This handler works as follows:

* Get an already available or new page from the page cache (find_or_create_page)
* Check if the /proc/vmcore page is filled with data (PageUptodate)
* If yes: return that page
* If no: fill the page using __read_vmcore(), set PageUptodate, and return the page

Signed-off-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Cc: HATAYAMA Daisuke <d.hatayama@jp.fujitsu.com>
Cc: Jan Willeke <willeke@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
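To illustrate the intended use of the new hook, here is a minimal sketch of an architecture override. This is not the actual s390 implementation; the helper range_is_directly_mappable() is hypothetical.

/*
 * Sketch of a possible architecture override (illustration only, not
 * the s390 code). For ranges that cannot be mapped directly, return 0
 * without creating a mapping; the first access to such a page then
 * triggers mmap_vmcore_fault(), which fills it via __read_vmcore().
 * range_is_directly_mappable() is a hypothetical helper, not a real
 * kernel API.
 */
int remap_oldmem_pfn_range(struct vm_area_struct *vma,
                           unsigned long from, unsigned long pfn,
                           unsigned long size, pgprot_t prot)
{
        if (!range_is_directly_mappable(pfn, size))
                return 0;       /* leave these pages to the fault handler */
        return remap_pfn_range(vma, from, pfn, size, prot);
}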
Diffstat (limited to 'fs/proc/vmcore.c')
-rw-r--r--  fs/proc/vmcore.c | 91
1 file changed, 83 insertions(+), 8 deletions(-)
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 02cb3ff108bc..d07b70a6eed5 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -21,6 +21,7 @@
 #include <linux/crash_dump.h>
 #include <linux/list.h>
 #include <linux/vmalloc.h>
+#include <linux/pagemap.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include "internal.h"
@@ -153,11 +154,35 @@ ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
         return read_from_oldmem(buf, count, ppos, 0);
 }
 
+/*
+ * Architectures may override this function to map oldmem
+ */
+int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
+                                  unsigned long from, unsigned long pfn,
+                                  unsigned long size, pgprot_t prot)
+{
+        return remap_pfn_range(vma, from, pfn, size, prot);
+}
+
+/*
+ * Copy to either kernel or user space
+ */
+static int copy_to(void *target, void *src, size_t size, int userbuf)
+{
+        if (userbuf) {
+                if (copy_to_user((char __user *) target, src, size))
+                        return -EFAULT;
+        } else {
+                memcpy(target, src, size);
+        }
+        return 0;
+}
+
 /* Read from the ELF header and then the crash dump. On error, negative value is
  * returned otherwise number of bytes read are returned.
  */
-static ssize_t read_vmcore(struct file *file, char __user *buffer,
-                           size_t buflen, loff_t *fpos)
+static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
+                             int userbuf)
 {
         ssize_t acc = 0, tmp;
         size_t tsz;
@@ -174,7 +199,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
         /* Read ELF core header */
         if (*fpos < elfcorebuf_sz) {
                 tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
-                if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
+                if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
                         return -EFAULT;
                 buflen -= tsz;
                 *fpos += tsz;
@@ -192,7 +217,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
 
                 tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
                 kaddr = elfnotes_buf + *fpos - elfcorebuf_sz;
-                if (copy_to_user(buffer, kaddr, tsz))
+                if (copy_to(buffer, kaddr, tsz, userbuf))
                         return -EFAULT;
                 buflen -= tsz;
                 *fpos += tsz;
@@ -208,7 +233,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
                 if (*fpos < m->offset + m->size) {
                         tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
                         start = m->paddr + *fpos - m->offset;
-                        tmp = read_from_oldmem(buffer, tsz, &start, 1);
+                        tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
                         if (tmp < 0)
                                 return tmp;
                         buflen -= tsz;
@@ -225,6 +250,55 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
         return acc;
 }
 
+static ssize_t read_vmcore(struct file *file, char __user *buffer,
+                           size_t buflen, loff_t *fpos)
+{
+        return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
+}
+
+/*
+ * The vmcore fault handler uses the page cache and fills data using the
+ * standard __read_vmcore() function.
+ *
+ * On s390 the fault handler is used for memory regions that can't be mapped
+ * directly with remap_pfn_range().
+ */
+static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+#ifdef CONFIG_S390
+        struct address_space *mapping = vma->vm_file->f_mapping;
+        pgoff_t index = vmf->pgoff;
+        struct page *page;
+        loff_t offset;
+        char *buf;
+        int rc;
+
+        page = find_or_create_page(mapping, index, GFP_KERNEL);
+        if (!page)
+                return VM_FAULT_OOM;
+        if (!PageUptodate(page)) {
+                offset = (loff_t) index << PAGE_CACHE_SHIFT;
+                buf = __va((page_to_pfn(page) << PAGE_SHIFT));
+                rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
+                if (rc < 0) {
+                        unlock_page(page);
+                        page_cache_release(page);
+                        return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+                }
+                SetPageUptodate(page);
+        }
+        unlock_page(page);
+        vmf->page = page;
+        return 0;
+#else
+        return VM_FAULT_SIGBUS;
+#endif
+}
+
+static const struct vm_operations_struct vmcore_mmap_ops = {
+        .fault = mmap_vmcore_fault,
+};
+
 /**
  * alloc_elfnotes_buf - allocate buffer for ELF note segment in
  * vmalloc memory
@@ -271,6 +345,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 
         vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
         vma->vm_flags |= VM_MIXEDMAP;
+        vma->vm_ops = &vmcore_mmap_ops;
 
         len = 0;
 
@@ -312,9 +387,9 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 
                 tsz = min_t(size_t, m->offset + m->size - start, size);
                 paddr = m->paddr + start - m->offset;
-                if (remap_pfn_range(vma, vma->vm_start + len,
-                                    paddr >> PAGE_SHIFT, tsz,
-                                    vma->vm_page_prot))
+                if (remap_oldmem_pfn_range(vma, vma->vm_start + len,
+                                           paddr >> PAGE_SHIFT, tsz,
+                                           vma->vm_page_prot))
                         goto fail;
                 size -= tsz;
                 start += tsz;
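
For context, a crash-dump consumer in userspace would exercise the new mmap path roughly as follows; a minimal sketch, assuming it runs in a crash kernel where /proc/vmcore is present:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long ps = sysconf(_SC_PAGESIZE);
        int fd = open("/proc/vmcore", O_RDONLY);

        if (fd < 0) {
                perror("open /proc/vmcore");
                return 1;
        }
        /* Map the first page of the dump read-only. On zfcpdump, pages
         * backed by HSA memory are filled in on first access by
         * mmap_vmcore_fault() rather than being mapped up front. */
        char *p = mmap(NULL, ps, PROT_READ, MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                close(fd);
                return 1;
        }
        if (memcmp(p, "\177ELF", 4) == 0)
                puts("found ELF core header");
        munmap(p, ps);
        close(fd);
        return 0;
}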