author      Michael Holzheu <holzheu@linux.vnet.ibm.com>      2013-07-18 06:18:27 -0400
committer   Martin Schwidefsky <schwidefsky@de.ibm.com>       2013-07-18 07:40:22 -0400
commit      191a2fa0a8d2bbb64c98f9b1976fcb37ee5eae6b
tree        8e2e772016315712f851aefce40af29f40b6980f /arch/s390/kernel
parent      5a74953ff56aa870d6913ef4d81934f5c620c59d
s390/kdump: Allow copy_oldmem_page() copy to virtual memory
The kdump mmap patch series (git commit 83086978c63afd7c73e1c) changed the
requirements for copy_oldmem_page(): the function is now also used for
copying to virtual memory. So implement vmalloc support for the s390
version of copy_oldmem_page().
Signed-off-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
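
The reason copy_oldmem_page() cannot simply hand a vmalloc buffer to
memcpy_real() is that a vmalloc area is contiguous only in virtual memory:
the two pages a copy may straddle can sit at unrelated physical addresses.
copy_page_real() therefore splits the copy at the page boundary and
translates each part to its real address separately. Below is a minimal
userspace sketch of just that split arithmetic, with illustrative names
(PAGE_SIZE, split_at_page_boundary), not kernel API:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

/* Round addr up to the next multiple of PAGE_SIZE (like the kernel's roundup()). */
static unsigned long page_roundup(unsigned long addr)
{
        return (addr + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

/*
 * The split used by copy_page_real(): the first chunk runs from buf to the
 * next page boundary (0 bytes if buf is already page aligned), the second
 * chunk is the remainder.  Each chunk then lies within a single page and
 * can be translated to a real address on its own.
 */
static void split_at_page_boundary(unsigned long buf, size_t csize,
                                   size_t *first, size_t *second)
{
        size_t head = page_roundup(buf) - buf;

        *first = csize < head ? csize : head;   /* min(head, csize) */
        *second = csize - *first;
}

int main(void)
{
        size_t first, second;

        /* A 64-byte copy starting 16 bytes before a page boundary. */
        split_at_page_boundary(0x10000 - 16, 64, &first, &second);
        printf("first=%zu second=%zu\n", first, second);        /* 16 and 48 */
        return 0;
}

This mirrors the size = min(roundup(__pa(buf), PAGE_SIZE) - __pa(buf), csize)
computation in the patch below; the in-kernel version works on physical
addresses via __pa() and falls back to a single memcpy_real() when the
target is not a vmalloc address.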
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/crash_dump.c | 51
1 file changed, 47 insertions(+), 4 deletions(-)
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index f703d91bf720..d8f355657171 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -21,6 +21,48 @@
 #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
 #define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
 
+
+/*
+ * Return physical address for virtual address
+ */
+static inline void *load_real_addr(void *addr)
+{
+        unsigned long real_addr;
+
+        asm volatile(
+                "       lra     %0,0(%1)\n"
+                "       jz      0f\n"
+                "       la      %0,0\n"
+                "0:"
+                : "=a" (real_addr) : "a" (addr) : "cc");
+        return (void *)real_addr;
+}
+
+/*
+ * Copy up to one page to vmalloc or real memory
+ */
+static ssize_t copy_page_real(void *buf, void *src, size_t csize)
+{
+        size_t size;
+
+        if (is_vmalloc_addr(buf)) {
+                BUG_ON(csize >= PAGE_SIZE);
+                /* If buf is not page aligned, copy first part */
+                size = min(roundup(__pa(buf), PAGE_SIZE) - __pa(buf), csize);
+                if (size) {
+                        if (memcpy_real(load_real_addr(buf), src, size))
+                                return -EFAULT;
+                        buf += size;
+                        src += size;
+                }
+                /* Copy second part */
+                size = csize - size;
+                return (size) ? memcpy_real(load_real_addr(buf), src, size) : 0;
+        } else {
+                return memcpy_real(buf, src, csize);
+        }
+}
+
 /*
  * Copy one page from "oldmem"
  *
@@ -32,6 +74,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
                          size_t csize, unsigned long offset, int userbuf)
 {
         unsigned long src;
+        int rc;
 
         if (!csize)
                 return 0;
@@ -43,11 +86,11 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
             src < OLDMEM_BASE + OLDMEM_SIZE)
                 src -= OLDMEM_BASE;
         if (userbuf)
-                copy_to_user_real((void __force __user *) buf, (void *) src,
-                                  csize);
+                rc = copy_to_user_real((void __force __user *) buf,
+                                       (void *) src, csize);
         else
-                memcpy_real(buf, (void *) src, csize);
-        return csize;
+                rc = copy_page_real(buf, (void *) src, csize);
+        return (rc == 0) ? csize : rc;
 }
 
 /*
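
A second effect of the patch, visible in the last hunk, is a firmer return
convention: copy_oldmem_page() now returns csize on success and the negative
value from the failing copy (for example -EFAULT) otherwise, instead of
unconditionally returning csize. The following self-contained sketch shows
how a caller can rely on that convention; the stub and the loop are
hypothetical, not the kernel's actual read-from-oldmem path:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define PAGE_SIZE 4096UL

/* Stub standing in for the patched copy_oldmem_page(): returns the number
 * of bytes copied on success, or a negative errno-style value on failure. */
static ssize_t copy_oldmem_page_stub(unsigned long pfn, char *buf,
                                     size_t csize, unsigned long offset,
                                     int userbuf)
{
        (void)pfn; (void)offset; (void)userbuf;
        memset(buf, 0, csize);          /* pretend to copy from old memory */
        return (ssize_t)csize;
}

/* Caller pattern: copy page by page, propagate the first negative return. */
static ssize_t read_oldmem(unsigned long pfn, char *buf, size_t count)
{
        size_t done = 0;

        while (done < count) {
                size_t csize = count - done < PAGE_SIZE ? count - done : PAGE_SIZE;
                ssize_t rc = copy_oldmem_page_stub(pfn++, buf + done, csize, 0, 0);

                if (rc < 0)
                        return rc;      /* e.g. -EFAULT from memcpy_real() */
                done += (size_t)rc;
        }
        return (ssize_t)done;
}

int main(void)
{
        static char buf[2 * PAGE_SIZE + 100];

        printf("copied %zd bytes\n", read_oldmem(0, buf, sizeof(buf)));
        return 0;
}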