Diffstat (limited to 'arch/s390')

 arch/s390/kvm/gaccess.h   | 62
 arch/s390/kvm/intercept.c | 14
 arch/s390/kvm/interrupt.c | 21
 arch/s390/kvm/kvm-s390.c  |  9
 arch/s390/kvm/sigp.c      |  5
 arch/s390/mm/init.c       | 32

 6 files changed, 60 insertions(+), 83 deletions(-)
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 4e0633c413f3..ed60f3a74a85 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -18,11 +18,11 @@
 #include <asm/uaccess.h>
 
 static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
-                                               u64 guestaddr)
+                                               unsigned long guestaddr)
 {
-        u64 prefix  = vcpu->arch.sie_block->prefix;
-        u64 origin  = vcpu->kvm->arch.guest_origin;
-        u64 memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long prefix  = vcpu->arch.sie_block->prefix;
+        unsigned long origin  = vcpu->kvm->arch.guest_origin;
+        unsigned long memsize = vcpu->kvm->arch.guest_memsize;
 
         if (guestaddr < 2 * PAGE_SIZE)
                 guestaddr += prefix;
@@ -37,7 +37,7 @@ static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
         return (void __user *) guestaddr;
 }
 
-static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u64 *result)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -47,10 +47,10 @@ static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
         if (IS_ERR((void __force *) uptr))
                 return PTR_ERR((void __force *) uptr);
 
-        return get_user(*result, (u64 __user *) uptr);
+        return get_user(*result, (unsigned long __user *) uptr);
 }
 
-static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u32 *result)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -63,7 +63,7 @@ static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
         return get_user(*result, (u32 __user *) uptr);
 }
 
-static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u16 *result)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -76,7 +76,7 @@ static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
         return get_user(*result, (u16 __user *) uptr);
 }
 
-static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u8 *result)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -87,7 +87,7 @@ static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
         return get_user(*result, (u8 __user *) uptr);
 }
 
-static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u64 value)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -100,7 +100,7 @@ static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
         return put_user(value, (u64 __user *) uptr);
 }
 
-static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u32 value)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -113,7 +113,7 @@ static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
         return put_user(value, (u32 __user *) uptr);
 }
 
-static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                 u16 value)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -126,7 +126,7 @@ static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
         return put_user(value, (u16 __user *) uptr);
 }
 
-static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u8 value)
 {
         void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -138,7 +138,8 @@ static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
 }
 
 
-static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
+static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
+                                       unsigned long guestdest,
                                        const void *from, unsigned long n)
 {
         int rc;
@@ -153,12 +154,12 @@ static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
         return 0;
 }
 
-static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest,
+static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
                                 const void *from, unsigned long n)
 {
-        u64 prefix  = vcpu->arch.sie_block->prefix;
-        u64 origin  = vcpu->kvm->arch.guest_origin;
-        u64 memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long prefix  = vcpu->arch.sie_block->prefix;
+        unsigned long origin  = vcpu->kvm->arch.guest_origin;
+        unsigned long memsize = vcpu->kvm->arch.guest_memsize;
 
         if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
                 goto slowpath;
@@ -189,7 +190,8 @@ slowpath:
 }
 
 static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
-                                         u64 guestsrc, unsigned long n)
+                                         unsigned long guestsrc,
+                                         unsigned long n)
 {
         int rc;
         unsigned long i;
@@ -204,11 +206,11 @@ static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
 }
 
 static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
-                                  u64 guestsrc, unsigned long n)
+                                  unsigned long guestsrc, unsigned long n)
 {
-        u64 prefix  = vcpu->arch.sie_block->prefix;
-        u64 origin  = vcpu->kvm->arch.guest_origin;
-        u64 memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long prefix  = vcpu->arch.sie_block->prefix;
+        unsigned long origin  = vcpu->kvm->arch.guest_origin;
+        unsigned long memsize = vcpu->kvm->arch.guest_memsize;
 
         if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
                 goto slowpath;
@@ -238,11 +240,12 @@ slowpath:
         return __copy_from_guest_slow(vcpu, to, guestsrc, n);
 }
 
-static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
+static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
+                                         unsigned long guestdest,
                                          const void *from, unsigned long n)
 {
-        u64 origin  = vcpu->kvm->arch.guest_origin;
-        u64 memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long origin  = vcpu->kvm->arch.guest_origin;
+        unsigned long memsize = vcpu->kvm->arch.guest_memsize;
 
         if (guestdest + n > memsize)
                 return -EFAULT;
@@ -256,10 +259,11 @@ static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
 }
 
 static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
-                                           u64 guestsrc, unsigned long n)
+                                           unsigned long guestsrc,
+                                           unsigned long n)
 {
-        u64 origin  = vcpu->kvm->arch.guest_origin;
-        u64 memsize = vcpu->kvm->arch.guest_memsize;
+        unsigned long origin  = vcpu->kvm->arch.guest_origin;
+        unsigned long memsize = vcpu->kvm->arch.guest_memsize;
 
         if (guestsrc + n > memsize)
                 return -EFAULT;
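Note on gaccess.h: every guest address moves from u64 to unsigned long, the kernel's pointer-sized integer, so the arithmetic against prefix, origin and memsize behaves identically on 31-bit and 64-bit builds. The prefixing rule these helpers apply can be sketched in isolation; this is a simplified illustration, and the second branch is assumed from the context elided at the @@ -37 hunk, not visible above:

/*
 * Sketch of the prefix handling in __guestaddr_to_user(): the first two
 * pages of guest real storage trade places with the two pages at the
 * prefix address. The real helper additionally bounds-checks against
 * guest_memsize and encodes failures as ERR_PTR values.
 */
static unsigned long apply_prefix(unsigned long guestaddr, unsigned long prefix)
{
        if (guestaddr < 2 * PAGE_SIZE)            /* visible in the hunk */
                guestaddr += prefix;
        else if ((guestaddr >= prefix) &&         /* assumed from elided context */
                 (guestaddr < prefix + 2 * PAGE_SIZE))
                guestaddr -= prefix;
        return guestaddr;
}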
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 47a0b642174c..61236102203e 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -20,7 +20,7 @@
 #include "kvm-s390.h"
 #include "gaccess.h"
 
-static int handle_lctg(struct kvm_vcpu *vcpu)
+static int handle_lctlg(struct kvm_vcpu *vcpu)
 {
         int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
         int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
@@ -30,7 +30,7 @@ static int handle_lctg(struct kvm_vcpu *vcpu)
         u64 useraddr;
         int reg, rc;
 
-        vcpu->stat.instruction_lctg++;
+        vcpu->stat.instruction_lctlg++;
         if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
                 return -ENOTSUPP;
 
@@ -38,9 +38,12 @@ static int handle_lctg(struct kvm_vcpu *vcpu)
         if (base2)
                 useraddr += vcpu->arch.guest_gprs[base2];
 
+        if (useraddr & 7)
+                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
         reg = reg1;
 
-        VCPU_EVENT(vcpu, 5, "lctg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
+        VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
                    disp2);
 
         do {
@@ -74,6 +77,9 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
         if (base2)
                 useraddr += vcpu->arch.guest_gprs[base2];
 
+        if (useraddr & 3)
+                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
         VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
                    disp2);
 
@@ -99,7 +105,7 @@ static intercept_handler_t instruction_handlers[256] = {
         [0xae] = kvm_s390_handle_sigp,
         [0xb2] = kvm_s390_handle_priv,
         [0xb7] = handle_lctl,
-        [0xeb] = handle_lctg,
+        [0xeb] = handle_lctlg,
 };
 
 static int handle_noop(struct kvm_vcpu *vcpu)
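Note on intercept.c: the handler is renamed from handle_lctg to handle_lctlg because the instruction it emulates is LCTLG (the 0xeb-prefixed opcode whose low byte is 0x2f), and the stat counter and VCPU_EVENT string follow. Both handlers also gain the architecturally required operand-alignment check: LCTL takes a word-aligned second operand, LCTLG a doubleword-aligned one, and a misaligned operand must raise a specification exception instead of being silently read. A shared helper could capture the rule; this one is hypothetical, the patch open-codes the check in each handler:

/* Hypothetical common helper; mask is 3 for LCTL, 7 for LCTLG. */
static int check_operand_alignment(struct kvm_vcpu *vcpu, u64 useraddr,
                                   u64 mask)
{
        if (useraddr & mask)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        return 0;
}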
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 11230b0db957..2960702b4824 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -13,6 +13,7 @@
 #include <asm/lowcore.h>
 #include <asm/uaccess.h>
 #include <linux/kvm_host.h>
+#include <linux/signal.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
 
@@ -246,15 +247,10 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
         default:
                 BUG();
         }
-
         if (exception) {
-                VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering"
-                           " interrupt");
-                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-                if (inti->type == KVM_S390_PROGRAM_INT) {
-                        printk(KERN_WARNING "kvm: recursive program check\n");
-                        BUG();
-                }
+                printk("kvm: The guest lowcore is not mapped during interrupt "
+                       "delivery, killing userspace\n");
+                do_exit(SIGKILL);
         }
 }
 
@@ -277,14 +273,11 @@ static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
                            __LC_EXT_NEW_PSW, sizeof(psw_t));
         if (rc == -EFAULT)
                 exception = 1;
-
         if (exception) {
-                VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering"
-                           " ckc interrupt");
-                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-                return 0;
+                printk("kvm: The guest lowcore is not mapped during interrupt "
+                       "delivery, killing userspace\n");
+                do_exit(SIGKILL);
         }
-
         return 1;
 }
 
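Note on interrupt.c: the old error path injected PGM_ADDRESSING when a lowcore write faulted during interrupt delivery, but delivering that program interrupt writes to the same unmapped lowcore, so the recursion could only end in the BUG() on the KVM_S390_PROGRAM_INT path. The patch gives up on the guest instead and kills the offending userspace process with do_exit(SIGKILL), which is why <linux/signal.h> is now included. For context, delivery accumulates faults across all lowcore stores before checking; the following is a reconstruction of the code leading into the visible __try_deliver_ckc_interrupt() hunk, so treat the exact calls as illustrative:

        int rc, exception = 0;

        /* External interrupt code 0x1004: clock comparator. */
        rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
        if (rc == -EFAULT)
                exception = 1;
        /* Save the current PSW, then load the external new PSW. */
        rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                           &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
                exception = 1;
        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                             __LC_EXT_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
                exception = 1;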
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 1782cbcd2829..8b00eb2ddf57 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -39,7 +39,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
         { "exit_instruction", VCPU_STAT(exit_instruction) },
         { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
         { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
-        { "instruction_lctg", VCPU_STAT(instruction_lctg) },
+        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
         { "instruction_lctl", VCPU_STAT(instruction_lctl) },
         { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
         { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
@@ -112,7 +112,12 @@ long kvm_arch_dev_ioctl(struct file *filp,
 
 int kvm_dev_ioctl_check_extension(long ext)
 {
-        return 0;
+        switch (ext) {
+        case KVM_CAP_USER_MEMORY:
+                return 1;
+        default:
+                return 0;
+        }
 }
 
 /* Section: vm related */
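Note on kvm-s390.c: kvm_dev_ioctl_check_extension() previously advertised no capabilities at all; it now reports KVM_CAP_USER_MEMORY, telling userspace that guest memory is configured through the user memory slot API. From the userspace side the capability is probed with the standard KVM_CHECK_EXTENSION ioctl; illustrative usage, not part of the patch:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int kvm = open("/dev/kvm", O_RDWR);
/* Returns 1 on s390 after this patch, 0 before it. */
int has_user_memory = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);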
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 5a556114eaa5..170392687ce0 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -43,7 +43,8 @@
 #define SIGP_STAT_RECEIVER_CHECK 0x00000001UL
 
 
-static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg)
+static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
+                        unsigned long *reg)
 {
         struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
         int rc;
@@ -167,7 +168,7 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
 }
 
 static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
-                             u64 *reg)
+                             unsigned long *reg)
 {
         struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
         struct kvm_s390_local_interrupt *li;
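Note on sigp.c: the status-register pointer passed to __sigp_sense() and __sigp_set_prefix() now matches the type of the guest register array it points into, completing the u64-to-unsigned-long conversion from gaccess.h. The call site looks roughly like this sketch (register decode assumed from the SIGP handler, which is not shown in the hunks):

        /* 'reg' aliases an entry of guest_gprs[], which is an array of
         * unsigned long on s390, hence the parameter type change. */
        int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        unsigned long *reg = &vcpu->arch.guest_gprs[r1];

        rc = __sigp_sense(vcpu, cpu_addr, reg);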
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 388cc7420055..4993b0f594eb 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -42,38 +42,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
 char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
 
-void show_mem(void)
-{
-        unsigned long i, total = 0, reserved = 0;
-        unsigned long shared = 0, cached = 0;
-        unsigned long flags;
-        struct page *page;
-        pg_data_t *pgdat;
-
-        printk("Mem-info:\n");
-        show_free_areas();
-        for_each_online_pgdat(pgdat) {
-                pgdat_resize_lock(pgdat, &flags);
-                for (i = 0; i < pgdat->node_spanned_pages; i++) {
-                        if (!pfn_valid(pgdat->node_start_pfn + i))
-                                continue;
-                        page = pfn_to_page(pgdat->node_start_pfn + i);
-                        total++;
-                        if (PageReserved(page))
-                                reserved++;
-                        else if (PageSwapCache(page))
-                                cached++;
-                        else if (page_count(page))
-                                shared += page_count(page) - 1;
-                }
-                pgdat_resize_unlock(pgdat, &flags);
-        }
-        printk("%ld pages of RAM\n", total);
-        printk("%ld reserved pages\n", reserved);
-        printk("%ld pages shared\n", shared);
-        printk("%ld pages swap cached\n", cached);
-}
-
 /*
  * paging_init() sets up the page tables
  */