about summary refs log tree commit diff stats
path: root/arch/s390/kvm/priv.c
diff options
context:
space:
mode:
author	Heiko Carstens <heiko.carstens@de.ibm.com>	2013-03-25 12:22:53 -0400
committer	Gleb Natapov <gleb@redhat.com>	2013-04-02 09:14:39 -0400
commit	db4a29cb6ac7b2fda505923bdbc58fc35a719f62 (patch)
tree	fdd5a066cf77e511e6d28d79e525a6a23638487e /arch/s390/kvm/priv.c
parent	3736b874a39a1df2a94186c357aabeb6a7d7d4f6 (diff)
KVM: s390: fix and enforce return code handling for irq injections
kvm_s390_inject_program_int() and friends may fail if no memory is available. This must be reported to the calling functions, so that this gets passed down to user space which should fix the situation. Alternatively we end up with guest state corruption. So fix this and enforce return value checking by adding a __must_check annotation to all of these function prototypes. Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com> Acked-by: Cornelia Huck <cornelia.huck@de.ibm.com> Signed-off-by: Cornelia Huck <cornelia.huck@de.ibm.com> Signed-off-by: Gleb Natapov <gleb@redhat.com>
Diffstat (limited to 'arch/s390/kvm/priv.c')
-rw-r--r--arch/s390/kvm/priv.c83
1 files changed, 26 insertions, 57 deletions
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 05d186c21eca..23a8370b1045 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -36,31 +36,24 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
36 operand2 = kvm_s390_get_base_disp_s(vcpu); 36 operand2 = kvm_s390_get_base_disp_s(vcpu);
37 37
38 /* must be word boundary */ 38 /* must be word boundary */
39 if (operand2 & 3) { 39 if (operand2 & 3)
40 kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 40 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
41 goto out;
42 }
43 41
44 /* get the value */ 42 /* get the value */
45 if (get_guest(vcpu, address, (u32 __user *) operand2)) { 43 if (get_guest(vcpu, address, (u32 __user *) operand2))
46 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 44 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
47 goto out;
48 }
49 45
50 address = address & 0x7fffe000u; 46 address = address & 0x7fffe000u;
51 47
52 /* make sure that the new value is valid memory */ 48 /* make sure that the new value is valid memory */
53 if (copy_from_guest_absolute(vcpu, &tmp, address, 1) || 49 if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
54 (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) { 50 (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)))
55 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 51 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
56 goto out;
57 }
58 52
59 kvm_s390_set_prefix(vcpu, address); 53 kvm_s390_set_prefix(vcpu, address);
60 54
61 VCPU_EVENT(vcpu, 5, "setting prefix to %x", address); 55 VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
62 trace_kvm_s390_handle_prefix(vcpu, 1, address); 56 trace_kvm_s390_handle_prefix(vcpu, 1, address);
63out:
64 return 0; 57 return 0;
65} 58}
66 59
@@ -74,49 +67,37 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
74 operand2 = kvm_s390_get_base_disp_s(vcpu); 67 operand2 = kvm_s390_get_base_disp_s(vcpu);
75 68
76 /* must be word boundary */ 69 /* must be word boundary */
77 if (operand2 & 3) { 70 if (operand2 & 3)
78 kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 71 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
79 goto out;
80 }
81 72
82 address = vcpu->arch.sie_block->prefix; 73 address = vcpu->arch.sie_block->prefix;
83 address = address & 0x7fffe000u; 74 address = address & 0x7fffe000u;
84 75
85 /* get the value */ 76 /* get the value */
86 if (put_guest(vcpu, address, (u32 __user *)operand2)) { 77 if (put_guest(vcpu, address, (u32 __user *)operand2))
87 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 78 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
88 goto out;
89 }
90 79
91 VCPU_EVENT(vcpu, 5, "storing prefix to %x", address); 80 VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
92 trace_kvm_s390_handle_prefix(vcpu, 0, address); 81 trace_kvm_s390_handle_prefix(vcpu, 0, address);
93out:
94 return 0; 82 return 0;
95} 83}
96 84
97static int handle_store_cpu_address(struct kvm_vcpu *vcpu) 85static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
98{ 86{
99 u64 useraddr; 87 u64 useraddr;
100 int rc;
101 88
102 vcpu->stat.instruction_stap++; 89 vcpu->stat.instruction_stap++;
103 90
104 useraddr = kvm_s390_get_base_disp_s(vcpu); 91 useraddr = kvm_s390_get_base_disp_s(vcpu);
105 92
106 if (useraddr & 1) { 93 if (useraddr & 1)
107 kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 94 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
108 goto out;
109 }
110 95
111 rc = put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr); 96 if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
112 if (rc) { 97 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
113 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
114 goto out;
115 }
116 98
117 VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr); 99 VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
118 trace_kvm_s390_handle_stap(vcpu, useraddr); 100 trace_kvm_s390_handle_stap(vcpu, useraddr);
119out:
120 return 0; 101 return 0;
121} 102}
122 103
@@ -135,10 +116,8 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
135 int cc; 116 int cc;
136 117
137 addr = kvm_s390_get_base_disp_s(vcpu); 118 addr = kvm_s390_get_base_disp_s(vcpu);
138 if (addr & 3) { 119 if (addr & 3)
139 kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 120 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
140 goto out;
141 }
142 cc = 0; 121 cc = 0;
143 inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0); 122 inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
144 if (!inti) 123 if (!inti)
@@ -167,7 +146,6 @@ no_interrupt:
167 /* Set condition code and we're done. */ 146 /* Set condition code and we're done. */
168 vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); 147 vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
169 vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44; 148 vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
170out:
171 return 0; 149 return 0;
172} 150}
173 151
@@ -237,12 +215,9 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
237 rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list), 215 rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
238 &facility_list, sizeof(facility_list)); 216 &facility_list, sizeof(facility_list));
239 if (rc) 217 if (rc)
240 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 218 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
241 else { 219 VCPU_EVENT(vcpu, 5, "store facility list value %x", facility_list);
242 VCPU_EVENT(vcpu, 5, "store facility list value %x", 220 trace_kvm_s390_handle_stfl(vcpu, facility_list);
243 facility_list);
244 trace_kvm_s390_handle_stfl(vcpu, facility_list);
245 }
246 return 0; 221 return 0;
247} 222}
248 223
@@ -317,25 +292,18 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
317static int handle_stidp(struct kvm_vcpu *vcpu) 292static int handle_stidp(struct kvm_vcpu *vcpu)
318{ 293{
319 u64 operand2; 294 u64 operand2;
320 int rc;
321 295
322 vcpu->stat.instruction_stidp++; 296 vcpu->stat.instruction_stidp++;
323 297
324 operand2 = kvm_s390_get_base_disp_s(vcpu); 298 operand2 = kvm_s390_get_base_disp_s(vcpu);
325 299
326 if (operand2 & 7) { 300 if (operand2 & 7)
327 kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 301 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
328 goto out;
329 }
330 302
331 rc = put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2); 303 if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
332 if (rc) { 304 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
333 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
334 goto out;
335 }
336 305
337 VCPU_EVENT(vcpu, 5, "%s", "store cpu id"); 306 VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
338out:
339 return 0; 307 return 0;
340} 308}
341 309
@@ -377,6 +345,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
377 int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff; 345 int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
378 u64 operand2; 346 u64 operand2;
379 unsigned long mem; 347 unsigned long mem;
348 int rc = 0;
380 349
381 vcpu->stat.instruction_stsi++; 350 vcpu->stat.instruction_stsi++;
382 VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); 351 VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
@@ -412,7 +381,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
412 } 381 }
413 382
414 if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) { 383 if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
415 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 384 rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
416 goto out_mem; 385 goto out_mem;
417 } 386 }
418 trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); 387 trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
@@ -425,7 +394,7 @@ out_mem:
425out_fail: 394out_fail:
426 /* condition code 3 */ 395 /* condition code 3 */
427 vcpu->arch.sie_block->gpsw.mask |= 3ul << 44; 396 vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
428 return 0; 397 return rc;
429} 398}
430 399
431static const intercept_handler_t b2_handlers[256] = { 400static const intercept_handler_t b2_handlers[256] = {