author     Heiko Carstens <heiko.carstens@de.ibm.com>   2012-06-26 10:06:40 -0400
committer  Marcelo Tosatti <mtosatti@redhat.com>        2012-07-03 13:55:36 -0400
commit     ea1918dd3d1a8fcb7ce26816fdf31a50f7d04689 (patch)
tree       e0f66024c68e2f52414efbbcf3058b29032ac64c /arch/s390/kvm/sigp.c
parent     0744426e28490357855aafd2ca76c819231851c5 (diff)
KVM: s390: use sigp condition code defines
Just use the defines instead of using plain numbers and adding a comment
behind each line.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
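For reference, the SIGP_CC_* names used throughout this patch stand for the architected SIGP condition codes 0-3. A minimal sketch of the defines, inferred from the literal values they replace in this diff (the exact header, presumably arch/s390/include/asm/sigp.h, is an assumption and not quoted here):

    /* SIGP condition codes; values inferred from the 0-3 literals replaced below */
    #define SIGP_CC_ORDER_CODE_ACCEPTED     0
    #define SIGP_CC_STATUS_STORED           1
    #define SIGP_CC_BUSY                    2
    #define SIGP_CC_NOT_OPERATIONAL         3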
Diffstat (limited to 'arch/s390/kvm/sigp.c')
-rw-r--r--  arch/s390/kvm/sigp.c  58
1 file changed, 29 insertions(+), 29 deletions(-)
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index ca544d53557e..97c9f36a4533 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -26,19 +26,19 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
         int rc;
 
         if (cpu_addr >= KVM_MAX_VCPUS)
-                return 3; /* not operational */
+                return SIGP_CC_NOT_OPERATIONAL;
 
         spin_lock(&fi->lock);
         if (fi->local_int[cpu_addr] == NULL)
-                rc = 3; /* not operational */
+                rc = SIGP_CC_NOT_OPERATIONAL;
         else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
                   & CPUSTAT_STOPPED)) {
                 *reg &= 0xffffffff00000000UL;
-                rc = 1; /* status stored */
+                rc = SIGP_CC_STATUS_STORED;
         } else {
                 *reg &= 0xffffffff00000000UL;
                 *reg |= SIGP_STATUS_STOPPED;
-                rc = 1; /* status stored */
+                rc = SIGP_CC_STATUS_STORED;
         }
         spin_unlock(&fi->lock);
 
@@ -54,7 +54,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
         int rc;
 
         if (cpu_addr >= KVM_MAX_VCPUS)
-                return 3; /* not operational */
+                return SIGP_CC_NOT_OPERATIONAL;
 
         inti = kzalloc(sizeof(*inti), GFP_KERNEL);
         if (!inti)
@@ -66,7 +66,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
         spin_lock(&fi->lock);
         li = fi->local_int[cpu_addr];
         if (li == NULL) {
-                rc = 3; /* not operational */
+                rc = SIGP_CC_NOT_OPERATIONAL;
                 kfree(inti);
                 goto unlock;
         }
@@ -77,7 +77,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
         if (waitqueue_active(&li->wq))
                 wake_up_interruptible(&li->wq);
         spin_unlock_bh(&li->lock);
-        rc = 0; /* order accepted */
+        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
         VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
 unlock:
         spin_unlock(&fi->lock);
@@ -92,7 +92,7 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
         int rc;
 
         if (cpu_addr >= KVM_MAX_VCPUS)
-                return 3; /* not operational */
+                return SIGP_CC_NOT_OPERATIONAL;
 
         inti = kzalloc(sizeof(*inti), GFP_KERNEL);
         if (!inti)
@@ -104,7 +104,7 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
         spin_lock(&fi->lock);
         li = fi->local_int[cpu_addr];
         if (li == NULL) {
-                rc = 3; /* not operational */
+                rc = SIGP_CC_NOT_OPERATIONAL;
                 kfree(inti);
                 goto unlock;
         }
@@ -115,7 +115,7 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
         if (waitqueue_active(&li->wq))
                 wake_up_interruptible(&li->wq);
         spin_unlock_bh(&li->lock);
-        rc = 0; /* order accepted */
+        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
         VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
 unlock:
         spin_unlock(&fi->lock);
@@ -143,7 +143,7 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
 out:
         spin_unlock_bh(&li->lock);
 
-        return 0; /* order accepted */
+        return SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
 static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
@@ -153,12 +153,12 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
         int rc;
 
         if (cpu_addr >= KVM_MAX_VCPUS)
-                return 3; /* not operational */
+                return SIGP_CC_NOT_OPERATIONAL;
 
         spin_lock(&fi->lock);
         li = fi->local_int[cpu_addr];
         if (li == NULL) {
-                rc = 3; /* not operational */
+                rc = SIGP_CC_NOT_OPERATIONAL;
                 goto unlock;
         }
 
@@ -182,11 +182,11 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
 
         switch (parameter & 0xff) {
         case 0:
-                rc = 3; /* not operational */
+                rc = SIGP_CC_NOT_OPERATIONAL;
                 break;
         case 1:
         case 2:
-                rc = 0; /* order accepted */
+                rc = SIGP_CC_ORDER_CODE_ACCEPTED;
                 break;
         default:
                 rc = -EOPNOTSUPP;
@@ -209,12 +209,12 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
             copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
                 *reg &= 0xffffffff00000000UL;
                 *reg |= SIGP_STATUS_INVALID_PARAMETER;
-                return 1; /* invalid parameter */
+                return SIGP_CC_STATUS_STORED;
         }
 
         inti = kzalloc(sizeof(*inti), GFP_KERNEL);
         if (!inti)
-                return 2; /* busy */
+                return SIGP_CC_BUSY;
 
         spin_lock(&fi->lock);
         if (cpu_addr < KVM_MAX_VCPUS)
@@ -223,7 +223,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
         if (li == NULL) {
                 *reg &= 0xffffffff00000000UL;
                 *reg |= SIGP_STATUS_INCORRECT_STATE;
-                rc = 1; /* incorrect state */
+                rc = SIGP_CC_STATUS_STORED;
                 kfree(inti);
                 goto out_fi;
         }
@@ -233,7 +233,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
         if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
                 *reg &= 0xffffffff00000000UL;
                 *reg |= SIGP_STATUS_INCORRECT_STATE;
-                rc = 1; /* incorrect state */
+                rc = SIGP_CC_STATUS_STORED;
                 kfree(inti);
                 goto out_li;
         }
@@ -245,7 +245,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
         atomic_set(&li->active, 1);
         if (waitqueue_active(&li->wq))
                 wake_up_interruptible(&li->wq);
-        rc = 0; /* order accepted */
+        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 
         VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
 out_li:
@@ -262,21 +262,21 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
         struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 
         if (cpu_addr >= KVM_MAX_VCPUS)
-                return 3; /* not operational */
+                return SIGP_CC_NOT_OPERATIONAL;
 
         spin_lock(&fi->lock);
         if (fi->local_int[cpu_addr] == NULL)
-                rc = 3; /* not operational */
+                rc = SIGP_CC_NOT_OPERATIONAL;
         else {
                 if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
                     & CPUSTAT_RUNNING) {
                         /* running */
-                        rc = 0;
+                        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
                 } else {
                         /* not running */
                         *reg &= 0xffffffff00000000UL;
                         *reg |= SIGP_STATUS_NOT_RUNNING;
-                        rc = 1;
+                        rc = SIGP_CC_STATUS_STORED;
                 }
         }
         spin_unlock(&fi->lock);
@@ -289,23 +289,23 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
 
 static int __sigp_restart(struct kvm_vcpu *vcpu, u16 cpu_addr)
 {
-        int rc = 0;
         struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
         struct kvm_s390_local_interrupt *li;
+        int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 
         if (cpu_addr >= KVM_MAX_VCPUS)
-                return 3; /* not operational */
+                return SIGP_CC_NOT_OPERATIONAL;
 
         spin_lock(&fi->lock);
         li = fi->local_int[cpu_addr];
         if (li == NULL) {
-                rc = 3; /* not operational */
+                rc = SIGP_CC_NOT_OPERATIONAL;
                 goto out;
         }
 
         spin_lock_bh(&li->lock);
         if (li->action_bits & ACTION_STOP_ON_STOP)
-                rc = 2; /* busy */
+                rc = SIGP_CC_BUSY;
         else
                 VCPU_EVENT(vcpu, 4, "sigp restart %x to handle userspace",
                            cpu_addr);
@@ -380,7 +380,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
         case SIGP_RESTART:
                 vcpu->stat.instruction_sigp_restart++;
                 rc = __sigp_restart(vcpu, cpu_addr);
-                if (rc == 2) /* busy */
+                if (rc == SIGP_CC_BUSY)
                         break;
                 /* user space must know about restart */
         default: