author		Peter Zijlstra <peterz@infradead.org>	2015-04-23 19:12:32 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2015-07-27 08:06:24 -0400
commit		805de8f43c20ba8b479bb598b543fa86b20067f6 (patch)
tree		b5f367bde66a4dded45243d435a8ab00cb5f580b /arch
parent		de9e432cb5de1bf2952919dc0b22e4bec0ed8d53 (diff)
atomic: Replace atomic_{set,clear}_mask() usage
Replace the deprecated atomic_{set,clear}_mask() usage with the now ubiquitous atomic_{or,andnot}() functions.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
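The conversion is mechanical. As a rough sketch (illustrative only, not the in-tree definitions, which live in the per-architecture atomic headers), the deprecated helpers reduce to the new bitwise atomics like so:

/* Illustrative mapping, assuming the generic atomic_t definitions:
 * atomic_or() atomically performs v->counter |= mask, and
 * atomic_andnot() atomically performs v->counter &= ~mask, so the
 * old mask helpers are direct aliases of the new operations.
 */
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_or(mask, v);		/* set the bits in mask */
}

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_andnot(mask, v);		/* clear the bits in mask */
}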
Diffstat (limited to 'arch')
-rw-r--r--	arch/blackfin/mach-common/smp.c	2
-rw-r--r--	arch/m32r/kernel/smp.c	4
-rw-r--r--	arch/mn10300/mm/tlb-smp.c	2
-rw-r--r--	arch/s390/kernel/time.c	4
-rw-r--r--	arch/s390/kvm/interrupt.c	30
-rw-r--r--	arch/s390/kvm/kvm-s390.c	32
6 files changed, 37 insertions(+), 37 deletions(-)
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 1c7259597395..0030e21cfceb 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -195,7 +195,7 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
 	local_irq_save(flags);
 	for_each_cpu(cpu, cpumask) {
 		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
-		atomic_set_mask((1 << msg), &bfin_ipi_data->bits);
+		atomic_or((1 << msg), &bfin_ipi_data->bits);
 		atomic_inc(&bfin_ipi_data->count);
 	}
 	local_irq_restore(flags);
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index c18ddc74ef9a..62d6961e7f2b 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -156,7 +156,7 @@ void smp_flush_cache_all(void)
 	cpumask_clear_cpu(smp_processor_id(), &cpumask);
 	spin_lock(&flushcache_lock);
 	mask=cpumask_bits(&cpumask);
-	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
+	atomic_or(*mask, (atomic_t *)&flushcache_cpumask);
 	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
 	_flush_cache_copyback_all();
 	while (flushcache_cpumask)
@@ -407,7 +407,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	flush_vma = vma;
 	flush_va = va;
 	mask=cpumask_bits(&cpumask);
-	atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
+	atomic_or(*mask, (atomic_t *)&flush_cpumask);
 
 	/*
 	 * We have to send the IPI only to
diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c
index e5d0ef722bfa..9a39ea9031d4 100644
--- a/arch/mn10300/mm/tlb-smp.c
+++ b/arch/mn10300/mm/tlb-smp.c
@@ -119,7 +119,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	flush_mm = mm;
 	flush_va = va;
 #if NR_CPUS <= BITS_PER_LONG
-	atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
+	atomic_or(cpumask.bits[0], (atomic_t *)&flush_cpumask.bits[0]);
 #else
 #error Not supported.
 #endif
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 9e733d965e08..f5a0bd778ace 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -381,7 +381,7 @@ static void disable_sync_clock(void *dummy)
 	 * increase the "sequence" counter to avoid the race of an
 	 * etr event and the complete recovery against get_sync_clock.
 	 */
-	atomic_clear_mask(0x80000000, sw_ptr);
+	atomic_andnot(0x80000000, sw_ptr);
 	atomic_inc(sw_ptr);
 }
 
@@ -392,7 +392,7 @@ static void disable_sync_clock(void *dummy)
 static void enable_sync_clock(void)
 {
 	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
-	atomic_set_mask(0x80000000, sw_ptr);
+	atomic_or(0x80000000, sw_ptr);
 }
 
 /*
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index c98d89708e99..57309e9cdd80 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -170,20 +170,20 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
-			  &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
+		      &vcpu->arch.sie_block->cpuflags);
 	vcpu->arch.sie_block->lctl = 0x0000;
 	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
 
@@ -196,7 +196,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 
 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
 {
-	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
 }
 
 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
@@ -919,7 +919,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
 	spin_unlock(&li->lock);
 
 	/* clear pending external calls set by sigp interpretation facility */
-	atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
+	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
 	vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
 }
 
@@ -1020,7 +1020,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 
 	li->irq.ext = irq->u.ext;
 	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1035,7 +1035,7 @@ static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
 		/* another external call is pending */
 		return -EBUSY;
 	}
-	atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
 	return 0;
 }
 
@@ -1061,7 +1061,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
 		return -EBUSY;
 	*extcall = irq->u.extcall;
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1133,7 +1133,7 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
 
 	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
 	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1177,7 +1177,7 @@ static int __inject_ckc(struct kvm_vcpu *vcpu)
 			   0, 0, 2);
 
 	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1190,7 +1190,7 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
 			   0, 0, 2);
 
 	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1369,13 +1369,13 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
 		spin_lock(&li->lock);
 		switch (type) {
 		case KVM_S390_MCHK:
-			atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+			atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
 			break;
 		case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-			atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
+			atomic_or(CPUSTAT_IO_INT, li->cpuflags);
 			break;
 		default:
-			atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+			atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 			break;
 		}
 		spin_unlock(&li->lock);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 2078f92d15ac..b73302fb0507 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1215,12 +1215,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}
 	restore_access_regs(vcpu->run->s.regs.acrs);
 	gmap_enable(vcpu->arch.gmap);
-	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);
 	if (test_kvm_facility(vcpu->kvm, 129)) {
 		save_fp_ctl(&vcpu->run->s.regs.fpc);
@@ -1320,9 +1320,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 						    CPUSTAT_STOPPED);
 
 	if (test_kvm_facility(vcpu->kvm, 78))
-		atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
 	else if (test_kvm_facility(vcpu->kvm, 8))
-		atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
 
 	kvm_s390_vcpu_setup_model(vcpu);
 
@@ -1422,24 +1422,24 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 
 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 	exit_sie(vcpu);
 }
 
 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 }
 
 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
 	exit_sie(vcpu);
 }
 
 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
 }
 
 /*
@@ -1448,7 +1448,7 @@ static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
  * return immediately. */
 void exit_sie(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
 		cpu_relax();
 }
@@ -1672,19 +1672,19 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
 		vcpu->guest_debug = dbg->control;
 		/* enforce guest PER */
-		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 
 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
 			rc = kvm_s390_import_bp_data(vcpu, dbg);
 	} else {
-		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 		vcpu->arch.guestdbg.last_bp = 0;
 	}
 
 	if (rc) {
 		vcpu->guest_debug = 0;
 		kvm_s390_clear_bp_data(vcpu);
-		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 	}
 
 	return rc;
@@ -1771,7 +1771,7 @@ retry:
 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
 		if (!ibs_enabled(vcpu)) {
 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
-			atomic_set_mask(CPUSTAT_IBS,
-					&vcpu->arch.sie_block->cpuflags);
+			atomic_or(CPUSTAT_IBS,
+				  &vcpu->arch.sie_block->cpuflags);
 		}
 		goto retry;
@@ -1780,7 +1780,7 @@ retry:
 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
 		if (ibs_enabled(vcpu)) {
 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
-			atomic_clear_mask(CPUSTAT_IBS,
-					  &vcpu->arch.sie_block->cpuflags);
+			atomic_andnot(CPUSTAT_IBS,
+				      &vcpu->arch.sie_block->cpuflags);
 		}
 		goto retry;
@@ -2280,7 +2280,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
 		__disable_ibs_on_all_vcpus(vcpu->kvm);
 	}
 
-	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	/*
 	 * Another VCPU might have used IBS while we were offline.
 	 * Let's play safe and flush the VCPU at startup.
@@ -2306,7 +2306,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
 	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
 	kvm_s390_clear_stop_irq(vcpu);
 
-	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	__disable_ibs_on_vcpu(vcpu);
 
 	for (i = 0; i < online_vcpus; i++) {