diff options
author | Peter Zijlstra <peterz@infradead.org> | 2015-04-23 19:12:32 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2015-07-27 08:06:24 -0400 |
commit | 805de8f43c20ba8b479bb598b543fa86b20067f6 (patch) | |
tree | b5f367bde66a4dded45243d435a8ab00cb5f580b /arch/s390/kvm/interrupt.c | |
parent | de9e432cb5de1bf2952919dc0b22e4bec0ed8d53 (diff) |
atomic: Replace atomic_{set,clear}_mask() usage
Replace the deprecated atomic_{set,clear}_mask() usage with the now
ubiquitous atomic_{or,andnot}() functions.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/s390/kvm/interrupt.c')
-rw-r--r-- | arch/s390/kvm/interrupt.c | 30 |
1 file changed, 15 insertions, 15 deletions
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index c98d89708e99..57309e9cdd80 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c | |||
@@ -170,20 +170,20 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu) | |||
170 | 170 | ||
171 | static void __set_cpu_idle(struct kvm_vcpu *vcpu) | 171 | static void __set_cpu_idle(struct kvm_vcpu *vcpu) |
172 | { | 172 | { |
173 | atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); | 173 | atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); |
174 | set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); | 174 | set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); |
175 | } | 175 | } |
176 | 176 | ||
177 | static void __unset_cpu_idle(struct kvm_vcpu *vcpu) | 177 | static void __unset_cpu_idle(struct kvm_vcpu *vcpu) |
178 | { | 178 | { |
179 | atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); | 179 | atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); |
180 | clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); | 180 | clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); |
181 | } | 181 | } |
182 | 182 | ||
183 | static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) | 183 | static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) |
184 | { | 184 | { |
185 | atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT, | 185 | atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT, |
186 | &vcpu->arch.sie_block->cpuflags); | 186 | &vcpu->arch.sie_block->cpuflags); |
187 | vcpu->arch.sie_block->lctl = 0x0000; | 187 | vcpu->arch.sie_block->lctl = 0x0000; |
188 | vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT); | 188 | vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT); |
189 | 189 | ||
@@ -196,7 +196,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) | |||
196 | 196 | ||
197 | static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) | 197 | static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) |
198 | { | 198 | { |
199 | atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags); | 199 | atomic_or(flag, &vcpu->arch.sie_block->cpuflags); |
200 | } | 200 | } |
201 | 201 | ||
202 | static void set_intercept_indicators_io(struct kvm_vcpu *vcpu) | 202 | static void set_intercept_indicators_io(struct kvm_vcpu *vcpu) |
@@ -919,7 +919,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) | |||
919 | spin_unlock(&li->lock); | 919 | spin_unlock(&li->lock); |
920 | 920 | ||
921 | /* clear pending external calls set by sigp interpretation facility */ | 921 | /* clear pending external calls set by sigp interpretation facility */ |
922 | atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags); | 922 | atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags); |
923 | vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0; | 923 | vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0; |
924 | } | 924 | } |
925 | 925 | ||
@@ -1020,7 +1020,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) | |||
1020 | 1020 | ||
1021 | li->irq.ext = irq->u.ext; | 1021 | li->irq.ext = irq->u.ext; |
1022 | set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs); | 1022 | set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs); |
1023 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); | 1023 | atomic_or(CPUSTAT_EXT_INT, li->cpuflags); |
1024 | return 0; | 1024 | return 0; |
1025 | } | 1025 | } |
1026 | 1026 | ||
@@ -1035,7 +1035,7 @@ static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id) | |||
1035 | /* another external call is pending */ | 1035 | /* another external call is pending */ |
1036 | return -EBUSY; | 1036 | return -EBUSY; |
1037 | } | 1037 | } |
1038 | atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); | 1038 | atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); |
1039 | return 0; | 1039 | return 0; |
1040 | } | 1040 | } |
1041 | 1041 | ||
@@ -1061,7 +1061,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) | |||
1061 | if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs)) | 1061 | if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs)) |
1062 | return -EBUSY; | 1062 | return -EBUSY; |
1063 | *extcall = irq->u.extcall; | 1063 | *extcall = irq->u.extcall; |
1064 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); | 1064 | atomic_or(CPUSTAT_EXT_INT, li->cpuflags); |
1065 | return 0; | 1065 | return 0; |
1066 | } | 1066 | } |
1067 | 1067 | ||
@@ -1133,7 +1133,7 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, | |||
1133 | 1133 | ||
1134 | set_bit(irq->u.emerg.code, li->sigp_emerg_pending); | 1134 | set_bit(irq->u.emerg.code, li->sigp_emerg_pending); |
1135 | set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); | 1135 | set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); |
1136 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); | 1136 | atomic_or(CPUSTAT_EXT_INT, li->cpuflags); |
1137 | return 0; | 1137 | return 0; |
1138 | } | 1138 | } |
1139 | 1139 | ||
@@ -1177,7 +1177,7 @@ static int __inject_ckc(struct kvm_vcpu *vcpu) | |||
1177 | 0, 0, 2); | 1177 | 0, 0, 2); |
1178 | 1178 | ||
1179 | set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); | 1179 | set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); |
1180 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); | 1180 | atomic_or(CPUSTAT_EXT_INT, li->cpuflags); |
1181 | return 0; | 1181 | return 0; |
1182 | } | 1182 | } |
1183 | 1183 | ||
@@ -1190,7 +1190,7 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu) | |||
1190 | 0, 0, 2); | 1190 | 0, 0, 2); |
1191 | 1191 | ||
1192 | set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); | 1192 | set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); |
1193 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); | 1193 | atomic_or(CPUSTAT_EXT_INT, li->cpuflags); |
1194 | return 0; | 1194 | return 0; |
1195 | } | 1195 | } |
1196 | 1196 | ||
@@ -1369,13 +1369,13 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type) | |||
1369 | spin_lock(&li->lock); | 1369 | spin_lock(&li->lock); |
1370 | switch (type) { | 1370 | switch (type) { |
1371 | case KVM_S390_MCHK: | 1371 | case KVM_S390_MCHK: |
1372 | atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); | 1372 | atomic_or(CPUSTAT_STOP_INT, li->cpuflags); |
1373 | break; | 1373 | break; |
1374 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: | 1374 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: |
1375 | atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags); | 1375 | atomic_or(CPUSTAT_IO_INT, li->cpuflags); |
1376 | break; | 1376 | break; |
1377 | default: | 1377 | default: |
1378 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); | 1378 | atomic_or(CPUSTAT_EXT_INT, li->cpuflags); |
1379 | break; | 1379 | break; |
1380 | } | 1380 | } |
1381 | spin_unlock(&li->lock); | 1381 | spin_unlock(&li->lock); |