author     Linus Torvalds <torvalds@linux-foundation.org>  2015-09-03 18:46:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-09-03 18:46:07 -0400
commit     ca520cab25e0e8da717c596ccaa2c2b3650cfa09 (patch)
tree       883eb497642d98635817f9cf954ac98e043fb573 /arch/s390/kvm/interrupt.c
parent     4c12ab7e5e2e892fa94df500f96001837918a281 (diff)
parent     d420acd816c07c7be31bd19d09cbcb16e5572fa6 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking and atomic updates from Ingo Molnar:
 "Main changes in this cycle are:

  - Extend atomic primitives with coherent logic op primitives
    (atomic_{or,and,xor}()) and deprecate the old partial APIs
    (atomic_{set,clear}_mask()).

    The old ops were incoherent, with incompatible signatures across
    architectures and with incomplete support. Now every architecture
    supports the primitives consistently (by Peter Zijlstra)

  - Generic support for 'relaxed atomics':

      - _acquire/release/relaxed() flavours of xchg(), cmpxchg() and
        {add,sub}_return()
      - atomic_read_acquire()
      - atomic_set_release()

    This came out of porting qrwlock code to arm64 (by Will Deacon)

  - Clean up the fragile static_key APIs that were causing repeat bugs,
    by introducing a new one:

      DEFINE_STATIC_KEY_TRUE(name);
      DEFINE_STATIC_KEY_FALSE(name);

    which define a key of different types with an initial true/false
    value. Then allow:

      static_branch_likely()
      static_branch_unlikely()

    to take a key of either type and emit the right instruction for the
    case. To be able to know the 'type' of the static key, we encode it
    in the jump entry (by Peter Zijlstra)

  - Static key self-tests (by Jason Baron)

  - qrwlock optimizations (by Waiman Long)

  - small futex enhancements (by Davidlohr Bueso)

  - ... and misc other changes"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (63 commits)
  jump_label/x86: Work around asm build bug on older/backported GCCs
  locking, ARM, atomics: Define our SMP atomics in terms of _relaxed() operations
  locking, include/llist: Use linux/atomic.h instead of asm/cmpxchg.h
  locking/qrwlock: Make use of _{acquire|release|relaxed}() atomics
  locking/qrwlock: Implement queue_write_unlock() using smp_store_release()
  locking/lockref: Remove homebrew cmpxchg64_relaxed() macro definition
  locking, asm-generic: Add _{relaxed|acquire|release}() variants for 'atomic_long_t'
  locking, asm-generic: Rework atomic-long.h to avoid bulk code duplication
  locking/atomics: Add _{acquire|release|relaxed}() variants of some atomic operations
  locking, compiler.h: Cast away attributes in the WRITE_ONCE() magic
  locking/static_keys: Make verify_keys() static
  jump label, locking/static_keys: Update docs
  locking/static_keys: Provide a selftest
  jump_label: Provide a self-test
  s390/uaccess, locking/static_keys: employ static_branch_likely()
  x86, tsc, locking/static_keys: Employ static_branch_likely()
  locking/static_keys: Add selftest
  locking/static_keys: Add a new static_key interface
  locking/static_keys: Rework update logic
  locking/static_keys: Add static_key_{en,dis}able() helpers
  ...
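The diff below applies the first item to the s390 KVM interrupt code. A minimal
sketch of the migration pattern, assuming a hypothetical atomic_t bitmask named
'flags' (the names and mask values here are illustrative, not from this commit):

    #include <linux/atomic.h>

    static atomic_t flags = ATOMIC_INIT(0);

    static void mask_migration_sketch(void)
    {
            /* Deprecated partial API (incompatible signatures across arches):
             *   atomic_set_mask(0x01, &flags);    set the bits in the mask
             *   atomic_clear_mask(0x02, &flags);  clear the bits in the mask
             */

            /* Coherent replacements with the same effect: */
            atomic_or(0x01, &flags);     /* flags |= 0x01, atomically  */
            atomic_andnot(0x02, &flags); /* flags &= ~0x02, atomically */
    }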
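For the 'relaxed atomics' item, a sketch of how the new ordering flavours
relate, using generic calls added by the series (variable names are
illustrative):

    #include <linux/atomic.h>

    static atomic_t v = ATOMIC_INIT(0);

    static void ordering_sketch(void)
    {
            int old;

            old = atomic_xchg_relaxed(&v, 1); /* no ordering guarantees          */
            old = atomic_xchg_acquire(&v, 2); /* orders against later accesses   */
            old = atomic_xchg_release(&v, 3); /* orders against earlier accesses */
            old = atomic_xchg(&v, 4);         /* fully ordered, as before        */

            /* New read/set accessors from the same series: */
            old = atomic_read_acquire(&v);
            atomic_set_release(&v, 0);
            (void)old;
    }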
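And for the static_key rework, a sketch of the new interface described above;
'my_feature', 'do_feature_work()' and the surrounding functions are
hypothetical names used only for illustration:

    #include <linux/jump_label.h>

    static void do_feature_work(void) { /* hypothetical slow path */ }

    /* A key whose initial value is false; the branch defaults to skipped. */
    static DEFINE_STATIC_KEY_FALSE(my_feature);

    static void hot_path(void)
    {
            /* Compiles to a single patchable branch; the unlikely side
             * stays out of line while the key is false. */
            if (static_branch_unlikely(&my_feature))
                    do_feature_work();
    }

    static void enable_feature(void)
    {
            /* Runtime flip: patches every branch site for this key. */
            static_branch_enable(&my_feature);
    }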
Diffstat (limited to 'arch/s390/kvm/interrupt.c')
-rw-r--r--  arch/s390/kvm/interrupt.c | 30 +++++++++++++++---------------
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index b277d50dcf76..5c2c169395c3 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -173,20 +173,20 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
-			  &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
+		      &vcpu->arch.sie_block->cpuflags);
 	vcpu->arch.sie_block->lctl = 0x0000;
 	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
 
@@ -199,7 +199,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 
 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
 {
-	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
 }
 
 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
@@ -928,7 +928,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
 	spin_unlock(&li->lock);
 
 	/* clear pending external calls set by sigp interpretation facility */
-	atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
+	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
 	vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
 }
 
@@ -1026,7 +1026,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 
 	li->irq.ext = irq->u.ext;
 	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1041,7 +1041,7 @@ static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
 		/* another external call is pending */
 		return -EBUSY;
 	}
-	atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
 	return 0;
 }
 
@@ -1067,7 +1067,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
 		return -EBUSY;
 	*extcall = irq->u.extcall;
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1139,7 +1139,7 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
 
 	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
 	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1183,7 +1183,7 @@ static int __inject_ckc(struct kvm_vcpu *vcpu)
 				   0, 0);
 
 	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1196,7 +1196,7 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
 				   0, 0);
 
 	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1375,13 +1375,13 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
 		spin_lock(&li->lock);
 		switch (type) {
 		case KVM_S390_MCHK:
-			atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+			atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
 			break;
 		case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-			atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
+			atomic_or(CPUSTAT_IO_INT, li->cpuflags);
 			break;
 		default:
-			atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+			atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 			break;
 		}
 		spin_unlock(&li->lock);