path: root/arch/s390/kvm
author	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-03 18:46:07 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-03 18:46:07 -0400
commit	ca520cab25e0e8da717c596ccaa2c2b3650cfa09 (patch)
tree	883eb497642d98635817f9cf954ac98e043fb573 /arch/s390/kvm
parent	4c12ab7e5e2e892fa94df500f96001837918a281 (diff)
parent	d420acd816c07c7be31bd19d09cbcb16e5572fa6 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking and atomic updates from Ingo Molnar:
 "Main changes in this cycle are:

  - Extend atomic primitives with coherent logic op primitives
    (atomic_{or,and,xor}()) and deprecate the old partial APIs
    (atomic_{set,clear}_mask())

    The old ops were incoherent with incompatible signatures across
    architectures and with incomplete support.  Now every architecture
    supports the primitives consistently (by Peter Zijlstra)

  - Generic support for 'relaxed atomics':

      - _acquire/release/relaxed() flavours of xchg(), cmpxchg() and
        {add,sub}_return()
      - atomic_read_acquire()
      - atomic_set_release()

    This came out of porting qrwlock code to arm64 (by Will Deacon)

  - Clean up the fragile static_key APIs that were causing repeat bugs,
    by introducing a new one:

        DEFINE_STATIC_KEY_TRUE(name);
        DEFINE_STATIC_KEY_FALSE(name);

    which define a key of different types with an initial true/false
    value.

    Then allow:

        static_branch_likely()
        static_branch_unlikely()

    to take a key of either type and emit the right instruction for
    the case.  To be able to know the 'type' of the static key we
    encode it in the jump entry (by Peter Zijlstra)

  - Static key self-tests (by Jason Baron)

  - qrwlock optimizations (by Waiman Long)

  - small futex enhancements (by Davidlohr Bueso)

  - ... and misc other changes"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (63 commits)
  jump_label/x86: Work around asm build bug on older/backported GCCs
  locking, ARM, atomics: Define our SMP atomics in terms of _relaxed() operations
  locking, include/llist: Use linux/atomic.h instead of asm/cmpxchg.h
  locking/qrwlock: Make use of _{acquire|release|relaxed}() atomics
  locking/qrwlock: Implement queue_write_unlock() using smp_store_release()
  locking/lockref: Remove homebrew cmpxchg64_relaxed() macro definition
  locking, asm-generic: Add _{relaxed|acquire|release}() variants for 'atomic_long_t'
  locking, asm-generic: Rework atomic-long.h to avoid bulk code duplication
  locking/atomics: Add _{acquire|release|relaxed}() variants of some atomic operations
  locking, compiler.h: Cast away attributes in the WRITE_ONCE() magic
  locking/static_keys: Make verify_keys() static
  jump label, locking/static_keys: Update docs
  locking/static_keys: Provide a selftest
  jump_label: Provide a self-test
  s390/uaccess, locking/static_keys: employ static_branch_likely()
  x86, tsc, locking/static_keys: Employ static_branch_likely()
  locking/static_keys: Add selftest
  locking/static_keys: Add a new static_key interface
  locking/static_keys: Rework update logic
  locking/static_keys: Add static_key_{en,dis}able() helpers
  ...
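The conversion that the s390/kvm hunks below apply is purely mechanical. A minimal sketch of the semantics, using C11 atomics as a stand-in for the kernel's atomic_t (illustration only, not the in-kernel implementation):

	#include <stdatomic.h>

	/* Old API                  ->  new API
	 * atomic_set_mask(m, v)        atomic_or(m, v)      -- OR the mask bits in
	 * atomic_clear_mask(m, v)      atomic_andnot(m, v)  -- AND with ~mask
	 */
	static void set_mask(unsigned int mask, atomic_uint *v)
	{
		atomic_fetch_or(v, mask);    /* *v |= mask, atomically */
	}

	static void clear_mask(unsigned int mask, atomic_uint *v)
	{
		atomic_fetch_and(v, ~mask);  /* *v &= ~mask, atomically */
	}

Like the old mask helpers, the kernel's atomic_or()/atomic_andnot() take the mask first and the atomic_t pointer second, so each call site converts by renaming alone; the andnot variant complements the mask internally.

For the new static_key interface described above, usage looks roughly like this (a sketch: DEFINE_STATIC_KEY_FALSE() and static_branch_unlikely() are the APIs this merge introduces, while 'my_feature' and the surrounding functions are made-up example names):

	/* declare a key whose initial value is false (disabled) */
	DEFINE_STATIC_KEY_FALSE(my_feature);

	void hot_path(void)
	{
		/* compiles to a patchable NOP until the key is enabled */
		if (static_branch_unlikely(&my_feature))
			do_rare_extra_work();
	}

	/* elsewhere, e.g. when the feature is switched on at runtime: */
	static_branch_enable(&my_feature);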
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--	arch/s390/kvm/interrupt.c	30
-rw-r--r--	arch/s390/kvm/kvm-s390.c	32
2 files changed, 31 insertions(+), 31 deletions(-)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index b277d50dcf76..5c2c169395c3 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -173,20 +173,20 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
-			  &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
+		      &vcpu->arch.sie_block->cpuflags);
 	vcpu->arch.sie_block->lctl = 0x0000;
 	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
 
@@ -199,7 +199,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 
 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
 {
-	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
 }
 
 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
@@ -928,7 +928,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
 	spin_unlock(&li->lock);
 
 	/* clear pending external calls set by sigp interpretation facility */
-	atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
+	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
 	vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
 }
 
@@ -1026,7 +1026,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 
 	li->irq.ext = irq->u.ext;
 	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1041,7 +1041,7 @@ static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
 		/* another external call is pending */
 		return -EBUSY;
 	}
-	atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
 	return 0;
 }
 
@@ -1067,7 +1067,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
 		return -EBUSY;
 	*extcall = irq->u.extcall;
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1139,7 +1139,7 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
 
 	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
 	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1183,7 +1183,7 @@ static int __inject_ckc(struct kvm_vcpu *vcpu)
 				   0, 0);
 
 	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1196,7 +1196,7 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
 				   0, 0);
 
 	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1375,13 +1375,13 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
 		spin_lock(&li->lock);
 		switch (type) {
 		case KVM_S390_MCHK:
-			atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+			atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
 			break;
 		case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-			atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
+			atomic_or(CPUSTAT_IO_INT, li->cpuflags);
 			break;
 		default:
-			atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+			atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 			break;
 		}
 		spin_unlock(&li->lock);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 98df53c01343..c91eb941b444 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1333,12 +1333,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	save_access_regs(vcpu->arch.host_acrs);
 	restore_access_regs(vcpu->run->s.regs.acrs);
 	gmap_enable(vcpu->arch.gmap);
-	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);
 
 	save_fpu_regs();
@@ -1443,9 +1443,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 						   CPUSTAT_STOPPED);
 
 	if (test_kvm_facility(vcpu->kvm, 78))
-		atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
 	else if (test_kvm_facility(vcpu->kvm, 8))
-		atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
 
 	kvm_s390_vcpu_setup_model(vcpu);
 
@@ -1557,24 +1557,24 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 
 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 	exit_sie(vcpu);
 }
 
 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 }
 
 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
 	exit_sie(vcpu);
 }
 
 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
 }
 
 /*
@@ -1583,7 +1583,7 @@ static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
  * return immediately. */
 void exit_sie(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
 		cpu_relax();
 }
@@ -1807,19 +1807,19 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
 		vcpu->guest_debug = dbg->control;
 		/* enforce guest PER */
-		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 
 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
 			rc = kvm_s390_import_bp_data(vcpu, dbg);
 	} else {
-		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 		vcpu->arch.guestdbg.last_bp = 0;
 	}
 
 	if (rc) {
 		vcpu->guest_debug = 0;
 		kvm_s390_clear_bp_data(vcpu);
-		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 	}
 
 	return rc;
@@ -1894,7 +1894,7 @@ retry:
 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
 		if (!ibs_enabled(vcpu)) {
 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
-			atomic_set_mask(CPUSTAT_IBS,
-					&vcpu->arch.sie_block->cpuflags);
+			atomic_or(CPUSTAT_IBS,
+				  &vcpu->arch.sie_block->cpuflags);
 		}
 		goto retry;
@@ -1903,7 +1903,7 @@ retry:
 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
 		if (ibs_enabled(vcpu)) {
 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
-			atomic_clear_mask(CPUSTAT_IBS,
-					  &vcpu->arch.sie_block->cpuflags);
+			atomic_andnot(CPUSTAT_IBS,
+				      &vcpu->arch.sie_block->cpuflags);
 		}
 		goto retry;
@@ -2419,7 +2419,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
 		__disable_ibs_on_all_vcpus(vcpu->kvm);
 	}
 
-	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	/*
 	 * Another VCPU might have used IBS while we were offline.
 	 * Let's play safe and flush the VCPU at startup.
@@ -2445,7 +2445,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
 	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
 	kvm_s390_clear_stop_irq(vcpu);
 
-	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	__disable_ibs_on_vcpu(vcpu);
 
 	for (i = 0; i < online_vcpus; i++) {