Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                  |  1
-rw-r--r--  arch/s390/crypto/crypt_s390.h      |  7
-rw-r--r--  arch/s390/include/asm/kvm_host.h   |  3
-rw-r--r--  arch/s390/include/asm/pgtable.h    | 12
-rw-r--r--  arch/s390/include/asm/setup.h      |  3
-rw-r--r--  arch/s390/include/asm/timex.h      |  2
-rw-r--r--  arch/s390/include/asm/unistd.h     |  4
-rw-r--r--  arch/s390/kernel/compat_wrapper.S  | 20
-rw-r--r--  arch/s390/kernel/early.c           |  2
-rw-r--r--  arch/s390/kernel/setup.c           |  4
-rw-r--r--  arch/s390/kernel/syscalls.S        |  2
-rw-r--r--  arch/s390/kernel/topology.c        | 45
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S     |  2
-rw-r--r--  arch/s390/kvm/diag.c               |  2
-rw-r--r--  arch/s390/kvm/intercept.c          |  3
-rw-r--r--  arch/s390/kvm/interrupt.c          |  1
-rw-r--r--  arch/s390/kvm/kvm-s390.c           | 12
-rw-r--r--  arch/s390/kvm/priv.c               | 10
-rw-r--r--  arch/s390/kvm/sigp.c               | 45
-rw-r--r--  arch/s390/mm/fault.c               |  9
20 files changed, 161 insertions, 28 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index a9fbd43395f7..373679b3744a 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -572,6 +572,7 @@ config KEXEC
 config CRASH_DUMP
 	bool "kernel crash dumps"
 	depends on 64BIT
+	select KEXEC
 	help
 	  Generate crash dump after being started by kexec.
 	  Crash dump kernels are loaded in the main kernel with kexec-tools
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index 49676771bd66..ffd1ac255f19 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -368,9 +368,12 @@ static inline int crypt_s390_func_available(int func,
 
 	if (facility_mask & CRYPT_S390_MSA && !test_facility(17))
 		return 0;
-	if (facility_mask & CRYPT_S390_MSA3 && !test_facility(76))
+
+	if (facility_mask & CRYPT_S390_MSA3 &&
+	    (!test_facility(2) || !test_facility(76)))
 		return 0;
-	if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77))
+	if (facility_mask & CRYPT_S390_MSA4 &&
+	    (!test_facility(2) || !test_facility(77)))
 		return 0;
 
 	switch (func & CRYPT_S390_OP_MASK) {
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 24e18473d926..b0c235cb6ad5 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -47,7 +47,7 @@ struct sca_block {
 #define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
 #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
 
-#define CPUSTAT_HOST 0x80000000
+#define CPUSTAT_STOPPED 0x80000000
 #define CPUSTAT_WAIT 0x10000000
 #define CPUSTAT_ECALL_PEND 0x08000000
 #define CPUSTAT_STOP_INT 0x04000000
@@ -139,6 +139,7 @@ struct kvm_vcpu_stat {
 	u32 instruction_stfl;
 	u32 instruction_tprot;
 	u32 instruction_sigp_sense;
+	u32 instruction_sigp_sense_running;
 	u32 instruction_sigp_external_call;
 	u32 instruction_sigp_emergency;
 	u32 instruction_sigp_stop;
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 34ede0ea85a9..524d23b8610c 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -593,6 +593,8 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
 	unsigned long address, bits;
 	unsigned char skey;
 
+	if (!pte_present(*ptep))
+		return pgste;
 	address = pte_val(*ptep) & PAGE_MASK;
 	skey = page_get_storage_key(address);
 	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
@@ -625,6 +627,8 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
 #ifdef CONFIG_PGSTE
 	int young;
 
+	if (!pte_present(*ptep))
+		return pgste;
 	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
 	/* Transfer page referenced bit to pte software bit (host view) */
 	if (young || (pgste_val(pgste) & RCP_HR_BIT))
@@ -638,13 +642,15 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
 
 }
 
-static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
+static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
 {
 #ifdef CONFIG_PGSTE
 	unsigned long address;
 	unsigned long okey, nkey;
 
-	address = pte_val(*ptep) & PAGE_MASK;
+	if (!pte_present(entry))
+		return;
+	address = pte_val(entry) & PAGE_MASK;
 	okey = nkey = page_get_storage_key(address);
 	nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
 	/* Set page access key and fetch protection bit from pgste */
@@ -712,7 +718,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste_set_pte(ptep, pgste);
+		pgste_set_pte(ptep, pgste, entry);
 		*ptep = entry;
 		pgste_set_unlock(ptep, pgste);
 	} else
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 5a099714df04..097183c70407 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -82,6 +82,7 @@ extern unsigned int user_mode;
 #define MACHINE_FLAG_LPAR (1UL << 12)
 #define MACHINE_FLAG_SPP (1UL << 13)
 #define MACHINE_FLAG_TOPOLOGY (1UL << 14)
+#define MACHINE_FLAG_STCKF (1UL << 15)
 
 #define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
 #define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -100,6 +101,7 @@ extern unsigned int user_mode;
 #define MACHINE_HAS_PFMF (0)
 #define MACHINE_HAS_SPP (0)
 #define MACHINE_HAS_TOPOLOGY (0)
+#define MACHINE_HAS_STCKF (0)
 #else /* __s390x__ */
 #define MACHINE_HAS_IEEE (1)
 #define MACHINE_HAS_CSP (1)
@@ -111,6 +113,7 @@ extern unsigned int user_mode;
 #define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
 #define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
 #define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
+#define MACHINE_HAS_STCKF (S390_lowcore.machine_flags & MACHINE_FLAG_STCKF)
 #endif /* __s390x__ */
 
 #define ZFCPDUMP_HSA_SIZE (32UL<<20)
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index d610bef9c5e9..c447a27a7fdb 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -90,7 +90,7 @@ static inline unsigned long long get_clock_fast(void)
 {
 	unsigned long long clk;
 
-	if (test_facility(25))
+	if (MACHINE_HAS_STCKF)
 		asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
 	else
 		clk = get_clock();
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 404bdb9671b4..58de4c91c333 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -277,7 +277,9 @@
 #define __NR_clock_adjtime 337
 #define __NR_syncfs 338
 #define __NR_setns 339
-#define NR_syscalls 340
+#define __NR_process_vm_readv 340
+#define __NR_process_vm_writev 341
+#define NR_syscalls 342
 
 /*
  * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 5006a1d9f5d0..18c51df9fe06 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1627,3 +1627,23 @@ ENTRY(sys_setns_wrapper)
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# int
 	jg	sys_setns
+
+ENTRY(compat_sys_process_vm_readv_wrapper)
+	lgfr	%r2,%r2			# compat_pid_t
+	llgtr	%r3,%r3			# struct compat_iovec __user *
+	llgfr	%r4,%r4			# unsigned long
+	llgtr	%r5,%r5			# struct compat_iovec __user *
+	llgfr	%r6,%r6			# unsigned long
+	llgf	%r0,164(%r15)		# unsigned long
+	stg	%r0,160(%r15)
+	jg	sys_process_vm_readv
+
+ENTRY(compat_sys_process_vm_writev_wrapper)
+	lgfr	%r2,%r2			# compat_pid_t
+	llgtr	%r3,%r3			# struct compat_iovec __user *
+	llgfr	%r4,%r4			# unsigned long
+	llgtr	%r5,%r5			# struct compat_iovec __user *
+	llgfr	%r6,%r6			# unsigned long
+	llgf	%r0,164(%r15)		# unsigned long
+	stg	%r0,160(%r15)
+	jg	sys_process_vm_writev
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 37394b3413e2..c9ffe0025197 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -390,6 +390,8 @@ static __init void detect_machine_facilities(void)
 		S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
 	if (test_facility(40))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
+	if (test_facility(25))
+		S390_lowcore.machine_flags |= MACHINE_FLAG_STCKF;
 #endif
 }
 
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 8ac6bfa2786c..e58a462949b1 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -211,6 +211,8 @@ static void __init setup_zfcpdump(unsigned int console_devno)
 
 	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
 		return;
+	if (OLDMEM_BASE)
+		return;
 	if (console_devno != -1)
 		sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x",
 			ipl_info.data.fcp.dev_id.devno, console_devno);
@@ -482,7 +484,7 @@ static void __init setup_memory_end(void)
 
 
 #ifdef CONFIG_ZFCPDUMP
-	if (ipl_info.type == IPL_TYPE_FCP_DUMP) {
+	if (ipl_info.type == IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) {
 		memory_end = ZFCPDUMP_HSA_SIZE;
 		memory_end_set = 1;
 	}
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 73eb08c874fb..bcab2f04ba58 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -348,3 +348,5 @@ SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at
 SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime_wrapper)
 SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper)
 SYSCALL(sys_setns,sys_setns,sys_setns_wrapper)
+SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv_wrapper) /* 340 */
+SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev_wrapper)
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 77b8942b9a15..fdb5b8cb260f 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -68,8 +68,10 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 	return mask;
 }
 
-static void add_cpus_to_mask(struct topology_cpu *tl_cpu,
-			     struct mask_info *book, struct mask_info *core)
+static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
+					  struct mask_info *book,
+					  struct mask_info *core,
+					  int z10)
 {
 	unsigned int cpu;
 
@@ -88,10 +90,16 @@ static void add_cpus_to_mask(struct topology_cpu *tl_cpu,
 			cpu_book_id[lcpu] = book->id;
 #endif
 			cpumask_set_cpu(lcpu, &core->mask);
-			cpu_core_id[lcpu] = core->id;
+			if (z10) {
+				cpu_core_id[lcpu] = rcpu;
+				core = core->next;
+			} else {
+				cpu_core_id[lcpu] = core->id;
+			}
 			smp_cpu_polarization[lcpu] = tl_cpu->pp;
 		}
 	}
+	return core;
 }
 
 static void clear_masks(void)
@@ -123,18 +131,41 @@ static void tl_to_cores(struct sysinfo_15_1_x *info)
 {
 #ifdef CONFIG_SCHED_BOOK
 	struct mask_info *book = &book_info;
+	struct cpuid cpu_id;
 #else
 	struct mask_info *book = NULL;
 #endif
 	struct mask_info *core = &core_info;
 	union topology_entry *tle, *end;
+	int z10 = 0;
 
-
+#ifdef CONFIG_SCHED_BOOK
+	get_cpu_id(&cpu_id);
+	z10 = cpu_id.machine == 0x2097 || cpu_id.machine == 0x2098;
+#endif
 	spin_lock_irq(&topology_lock);
 	clear_masks();
 	tle = info->tle;
 	end = (union topology_entry *)((unsigned long)info + info->length);
 	while (tle < end) {
+#ifdef CONFIG_SCHED_BOOK
+		if (z10) {
+			switch (tle->nl) {
+			case 1:
+				book = book->next;
+				book->id = tle->container.id;
+				break;
+			case 0:
+				core = add_cpus_to_mask(&tle->cpu, book, core, z10);
+				break;
+			default:
+				clear_masks();
+				goto out;
+			}
+			tle = next_tle(tle);
+			continue;
+		}
+#endif
 		switch (tle->nl) {
 #ifdef CONFIG_SCHED_BOOK
 		case 2:
@@ -147,7 +178,7 @@ static void tl_to_cores(struct sysinfo_15_1_x *info)
 			core->id = tle->container.id;
 			break;
 		case 0:
-			add_cpus_to_mask(&tle->cpu, book, core);
+			add_cpus_to_mask(&tle->cpu, book, core, z10);
 			break;
 		default:
 			clear_masks();
@@ -328,8 +359,8 @@ void __init s390_init_cpu_topology(void)
 	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
 		printk(" %d", info->mag[i]);
 	printk(" / %d\n", info->mnest);
-	alloc_masks(info, &core_info, 2);
+	alloc_masks(info, &core_info, 1);
 #ifdef CONFIG_SCHED_BOOK
-	alloc_masks(info, &book_info, 3);
+	alloc_masks(info, &book_info, 2);
 #endif
 }
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 56fe6bc81fee..e4c79ebb40e6 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -43,6 +43,8 @@ SECTIONS
 
 	NOTES :text :note
 
+	.dummy : { *(.dummy) } :data
+
 	RODATA
 
 #ifdef CONFIG_SHARED_KERNEL
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 87cedd61be04..8943e82cd4d9 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -70,7 +70,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
 		return -EOPNOTSUPP;
 	}
 
-	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index c7c51898984e..02434543eabb 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -132,7 +132,6 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 	int rc = 0;
 
 	vcpu->stat.exit_stop_request++;
-	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	spin_lock_bh(&vcpu->arch.local_int.lock);
 	if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
 		vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
@@ -149,6 +148,8 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 	}
 
 	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
+		atomic_set_mask(CPUSTAT_STOPPED,
+				&vcpu->arch.sie_block->cpuflags);
 		vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
 		VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
 		rc = -EOPNOTSUPP;
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 87c16705b381..278ee009ce65 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -252,6 +252,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 			offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
 		if (rc == -EFAULT)
 			exception = 1;
+		atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 		break;
 
 	case KVM_S390_PROGRAM_INT:
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0bd3bea1e4cd..d1c445732451 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -65,6 +65,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
 	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
 	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
+	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
 	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
 	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
@@ -127,6 +128,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 	switch (ext) {
 	case KVM_CAP_S390_PSW:
 	case KVM_CAP_S390_GMAP:
+	case KVM_CAP_SYNC_MMU:
 		r = 1;
 		break;
 	default:
@@ -270,10 +272,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	restore_fp_regs(&vcpu->arch.guest_fpregs);
 	restore_access_regs(vcpu->arch.guest_acrs);
 	gmap_enable(vcpu->arch.gmap);
+	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);
 	save_fp_regs(&vcpu->arch.guest_fpregs);
 	save_access_regs(vcpu->arch.guest_acrs);
@@ -301,7 +305,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
-	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
+	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
+						    CPUSTAT_SM |
+						    CPUSTAT_STOPPED);
 	vcpu->arch.sie_block->ecb = 6;
 	vcpu->arch.sie_block->eca = 0xC1002001U;
 	vcpu->arch.sie_block->fac = (int) (long) facilities;
@@ -428,7 +434,7 @@ static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
 {
 	int rc = 0;
 
-	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
+	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
 		rc = -EBUSY;
 	else {
 		vcpu->run->psw_mask = psw.mask;
@@ -501,7 +507,7 @@ rerun_vcpu:
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
-	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 
 	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
 
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 391626361084..d02638959922 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -336,6 +336,7 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 	u64 address1 = disp1 + base1 ? vcpu->arch.guest_gprs[base1] : 0;
 	u64 address2 = disp2 + base2 ? vcpu->arch.guest_gprs[base2] : 0;
 	struct vm_area_struct *vma;
+	unsigned long user_address;
 
 	vcpu->stat.instruction_tprot++;
 
@@ -349,9 +350,14 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 		return -EOPNOTSUPP;
 
 
+	/* we must resolve the address without holding the mmap semaphore.
+	 * This is ok since the userspace hypervisor is not supposed to change
+	 * the mapping while the guest queries the memory. Otherwise the guest
+	 * might crash or get wrong info anyway. */
+	user_address = (unsigned long) __guestaddr_to_user(vcpu, address1);
+
 	down_read(&current->mm->mmap_sem);
-	vma = find_vma(current->mm,
-		       (unsigned long) __guestaddr_to_user(vcpu, address1));
+	vma = find_vma(current->mm, user_address);
 	if (!vma) {
 		up_read(&current->mm->mmap_sem);
 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index f815118835f3..0a7941d74bc6 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -31,9 +31,11 @@
 #define SIGP_SET_PREFIX 0x0d
 #define SIGP_STORE_STATUS_ADDR 0x0e
 #define SIGP_SET_ARCH 0x12
+#define SIGP_SENSE_RUNNING 0x15
 
 /* cpu status bits */
 #define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL
+#define SIGP_STAT_NOT_RUNNING 0x00000400UL
 #define SIGP_STAT_INCORRECT_STATE 0x00000200UL
 #define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
 #define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL
@@ -57,8 +59,8 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
 	spin_lock(&fi->lock);
 	if (fi->local_int[cpu_addr] == NULL)
 		rc = 3; /* not operational */
-	else if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
-		 & CPUSTAT_RUNNING) {
+	else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
+		  & CPUSTAT_STOPPED)) {
 		*reg &= 0xffffffff00000000UL;
 		rc = 1; /* status stored */
 	} else {
@@ -251,7 +253,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 
 	spin_lock_bh(&li->lock);
 	/* cpu must be in stopped state */
-	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
+	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
 		rc = 1; /* incorrect state */
 		*reg &= SIGP_STAT_INCORRECT_STATE;
 		kfree(inti);
@@ -275,6 +277,38 @@ out_fi:
 	return rc;
 }
 
+static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
+				unsigned long *reg)
+{
+	int rc;
+	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+
+	if (cpu_addr >= KVM_MAX_VCPUS)
+		return 3; /* not operational */
+
+	spin_lock(&fi->lock);
+	if (fi->local_int[cpu_addr] == NULL)
+		rc = 3; /* not operational */
+	else {
+		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
+		    & CPUSTAT_RUNNING) {
+			/* running */
+			rc = 1;
+		} else {
+			/* not running */
+			*reg &= 0xffffffff00000000UL;
+			*reg |= SIGP_STAT_NOT_RUNNING;
+			rc = 0;
+		}
+	}
+	spin_unlock(&fi->lock);
+
+	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
+		   rc);
+
+	return rc;
+}
+
 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 {
 	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
@@ -331,6 +365,11 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
 				       &vcpu->arch.guest_gprs[r1]);
 		break;
+	case SIGP_SENSE_RUNNING:
+		vcpu->stat.instruction_sigp_sense_running++;
+		rc = __sigp_sense_running(vcpu, cpu_addr,
+					  &vcpu->arch.guest_gprs[r1]);
+		break;
 	case SIGP_RESTART:
 		vcpu->stat.instruction_sigp_restart++;
 		/* user space must know about restart */
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 1766def5bc3f..a9a301866b3c 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -587,8 +587,13 @@ static void pfault_interrupt(unsigned int ext_int_code,
 		} else {
 			/* Completion interrupt was faster than initial
 			 * interrupt. Set pfault_wait to -1 so the initial
-			 * interrupt doesn't put the task to sleep. */
-			tsk->thread.pfault_wait = -1;
+			 * interrupt doesn't put the task to sleep.
+			 * If the task is not running, ignore the completion
+			 * interrupt since it must be a leftover of a PFAULT
+			 * CANCEL operation which didn't remove all pending
+			 * completion interrupts. */
+			if (tsk->state == TASK_RUNNING)
+				tsk->thread.pfault_wait = -1;
 		}
 		put_task_struct(tsk);
 	} else {