Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig         |  1
-rw-r--r--  arch/s390/kernel/smp.c    |  2
-rw-r--r--  arch/s390/kvm/diag.c      |  2
-rw-r--r--  arch/s390/kvm/interrupt.c |  7
-rw-r--r--  arch/s390/kvm/kvm-s390.c  | 13
-rw-r--r--  arch/s390/mm/pgtable.c    | 44
-rw-r--r--  arch/s390/mm/vmem.c       |  2
7 files changed, 47 insertions(+), 24 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 93acb3c1859d..107e492cb47e 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -304,6 +304,7 @@ config ARCH_SPARSEMEM_ENABLE
         def_bool y
         select SPARSEMEM_VMEMMAP_ENABLE
         select SPARSEMEM_VMEMMAP
+        select SPARSEMEM_STATIC if !64BIT
 
 config ARCH_SPARSEMEM_DEFAULT
         def_bool y
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 42b1d12ebb10..5d4fa4b1c74c 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -711,7 +711,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
         memset(sf, 0, sizeof(struct stack_frame));
         sf->gprs[9] = (unsigned long) sf;
         cpu_lowcore->save_area[15] = (unsigned long) sf;
-        __ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
+        __ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
         asm volatile(
                 "       stam    0,15,0(%0)"
                 : : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
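Side note on the smp.c hunk above: __ctl_store() stores control registers 0-15 into the memory named by its first argument and, judging by the macro's usual definition, derives the size of that memory operand from sizeof() of the argument. Passing cregs_save_area[0] therefore tells the compiler that only a single register slot is written even though all 16 are filled; passing the whole array matches what the store actually does. A minimal userspace sketch of the sizeof() difference (illustration only, not kernel code):

#include <stdio.h>

int main(void)
{
        unsigned long cregs_save_area[16];

        /* what such a macro "sees" when handed one element vs. the whole array */
        printf("sizeof(cregs_save_area[0]) = %zu\n", sizeof(cregs_save_area[0]));
        printf("sizeof(cregs_save_area)    = %zu\n", sizeof(cregs_save_area));
        return 0;
}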
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index f639a152869f..a0775e1f08df 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -20,7 +20,7 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
         VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
         vcpu->stat.diagnose_44++;
         vcpu_put(vcpu);
-        schedule();
+        yield();
         vcpu_load(vcpu);
         return 0;
 }
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index fcd1ed8015c1..84a7fed4cd4e 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -339,6 +339,11 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
         if (kvm_cpu_has_interrupt(vcpu))
                 return 0;
 
+        __set_cpu_idle(vcpu);
+        spin_lock_bh(&vcpu->arch.local_int.lock);
+        vcpu->arch.local_int.timer_due = 0;
+        spin_unlock_bh(&vcpu->arch.local_int.lock);
+
         if (psw_interrupts_disabled(vcpu)) {
                 VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                 __unset_cpu_idle(vcpu);
@@ -366,8 +371,6 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 no_timer:
         spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
         spin_lock_bh(&vcpu->arch.local_int.lock);
-        __set_cpu_idle(vcpu);
-        vcpu->arch.local_int.timer_due = 0;
         add_wait_queue(&vcpu->arch.local_int.wq, &wait);
         while (list_empty(&vcpu->arch.local_int.list) &&
                 list_empty(&vcpu->arch.local_int.float_int->list) &&
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0ac36a649eba..6558b09ff579 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -423,6 +423,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
         return -EINVAL; /* not implemented yet */
 }
 
+extern void s390_handle_mcck(void);
+
 static void __vcpu_run(struct kvm_vcpu *vcpu)
 {
         memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
@@ -430,13 +432,21 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
         if (need_resched())
                 schedule();
 
+        if (test_thread_flag(TIF_MCCK_PENDING))
+                s390_handle_mcck();
+
+        kvm_s390_deliver_pending_interrupts(vcpu);
+
         vcpu->arch.sie_block->icptcode = 0;
         local_irq_disable();
         kvm_guest_enter();
         local_irq_enable();
         VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                    atomic_read(&vcpu->arch.sie_block->cpuflags));
-        sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
+        if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
+                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
+                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+        }
         VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                    vcpu->arch.sie_block->icptcode);
         local_irq_disable();
@@ -475,7 +485,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         might_sleep();
 
         do {
-                kvm_s390_deliver_pending_interrupts(vcpu);
                 __vcpu_run(vcpu);
                 rc = kvm_handle_sie_intercept(vcpu);
         } while (!signal_pending(current) && !rc);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 5c1aea97cd12..3d98ba82ea67 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -254,36 +254,46 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
 int s390_enable_sie(void)
 {
         struct task_struct *tsk = current;
-        struct mm_struct *mm;
-        int rc;
+        struct mm_struct *mm, *old_mm;
 
-        task_lock(tsk);
-
-        rc = 0;
+        /* Do we have pgstes? if yes, we are done */
         if (tsk->mm->context.pgstes)
-                goto unlock;
+                return 0;
 
-        rc = -EINVAL;
+        /* lets check if we are allowed to replace the mm */
+        task_lock(tsk);
         if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
-            tsk->mm != tsk->active_mm || tsk->mm->ioctx_list)
-                goto unlock;
+            tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+                task_unlock(tsk);
+                return -EINVAL;
+        }
+        task_unlock(tsk);
 
-        tsk->mm->context.pgstes = 1;    /* dirty little tricks .. */
+        /* we copy the mm with pgstes enabled */
+        tsk->mm->context.pgstes = 1;
         mm = dup_mm(tsk);
         tsk->mm->context.pgstes = 0;
-
-        rc = -ENOMEM;
         if (!mm)
-                goto unlock;
-        mmput(tsk->mm);
+                return -ENOMEM;
+
+        /* Now lets check again if somebody attached ptrace etc */
+        task_lock(tsk);
+        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
+            tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+                mmput(mm);
+                task_unlock(tsk);
+                return -EINVAL;
+        }
+
+        /* ok, we are alone. No ptrace, no threads, etc. */
+        old_mm = tsk->mm;
         tsk->mm = tsk->active_mm = mm;
         preempt_disable();
         update_mm(mm, tsk);
         cpu_set(smp_processor_id(), mm->cpu_vm_mask);
         preempt_enable();
-        rc = 0;
-unlock:
         task_unlock(tsk);
-        return rc;
+        mmput(old_mm);
+        return 0;
 }
 EXPORT_SYMBOL_GPL(s390_enable_sie);
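For readers skimming the pgtable.c hunk: instead of holding task_lock() across the whole operation, the rewritten s390_enable_sie() checks under the lock, drops it for the potentially sleeping dup_mm(), then retakes it and re-validates before committing (the "Now lets check again if somebody attached ptrace etc" step). A toy userspace sketch of that check / unlock / allocate / recheck pattern, with made-up names and a pthread mutex standing in for task_lock():

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t task_lock = PTHREAD_MUTEX_INITIALIZER;
static int mm_users = 1;     /* stand-in for atomic_read(&mm->mm_users) */
static void *cur_mm;         /* stand-in for tsk->mm */

static int enable_sie_like(void)
{
        void *new_mm, *old_mm;

        /* First check under the lock: may we replace the mm at all? */
        pthread_mutex_lock(&task_lock);
        if (mm_users > 1) {
                pthread_mutex_unlock(&task_lock);
                return -1;                      /* -EINVAL */
        }
        pthread_mutex_unlock(&task_lock);

        /* The expensive copy (dup_mm() in the kernel, which can sleep)
         * is done with the lock dropped. */
        new_mm = malloc(64);
        if (!new_mm)
                return -2;                      /* -ENOMEM */

        /* Recheck: the state may have changed while we were unlocked
         * (e.g. ptrace attached, a thread was created). */
        pthread_mutex_lock(&task_lock);
        if (mm_users > 1) {
                free(new_mm);
                pthread_mutex_unlock(&task_lock);
                return -1;
        }
        old_mm = cur_mm;
        cur_mm = new_mm;                        /* commit while still locked */
        pthread_mutex_unlock(&task_lock);

        free(old_mm);                           /* mmput(old_mm) after unlock */
        return 0;
}

int main(void)
{
        cur_mm = malloc(64);
        printf("enable_sie_like() = %d\n", enable_sie_like());
        free(cur_mm);
        return 0;
}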
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index f591188fa2c0..e4868bfc672f 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -236,7 +236,7 @@ static int insert_memory_segment(struct memory_segment *seg)
 {
         struct memory_segment *tmp;
 
-        if (seg->start + seg->size >= VMEM_MAX_PHYS ||
+        if (seg->start + seg->size > VMEM_MAX_PHYS ||
             seg->start + seg->size < seg->start)
                 return -ERANGE;
 
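The vmem.c change is a one-character off-by-one fix: seg->start + seg->size is the exclusive end of the segment, so a segment that ends exactly at VMEM_MAX_PHYS is valid and should not be rejected. A small standalone check mirroring the new logic (the VMEM_MAX_PHYS value below is a placeholder, not s390's real limit):

#include <stdio.h>

#define VMEM_MAX_PHYS 0x20000000UL     /* placeholder value for the example */

/* Mirrors the range test in insert_memory_segment(): the segment
 * [start, start + size) must fit below VMEM_MAX_PHYS, and the addition
 * must not wrap around. */
static int segment_in_range(unsigned long start, unsigned long size)
{
        if (start + size > VMEM_MAX_PHYS ||    /* was ">=", wrongly rejecting a segment ending at the limit */
            start + size < start)
                return -1;                     /* -ERANGE */
        return 0;
}

int main(void)
{
        /* ends exactly at the limit: accepted after the fix */
        printf("%d\n", segment_in_range(VMEM_MAX_PHYS - 0x1000, 0x1000));
        /* crosses the limit: still rejected */
        printf("%d\n", segment_in_range(VMEM_MAX_PHYS - 0x1000, 0x2000));
        return 0;
}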