Diffstat (limited to 'arch')
-rw-r--r--  arch/s390/Kconfig                  |   7
-rw-r--r--  arch/s390/kernel/compat_wrapper.S  |   2
-rw-r--r--  arch/s390/kernel/entry.S           |  29
-rw-r--r--  arch/s390/kernel/entry64.S         |  57
-rw-r--r--  arch/s390/kernel/ptrace.c          | 100
-rw-r--r--  arch/s390/kvm/Kconfig              |   1
-rw-r--r--  arch/s390/kvm/intercept.c          |   3
-rw-r--r--  arch/s390/kvm/kvm-s390.c           |   5
-rw-r--r--  arch/s390/mm/Makefile              |   1
-rw-r--r--  arch/s390/mm/init.c                |   3
-rw-r--r--  arch/s390/mm/page-states.c         |  79
11 files changed, 157 insertions, 130 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 29a7940f284f..1d035082e78e 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -430,6 +430,13 @@ config CMM_IUCV
 	  Select this option to enable the special message interface to
 	  the cooperative memory management.
 
+config PAGE_STATES
+	bool "Unused page notification"
+	help
+	  This enables the notification of unused pages to the
+	  hypervisor. The ESSA instruction is used to do the states
+	  changes between a page that has content and the unused state.
+
 config VIRT_TIMER
 	bool "Virtual CPU timer support"
 	help
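
For orientation: the state changes this help text refers to are issued with the ESSA instruction (opcode 0xb9ab, emitted via .insn because the mnemonic may be unknown to older assemblers). Below is a minimal C sketch modelled on the new arch/s390/mm/page-states.c at the end of this diff; the helper name essa_set_page_unused is illustrative and not part of the patch.

#include <linux/mm.h>			/* struct page, page_to_pfn(), PAGE_SHIFT */

#define ESSA_SET_UNUSED	2		/* same value as in page-states.c */

/* Illustrative only: flag one page frame as unused so the hypervisor may
 * discard its content. */
static inline void essa_set_page_unused(struct page *page)
{
	int rc;	/* state information returned by ESSA; ignored here, as in the patch */

	asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
		     : "=&d" (rc)
		     : "a" (page_to_pfn(page) << PAGE_SHIFT),	/* frame address */
		       "i" (ESSA_SET_UNUSED));			/* requested state */
}
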
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 743d54f0b8db..d003a6e16afb 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -121,7 +121,7 @@ sys32_ptrace_wrapper:
 	lgfr	%r3,%r3			# long
 	llgtr	%r4,%r4			# long
 	llgfr	%r5,%r5			# long
-	jg	sys_ptrace		# branch to system call
+	jg	compat_sys_ptrace	# branch to system call
 
 	.globl	sys32_alarm_wrapper
 sys32_alarm_wrapper:
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index bdbb3bcd78a5..708cf9cf9a35 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -279,8 +279,6 @@ sysc_do_restart:
 	st	%r2,SP_R2(%r15)   # store return value (change R2 on stack)
 
 sysc_return:
-	tm	SP_PSW+1(%r15),0x01	# returning to user ?
-	bno	BASED(sysc_restore)
 	tm	__TI_flags+3(%r9),_TIF_WORK_SVC
 	bnz	BASED(sysc_work)  # there is work to do (signals etc.)
 sysc_restore:
@@ -312,6 +310,8 @@ sysc_work_loop:
 # One of the work bits is on. Find out which one.
 #
 sysc_work:
+	tm	SP_PSW+1(%r15),0x01	# returning to user ?
+	bno	BASED(sysc_restore)
 	tm	__TI_flags+3(%r9),_TIF_MCCK_PENDING
 	bo	BASED(sysc_mcck_pending)
 	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
@@ -602,12 +602,6 @@ io_no_vtime:
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
 	basr	%r14,%r1		# branch to standard irq handler
 io_return:
-	tm	SP_PSW+1(%r15),0x01	# returning to user ?
-#ifdef CONFIG_PREEMPT
-	bno	BASED(io_preempt)	# no -> check for preemptive scheduling
-#else
-	bno	BASED(io_restore)	# no-> skip resched & signal
-#endif
 	tm	__TI_flags+3(%r9),_TIF_WORK_INT
 	bnz	BASED(io_work)		# there is work to do (signals etc.)
 io_restore:
@@ -629,10 +623,18 @@ io_restore_trace_psw:
 	.long	0, io_restore_trace + 0x80000000
 #endif
 
-#ifdef CONFIG_PREEMPT
-io_preempt:
+#
+# switch to kernel stack, then check the TIF bits
+#
+io_work:
+	tm	SP_PSW+1(%r15),0x01	# returning to user ?
+#ifndef CONFIG_PREEMPT
+	bno	BASED(io_restore)	# no-> skip resched & signal
+#else
+	bnz	BASED(io_work_user)	# no -> check for preemptive scheduling
+	# check for preemptive scheduling
 	icm	%r0,15,__TI_precount(%r9)
-	bnz	BASED(io_restore)
+	bnz	BASED(io_restore)	# preemption disabled
 	l	%r1,SP_R15(%r15)
 	s	%r1,BASED(.Lc_spsize)
 	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
@@ -646,10 +648,7 @@ io_resume_loop:
 	br	%r1		# call schedule
 #endif
 
-#
-# switch to kernel stack, then check the TIF bits
-#
-io_work:
+io_work_user:
 	l	%r1,__LC_KERNEL_STACK
 	s	%r1,BASED(.Lc_spsize)
 	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 5a4a7bcd2bba..fee10177dbfc 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -271,8 +271,6 @@ sysc_noemu:
 	stg	%r2,SP_R2(%r15) # store return value (change R2 on stack)
 
 sysc_return:
-	tm	SP_PSW+1(%r15),0x01	# returning to user ?
-	jno	sysc_restore
 	tm	__TI_flags+7(%r9),_TIF_WORK_SVC
 	jnz	sysc_work	# there is work to do (signals etc.)
 sysc_restore:
@@ -304,6 +302,8 @@ sysc_work_loop:
 # One of the work bits is on. Find out which one.
 #
 sysc_work:
+	tm	SP_PSW+1(%r15),0x01	# returning to user ?
+	jno	sysc_restore
 	tm	__TI_flags+7(%r9),_TIF_MCCK_PENDING
 	jo	sysc_mcck_pending
 	tm	__TI_flags+7(%r9),_TIF_NEED_RESCHED
@@ -585,12 +585,6 @@ io_no_vtime:
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
 	brasl	%r14,do_IRQ		# call standard irq handler
 io_return:
-	tm	SP_PSW+1(%r15),0x01	# returning to user ?
-#ifdef CONFIG_PREEMPT
-	jno	io_preempt		# no -> check for preemptive scheduling
-#else
-	jno	io_restore		# no-> skip resched & signal
-#endif
 	tm	__TI_flags+7(%r9),_TIF_WORK_INT
 	jnz	io_work			# there is work to do (signals etc.)
 io_restore:
@@ -612,10 +606,41 @@ io_restore_trace_psw:
 	.quad	0, io_restore_trace
 #endif
 
-#ifdef CONFIG_PREEMPT
-io_preempt:
+#
+# There is work todo, we need to check if we return to userspace, then
+# check, if we are in SIE, if yes leave it
+#
+io_work:
+	tm	SP_PSW+1(%r15),0x01	# returning to user ?
+#ifndef CONFIG_PREEMPT
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+	jnz	io_work_user		# yes -> no need to check for SIE
+	la	%r1, BASED(sie_opcode)	# we return to kernel here
+	lg	%r2, SP_PSW+8(%r15)
+	clc	0(2,%r1), 0(%r2)	# is current instruction = SIE?
+	jne	io_restore		# no-> return to kernel
+	lg	%r1, SP_PSW+8(%r15)	# yes-> add 4 bytes to leave SIE
+	aghi	%r1, 4
+	stg	%r1, SP_PSW+8(%r15)
+	j	io_restore		# return to kernel
+#else
+	jno	io_restore		# no-> skip resched & signal
+#endif
+#else
+	jnz	io_work_user		# yes -> do resched & signal
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+	la	%r1, BASED(sie_opcode)
+	lg	%r2, SP_PSW+8(%r15)
+	clc	0(2,%r1), 0(%r2)	# is current instruction = SIE?
+	jne	0f			# no -> leave PSW alone
+	lg	%r1, SP_PSW+8(%r15)	# yes-> add 4 bytes to leave SIE
+	aghi	%r1, 4
+	stg	%r1, SP_PSW+8(%r15)
+0:
+#endif
+	# check for preemptive scheduling
 	icm	%r0,15,__TI_precount(%r9)
-	jnz	io_restore
+	jnz	io_restore		# preemption is disabled
 	# switch to kernel stack
 	lg	%r1,SP_R15(%r15)
 	aghi	%r1,-SP_SIZE
@@ -629,10 +654,7 @@ io_resume_loop:
 	jg	preempt_schedule_irq
 #endif
 
-#
-# switch to kernel stack, then check TIF bits
-#
-io_work:
+io_work_user:
 	lg	%r1,__LC_KERNEL_STACK
 	aghi	%r1,-SP_SIZE
 	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
@@ -653,6 +675,11 @@ io_work_loop:
 	j	io_restore
 io_work_done:
 
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+sie_opcode:
+	.long 0xb2140000
+#endif
+
 #
 # _TIF_MCCK_PENDING is set, call handler
 #
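
A hedged restatement of what the new io_work path above does when KVM is configured (the 31-bit entry.S earlier in this diff gets the same restructuring, minus the SIE check). This is a plain C sketch, not kernel code; io_work_user_sketch and io_restore_sketch merely stand in for the assembly labels of similar names.

#include <linux/types.h>
#include <asm/ptrace.h>		/* struct pt_regs, PSW_MASK_PSTATE */

#define SIE_OPCODE	0xb214	/* first halfword of the 4-byte SIE instruction */

/* Illustrative stand-ins for the assembly labels io_work_user / io_restore. */
extern void io_work_user_sketch(struct pt_regs *regs);
extern void io_restore_sketch(struct pt_regs *regs);

static void io_work_sketch(struct pt_regs *regs)
{
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* The interrupt hit user mode: do the usual resched/signal work. */
		io_work_user_sketch(regs);
		return;
	}
	/*
	 * The interrupt hit kernel mode.  If the interrupted instruction is
	 * SIE, bump the PSW address by 4 (the length of SIE) so that the
	 * host, not the guest, resumes after the interrupt and can act on
	 * the pending work before re-entering the guest.
	 */
	if (*(u16 *) regs->psw.addr == SIE_OPCODE)
		regs->psw.addr += 4;
	io_restore_sketch(regs);
}
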
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 7f4270163744..35827b9bd4d1 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -292,8 +292,7 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
 	return 0;
 }
 
-static int
-do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
 	ptrace_area parea;
 	int copied, ret;
@@ -529,35 +528,19 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
 	return 0;
 }
 
-static int
-do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
+long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+			compat_ulong_t caddr, compat_ulong_t cdata)
 {
-	unsigned int tmp;  /* 4 bytes !! */
+	unsigned long addr = caddr;
+	unsigned long data = cdata;
 	ptrace_area_emu31 parea;
 	int copied, ret;
 
 	switch (request) {
-	case PTRACE_PEEKTEXT:
-	case PTRACE_PEEKDATA:
-		/* read word at location addr. */
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-		if (copied != sizeof(tmp))
-			return -EIO;
-		return put_user(tmp, (unsigned int __force __user *) data);
-
 	case PTRACE_PEEKUSR:
 		/* read the word at location addr in the USER area. */
 		return peek_user_emu31(child, addr, data);
 
-	case PTRACE_POKETEXT:
-	case PTRACE_POKEDATA:
-		/* write the word at location addr. */
-		tmp = data;
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 1);
-		if (copied != sizeof(tmp))
-			return -EIO;
-		return 0;
-
 	case PTRACE_POKEUSR:
 		/* write the word at location addr in the USER area */
 		return poke_user_emu31(child, addr, data);
@@ -587,82 +570,11 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
 		copied += sizeof(unsigned int);
 	}
 	return 0;
-	case PTRACE_GETEVENTMSG:
-		return put_user((__u32) child->ptrace_message,
-				(unsigned int __force __user *) data);
-	case PTRACE_GETSIGINFO:
-		if (child->last_siginfo == NULL)
-			return -EINVAL;
-		return copy_siginfo_to_user32((compat_siginfo_t
-				__force __user *) data,
-				child->last_siginfo);
-	case PTRACE_SETSIGINFO:
-		if (child->last_siginfo == NULL)
-			return -EINVAL;
-		return copy_siginfo_from_user32(child->last_siginfo,
-				(compat_siginfo_t
-				__force __user *) data);
 	}
-	return ptrace_request(child, request, addr, data);
+	return compat_ptrace_request(child, request, addr, data);
 }
 #endif
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
-{
-	switch (request) {
-	case PTRACE_SYSCALL:
-		/* continue and stop at next (return from) syscall */
-	case PTRACE_CONT:
-		/* restart after signal. */
-		if (!valid_signal(data))
-			return -EIO;
-		if (request == PTRACE_SYSCALL)
-			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		else
-			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		child->exit_code = data;
-		/* make sure the single step bit is not set. */
-		user_disable_single_step(child);
-		wake_up_process(child);
-		return 0;
-
-	case PTRACE_KILL:
-		/*
-		 * make the child exit. Best I can do is send it a sigkill.
-		 * perhaps it should be put in the status that it wants to
-		 * exit.
-		 */
-		if (child->exit_state == EXIT_ZOMBIE) /* already dead */
-			return 0;
-		child->exit_code = SIGKILL;
-		/* make sure the single step bit is not set. */
-		user_disable_single_step(child);
-		wake_up_process(child);
-		return 0;
-
-	case PTRACE_SINGLESTEP:
-		/* set the trap flag. */
-		if (!valid_signal(data))
-			return -EIO;
-		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		child->exit_code = data;
-		user_enable_single_step(child);
-		/* give it a chance to run. */
-		wake_up_process(child);
-		return 0;
-
-	/* Do requests that differ for 31/64 bit */
-	default:
-#ifdef CONFIG_COMPAT
-		if (test_thread_flag(TIF_31BIT))
-			return do_ptrace_emu31(child, request, addr, data);
-#endif
-		return do_ptrace_normal(child, request, addr, data);
-	}
-	/* Not reached. */
-	return -EIO;
-}
-
 asmlinkage void
 syscall_trace(struct pt_regs *regs, int entryexit)
 {
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 1761b74d639b..e051cad1f1e0 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -22,7 +22,6 @@ config KVM
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
 	select S390_SWITCH_AMODE
-	select PREEMPT
 	---help---
 	  Support hosting paravirtualized guest machines using the SIE
 	  virtualization capability on the mainframe. This should work
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 349581a26103..47a0b642174c 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -105,6 +105,9 @@ static intercept_handler_t instruction_handlers[256] = {
 static int handle_noop(struct kvm_vcpu *vcpu)
 {
 	switch (vcpu->arch.sie_block->icptcode) {
+	case 0x0:
+		vcpu->stat.exit_null++;
+		break;
 	case 0x10:
 		vcpu->stat.exit_external_request++;
 		break;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 98d1e73e01f1..0ac36a649eba 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -31,6 +31,7 @@
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "userspace_handled", VCPU_STAT(exit_userspace) },
+	{ "exit_null", VCPU_STAT(exit_null) },
 	{ "exit_validity", VCPU_STAT(exit_validity) },
 	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
 	{ "exit_external_request", VCPU_STAT(exit_external_request) },
@@ -221,10 +222,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
 	restore_fp_regs(&vcpu->arch.guest_fpregs);
 	restore_access_regs(vcpu->arch.guest_acrs);
-
-	if (signal_pending(current))
-		atomic_set_mask(CPUSTAT_STOP_INT,
-			&vcpu->arch.sie_block->cpuflags);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index fb988a48a754..2a7458134544 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -5,3 +5,4 @@
 obj-y	 := init.o fault.o extmem.o mmap.o vmem.o pgtable.o
 obj-$(CONFIG_CMM) += cmm.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_PAGE_STATES) += page-states.o
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index fa31de6ae97a..29f3a63806b9 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -126,6 +126,9 @@ void __init mem_init(void)
 	/* clear the zero-page */
 	memset(empty_zero_page, 0, PAGE_SIZE);
 
+	/* Setup guest page hinting */
+	cmma_init();
+
 	/* this will put all low memory onto the freelists */
 	totalram_pages += free_all_bootmem();
 
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
new file mode 100644
index 000000000000..fc0ad73ffd90
--- /dev/null
+++ b/arch/s390/mm/page-states.c
@@ -0,0 +1,79 @@
+/*
+ * arch/s390/mm/page-states.c
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Guest page hinting for unused pages.
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+
+#define ESSA_SET_STABLE		1
+#define ESSA_SET_UNUSED		2
+
+static int cmma_flag;
+
+static int __init cmma(char *str)
+{
+	char *parm;
+	parm = strstrip(str);
+	if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
+		cmma_flag = 1;
+		return 1;
+	}
+	cmma_flag = 0;
+	if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
+		return 1;
+	return 0;
+}
+
+__setup("cmma=", cmma);
+
+void __init cmma_init(void)
+{
+	register unsigned long tmp asm("0") = 0;
+	register int rc asm("1") = -EOPNOTSUPP;
+
+	if (!cmma_flag)
+		return;
+	asm volatile(
+		"	.insn rrf,0xb9ab0000,%1,%1,0,0\n"
+		"0:	la	%0,0\n"
+		"1:\n"
+		EX_TABLE(0b,1b)
+		: "+&d" (rc), "+&d" (tmp));
+	if (rc)
+		cmma_flag = 0;
+}
+
+void arch_free_page(struct page *page, int order)
+{
+	int i, rc;
+
+	if (!cmma_flag)
+		return;
+	for (i = 0; i < (1 << order); i++)
+		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
+			     : "=&d" (rc)
+			     : "a" ((page_to_pfn(page) + i) << PAGE_SHIFT),
+			       "i" (ESSA_SET_UNUSED));
+}
+
+void arch_alloc_page(struct page *page, int order)
+{
+	int i, rc;
+
+	if (!cmma_flag)
+		return;
+	for (i = 0; i < (1 << order); i++)
+		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
+			     : "=&d" (rc)
+			     : "a" ((page_to_pfn(page) + i) << PAGE_SHIFT),
+			       "i" (ESSA_SET_STABLE));
+}
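
Rough usage sketch: with CONFIG_PAGE_STATES=y and "cmma=on" (or "cmma=yes") on the kernel command line, cmma_init() probes ESSA once at boot, and the two hooks above are then driven by the generic page allocator on its free and allocation paths. The demo function below is illustrative only; it is not allocator code.

#include <linux/mm.h>	/* struct page, arch_free_page(), arch_alloc_page() */

/* Illustrative only: shows the lifecycle the hooks express to the host. */
static void page_state_demo(struct page *page, int order)
{
	/* Free path: the host may now discard the content of these frames. */
	arch_free_page(page, order);

	/* ... the pages sit on the free lists; the host can reclaim the frames ... */

	/* Allocation path: the content of these frames is significant again. */
	arch_alloc_page(page, order);
}
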