99 files changed, 5401 insertions, 2057 deletions
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index b112efc816f1..bc9f6fe44e27 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -997,7 +997,7 @@ for vm-wide capabilities. | |||
997 | 4.38 KVM_GET_MP_STATE | 997 | 4.38 KVM_GET_MP_STATE |
998 | 998 | ||
999 | Capability: KVM_CAP_MP_STATE | 999 | Capability: KVM_CAP_MP_STATE |
1000 | Architectures: x86, s390 | 1000 | Architectures: x86, s390, arm, arm64 |
1001 | Type: vcpu ioctl | 1001 | Type: vcpu ioctl |
1002 | Parameters: struct kvm_mp_state (out) | 1002 | Parameters: struct kvm_mp_state (out) |
1003 | Returns: 0 on success; -1 on error | 1003 | Returns: 0 on success; -1 on error |
@@ -1011,7 +1011,7 @@ uniprocessor guests). | |||
1011 | 1011 | ||
1012 | Possible values are: | 1012 | Possible values are: |
1013 | 1013 | ||
1014 | - KVM_MP_STATE_RUNNABLE: the vcpu is currently running [x86] | 1014 | - KVM_MP_STATE_RUNNABLE: the vcpu is currently running [x86,arm/arm64] |
1015 | - KVM_MP_STATE_UNINITIALIZED: the vcpu is an application processor (AP) | 1015 | - KVM_MP_STATE_UNINITIALIZED: the vcpu is an application processor (AP) |
1016 | which has not yet received an INIT signal [x86] | 1016 | which has not yet received an INIT signal [x86] |
1017 | - KVM_MP_STATE_INIT_RECEIVED: the vcpu has received an INIT signal, and is | 1017 | - KVM_MP_STATE_INIT_RECEIVED: the vcpu has received an INIT signal, and is |
@@ -1020,7 +1020,7 @@ Possible values are: | |||
1020 | is waiting for an interrupt [x86] | 1020 | is waiting for an interrupt [x86] |
1021 | - KVM_MP_STATE_SIPI_RECEIVED: the vcpu has just received a SIPI (vector | 1021 | - KVM_MP_STATE_SIPI_RECEIVED: the vcpu has just received a SIPI (vector |
1022 | accessible via KVM_GET_VCPU_EVENTS) [x86] | 1022 | accessible via KVM_GET_VCPU_EVENTS) [x86] |
1023 | - KVM_MP_STATE_STOPPED: the vcpu is stopped [s390] | 1023 | - KVM_MP_STATE_STOPPED: the vcpu is stopped [s390,arm/arm64] |
1024 | - KVM_MP_STATE_CHECK_STOP: the vcpu is in a special error state [s390] | 1024 | - KVM_MP_STATE_CHECK_STOP: the vcpu is in a special error state [s390] |
1025 | - KVM_MP_STATE_OPERATING: the vcpu is operating (running or halted) | 1025 | - KVM_MP_STATE_OPERATING: the vcpu is operating (running or halted) |
1026 | [s390] | 1026 | [s390] |
@@ -1031,11 +1031,15 @@ On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an | |||
1031 | in-kernel irqchip, the multiprocessing state must be maintained by userspace on | 1031 | in-kernel irqchip, the multiprocessing state must be maintained by userspace on |
1032 | these architectures. | 1032 | these architectures. |
1033 | 1033 | ||
1034 | For arm/arm64: | ||
1035 | |||
1036 | The only states that are valid are KVM_MP_STATE_STOPPED and | ||
1037 | KVM_MP_STATE_RUNNABLE which reflect if the vcpu is paused or not. | ||
1034 | 1038 | ||
1035 | 4.39 KVM_SET_MP_STATE | 1039 | 4.39 KVM_SET_MP_STATE |
1036 | 1040 | ||
1037 | Capability: KVM_CAP_MP_STATE | 1041 | Capability: KVM_CAP_MP_STATE |
1038 | Architectures: x86, s390 | 1042 | Architectures: x86, s390, arm, arm64 |
1039 | Type: vcpu ioctl | 1043 | Type: vcpu ioctl |
1040 | Parameters: struct kvm_mp_state (in) | 1044 | Parameters: struct kvm_mp_state (in) |
1041 | Returns: 0 on success; -1 on error | 1045 | Returns: 0 on success; -1 on error |
@@ -1047,6 +1051,10 @@ On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an | |||
1047 | in-kernel irqchip, the multiprocessing state must be maintained by userspace on | 1051 | in-kernel irqchip, the multiprocessing state must be maintained by userspace on |
1048 | these architectures. | 1052 | these architectures. |
1049 | 1053 | ||
1054 | For arm/arm64: | ||
1055 | |||
1056 | The only states that are valid are KVM_MP_STATE_STOPPED and | ||
1057 | KVM_MP_STATE_RUNNABLE which reflect if the vcpu should be paused or not. | ||
1050 | 1058 | ||
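As a minimal userspace sketch (assuming <linux/kvm.h> and <sys/ioctl.h> are
included and vcpu_fd is an open vcpu file descriptor), pausing and later
polling an arm/arm64 vcpu with these two ioctls could look like this:

    struct kvm_mp_state mp_state;

    /* pause the vcpu */
    mp_state.mp_state = KVM_MP_STATE_STOPPED;
    if (ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp_state) < 0)
        perror("KVM_SET_MP_STATE");

    /* ...later, check whether it has been made runnable again */
    if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp_state) == 0 &&
        mp_state.mp_state == KVM_MP_STATE_RUNNABLE) {
        /* vcpu is no longer paused */
    }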
1051 | 4.40 KVM_SET_IDENTITY_MAP_ADDR | 1059 | 4.40 KVM_SET_IDENTITY_MAP_ADDR |
1052 | 1060 | ||
@@ -1967,15 +1975,25 @@ registers, find a list below: | |||
1967 | MIPS | KVM_REG_MIPS_CP0_STATUS | 32 | 1975 | MIPS | KVM_REG_MIPS_CP0_STATUS | 32 |
1968 | MIPS | KVM_REG_MIPS_CP0_CAUSE | 32 | 1976 | MIPS | KVM_REG_MIPS_CP0_CAUSE | 32 |
1969 | MIPS | KVM_REG_MIPS_CP0_EPC | 64 | 1977 | MIPS | KVM_REG_MIPS_CP0_EPC | 64 |
1978 | MIPS | KVM_REG_MIPS_CP0_PRID | 32 | ||
1970 | MIPS | KVM_REG_MIPS_CP0_CONFIG | 32 | 1979 | MIPS | KVM_REG_MIPS_CP0_CONFIG | 32 |
1971 | MIPS | KVM_REG_MIPS_CP0_CONFIG1 | 32 | 1980 | MIPS | KVM_REG_MIPS_CP0_CONFIG1 | 32 |
1972 | MIPS | KVM_REG_MIPS_CP0_CONFIG2 | 32 | 1981 | MIPS | KVM_REG_MIPS_CP0_CONFIG2 | 32 |
1973 | MIPS | KVM_REG_MIPS_CP0_CONFIG3 | 32 | 1982 | MIPS | KVM_REG_MIPS_CP0_CONFIG3 | 32 |
1983 | MIPS | KVM_REG_MIPS_CP0_CONFIG4 | 32 | ||
1984 | MIPS | KVM_REG_MIPS_CP0_CONFIG5 | 32 | ||
1974 | MIPS | KVM_REG_MIPS_CP0_CONFIG7 | 32 | 1985 | MIPS | KVM_REG_MIPS_CP0_CONFIG7 | 32 |
1975 | MIPS | KVM_REG_MIPS_CP0_ERROREPC | 64 | 1986 | MIPS | KVM_REG_MIPS_CP0_ERROREPC | 64 |
1976 | MIPS | KVM_REG_MIPS_COUNT_CTL | 64 | 1987 | MIPS | KVM_REG_MIPS_COUNT_CTL | 64 |
1977 | MIPS | KVM_REG_MIPS_COUNT_RESUME | 64 | 1988 | MIPS | KVM_REG_MIPS_COUNT_RESUME | 64 |
1978 | MIPS | KVM_REG_MIPS_COUNT_HZ | 64 | 1989 | MIPS | KVM_REG_MIPS_COUNT_HZ | 64 |
1990 | MIPS | KVM_REG_MIPS_FPR_32(0..31) | 32 | ||
1991 | MIPS | KVM_REG_MIPS_FPR_64(0..31) | 64 | ||
1992 | MIPS | KVM_REG_MIPS_VEC_128(0..31) | 128 | ||
1993 | MIPS | KVM_REG_MIPS_FCR_IR | 32 | ||
1994 | MIPS | KVM_REG_MIPS_FCR_CSR | 32 | ||
1995 | MIPS | KVM_REG_MIPS_MSA_IR | 32 | ||
1996 | MIPS | KVM_REG_MIPS_MSA_CSR | 32 | ||
1979 | 1997 | ||
1980 | ARM registers are mapped using the lower 32 bits. The upper 16 of that | 1998 | ARM registers are mapped using the lower 32 bits. The upper 16 of that |
1981 | is the register group type, or coprocessor number: | 1999 | is the register group type, or coprocessor number: |
@@ -2029,6 +2047,25 @@ patterns depending on whether they're 32-bit or 64-bit registers: | |||
2029 | MIPS KVM control registers (see above) have the following id bit patterns: | 2047 | MIPS KVM control registers (see above) have the following id bit patterns: |
2030 | 0x7030 0000 0002 <reg:16> | 2048 | 0x7030 0000 0002 <reg:16> |
2031 | 2049 | ||
2050 | MIPS FPU registers (see KVM_REG_MIPS_FPR_{32,64}() above) have the following | ||
2051 | id bit patterns depending on the size of the register being accessed. They are | ||
2052 | always accessed according to the current guest FPU mode (Status.FR and | ||
2053 | Config5.FRE), i.e. as the guest would see them, and they become unpredictable | ||
2054 | if the guest FPU mode is changed. MIPS SIMD Architecture (MSA) vector | ||
2055 | registers (see KVM_REG_MIPS_VEC_128() above) have similar patterns as they | ||
2056 | overlap the FPU registers: | ||
2057 | 0x7020 0000 0003 00 <0:3> <reg:5> (32-bit FPU registers) | ||
2058 | 0x7030 0000 0003 00 <0:3> <reg:5> (64-bit FPU registers) | ||
2059 | 0x7040 0000 0003 00 <0:3> <reg:5> (128-bit MSA vector registers) | ||
2060 | |||
2061 | MIPS FPU control registers (see KVM_REG_MIPS_FCR_{IR,CSR} above) have the | ||
2062 | following id bit patterns: | ||
2063 | 0x7020 0000 0003 01 <0:3> <reg:5> | ||
2064 | |||
2065 | MIPS MSA control registers (see KVM_REG_MIPS_MSA_{IR,CSR} above) have the | ||
2066 | following id bit patterns: | ||
2067 | 0x7020 0000 0003 02 <0:3> <reg:5> | ||
2068 | |||
2032 | 2069 | ||
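As an illustration of the id layout above, the following sketch reads guest
FPU register 2 with KVM_GET_ONE_REG (documented below). The MIPS_FPU_32()
helper is a local macro built from the documented bit pattern, not a name
taken from the uapi headers, and vcpu_fd is assumed:

    /* 0x7020 0000 0003 00 <0:3> <reg:5> from the table above */
    #define MIPS_FPU_32(reg) (0x7020000000030000ULL | ((reg) & 0x1fULL))

    __u32 fpr2;
    struct kvm_one_reg reg = {
        .id   = MIPS_FPU_32(2),  /* FPR2, as the current guest FPU mode sees it */
        .addr = (__u64)(unsigned long)&fpr2,
    };
    if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
        perror("KVM_GET_ONE_REG");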
2033 | 4.69 KVM_GET_ONE_REG | 2070 | 4.69 KVM_GET_ONE_REG |
2034 | 2071 | ||
@@ -2234,7 +2271,7 @@ into the hash PTE second double word). | |||
2234 | 4.75 KVM_IRQFD | 2271 | 4.75 KVM_IRQFD |
2235 | 2272 | ||
2236 | Capability: KVM_CAP_IRQFD | 2273 | Capability: KVM_CAP_IRQFD |
2237 | Architectures: x86 s390 | 2274 | Architectures: x86 s390 arm arm64 |
2238 | Type: vm ioctl | 2275 | Type: vm ioctl |
2239 | Parameters: struct kvm_irqfd (in) | 2276 | Parameters: struct kvm_irqfd (in) |
2240 | Returns: 0 on success, -1 on error | 2277 | Returns: 0 on success, -1 on error |
@@ -2260,6 +2297,10 @@ Note that closing the resamplefd is not sufficient to disable the | |||
2260 | irqfd. The KVM_IRQFD_FLAG_RESAMPLE is only necessary on assignment | 2297 | irqfd. The KVM_IRQFD_FLAG_RESAMPLE is only necessary on assignment |
2261 | and need not be specified with KVM_IRQFD_FLAG_DEASSIGN. | 2298 | and need not be specified with KVM_IRQFD_FLAG_DEASSIGN. |
2262 | 2299 | ||
2300 | On ARM/ARM64, the gsi field in the kvm_irqfd struct specifies the Shared | ||
2301 | Peripheral Interrupt (SPI) index, such that the GIC interrupt ID is | ||
2302 | given by gsi + 32. | ||
2303 | |||
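As an example, the following sketch (assuming vm_fd and an eventfd created
with eventfd(2)) attaches the eventfd to SPI 5, i.e. GIC interrupt ID 37:

    struct kvm_irqfd irqfd = {
        .fd  = event_fd,   /* signalling this eventfd raises the interrupt */
        .gsi = 5,          /* SPI index; GIC interrupt ID = 5 + 32 = 37 */
    };
    if (ioctl(vm_fd, KVM_IRQFD, &irqfd) < 0)
        perror("KVM_IRQFD");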
2263 | 4.76 KVM_PPC_ALLOCATE_HTAB | 2304 | 4.76 KVM_PPC_ALLOCATE_HTAB |
2264 | 2305 | ||
2265 | Capability: KVM_CAP_PPC_ALLOC_HTAB | 2306 | Capability: KVM_CAP_PPC_ALLOC_HTAB |
@@ -2716,6 +2757,227 @@ The fields in each entry are defined as follows: | |||
2716 | eax, ebx, ecx, edx: the values returned by the cpuid instruction for | 2757 | eax, ebx, ecx, edx: the values returned by the cpuid instruction for |
2717 | this function/index combination | 2758 | this function/index combination |
2718 | 2759 | ||
2760 | 4.89 KVM_S390_MEM_OP | ||
2761 | |||
2762 | Capability: KVM_CAP_S390_MEM_OP | ||
2763 | Architectures: s390 | ||
2764 | Type: vcpu ioctl | ||
2765 | Parameters: struct kvm_s390_mem_op (in) | ||
2766 | Returns: = 0 on success, | ||
2767 | < 0 on generic error (e.g. -EFAULT or -ENOMEM), | ||
2768 | > 0 if an exception occurred while walking the page tables | ||
2769 | |||
2770 | Read or write data from/to the logical (virtual) memory of a VCPU. | ||
2771 | |||
2772 | Parameters are specified via the following structure: | ||
2773 | |||
2774 | struct kvm_s390_mem_op { | ||
2775 | __u64 gaddr; /* the guest address */ | ||
2776 | __u64 flags; /* flags */ | ||
2777 | __u32 size; /* amount of bytes */ | ||
2778 | __u32 op; /* type of operation */ | ||
2779 | __u64 buf; /* buffer in userspace */ | ||
2780 | __u8 ar; /* the access register number */ | ||
2781 | __u8 reserved[31]; /* should be set to 0 */ | ||
2782 | }; | ||
2783 | |||
2784 | The type of operation is specified in the "op" field. It is either | ||
2785 | KVM_S390_MEMOP_LOGICAL_READ for reading from logical memory space or | ||
2786 | KVM_S390_MEMOP_LOGICAL_WRITE for writing to logical memory space. The | ||
2787 | KVM_S390_MEMOP_F_CHECK_ONLY flag can be set in the "flags" field to check | ||
2788 | whether the corresponding memory access would create an access exception | ||
2789 | (without touching the data in the memory at the destination). In case an | ||
2790 | access exception occurred while walking the MMU tables of the guest, the | ||
2791 | ioctl returns a positive error number to indicate the type of exception. | ||
2792 | This exception is also raised directly at the corresponding VCPU if the | ||
2793 | flag KVM_S390_MEMOP_F_INJECT_EXCEPTION is set in the "flags" field. | ||
2794 | |||
2795 | The start address of the memory region has to be specified in the "gaddr" | ||
2796 | field, and the length of the region in the "size" field. "buf" is the buffer | ||
2797 | supplied by the userspace application where the read data should be written | ||
2798 | to for KVM_S390_MEMOP_LOGICAL_READ, or where the data that should be written | ||
2799 | is stored for a KVM_S390_MEMOP_LOGICAL_WRITE. "buf" is unused and can be NULL | ||
2800 | when KVM_S390_MEMOP_F_CHECK_ONLY is specified. "ar" designates the access | ||
2801 | register number to be used. | ||
2802 | |||
2803 | The "reserved" field is meant for future extensions. It is not used by | ||
2804 | KVM with the currently defined set of flags. | ||
2805 | |||
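A minimal sketch of a logical read with this ioctl (vcpu_fd and the guest
address 0x2000 are assumptions chosen for illustration):

    __u8 data[256];
    struct kvm_s390_mem_op ksmo = {
        .gaddr = 0x2000,                       /* guest logical address */
        .size  = sizeof(data),
        .op    = KVM_S390_MEMOP_LOGICAL_READ,
        .buf   = (__u64)(unsigned long)data,
        .ar    = 0,                            /* access register 0 */
    };
    int rc = ioctl(vcpu_fd, KVM_S390_MEM_OP, &ksmo);
    if (rc > 0) {
        /* rc identifies the access exception hit during translation */
    } else if (rc < 0) {
        perror("KVM_S390_MEM_OP");
    }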
2806 | 4.90 KVM_S390_GET_SKEYS | ||
2807 | |||
2808 | Capability: KVM_CAP_S390_SKEYS | ||
2809 | Architectures: s390 | ||
2810 | Type: vm ioctl | ||
2811 | Parameters: struct kvm_s390_skeys | ||
2812 | Returns: 0 on success, KVM_S390_GET_SKEYS_NONE if guest is not using storage | ||
2813 | keys, negative value on error | ||
2814 | |||
2815 | This ioctl is used to get guest storage key values on the s390 | ||
2816 | architecture. The ioctl takes parameters via the kvm_s390_skeys struct. | ||
2817 | |||
2818 | struct kvm_s390_skeys { | ||
2819 | __u64 start_gfn; | ||
2820 | __u64 count; | ||
2821 | __u64 skeydata_addr; | ||
2822 | __u32 flags; | ||
2823 | __u32 reserved[9]; | ||
2824 | }; | ||
2825 | |||
2826 | The start_gfn field is the number of the first guest frame whose storage keys | ||
2827 | you want to get. | ||
2828 | |||
2829 | The count field is the number of consecutive frames (starting from start_gfn) | ||
2830 | whose storage keys to get. The count field must be at least 1 and the maximum | ||
2831 | allowed value is defined as KVM_S390_SKEYS_ALLOC_MAX. Values outside this range | ||
2832 | will cause the ioctl to return -EINVAL. | ||
2833 | |||
2834 | The skeydata_addr field is the address to a buffer large enough to hold count | ||
2835 | bytes. This buffer will be filled with storage key data by the ioctl. | ||
2836 | |||
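A minimal sketch (vm_fd assumed) that fetches the keys of the first 128
guest frames; one key byte is returned per frame:

    __u8 keys[128];
    struct kvm_s390_skeys skeys = {
        .start_gfn     = 0,
        .count         = 128,
        .skeydata_addr = (__u64)(unsigned long)keys,
    };
    if (ioctl(vm_fd, KVM_S390_GET_SKEYS, &skeys) < 0)
        perror("KVM_S390_GET_SKEYS");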
2837 | 4.91 KVM_S390_SET_SKEYS | ||
2838 | |||
2839 | Capability: KVM_CAP_S390_SKEYS | ||
2840 | Architectures: s390 | ||
2841 | Type: vm ioctl | ||
2842 | Parameters: struct kvm_s390_skeys | ||
2843 | Returns: 0 on success, negative value on error | ||
2844 | |||
2845 | This ioctl is used to set guest storage key values on the s390 | ||
2846 | architecture. The ioctl takes parameters via the kvm_s390_skeys struct. | ||
2847 | See section on KVM_S390_GET_SKEYS for struct definition. | ||
2848 | |||
2849 | The start_gfn field is the number of the first guest frame whose storage keys | ||
2850 | you want to set. | ||
2851 | |||
2852 | The count field is the number of consecutive frames (starting from start_gfn) | ||
2853 | whose storage keys to set. The count field must be at least 1 and the maximum | ||
2854 | allowed value is defined as KVM_S390_SKEYS_ALLOC_MAX. Values outside this range | ||
2855 | will cause the ioctl to return -EINVAL. | ||
2856 | |||
2857 | The skeydata_addr field is the address to a buffer containing count bytes of | ||
2858 | storage keys. Each byte in the buffer will be set as the storage key for a | ||
2859 | single frame starting at start_gfn for count frames. | ||
2860 | |||
2861 | Note: If any architecturally invalid key value is found in the given data then | ||
2862 | the ioctl will return -EINVAL. | ||
2863 | |||
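Setting keys is symmetrical; a sketch assuming keys[] has been filled with
one key byte per frame (e.g. by the KVM_S390_GET_SKEYS call above):

    struct kvm_s390_skeys skeys = {
        .start_gfn     = 0,
        .count         = 128,
        .skeydata_addr = (__u64)(unsigned long)keys,
    };
    if (ioctl(vm_fd, KVM_S390_SET_SKEYS, &skeys) < 0)
        perror("KVM_S390_SET_SKEYS");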
2864 | 4.92 KVM_S390_IRQ | ||
2865 | |||
2866 | Capability: KVM_CAP_S390_INJECT_IRQ | ||
2867 | Architectures: s390 | ||
2868 | Type: vcpu ioctl | ||
2869 | Parameters: struct kvm_s390_irq (in) | ||
2870 | Returns: 0 on success, -1 on error | ||
2871 | Errors: | ||
2872 | EINVAL: interrupt type is invalid | ||
2873 | type is KVM_S390_SIGP_STOP and flag parameter is invalid value | ||
2874 | type is KVM_S390_INT_EXTERNAL_CALL and code is bigger | ||
2875 | than the maximum number of VCPUs | ||
2876 | EBUSY: type is KVM_S390_SIGP_SET_PREFIX and vcpu is not stopped | ||
2877 | type is KVM_S390_SIGP_STOP and a stop irq is already pending | ||
2878 | type is KVM_S390_INT_EXTERNAL_CALL and an external call interrupt | ||
2879 | is already pending | ||
2880 | |||
2881 | Allows userspace to inject an interrupt into the guest. | ||
2882 | |||
2883 | Using struct kvm_s390_irq as a parameter allows injecting an | ||
2884 | additional payload, which is not possible via | ||
2885 | KVM_S390_INTERRUPT. | ||
2886 | |||
2887 | Interrupt parameters are passed via kvm_s390_irq: | ||
2888 | |||
2889 | struct kvm_s390_irq { | ||
2890 | __u64 type; | ||
2891 | union { | ||
2892 | struct kvm_s390_io_info io; | ||
2893 | struct kvm_s390_ext_info ext; | ||
2894 | struct kvm_s390_pgm_info pgm; | ||
2895 | struct kvm_s390_emerg_info emerg; | ||
2896 | struct kvm_s390_extcall_info extcall; | ||
2897 | struct kvm_s390_prefix_info prefix; | ||
2898 | struct kvm_s390_stop_info stop; | ||
2899 | struct kvm_s390_mchk_info mchk; | ||
2900 | char reserved[64]; | ||
2901 | } u; | ||
2902 | }; | ||
2903 | |||
2904 | type can be one of the following: | ||
2905 | |||
2906 | KVM_S390_SIGP_STOP - sigp stop; parameter in .stop | ||
2907 | KVM_S390_PROGRAM_INT - program check; parameters in .pgm | ||
2908 | KVM_S390_SIGP_SET_PREFIX - sigp set prefix; parameters in .prefix | ||
2909 | KVM_S390_RESTART - restart; no parameters | ||
2910 | KVM_S390_INT_CLOCK_COMP - clock comparator interrupt; no parameters | ||
2911 | KVM_S390_INT_CPU_TIMER - CPU timer interrupt; no parameters | ||
2912 | KVM_S390_INT_EMERGENCY - sigp emergency; parameters in .emerg | ||
2913 | KVM_S390_INT_EXTERNAL_CALL - sigp external call; parameters in .extcall | ||
2914 | KVM_S390_MCHK - machine check interrupt; parameters in .mchk | ||
2915 | |||
2916 | |||
2917 | Note that the vcpu ioctl is asynchronous to vcpu execution. | ||
2918 | |||
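A minimal sketch injecting a restart interrupt, which carries no additional
payload (vcpu_fd assumed); payload-carrying types fill the matching union
member (e.g. .u.pgm for KVM_S390_PROGRAM_INT) instead:

    struct kvm_s390_irq irq = {
        .type = KVM_S390_RESTART,
    };
    if (ioctl(vcpu_fd, KVM_S390_IRQ, &irq) < 0)
        perror("KVM_S390_IRQ");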
2919 | 4.94 KVM_S390_GET_IRQ_STATE | ||
2920 | |||
2921 | Capability: KVM_CAP_S390_IRQ_STATE | ||
2922 | Architectures: s390 | ||
2923 | Type: vcpu ioctl | ||
2924 | Parameters: struct kvm_s390_irq_state (out) | ||
2925 | Returns: >= number of bytes copied into buffer, | ||
2926 | -EINVAL if buffer size is 0, | ||
2927 | -ENOBUFS if buffer size is too small to fit all pending interrupts, | ||
2928 | -EFAULT if the buffer address was invalid | ||
2929 | |||
2930 | This ioctl allows userspace to retrieve the complete state of all currently | ||
2931 | pending interrupts in a single buffer. Use cases include migration | ||
2932 | and introspection. The parameter structure contains the address of a | ||
2933 | userspace buffer and its length: | ||
2934 | |||
2935 | struct kvm_s390_irq_state { | ||
2936 | __u64 buf; | ||
2937 | __u32 flags; | ||
2938 | __u32 len; | ||
2939 | __u32 reserved[4]; | ||
2940 | }; | ||
2941 | |||
2942 | Userspace passes in the above struct and for each pending interrupt a | ||
2943 | struct kvm_s390_irq is copied to the provided buffer. | ||
2944 | |||
2945 | If -ENOBUFS is returned the buffer provided was too small and userspace | ||
2946 | may retry with a bigger buffer. | ||
2947 | |||
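A minimal sketch of saving the pending state (vcpu_fd assumed; the buffer
size is an arbitrary choice for illustration, and -ENOBUFS simply means
"retry with a larger buffer"):

    __u8 irq_buf[16384];
    struct kvm_s390_irq_state irq_state = {
        .buf = (__u64)(unsigned long)irq_buf,
        .len = sizeof(irq_buf),
    };
    int bytes = ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &irq_state);
    if (bytes >= 0) {
        /* irq_buf now holds bytes/sizeof(struct kvm_s390_irq) entries */
    }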
2948 | 4.95 KVM_S390_SET_IRQ_STATE | ||
2949 | |||
2950 | Capability: KVM_CAP_S390_IRQ_STATE | ||
2951 | Architectures: s390 | ||
2952 | Type: vcpu ioctl | ||
2953 | Parameters: struct kvm_s390_irq_state (in) | ||
2954 | Returns: 0 on success, | ||
2955 | -EFAULT if the buffer address was invalid, | ||
2956 | -EINVAL for an invalid buffer length (see below), | ||
2957 | -EBUSY if there were already interrupts pending, | ||
2958 | errors occurring when actually injecting the | ||
2959 | interrupt. See KVM_S390_IRQ. | ||
2960 | |||
2961 | This ioctl allows userspace to set the complete state of all cpu-local | ||
2962 | interrupts currently pending for the vcpu. It is intended for restoring | ||
2963 | interrupt state after a migration. The input parameter is a userspace buffer | ||
2964 | containing a struct kvm_s390_irq_state: | ||
2965 | |||
2966 | struct kvm_s390_irq_state { | ||
2967 | __u64 buf; | ||
2968 | __u32 len; | ||
2969 | __u32 pad; | ||
2970 | }; | ||
2971 | |||
2972 | The userspace memory referenced by buf contains a struct kvm_s390_irq | ||
2973 | for each interrupt to be injected into the guest. | ||
2974 | If one of the interrupts could not be injected for some reason the | ||
2975 | ioctl aborts. | ||
2976 | |||
2977 | len must be a multiple of sizeof(struct kvm_s390_irq). It must be > 0 | ||
2978 | and it must not exceed (max_vcpus + 32) * sizeof(struct kvm_s390_irq), | ||
2979 | which is the maximum number of possibly pending cpu-local interrupts. | ||
2980 | |||
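The restore side is the mirror image; a sketch assuming irq_buf and bytes
were produced by KVM_S390_GET_IRQ_STATE on the source vcpu:

    struct kvm_s390_irq_state irq_state = {
        .buf = (__u64)(unsigned long)irq_buf,
        .len = bytes,   /* a multiple of sizeof(struct kvm_s390_irq) */
    };
    if (ioctl(vcpu_fd, KVM_S390_SET_IRQ_STATE, &irq_state) < 0)
        perror("KVM_S390_SET_IRQ_STATE");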
2719 | 5. The kvm_run structure | 2981 | 5. The kvm_run structure |
2720 | ------------------------ | 2982 | ------------------------ |
2721 | 2983 | ||
@@ -3189,6 +3451,31 @@ Parameters: none | |||
3189 | This capability enables the in-kernel irqchip for s390. Please refer to | 3451 | This capability enables the in-kernel irqchip for s390. Please refer to |
3190 | "4.24 KVM_CREATE_IRQCHIP" for details. | 3452 | "4.24 KVM_CREATE_IRQCHIP" for details. |
3191 | 3453 | ||
3454 | 6.9 KVM_CAP_MIPS_FPU | ||
3455 | |||
3456 | Architectures: mips | ||
3457 | Target: vcpu | ||
3458 | Parameters: args[0] is reserved for future use (should be 0). | ||
3459 | |||
3460 | This capability allows the use of the host Floating Point Unit by the guest. It | ||
3461 | allows the Config1.FP bit to be set to enable the FPU in the guest. Once this is | ||
3462 | done the KVM_REG_MIPS_FPR_* and KVM_REG_MIPS_FCR_* registers can be accessed | ||
3463 | (depending on the current guest FPU register mode), and the Status.FR, | ||
3464 | Config5.FRE bits are accessible via the KVM API and also from the guest, | ||
3465 | provided they are supported by the FPU. | ||
3466 | |||
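A minimal sketch of enabling the capability on a vcpu (vcpu_fd assumed);
KVM_CAP_MIPS_MSA below is enabled the same way:

    struct kvm_enable_cap cap = {
        .cap = KVM_CAP_MIPS_FPU,
        /* args[0] is left at 0 as required */
    };
    if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
        perror("KVM_ENABLE_CAP");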
3467 | 6.10 KVM_CAP_MIPS_MSA | ||
3468 | |||
3469 | Architectures: mips | ||
3470 | Target: vcpu | ||
3471 | Parameters: args[0] is reserved for future use (should be 0). | ||
3472 | |||
3473 | This capability allows the use of the MIPS SIMD Architecture (MSA) by the guest. | ||
3474 | It allows the Config3.MSAP bit to be set to enable the use of MSA by the guest. | ||
3475 | Once this is done the KVM_REG_MIPS_VEC_* and KVM_REG_MIPS_MSA_* registers can be | ||
3476 | accessed, and the Config5.MSAEn bit is accessible via the KVM API and also from | ||
3477 | the guest. | ||
3478 | |||
3192 | 7. Capabilities that can be enabled on VMs | 3479 | 7. Capabilities that can be enabled on VMs |
3193 | ------------------------------------------ | 3480 | ------------------------------------------ |
3194 | 3481 | ||
@@ -3248,3 +3535,41 @@ All other orders will be handled completely in user space. | |||
3248 | Only privileged operation exceptions will be checked for in the kernel (or even | 3535 | Only privileged operation exceptions will be checked for in the kernel (or even |
3249 | in the hardware prior to interception). If this capability is not enabled, the | 3536 | in the hardware prior to interception). If this capability is not enabled, the |
3250 | old way of handling SIGP orders is used (partially in kernel and user space). | 3537 | old way of handling SIGP orders is used (partially in kernel and user space). |
3538 | |||
3539 | 7.3 KVM_CAP_S390_VECTOR_REGISTERS | ||
3540 | |||
3541 | Architectures: s390 | ||
3542 | Parameters: none | ||
3543 | Returns: 0 on success, negative value on error | ||
3544 | |||
3545 | Allows use of the vector registers introduced with the z13 processor, and | ||
3546 | provides for the synchronization between host and user space. Will | ||
3547 | return -EINVAL if the machine does not support vectors. | ||
3548 | |||
3549 | 7.4 KVM_CAP_S390_USER_STSI | ||
3550 | |||
3551 | Architectures: s390 | ||
3552 | Parameters: none | ||
3553 | |||
3554 | This capability allows post-handlers for the STSI instruction. After | ||
3555 | initial handling in the kernel, KVM exits to user space with | ||
3556 | KVM_EXIT_S390_STSI to allow user space to insert further data. | ||
3557 | |||
3558 | Before exiting to userspace, kvm handlers should fill in the s390_stsi field of | ||
3559 | vcpu->run: | ||
3560 | struct { | ||
3561 | __u64 addr; | ||
3562 | __u8 ar; | ||
3563 | __u8 reserved; | ||
3564 | __u8 fc; | ||
3565 | __u8 sel1; | ||
3566 | __u16 sel2; | ||
3567 | } s390_stsi; | ||
3568 | |||
3569 | @addr - guest address of STSI SYSIB | ||
3570 | @fc - function code | ||
3571 | @sel1 - selector 1 | ||
3572 | @sel2 - selector 2 | ||
3573 | @ar - access register number | ||
3574 | |||
3575 | KVM handlers should exit to userspace with rc = -EREMOTE. | ||
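A sketch of the matching userspace side; run is the vcpu's mmap'ed
struct kvm_run, and picking SYSIB 3.2.2 (fc=3, sel1=2, sel2=2) as the block
to patch is only an assumption for illustration:

    /* called when KVM_RUN returns with run->exit_reason == KVM_EXIT_S390_STSI */
    static void handle_stsi(struct kvm_run *run)
    {
        if (run->s390_stsi.fc == 3 &&
            run->s390_stsi.sel1 == 2 && run->s390_stsi.sel2 == 2) {
            /* insert additional data into the SYSIB the guest placed at
             * guest address run->s390_stsi.addr, resolved through access
             * register run->s390_stsi.ar */
        }
    }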
diff --git a/Documentation/virtual/kvm/devices/s390_flic.txt b/Documentation/virtual/kvm/devices/s390_flic.txt
index 4ceef53164b0..d1ad9d5cae46 100644
--- a/Documentation/virtual/kvm/devices/s390_flic.txt
+++ b/Documentation/virtual/kvm/devices/s390_flic.txt
@@ -27,6 +27,9 @@ Groups: | |||
27 | Copies all floating interrupts into a buffer provided by userspace. | 27 | Copies all floating interrupts into a buffer provided by userspace. |
28 | When the buffer is too small it returns -ENOMEM, which is the indication | 28 | When the buffer is too small it returns -ENOMEM, which is the indication |
29 | for userspace to try again with a bigger buffer. | 29 | for userspace to try again with a bigger buffer. |
30 | -ENOBUFS is returned when the allocation of a kernelspace buffer has | ||
31 | failed. | ||
32 | -EFAULT is returned when copying data to userspace failed. | ||
30 | All interrupts remain pending, i.e. are not deleted from the list of | 33 | All interrupts remain pending, i.e. are not deleted from the list of |
31 | currently pending interrupts. | 34 | currently pending interrupts. |
32 | attr->addr contains the userspace address of the buffer into which all | 35 | attr->addr contains the userspace address of the buffer into which all |
diff --git a/MAINTAINERS b/MAINTAINERS
index e8bdf1b17cdb..b84686826b23 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5591,6 +5591,8 @@ S: Supported | |||
5591 | F: Documentation/*/kvm*.txt | 5591 | F: Documentation/*/kvm*.txt |
5592 | F: Documentation/virtual/kvm/ | 5592 | F: Documentation/virtual/kvm/ |
5593 | F: arch/*/kvm/ | 5593 | F: arch/*/kvm/ |
5594 | F: arch/x86/kernel/kvm.c | ||
5595 | F: arch/x86/kernel/kvmclock.c | ||
5594 | F: arch/*/include/asm/kvm* | 5596 | F: arch/*/include/asm/kvm* |
5595 | F: include/linux/kvm* | 5597 | F: include/linux/kvm* |
5596 | F: include/uapi/linux/kvm* | 5598 | F: include/uapi/linux/kvm* |
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 816db0bf2dd8..d995821f1698 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -185,6 +185,7 @@ | |||
185 | #define HSR_COND (0xfU << HSR_COND_SHIFT) | 185 | #define HSR_COND (0xfU << HSR_COND_SHIFT) |
186 | 186 | ||
187 | #define FSC_FAULT (0x04) | 187 | #define FSC_FAULT (0x04) |
188 | #define FSC_ACCESS (0x08) | ||
188 | #define FSC_PERM (0x0c) | 189 | #define FSC_PERM (0x0c) |
189 | 190 | ||
190 | /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ | 191 | /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ |
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 41008cd7c53f..d71607c16601 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -27,6 +27,8 @@ | |||
27 | #include <asm/fpstate.h> | 27 | #include <asm/fpstate.h> |
28 | #include <kvm/arm_arch_timer.h> | 28 | #include <kvm/arm_arch_timer.h> |
29 | 29 | ||
30 | #define __KVM_HAVE_ARCH_INTC_INITIALIZED | ||
31 | |||
30 | #if defined(CONFIG_KVM_ARM_MAX_VCPUS) | 32 | #if defined(CONFIG_KVM_ARM_MAX_VCPUS) |
31 | #define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS | 33 | #define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS |
32 | #else | 34 | #else |
@@ -165,19 +167,10 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); | |||
165 | 167 | ||
166 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); | 168 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); |
167 | int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); | 169 | int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); |
170 | int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); | ||
171 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); | ||
168 | 172 | ||
169 | /* We do not have shadow page tables, hence the empty hooks */ | 173 | /* We do not have shadow page tables, hence the empty hooks */ |
170 | static inline int kvm_age_hva(struct kvm *kvm, unsigned long start, | ||
171 | unsigned long end) | ||
172 | { | ||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) | ||
177 | { | ||
178 | return 0; | ||
179 | } | ||
180 | |||
181 | static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, | 174 | static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, |
182 | unsigned long address) | 175 | unsigned long address) |
183 | { | 176 | { |
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
index 3f83db2f6cf0..d8e90c8cb5fa 100644
--- a/arch/arm/include/asm/kvm_mmio.h
+++ b/arch/arm/include/asm/kvm_mmio.h
@@ -28,28 +28,6 @@ struct kvm_decode { | |||
28 | bool sign_extend; | 28 | bool sign_extend; |
29 | }; | 29 | }; |
30 | 30 | ||
31 | /* | ||
32 | * The in-kernel MMIO emulation code wants to use a copy of run->mmio, | ||
33 | * which is an anonymous type. Use our own type instead. | ||
34 | */ | ||
35 | struct kvm_exit_mmio { | ||
36 | phys_addr_t phys_addr; | ||
37 | u8 data[8]; | ||
38 | u32 len; | ||
39 | bool is_write; | ||
40 | void *private; | ||
41 | }; | ||
42 | |||
43 | static inline void kvm_prepare_mmio(struct kvm_run *run, | ||
44 | struct kvm_exit_mmio *mmio) | ||
45 | { | ||
46 | run->mmio.phys_addr = mmio->phys_addr; | ||
47 | run->mmio.len = mmio->len; | ||
48 | run->mmio.is_write = mmio->is_write; | ||
49 | memcpy(run->mmio.data, mmio->data, mmio->len); | ||
50 | run->exit_reason = KVM_EXIT_MMIO; | ||
51 | } | ||
52 | |||
53 | int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); | 31 | int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); |
54 | int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, | 32 | int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, |
55 | phys_addr_t fault_ipa); | 33 | phys_addr_t fault_ipa); |
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 0db25bc32864..2499867dd0d8 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -198,6 +198,9 @@ struct kvm_arch_memory_slot { | |||
198 | /* Highest supported SPI, from VGIC_NR_IRQS */ | 198 | /* Highest supported SPI, from VGIC_NR_IRQS */ |
199 | #define KVM_ARM_IRQ_GIC_MAX 127 | 199 | #define KVM_ARM_IRQ_GIC_MAX 127 |
200 | 200 | ||
201 | /* One single KVM irqchip, ie. the VGIC */ | ||
202 | #define KVM_NR_IRQCHIPS 1 | ||
203 | |||
201 | /* PSCI interface */ | 204 | /* PSCI interface */ |
202 | #define KVM_PSCI_FN_BASE 0x95c1ba5e | 205 | #define KVM_PSCI_FN_BASE 0x95c1ba5e |
203 | #define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n)) | 206 | #define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n)) |
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 2d2d6087b9b1..488eaac56028 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -190,7 +190,6 @@ int main(void) | |||
190 | DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar)); | 190 | DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar)); |
191 | DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.fault.hpfar)); | 191 | DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.fault.hpfar)); |
192 | DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc)); | 192 | DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc)); |
193 | #ifdef CONFIG_KVM_ARM_VGIC | ||
194 | DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); | 193 | DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); |
195 | DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr)); | 194 | DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr)); |
196 | DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr)); | 195 | DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr)); |
@@ -200,14 +199,11 @@ int main(void) | |||
200 | DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr)); | 199 | DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr)); |
201 | DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr)); | 200 | DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr)); |
202 | DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr)); | 201 | DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr)); |
203 | #ifdef CONFIG_KVM_ARM_TIMER | ||
204 | DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl)); | 202 | DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl)); |
205 | DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval)); | 203 | DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval)); |
206 | DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff)); | 204 | DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff)); |
207 | DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled)); | 205 | DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled)); |
208 | #endif | ||
209 | DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); | 206 | DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); |
210 | #endif | ||
211 | DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); | 207 | DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); |
212 | #endif | 208 | #endif |
213 | return 0; | 209 | return 0; |
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 338ace78ed18..f1f79d104309 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -18,6 +18,7 @@ if VIRTUALIZATION | |||
18 | 18 | ||
19 | config KVM | 19 | config KVM |
20 | bool "Kernel-based Virtual Machine (KVM) support" | 20 | bool "Kernel-based Virtual Machine (KVM) support" |
21 | depends on MMU && OF | ||
21 | select PREEMPT_NOTIFIERS | 22 | select PREEMPT_NOTIFIERS |
22 | select ANON_INODES | 23 | select ANON_INODES |
23 | select HAVE_KVM_CPU_RELAX_INTERCEPT | 24 | select HAVE_KVM_CPU_RELAX_INTERCEPT |
@@ -26,10 +27,12 @@ config KVM | |||
26 | select KVM_ARM_HOST | 27 | select KVM_ARM_HOST |
27 | select KVM_GENERIC_DIRTYLOG_READ_PROTECT | 28 | select KVM_GENERIC_DIRTYLOG_READ_PROTECT |
28 | select SRCU | 29 | select SRCU |
29 | depends on ARM_VIRT_EXT && ARM_LPAE | 30 | select MMU_NOTIFIER |
31 | select HAVE_KVM_EVENTFD | ||
32 | select HAVE_KVM_IRQFD | ||
33 | depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER | ||
30 | ---help--- | 34 | ---help--- |
31 | Support hosting virtualized guest machines. You will also | 35 | Support hosting virtualized guest machines. |
32 | need to select one or more of the processor modules below. | ||
33 | 36 | ||
34 | This module provides access to the hardware capabilities through | 37 | This module provides access to the hardware capabilities through |
35 | a character device node named /dev/kvm. | 38 | a character device node named /dev/kvm. |
@@ -37,10 +40,7 @@ config KVM | |||
37 | If unsure, say N. | 40 | If unsure, say N. |
38 | 41 | ||
39 | config KVM_ARM_HOST | 42 | config KVM_ARM_HOST |
40 | bool "KVM host support for ARM cpus." | 43 | bool |
41 | depends on KVM | ||
42 | depends on MMU | ||
43 | select MMU_NOTIFIER | ||
44 | ---help--- | 44 | ---help--- |
45 | Provides host support for ARM processors. | 45 | Provides host support for ARM processors. |
46 | 46 | ||
@@ -55,20 +55,4 @@ config KVM_ARM_MAX_VCPUS | |||
55 | large, so only choose a reasonable number that you expect to | 55 | large, so only choose a reasonable number that you expect to |
56 | actually use. | 56 | actually use. |
57 | 57 | ||
58 | config KVM_ARM_VGIC | ||
59 | bool "KVM support for Virtual GIC" | ||
60 | depends on KVM_ARM_HOST && OF | ||
61 | select HAVE_KVM_IRQCHIP | ||
62 | default y | ||
63 | ---help--- | ||
64 | Adds support for a hardware assisted, in-kernel GIC emulation. | ||
65 | |||
66 | config KVM_ARM_TIMER | ||
67 | bool "KVM support for Architected Timers" | ||
68 | depends on KVM_ARM_VGIC && ARM_ARCH_TIMER | ||
69 | select HAVE_KVM_IRQCHIP | ||
70 | default y | ||
71 | ---help--- | ||
72 | Adds support for the Architected Timers in virtual machines | ||
73 | |||
74 | endif # VIRTUALIZATION | 58 | endif # VIRTUALIZATION |
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index 443b8bea43e9..139e46c08b6e 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -7,7 +7,7 @@ ifeq ($(plus_virt),+virt) | |||
7 | plus_virt_def := -DREQUIRES_VIRT=1 | 7 | plus_virt_def := -DREQUIRES_VIRT=1 |
8 | endif | 8 | endif |
9 | 9 | ||
10 | ccflags-y += -Ivirt/kvm -Iarch/arm/kvm | 10 | ccflags-y += -Iarch/arm/kvm |
11 | CFLAGS_arm.o := -I. $(plus_virt_def) | 11 | CFLAGS_arm.o := -I. $(plus_virt_def) |
12 | CFLAGS_mmu.o := -I. | 12 | CFLAGS_mmu.o := -I. |
13 | 13 | ||
@@ -15,12 +15,12 @@ AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt) | |||
15 | AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt) | 15 | AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt) |
16 | 16 | ||
17 | KVM := ../../../virt/kvm | 17 | KVM := ../../../virt/kvm |
18 | kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o | 18 | kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o |
19 | 19 | ||
20 | obj-y += kvm-arm.o init.o interrupts.o | 20 | obj-y += kvm-arm.o init.o interrupts.o |
21 | obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o | 21 | obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o |
22 | obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o | 22 | obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o |
23 | obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o | 23 | obj-y += $(KVM)/arm/vgic.o |
24 | obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o | 24 | obj-y += $(KVM)/arm/vgic-v2.o |
25 | obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o | 25 | obj-y += $(KVM)/arm/vgic-v2-emul.o |
26 | obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o | 26 | obj-y += $(KVM)/arm/arch_timer.o |
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 5560f74f9eee..6f536451ab78 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -61,8 +61,6 @@ static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); | |||
61 | static u8 kvm_next_vmid; | 61 | static u8 kvm_next_vmid; |
62 | static DEFINE_SPINLOCK(kvm_vmid_lock); | 62 | static DEFINE_SPINLOCK(kvm_vmid_lock); |
63 | 63 | ||
64 | static bool vgic_present; | ||
65 | |||
66 | static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu) | 64 | static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu) |
67 | { | 65 | { |
68 | BUG_ON(preemptible()); | 66 | BUG_ON(preemptible()); |
@@ -173,8 +171,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
173 | int r; | 171 | int r; |
174 | switch (ext) { | 172 | switch (ext) { |
175 | case KVM_CAP_IRQCHIP: | 173 | case KVM_CAP_IRQCHIP: |
176 | r = vgic_present; | 174 | case KVM_CAP_IRQFD: |
177 | break; | 175 | case KVM_CAP_IOEVENTFD: |
178 | case KVM_CAP_DEVICE_CTRL: | 176 | case KVM_CAP_DEVICE_CTRL: |
179 | case KVM_CAP_USER_MEMORY: | 177 | case KVM_CAP_USER_MEMORY: |
180 | case KVM_CAP_SYNC_MMU: | 178 | case KVM_CAP_SYNC_MMU: |
@@ -183,6 +181,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
183 | case KVM_CAP_ARM_PSCI: | 181 | case KVM_CAP_ARM_PSCI: |
184 | case KVM_CAP_ARM_PSCI_0_2: | 182 | case KVM_CAP_ARM_PSCI_0_2: |
185 | case KVM_CAP_READONLY_MEM: | 183 | case KVM_CAP_READONLY_MEM: |
184 | case KVM_CAP_MP_STATE: | ||
186 | r = 1; | 185 | r = 1; |
187 | break; | 186 | break; |
188 | case KVM_CAP_COALESCED_MMIO: | 187 | case KVM_CAP_COALESCED_MMIO: |
@@ -268,7 +267,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | |||
268 | 267 | ||
269 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | 268 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) |
270 | { | 269 | { |
271 | return 0; | 270 | return kvm_timer_should_fire(vcpu); |
272 | } | 271 | } |
273 | 272 | ||
274 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | 273 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) |
@@ -313,13 +312,29 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | |||
313 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, | 312 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
314 | struct kvm_mp_state *mp_state) | 313 | struct kvm_mp_state *mp_state) |
315 | { | 314 | { |
316 | return -EINVAL; | 315 | if (vcpu->arch.pause) |
316 | mp_state->mp_state = KVM_MP_STATE_STOPPED; | ||
317 | else | ||
318 | mp_state->mp_state = KVM_MP_STATE_RUNNABLE; | ||
319 | |||
320 | return 0; | ||
317 | } | 321 | } |
318 | 322 | ||
319 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | 323 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, |
320 | struct kvm_mp_state *mp_state) | 324 | struct kvm_mp_state *mp_state) |
321 | { | 325 | { |
322 | return -EINVAL; | 326 | switch (mp_state->mp_state) { |
327 | case KVM_MP_STATE_RUNNABLE: | ||
328 | vcpu->arch.pause = false; | ||
329 | break; | ||
330 | case KVM_MP_STATE_STOPPED: | ||
331 | vcpu->arch.pause = true; | ||
332 | break; | ||
333 | default: | ||
334 | return -EINVAL; | ||
335 | } | ||
336 | |||
337 | return 0; | ||
323 | } | 338 | } |
324 | 339 | ||
325 | /** | 340 | /** |
@@ -452,6 +467,11 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) | |||
452 | return 0; | 467 | return 0; |
453 | } | 468 | } |
454 | 469 | ||
470 | bool kvm_arch_intc_initialized(struct kvm *kvm) | ||
471 | { | ||
472 | return vgic_initialized(kvm); | ||
473 | } | ||
474 | |||
455 | static void vcpu_pause(struct kvm_vcpu *vcpu) | 475 | static void vcpu_pause(struct kvm_vcpu *vcpu) |
456 | { | 476 | { |
457 | wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu); | 477 | wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu); |
@@ -831,8 +851,6 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm, | |||
831 | 851 | ||
832 | switch (dev_id) { | 852 | switch (dev_id) { |
833 | case KVM_ARM_DEVICE_VGIC_V2: | 853 | case KVM_ARM_DEVICE_VGIC_V2: |
834 | if (!vgic_present) | ||
835 | return -ENXIO; | ||
836 | return kvm_vgic_addr(kvm, type, &dev_addr->addr, true); | 854 | return kvm_vgic_addr(kvm, type, &dev_addr->addr, true); |
837 | default: | 855 | default: |
838 | return -ENODEV; | 856 | return -ENODEV; |
@@ -847,10 +865,7 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
847 | 865 | ||
848 | switch (ioctl) { | 866 | switch (ioctl) { |
849 | case KVM_CREATE_IRQCHIP: { | 867 | case KVM_CREATE_IRQCHIP: { |
850 | if (vgic_present) | 868 | return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); |
851 | return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); | ||
852 | else | ||
853 | return -ENXIO; | ||
854 | } | 869 | } |
855 | case KVM_ARM_SET_DEVICE_ADDR: { | 870 | case KVM_ARM_SET_DEVICE_ADDR: { |
856 | struct kvm_arm_device_addr dev_addr; | 871 | struct kvm_arm_device_addr dev_addr; |
@@ -1035,10 +1050,6 @@ static int init_hyp_mode(void) | |||
1035 | if (err) | 1050 | if (err) |
1036 | goto out_free_context; | 1051 | goto out_free_context; |
1037 | 1052 | ||
1038 | #ifdef CONFIG_KVM_ARM_VGIC | ||
1039 | vgic_present = true; | ||
1040 | #endif | ||
1041 | |||
1042 | /* | 1053 | /* |
1043 | * Init HYP architected timer support | 1054 | * Init HYP architected timer support |
1044 | */ | 1055 | */ |
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 384bab67c462..d503fbb787d3 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -109,22 +109,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
109 | return -EINVAL; | 109 | return -EINVAL; |
110 | } | 110 | } |
111 | 111 | ||
112 | #ifndef CONFIG_KVM_ARM_TIMER | ||
113 | |||
114 | #define NUM_TIMER_REGS 0 | ||
115 | |||
116 | static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) | ||
117 | { | ||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | static bool is_timer_reg(u64 index) | ||
122 | { | ||
123 | return false; | ||
124 | } | ||
125 | |||
126 | #else | ||
127 | |||
128 | #define NUM_TIMER_REGS 3 | 112 | #define NUM_TIMER_REGS 3 |
129 | 113 | ||
130 | static bool is_timer_reg(u64 index) | 114 | static bool is_timer_reg(u64 index) |
@@ -152,8 +136,6 @@ static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) | |||
152 | return 0; | 136 | return 0; |
153 | } | 137 | } |
154 | 138 | ||
155 | #endif | ||
156 | |||
157 | static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | 139 | static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) |
158 | { | 140 | { |
159 | void __user *uaddr = (void __user *)(long)reg->addr; | 141 | void __user *uaddr = (void __user *)(long)reg->addr; |
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 14d488388480..35e4a3a0c476 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -402,7 +402,6 @@ vcpu .req r0 @ vcpu pointer always in r0 | |||
402 | * Assumes vcpu pointer in vcpu reg | 402 | * Assumes vcpu pointer in vcpu reg |
403 | */ | 403 | */ |
404 | .macro save_vgic_state | 404 | .macro save_vgic_state |
405 | #ifdef CONFIG_KVM_ARM_VGIC | ||
406 | /* Get VGIC VCTRL base into r2 */ | 405 | /* Get VGIC VCTRL base into r2 */ |
407 | ldr r2, [vcpu, #VCPU_KVM] | 406 | ldr r2, [vcpu, #VCPU_KVM] |
408 | ldr r2, [r2, #KVM_VGIC_VCTRL] | 407 | ldr r2, [r2, #KVM_VGIC_VCTRL] |
@@ -460,7 +459,6 @@ ARM_BE8(rev r6, r6 ) | |||
460 | subs r4, r4, #1 | 459 | subs r4, r4, #1 |
461 | bne 1b | 460 | bne 1b |
462 | 2: | 461 | 2: |
463 | #endif | ||
464 | .endm | 462 | .endm |
465 | 463 | ||
466 | /* | 464 | /* |
@@ -469,7 +467,6 @@ ARM_BE8(rev r6, r6 ) | |||
469 | * Assumes vcpu pointer in vcpu reg | 467 | * Assumes vcpu pointer in vcpu reg |
470 | */ | 468 | */ |
471 | .macro restore_vgic_state | 469 | .macro restore_vgic_state |
472 | #ifdef CONFIG_KVM_ARM_VGIC | ||
473 | /* Get VGIC VCTRL base into r2 */ | 470 | /* Get VGIC VCTRL base into r2 */ |
474 | ldr r2, [vcpu, #VCPU_KVM] | 471 | ldr r2, [vcpu, #VCPU_KVM] |
475 | ldr r2, [r2, #KVM_VGIC_VCTRL] | 472 | ldr r2, [r2, #KVM_VGIC_VCTRL] |
@@ -501,7 +498,6 @@ ARM_BE8(rev r6, r6 ) | |||
501 | subs r4, r4, #1 | 498 | subs r4, r4, #1 |
502 | bne 1b | 499 | bne 1b |
503 | 2: | 500 | 2: |
504 | #endif | ||
505 | .endm | 501 | .endm |
506 | 502 | ||
507 | #define CNTHCTL_PL1PCTEN (1 << 0) | 503 | #define CNTHCTL_PL1PCTEN (1 << 0) |
@@ -515,7 +511,6 @@ ARM_BE8(rev r6, r6 ) | |||
515 | * Clobbers r2-r5 | 511 | * Clobbers r2-r5 |
516 | */ | 512 | */ |
517 | .macro save_timer_state | 513 | .macro save_timer_state |
518 | #ifdef CONFIG_KVM_ARM_TIMER | ||
519 | ldr r4, [vcpu, #VCPU_KVM] | 514 | ldr r4, [vcpu, #VCPU_KVM] |
520 | ldr r2, [r4, #KVM_TIMER_ENABLED] | 515 | ldr r2, [r4, #KVM_TIMER_ENABLED] |
521 | cmp r2, #0 | 516 | cmp r2, #0 |
@@ -537,7 +532,6 @@ ARM_BE8(rev r6, r6 ) | |||
537 | mcrr p15, 4, r2, r2, c14 @ CNTVOFF | 532 | mcrr p15, 4, r2, r2, c14 @ CNTVOFF |
538 | 533 | ||
539 | 1: | 534 | 1: |
540 | #endif | ||
541 | @ Allow physical timer/counter access for the host | 535 | @ Allow physical timer/counter access for the host |
542 | mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL | 536 | mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL |
543 | orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN) | 537 | orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN) |
@@ -559,7 +553,6 @@ ARM_BE8(rev r6, r6 ) | |||
559 | bic r2, r2, #CNTHCTL_PL1PCEN | 553 | bic r2, r2, #CNTHCTL_PL1PCEN |
560 | mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL | 554 | mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL |
561 | 555 | ||
562 | #ifdef CONFIG_KVM_ARM_TIMER | ||
563 | ldr r4, [vcpu, #VCPU_KVM] | 556 | ldr r4, [vcpu, #VCPU_KVM] |
564 | ldr r2, [r4, #KVM_TIMER_ENABLED] | 557 | ldr r2, [r4, #KVM_TIMER_ENABLED] |
565 | cmp r2, #0 | 558 | cmp r2, #0 |
@@ -579,7 +572,6 @@ ARM_BE8(rev r6, r6 ) | |||
579 | and r2, r2, #3 | 572 | and r2, r2, #3 |
580 | mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL | 573 | mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL |
581 | 1: | 574 | 1: |
582 | #endif | ||
583 | .endm | 575 | .endm |
584 | 576 | ||
585 | .equ vmentry, 0 | 577 | .equ vmentry, 0 |
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 5d3bfc0eb3f0..974b1c606d04 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -121,12 +121,11 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
121 | return 0; | 121 | return 0; |
122 | } | 122 | } |
123 | 123 | ||
124 | static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | 124 | static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len) |
125 | struct kvm_exit_mmio *mmio) | ||
126 | { | 125 | { |
127 | unsigned long rt; | 126 | unsigned long rt; |
128 | int len; | 127 | int access_size; |
129 | bool is_write, sign_extend; | 128 | bool sign_extend; |
130 | 129 | ||
131 | if (kvm_vcpu_dabt_isextabt(vcpu)) { | 130 | if (kvm_vcpu_dabt_isextabt(vcpu)) { |
132 | /* cache operation on I/O addr, tell guest unsupported */ | 131 | /* cache operation on I/O addr, tell guest unsupported */ |
@@ -140,17 +139,15 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | |||
140 | return 1; | 139 | return 1; |
141 | } | 140 | } |
142 | 141 | ||
143 | len = kvm_vcpu_dabt_get_as(vcpu); | 142 | access_size = kvm_vcpu_dabt_get_as(vcpu); |
144 | if (unlikely(len < 0)) | 143 | if (unlikely(access_size < 0)) |
145 | return len; | 144 | return access_size; |
146 | 145 | ||
147 | is_write = kvm_vcpu_dabt_iswrite(vcpu); | 146 | *is_write = kvm_vcpu_dabt_iswrite(vcpu); |
148 | sign_extend = kvm_vcpu_dabt_issext(vcpu); | 147 | sign_extend = kvm_vcpu_dabt_issext(vcpu); |
149 | rt = kvm_vcpu_dabt_get_rd(vcpu); | 148 | rt = kvm_vcpu_dabt_get_rd(vcpu); |
150 | 149 | ||
151 | mmio->is_write = is_write; | 150 | *len = access_size; |
152 | mmio->phys_addr = fault_ipa; | ||
153 | mmio->len = len; | ||
154 | vcpu->arch.mmio_decode.sign_extend = sign_extend; | 151 | vcpu->arch.mmio_decode.sign_extend = sign_extend; |
155 | vcpu->arch.mmio_decode.rt = rt; | 152 | vcpu->arch.mmio_decode.rt = rt; |
156 | 153 | ||
@@ -165,20 +162,20 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | |||
165 | int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, | 162 | int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, |
166 | phys_addr_t fault_ipa) | 163 | phys_addr_t fault_ipa) |
167 | { | 164 | { |
168 | struct kvm_exit_mmio mmio; | ||
169 | unsigned long data; | 165 | unsigned long data; |
170 | unsigned long rt; | 166 | unsigned long rt; |
171 | int ret; | 167 | int ret; |
168 | bool is_write; | ||
169 | int len; | ||
170 | u8 data_buf[8]; | ||
172 | 171 | ||
173 | /* | 172 | /* |
174 | * Prepare MMIO operation. First stash it in a private | 173 | * Prepare MMIO operation. First decode the syndrome data we get |
175 | * structure that we can use for in-kernel emulation. If the | 174 | * from the CPU. Then try if some in-kernel emulation feels |
176 | * kernel can't handle it, copy it into run->mmio and let user | 175 | * responsible, otherwise let user space do its magic. |
177 | * space do its magic. | ||
178 | */ | 176 | */ |
179 | |||
180 | if (kvm_vcpu_dabt_isvalid(vcpu)) { | 177 | if (kvm_vcpu_dabt_isvalid(vcpu)) { |
181 | ret = decode_hsr(vcpu, fault_ipa, &mmio); | 178 | ret = decode_hsr(vcpu, &is_write, &len); |
182 | if (ret) | 179 | if (ret) |
183 | return ret; | 180 | return ret; |
184 | } else { | 181 | } else { |
@@ -188,21 +185,34 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, | |||
188 | 185 | ||
189 | rt = vcpu->arch.mmio_decode.rt; | 186 | rt = vcpu->arch.mmio_decode.rt; |
190 | 187 | ||
191 | if (mmio.is_write) { | 188 | if (is_write) { |
192 | data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), | 189 | data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len); |
193 | mmio.len); | 190 | |
191 | trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data); | ||
192 | mmio_write_buf(data_buf, len, data); | ||
194 | 193 | ||
195 | trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, mmio.len, | 194 | ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len, |
196 | fault_ipa, data); | 195 | data_buf); |
197 | mmio_write_buf(mmio.data, mmio.len, data); | ||
198 | } else { | 196 | } else { |
199 | trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, mmio.len, | 197 | trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len, |
200 | fault_ipa, 0); | 198 | fault_ipa, 0); |
199 | |||
200 | ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len, | ||
201 | data_buf); | ||
201 | } | 202 | } |
202 | 203 | ||
203 | if (vgic_handle_mmio(vcpu, run, &mmio)) | 204 | /* Now prepare kvm_run for the potential return to userland. */ |
205 | run->mmio.is_write = is_write; | ||
206 | run->mmio.phys_addr = fault_ipa; | ||
207 | run->mmio.len = len; | ||
208 | memcpy(run->mmio.data, data_buf, len); | ||
209 | |||
210 | if (!ret) { | ||
211 | /* We handled the access successfully in the kernel. */ | ||
212 | kvm_handle_mmio_return(vcpu, run); | ||
204 | return 1; | 213 | return 1; |
214 | } | ||
205 | 215 | ||
206 | kvm_prepare_mmio(run, &mmio); | 216 | run->exit_reason = KVM_EXIT_MMIO; |
207 | return 0; | 217 | return 0; |
208 | } | 218 | } |
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 5656d79c5a44..15b050d46fc9 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1330,10 +1330,51 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | |||
1330 | 1330 | ||
1331 | out_unlock: | 1331 | out_unlock: |
1332 | spin_unlock(&kvm->mmu_lock); | 1332 | spin_unlock(&kvm->mmu_lock); |
1333 | kvm_set_pfn_accessed(pfn); | ||
1333 | kvm_release_pfn_clean(pfn); | 1334 | kvm_release_pfn_clean(pfn); |
1334 | return ret; | 1335 | return ret; |
1335 | } | 1336 | } |
1336 | 1337 | ||
1338 | /* | ||
1339 | * Resolve the access fault by making the page young again. | ||
1340 | * Note that because the faulting entry is guaranteed not to be | ||
1341 | * cached in the TLB, we don't need to invalidate anything. | ||
1342 | */ | ||
1343 | static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) | ||
1344 | { | ||
1345 | pmd_t *pmd; | ||
1346 | pte_t *pte; | ||
1347 | pfn_t pfn; | ||
1348 | bool pfn_valid = false; | ||
1349 | |||
1350 | trace_kvm_access_fault(fault_ipa); | ||
1351 | |||
1352 | spin_lock(&vcpu->kvm->mmu_lock); | ||
1353 | |||
1354 | pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa); | ||
1355 | if (!pmd || pmd_none(*pmd)) /* Nothing there */ | ||
1356 | goto out; | ||
1357 | |||
1358 | if (kvm_pmd_huge(*pmd)) { /* THP, HugeTLB */ | ||
1359 | *pmd = pmd_mkyoung(*pmd); | ||
1360 | pfn = pmd_pfn(*pmd); | ||
1361 | pfn_valid = true; | ||
1362 | goto out; | ||
1363 | } | ||
1364 | |||
1365 | pte = pte_offset_kernel(pmd, fault_ipa); | ||
1366 | if (pte_none(*pte)) /* Nothing there either */ | ||
1367 | goto out; | ||
1368 | |||
1369 | *pte = pte_mkyoung(*pte); /* Just a page... */ | ||
1370 | pfn = pte_pfn(*pte); | ||
1371 | pfn_valid = true; | ||
1372 | out: | ||
1373 | spin_unlock(&vcpu->kvm->mmu_lock); | ||
1374 | if (pfn_valid) | ||
1375 | kvm_set_pfn_accessed(pfn); | ||
1376 | } | ||
1377 | |||
1337 | /** | 1378 | /** |
1338 | * kvm_handle_guest_abort - handles all 2nd stage aborts | 1379 | * kvm_handle_guest_abort - handles all 2nd stage aborts |
1339 | * @vcpu: the VCPU pointer | 1380 | * @vcpu: the VCPU pointer |
@@ -1364,7 +1405,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
1364 | 1405 | ||
1365 | /* Check the stage-2 fault is trans. fault or write fault */ | 1406 | /* Check the stage-2 fault is trans. fault or write fault */ |
1366 | fault_status = kvm_vcpu_trap_get_fault_type(vcpu); | 1407 | fault_status = kvm_vcpu_trap_get_fault_type(vcpu); |
1367 | if (fault_status != FSC_FAULT && fault_status != FSC_PERM) { | 1408 | if (fault_status != FSC_FAULT && fault_status != FSC_PERM && |
1409 | fault_status != FSC_ACCESS) { | ||
1368 | kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n", | 1410 | kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n", |
1369 | kvm_vcpu_trap_get_class(vcpu), | 1411 | kvm_vcpu_trap_get_class(vcpu), |
1370 | (unsigned long)kvm_vcpu_trap_get_fault(vcpu), | 1412 | (unsigned long)kvm_vcpu_trap_get_fault(vcpu), |
@@ -1400,6 +1442,12 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
1400 | /* Userspace should not be able to register out-of-bounds IPAs */ | 1442 | /* Userspace should not be able to register out-of-bounds IPAs */ |
1401 | VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE); | 1443 | VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE); |
1402 | 1444 | ||
1445 | if (fault_status == FSC_ACCESS) { | ||
1446 | handle_access_fault(vcpu, fault_ipa); | ||
1447 | ret = 1; | ||
1448 | goto out_unlock; | ||
1449 | } | ||
1450 | |||
1403 | ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status); | 1451 | ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status); |
1404 | if (ret == 0) | 1452 | if (ret == 0) |
1405 | ret = 1; | 1453 | ret = 1; |
@@ -1408,15 +1456,16 @@ out_unlock: | |||
1408 | return ret; | 1456 | return ret; |
1409 | } | 1457 | } |
1410 | 1458 | ||
1411 | static void handle_hva_to_gpa(struct kvm *kvm, | 1459 | static int handle_hva_to_gpa(struct kvm *kvm, |
1412 | unsigned long start, | 1460 | unsigned long start, |
1413 | unsigned long end, | 1461 | unsigned long end, |
1414 | void (*handler)(struct kvm *kvm, | 1462 | int (*handler)(struct kvm *kvm, |
1415 | gpa_t gpa, void *data), | 1463 | gpa_t gpa, void *data), |
1416 | void *data) | 1464 | void *data) |
1417 | { | 1465 | { |
1418 | struct kvm_memslots *slots; | 1466 | struct kvm_memslots *slots; |
1419 | struct kvm_memory_slot *memslot; | 1467 | struct kvm_memory_slot *memslot; |
1468 | int ret = 0; | ||
1420 | 1469 | ||
1421 | slots = kvm_memslots(kvm); | 1470 | slots = kvm_memslots(kvm); |
1422 | 1471 | ||
@@ -1440,14 +1489,17 @@ static void handle_hva_to_gpa(struct kvm *kvm, | |||
1440 | 1489 | ||
1441 | for (; gfn < gfn_end; ++gfn) { | 1490 | for (; gfn < gfn_end; ++gfn) { |
1442 | gpa_t gpa = gfn << PAGE_SHIFT; | 1491 | gpa_t gpa = gfn << PAGE_SHIFT; |
1443 | handler(kvm, gpa, data); | 1492 | ret |= handler(kvm, gpa, data); |
1444 | } | 1493 | } |
1445 | } | 1494 | } |
1495 | |||
1496 | return ret; | ||
1446 | } | 1497 | } |
1447 | 1498 | ||
1448 | static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) | 1499 | static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) |
1449 | { | 1500 | { |
1450 | unmap_stage2_range(kvm, gpa, PAGE_SIZE); | 1501 | unmap_stage2_range(kvm, gpa, PAGE_SIZE); |
1502 | return 0; | ||
1451 | } | 1503 | } |
1452 | 1504 | ||
1453 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) | 1505 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) |
@@ -1473,7 +1525,7 @@ int kvm_unmap_hva_range(struct kvm *kvm, | |||
1473 | return 0; | 1525 | return 0; |
1474 | } | 1526 | } |
1475 | 1527 | ||
1476 | static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data) | 1528 | static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data) |
1477 | { | 1529 | { |
1478 | pte_t *pte = (pte_t *)data; | 1530 | pte_t *pte = (pte_t *)data; |
1479 | 1531 | ||
@@ -1485,6 +1537,7 @@ static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data) | |||
1485 | * through this calling path. | 1537 | * through this calling path. |
1486 | */ | 1538 | */ |
1487 | stage2_set_pte(kvm, NULL, gpa, pte, 0); | 1539 | stage2_set_pte(kvm, NULL, gpa, pte, 0); |
1540 | return 0; | ||
1488 | } | 1541 | } |
1489 | 1542 | ||
1490 | 1543 | ||
@@ -1501,6 +1554,67 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) | |||
1501 | handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte); | 1554 | handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte); |
1502 | } | 1555 | } |
1503 | 1556 | ||
1557 | static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) | ||
1558 | { | ||
1559 | pmd_t *pmd; | ||
1560 | pte_t *pte; | ||
1561 | |||
1562 | pmd = stage2_get_pmd(kvm, NULL, gpa); | ||
1563 | if (!pmd || pmd_none(*pmd)) /* Nothing there */ | ||
1564 | return 0; | ||
1565 | |||
1566 | if (kvm_pmd_huge(*pmd)) { /* THP, HugeTLB */ | ||
1567 | if (pmd_young(*pmd)) { | ||
1568 | *pmd = pmd_mkold(*pmd); | ||
1569 | return 1; | ||
1570 | } | ||
1571 | |||
1572 | return 0; | ||
1573 | } | ||
1574 | |||
1575 | pte = pte_offset_kernel(pmd, gpa); | ||
1576 | if (pte_none(*pte)) | ||
1577 | return 0; | ||
1578 | |||
1579 | if (pte_young(*pte)) { | ||
1580 | *pte = pte_mkold(*pte); /* Just a page... */ | ||
1581 | return 1; | ||
1582 | } | ||
1583 | |||
1584 | return 0; | ||
1585 | } | ||
1586 | |||
1587 | static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) | ||
1588 | { | ||
1589 | pmd_t *pmd; | ||
1590 | pte_t *pte; | ||
1591 | |||
1592 | pmd = stage2_get_pmd(kvm, NULL, gpa); | ||
1593 | if (!pmd || pmd_none(*pmd)) /* Nothing there */ | ||
1594 | return 0; | ||
1595 | |||
1596 | if (kvm_pmd_huge(*pmd)) /* THP, HugeTLB */ | ||
1597 | return pmd_young(*pmd); | ||
1598 | |||
1599 | pte = pte_offset_kernel(pmd, gpa); | ||
1600 | if (!pte_none(*pte)) /* Just a page... */ | ||
1601 | return pte_young(*pte); | ||
1602 | |||
1603 | return 0; | ||
1604 | } | ||
1605 | |||
1606 | int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) | ||
1607 | { | ||
1608 | trace_kvm_age_hva(start, end); | ||
1609 | return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL); | ||
1610 | } | ||
1611 | |||
1612 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) | ||
1613 | { | ||
1614 | trace_kvm_test_age_hva(hva); | ||
1615 | return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL); | ||
1616 | } | ||
1617 | |||
1504 | void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) | 1618 | void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) |
1505 | { | 1619 | { |
1506 | mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); | 1620 | mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); |
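Before moving on to the tracepoints, a short sketch of the aggregation that the new int-returning handle_hva_to_gpa() performs may help: each per-page handler reports whether its page was referenced, and the results are OR-ed so the range as a whole reports young if any page in it was. The snippet below is a simplified stand-alone model with made-up names, not kernel code.

/* Minimal model (illustrative only) of how per-page handler results are
 * OR-combined across a range, mirroring the handler loop above: the range
 * is considered "young" if any page in it is, and the aging handler also
 * clears the referenced state, like pte_mkold()/pmd_mkold(). */
#include <stdbool.h>
#include <stdio.h>

static int age_one_page(bool *young, unsigned long idx)
{
	if (young[idx]) {
		young[idx] = false;	/* clear the accessed state */
		return 1;		/* report that it was referenced */
	}
	return 0;
}

static int age_range(bool *young, unsigned long start, unsigned long end)
{
	int ret = 0;
	unsigned long i;

	for (i = start; i < end; i++)
		ret |= age_one_page(young, i);	/* aggregate like handle_hva_to_gpa() */
	return ret;
}

int main(void)
{
	bool young[4] = { false, true, false, false };

	printf("range young? %d\n", age_range(young, 0, 4));	/* prints 1 */
	printf("range young? %d\n", age_range(young, 0, 4));	/* prints 0 */
	return 0;
}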
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h index 6817664b46b8..0ec35392d208 100644 --- a/arch/arm/kvm/trace.h +++ b/arch/arm/kvm/trace.h | |||
@@ -68,6 +68,21 @@ TRACE_EVENT(kvm_guest_fault, | |||
68 | __entry->hxfar, __entry->vcpu_pc) | 68 | __entry->hxfar, __entry->vcpu_pc) |
69 | ); | 69 | ); |
70 | 70 | ||
71 | TRACE_EVENT(kvm_access_fault, | ||
72 | TP_PROTO(unsigned long ipa), | ||
73 | TP_ARGS(ipa), | ||
74 | |||
75 | TP_STRUCT__entry( | ||
76 | __field( unsigned long, ipa ) | ||
77 | ), | ||
78 | |||
79 | TP_fast_assign( | ||
80 | __entry->ipa = ipa; | ||
81 | ), | ||
82 | |||
83 | TP_printk("IPA: %lx", __entry->ipa) | ||
84 | ); | ||
85 | |||
71 | TRACE_EVENT(kvm_irq_line, | 86 | TRACE_EVENT(kvm_irq_line, |
72 | TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level), | 87 | TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level), |
73 | TP_ARGS(type, vcpu_idx, irq_num, level), | 88 | TP_ARGS(type, vcpu_idx, irq_num, level), |
@@ -210,6 +225,39 @@ TRACE_EVENT(kvm_set_spte_hva, | |||
210 | TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva) | 225 | TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva) |
211 | ); | 226 | ); |
212 | 227 | ||
228 | TRACE_EVENT(kvm_age_hva, | ||
229 | TP_PROTO(unsigned long start, unsigned long end), | ||
230 | TP_ARGS(start, end), | ||
231 | |||
232 | TP_STRUCT__entry( | ||
233 | __field( unsigned long, start ) | ||
234 | __field( unsigned long, end ) | ||
235 | ), | ||
236 | |||
237 | TP_fast_assign( | ||
238 | __entry->start = start; | ||
239 | __entry->end = end; | ||
240 | ), | ||
241 | |||
242 | TP_printk("mmu notifier age hva: %#08lx -- %#08lx", | ||
243 | __entry->start, __entry->end) | ||
244 | ); | ||
245 | |||
246 | TRACE_EVENT(kvm_test_age_hva, | ||
247 | TP_PROTO(unsigned long hva), | ||
248 | TP_ARGS(hva), | ||
249 | |||
250 | TP_STRUCT__entry( | ||
251 | __field( unsigned long, hva ) | ||
252 | ), | ||
253 | |||
254 | TP_fast_assign( | ||
255 | __entry->hva = hva; | ||
256 | ), | ||
257 | |||
258 | TP_printk("mmu notifier test age hva: %#08lx", __entry->hva) | ||
259 | ); | ||
260 | |||
213 | TRACE_EVENT(kvm_hvc, | 261 | TRACE_EVENT(kvm_hvc, |
214 | TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm), | 262 | TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm), |
215 | TP_ARGS(vcpu_pc, r0, imm), | 263 | TP_ARGS(vcpu_pc, r0, imm), |
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 92bbae381598..70522450ca23 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h | |||
@@ -90,6 +90,7 @@ | |||
90 | #define ESR_ELx_FSC (0x3F) | 90 | #define ESR_ELx_FSC (0x3F) |
91 | #define ESR_ELx_FSC_TYPE (0x3C) | 91 | #define ESR_ELx_FSC_TYPE (0x3C) |
92 | #define ESR_ELx_FSC_EXTABT (0x10) | 92 | #define ESR_ELx_FSC_EXTABT (0x10) |
93 | #define ESR_ELx_FSC_ACCESS (0x08) | ||
93 | #define ESR_ELx_FSC_FAULT (0x04) | 94 | #define ESR_ELx_FSC_FAULT (0x04) |
94 | #define ESR_ELx_FSC_PERM (0x0C) | 95 | #define ESR_ELx_FSC_PERM (0x0C) |
95 | #define ESR_ELx_CV (UL(1) << 24) | 96 | #define ESR_ELx_CV (UL(1) << 24) |
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 54bb4ba97441..ac6fafb95fe7 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h | |||
@@ -188,6 +188,7 @@ | |||
188 | 188 | ||
189 | /* For compatibility with fault code shared with 32-bit */ | 189 | /* For compatibility with fault code shared with 32-bit */ |
190 | #define FSC_FAULT ESR_ELx_FSC_FAULT | 190 | #define FSC_FAULT ESR_ELx_FSC_FAULT |
191 | #define FSC_ACCESS ESR_ELx_FSC_ACCESS | ||
191 | #define FSC_PERM ESR_ELx_FSC_PERM | 192 | #define FSC_PERM ESR_ELx_FSC_PERM |
192 | 193 | ||
193 | /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ | 194 | /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ |
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 8ac3c70fe3c6..f0f58c9beec0 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h | |||
@@ -28,6 +28,8 @@ | |||
28 | #include <asm/kvm_asm.h> | 28 | #include <asm/kvm_asm.h> |
29 | #include <asm/kvm_mmio.h> | 29 | #include <asm/kvm_mmio.h> |
30 | 30 | ||
31 | #define __KVM_HAVE_ARCH_INTC_INITIALIZED | ||
32 | |||
31 | #if defined(CONFIG_KVM_ARM_MAX_VCPUS) | 33 | #if defined(CONFIG_KVM_ARM_MAX_VCPUS) |
32 | #define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS | 34 | #define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS |
33 | #else | 35 | #else |
@@ -177,19 +179,10 @@ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); | |||
177 | int kvm_unmap_hva_range(struct kvm *kvm, | 179 | int kvm_unmap_hva_range(struct kvm *kvm, |
178 | unsigned long start, unsigned long end); | 180 | unsigned long start, unsigned long end); |
179 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); | 181 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); |
182 | int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); | ||
183 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); | ||
180 | 184 | ||
181 | /* We do not have shadow page tables, hence the empty hooks */ | 185 | /* We do not have shadow page tables, hence the empty hooks */ |
182 | static inline int kvm_age_hva(struct kvm *kvm, unsigned long start, | ||
183 | unsigned long end) | ||
184 | { | ||
185 | return 0; | ||
186 | } | ||
187 | |||
188 | static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) | ||
189 | { | ||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, | 186 | static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, |
194 | unsigned long address) | 187 | unsigned long address) |
195 | { | 188 | { |
diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h index 9f52beb7cb13..889c908ee631 100644 --- a/arch/arm64/include/asm/kvm_mmio.h +++ b/arch/arm64/include/asm/kvm_mmio.h | |||
@@ -31,28 +31,6 @@ struct kvm_decode { | |||
31 | bool sign_extend; | 31 | bool sign_extend; |
32 | }; | 32 | }; |
33 | 33 | ||
34 | /* | ||
35 | * The in-kernel MMIO emulation code wants to use a copy of run->mmio, | ||
36 | * which is an anonymous type. Use our own type instead. | ||
37 | */ | ||
38 | struct kvm_exit_mmio { | ||
39 | phys_addr_t phys_addr; | ||
40 | u8 data[8]; | ||
41 | u32 len; | ||
42 | bool is_write; | ||
43 | void *private; | ||
44 | }; | ||
45 | |||
46 | static inline void kvm_prepare_mmio(struct kvm_run *run, | ||
47 | struct kvm_exit_mmio *mmio) | ||
48 | { | ||
49 | run->mmio.phys_addr = mmio->phys_addr; | ||
50 | run->mmio.len = mmio->len; | ||
51 | run->mmio.is_write = mmio->is_write; | ||
52 | memcpy(run->mmio.data, mmio->data, mmio->len); | ||
53 | run->exit_reason = KVM_EXIT_MMIO; | ||
54 | } | ||
55 | |||
56 | int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); | 34 | int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); |
57 | int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, | 35 | int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, |
58 | phys_addr_t fault_ipa); | 36 | phys_addr_t fault_ipa); |
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 3ef77a466018..c154c0b7eb60 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h | |||
@@ -191,6 +191,9 @@ struct kvm_arch_memory_slot { | |||
191 | /* Highest supported SPI, from VGIC_NR_IRQS */ | 191 | /* Highest supported SPI, from VGIC_NR_IRQS */ |
192 | #define KVM_ARM_IRQ_GIC_MAX 127 | 192 | #define KVM_ARM_IRQ_GIC_MAX 127 |
193 | 193 | ||
194 | /* One single KVM irqchip, ie. the VGIC */ | ||
195 | #define KVM_NR_IRQCHIPS 1 | ||
196 | |||
194 | /* PSCI interface */ | 197 | /* PSCI interface */ |
195 | #define KVM_PSCI_FN_BASE 0x95c1ba5e | 198 | #define KVM_PSCI_FN_BASE 0x95c1ba5e |
196 | #define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n)) | 199 | #define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n)) |
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index f5590c81d95f..5105e297ed5f 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig | |||
@@ -18,6 +18,7 @@ if VIRTUALIZATION | |||
18 | 18 | ||
19 | config KVM | 19 | config KVM |
20 | bool "Kernel-based Virtual Machine (KVM) support" | 20 | bool "Kernel-based Virtual Machine (KVM) support" |
21 | depends on OF | ||
21 | select MMU_NOTIFIER | 22 | select MMU_NOTIFIER |
22 | select PREEMPT_NOTIFIERS | 23 | select PREEMPT_NOTIFIERS |
23 | select ANON_INODES | 24 | select ANON_INODES |
@@ -25,10 +26,10 @@ config KVM | |||
25 | select HAVE_KVM_ARCH_TLB_FLUSH_ALL | 26 | select HAVE_KVM_ARCH_TLB_FLUSH_ALL |
26 | select KVM_MMIO | 27 | select KVM_MMIO |
27 | select KVM_ARM_HOST | 28 | select KVM_ARM_HOST |
28 | select KVM_ARM_VGIC | ||
29 | select KVM_ARM_TIMER | ||
30 | select KVM_GENERIC_DIRTYLOG_READ_PROTECT | 29 | select KVM_GENERIC_DIRTYLOG_READ_PROTECT |
31 | select SRCU | 30 | select SRCU |
31 | select HAVE_KVM_EVENTFD | ||
32 | select HAVE_KVM_IRQFD | ||
32 | ---help--- | 33 | ---help--- |
33 | Support hosting virtualized guest machines. | 34 | Support hosting virtualized guest machines. |
34 | 35 | ||
@@ -50,17 +51,4 @@ config KVM_ARM_MAX_VCPUS | |||
50 | large, so only choose a reasonable number that you expect to | 51 | large, so only choose a reasonable number that you expect to |
51 | actually use. | 52 | actually use. |
52 | 53 | ||
53 | config KVM_ARM_VGIC | ||
54 | bool | ||
55 | depends on KVM_ARM_HOST && OF | ||
56 | select HAVE_KVM_IRQCHIP | ||
57 | ---help--- | ||
58 | Adds support for a hardware assisted, in-kernel GIC emulation. | ||
59 | |||
60 | config KVM_ARM_TIMER | ||
61 | bool | ||
62 | depends on KVM_ARM_VGIC | ||
63 | ---help--- | ||
64 | Adds support for the Architected Timers in virtual machines. | ||
65 | |||
66 | endif # VIRTUALIZATION | 54 | endif # VIRTUALIZATION |
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 4e6e09ee4033..d5904f876cdb 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for Kernel-based Virtual Machine module | 2 | # Makefile for Kernel-based Virtual Machine module |
3 | # | 3 | # |
4 | 4 | ||
5 | ccflags-y += -Ivirt/kvm -Iarch/arm64/kvm | 5 | ccflags-y += -Iarch/arm64/kvm |
6 | CFLAGS_arm.o := -I. | 6 | CFLAGS_arm.o := -I. |
7 | CFLAGS_mmu.o := -I. | 7 | CFLAGS_mmu.o := -I. |
8 | 8 | ||
@@ -11,7 +11,7 @@ ARM=../../../arch/arm/kvm | |||
11 | 11 | ||
12 | obj-$(CONFIG_KVM_ARM_HOST) += kvm.o | 12 | obj-$(CONFIG_KVM_ARM_HOST) += kvm.o |
13 | 13 | ||
14 | kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o | 14 | kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o |
15 | kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o | 15 | kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o |
16 | kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o | 16 | kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o |
17 | 17 | ||
@@ -19,11 +19,11 @@ kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o | |||
19 | kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o | 19 | kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o |
20 | kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o | 20 | kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o |
21 | 21 | ||
22 | kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o | 22 | kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o |
23 | kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o | 23 | kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o |
24 | kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o | 24 | kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o |
25 | kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o | 25 | kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v2-switch.o |
26 | kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o | 26 | kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o |
27 | kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3-emul.o | 27 | kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o |
28 | kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o | 28 | kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v3-switch.o |
29 | kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o | 29 | kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o |
diff --git a/arch/mips/include/asm/asmmacro-32.h b/arch/mips/include/asm/asmmacro-32.h index cdac7b3eeaf7..80386470d3a4 100644 --- a/arch/mips/include/asm/asmmacro-32.h +++ b/arch/mips/include/asm/asmmacro-32.h | |||
@@ -16,38 +16,38 @@ | |||
16 | .set push | 16 | .set push |
17 | SET_HARDFLOAT | 17 | SET_HARDFLOAT |
18 | cfc1 \tmp, fcr31 | 18 | cfc1 \tmp, fcr31 |
19 | swc1 $f0, THREAD_FPR0_LS64(\thread) | 19 | swc1 $f0, THREAD_FPR0(\thread) |
20 | swc1 $f1, THREAD_FPR1_LS64(\thread) | 20 | swc1 $f1, THREAD_FPR1(\thread) |
21 | swc1 $f2, THREAD_FPR2_LS64(\thread) | 21 | swc1 $f2, THREAD_FPR2(\thread) |
22 | swc1 $f3, THREAD_FPR3_LS64(\thread) | 22 | swc1 $f3, THREAD_FPR3(\thread) |
23 | swc1 $f4, THREAD_FPR4_LS64(\thread) | 23 | swc1 $f4, THREAD_FPR4(\thread) |
24 | swc1 $f5, THREAD_FPR5_LS64(\thread) | 24 | swc1 $f5, THREAD_FPR5(\thread) |
25 | swc1 $f6, THREAD_FPR6_LS64(\thread) | 25 | swc1 $f6, THREAD_FPR6(\thread) |
26 | swc1 $f7, THREAD_FPR7_LS64(\thread) | 26 | swc1 $f7, THREAD_FPR7(\thread) |
27 | swc1 $f8, THREAD_FPR8_LS64(\thread) | 27 | swc1 $f8, THREAD_FPR8(\thread) |
28 | swc1 $f9, THREAD_FPR9_LS64(\thread) | 28 | swc1 $f9, THREAD_FPR9(\thread) |
29 | swc1 $f10, THREAD_FPR10_LS64(\thread) | 29 | swc1 $f10, THREAD_FPR10(\thread) |
30 | swc1 $f11, THREAD_FPR11_LS64(\thread) | 30 | swc1 $f11, THREAD_FPR11(\thread) |
31 | swc1 $f12, THREAD_FPR12_LS64(\thread) | 31 | swc1 $f12, THREAD_FPR12(\thread) |
32 | swc1 $f13, THREAD_FPR13_LS64(\thread) | 32 | swc1 $f13, THREAD_FPR13(\thread) |
33 | swc1 $f14, THREAD_FPR14_LS64(\thread) | 33 | swc1 $f14, THREAD_FPR14(\thread) |
34 | swc1 $f15, THREAD_FPR15_LS64(\thread) | 34 | swc1 $f15, THREAD_FPR15(\thread) |
35 | swc1 $f16, THREAD_FPR16_LS64(\thread) | 35 | swc1 $f16, THREAD_FPR16(\thread) |
36 | swc1 $f17, THREAD_FPR17_LS64(\thread) | 36 | swc1 $f17, THREAD_FPR17(\thread) |
37 | swc1 $f18, THREAD_FPR18_LS64(\thread) | 37 | swc1 $f18, THREAD_FPR18(\thread) |
38 | swc1 $f19, THREAD_FPR19_LS64(\thread) | 38 | swc1 $f19, THREAD_FPR19(\thread) |
39 | swc1 $f20, THREAD_FPR20_LS64(\thread) | 39 | swc1 $f20, THREAD_FPR20(\thread) |
40 | swc1 $f21, THREAD_FPR21_LS64(\thread) | 40 | swc1 $f21, THREAD_FPR21(\thread) |
41 | swc1 $f22, THREAD_FPR22_LS64(\thread) | 41 | swc1 $f22, THREAD_FPR22(\thread) |
42 | swc1 $f23, THREAD_FPR23_LS64(\thread) | 42 | swc1 $f23, THREAD_FPR23(\thread) |
43 | swc1 $f24, THREAD_FPR24_LS64(\thread) | 43 | swc1 $f24, THREAD_FPR24(\thread) |
44 | swc1 $f25, THREAD_FPR25_LS64(\thread) | 44 | swc1 $f25, THREAD_FPR25(\thread) |
45 | swc1 $f26, THREAD_FPR26_LS64(\thread) | 45 | swc1 $f26, THREAD_FPR26(\thread) |
46 | swc1 $f27, THREAD_FPR27_LS64(\thread) | 46 | swc1 $f27, THREAD_FPR27(\thread) |
47 | swc1 $f28, THREAD_FPR28_LS64(\thread) | 47 | swc1 $f28, THREAD_FPR28(\thread) |
48 | swc1 $f29, THREAD_FPR29_LS64(\thread) | 48 | swc1 $f29, THREAD_FPR29(\thread) |
49 | swc1 $f30, THREAD_FPR30_LS64(\thread) | 49 | swc1 $f30, THREAD_FPR30(\thread) |
50 | swc1 $f31, THREAD_FPR31_LS64(\thread) | 50 | swc1 $f31, THREAD_FPR31(\thread) |
51 | sw \tmp, THREAD_FCR31(\thread) | 51 | sw \tmp, THREAD_FCR31(\thread) |
52 | .set pop | 52 | .set pop |
53 | .endm | 53 | .endm |
@@ -56,38 +56,38 @@ | |||
56 | .set push | 56 | .set push |
57 | SET_HARDFLOAT | 57 | SET_HARDFLOAT |
58 | lw \tmp, THREAD_FCR31(\thread) | 58 | lw \tmp, THREAD_FCR31(\thread) |
59 | lwc1 $f0, THREAD_FPR0_LS64(\thread) | 59 | lwc1 $f0, THREAD_FPR0(\thread) |
60 | lwc1 $f1, THREAD_FPR1_LS64(\thread) | 60 | lwc1 $f1, THREAD_FPR1(\thread) |
61 | lwc1 $f2, THREAD_FPR2_LS64(\thread) | 61 | lwc1 $f2, THREAD_FPR2(\thread) |
62 | lwc1 $f3, THREAD_FPR3_LS64(\thread) | 62 | lwc1 $f3, THREAD_FPR3(\thread) |
63 | lwc1 $f4, THREAD_FPR4_LS64(\thread) | 63 | lwc1 $f4, THREAD_FPR4(\thread) |
64 | lwc1 $f5, THREAD_FPR5_LS64(\thread) | 64 | lwc1 $f5, THREAD_FPR5(\thread) |
65 | lwc1 $f6, THREAD_FPR6_LS64(\thread) | 65 | lwc1 $f6, THREAD_FPR6(\thread) |
66 | lwc1 $f7, THREAD_FPR7_LS64(\thread) | 66 | lwc1 $f7, THREAD_FPR7(\thread) |
67 | lwc1 $f8, THREAD_FPR8_LS64(\thread) | 67 | lwc1 $f8, THREAD_FPR8(\thread) |
68 | lwc1 $f9, THREAD_FPR9_LS64(\thread) | 68 | lwc1 $f9, THREAD_FPR9(\thread) |
69 | lwc1 $f10, THREAD_FPR10_LS64(\thread) | 69 | lwc1 $f10, THREAD_FPR10(\thread) |
70 | lwc1 $f11, THREAD_FPR11_LS64(\thread) | 70 | lwc1 $f11, THREAD_FPR11(\thread) |
71 | lwc1 $f12, THREAD_FPR12_LS64(\thread) | 71 | lwc1 $f12, THREAD_FPR12(\thread) |
72 | lwc1 $f13, THREAD_FPR13_LS64(\thread) | 72 | lwc1 $f13, THREAD_FPR13(\thread) |
73 | lwc1 $f14, THREAD_FPR14_LS64(\thread) | 73 | lwc1 $f14, THREAD_FPR14(\thread) |
74 | lwc1 $f15, THREAD_FPR15_LS64(\thread) | 74 | lwc1 $f15, THREAD_FPR15(\thread) |
75 | lwc1 $f16, THREAD_FPR16_LS64(\thread) | 75 | lwc1 $f16, THREAD_FPR16(\thread) |
76 | lwc1 $f17, THREAD_FPR17_LS64(\thread) | 76 | lwc1 $f17, THREAD_FPR17(\thread) |
77 | lwc1 $f18, THREAD_FPR18_LS64(\thread) | 77 | lwc1 $f18, THREAD_FPR18(\thread) |
78 | lwc1 $f19, THREAD_FPR19_LS64(\thread) | 78 | lwc1 $f19, THREAD_FPR19(\thread) |
79 | lwc1 $f20, THREAD_FPR20_LS64(\thread) | 79 | lwc1 $f20, THREAD_FPR20(\thread) |
80 | lwc1 $f21, THREAD_FPR21_LS64(\thread) | 80 | lwc1 $f21, THREAD_FPR21(\thread) |
81 | lwc1 $f22, THREAD_FPR22_LS64(\thread) | 81 | lwc1 $f22, THREAD_FPR22(\thread) |
82 | lwc1 $f23, THREAD_FPR23_LS64(\thread) | 82 | lwc1 $f23, THREAD_FPR23(\thread) |
83 | lwc1 $f24, THREAD_FPR24_LS64(\thread) | 83 | lwc1 $f24, THREAD_FPR24(\thread) |
84 | lwc1 $f25, THREAD_FPR25_LS64(\thread) | 84 | lwc1 $f25, THREAD_FPR25(\thread) |
85 | lwc1 $f26, THREAD_FPR26_LS64(\thread) | 85 | lwc1 $f26, THREAD_FPR26(\thread) |
86 | lwc1 $f27, THREAD_FPR27_LS64(\thread) | 86 | lwc1 $f27, THREAD_FPR27(\thread) |
87 | lwc1 $f28, THREAD_FPR28_LS64(\thread) | 87 | lwc1 $f28, THREAD_FPR28(\thread) |
88 | lwc1 $f29, THREAD_FPR29_LS64(\thread) | 88 | lwc1 $f29, THREAD_FPR29(\thread) |
89 | lwc1 $f30, THREAD_FPR30_LS64(\thread) | 89 | lwc1 $f30, THREAD_FPR30(\thread) |
90 | lwc1 $f31, THREAD_FPR31_LS64(\thread) | 90 | lwc1 $f31, THREAD_FPR31(\thread) |
91 | ctc1 \tmp, fcr31 | 91 | ctc1 \tmp, fcr31 |
92 | .set pop | 92 | .set pop |
93 | .endm | 93 | .endm |
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index 0cae4595e985..6156ac8c4cfb 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h | |||
@@ -60,22 +60,22 @@ | |||
60 | .set push | 60 | .set push |
61 | SET_HARDFLOAT | 61 | SET_HARDFLOAT |
62 | cfc1 \tmp, fcr31 | 62 | cfc1 \tmp, fcr31 |
63 | sdc1 $f0, THREAD_FPR0_LS64(\thread) | 63 | sdc1 $f0, THREAD_FPR0(\thread) |
64 | sdc1 $f2, THREAD_FPR2_LS64(\thread) | 64 | sdc1 $f2, THREAD_FPR2(\thread) |
65 | sdc1 $f4, THREAD_FPR4_LS64(\thread) | 65 | sdc1 $f4, THREAD_FPR4(\thread) |
66 | sdc1 $f6, THREAD_FPR6_LS64(\thread) | 66 | sdc1 $f6, THREAD_FPR6(\thread) |
67 | sdc1 $f8, THREAD_FPR8_LS64(\thread) | 67 | sdc1 $f8, THREAD_FPR8(\thread) |
68 | sdc1 $f10, THREAD_FPR10_LS64(\thread) | 68 | sdc1 $f10, THREAD_FPR10(\thread) |
69 | sdc1 $f12, THREAD_FPR12_LS64(\thread) | 69 | sdc1 $f12, THREAD_FPR12(\thread) |
70 | sdc1 $f14, THREAD_FPR14_LS64(\thread) | 70 | sdc1 $f14, THREAD_FPR14(\thread) |
71 | sdc1 $f16, THREAD_FPR16_LS64(\thread) | 71 | sdc1 $f16, THREAD_FPR16(\thread) |
72 | sdc1 $f18, THREAD_FPR18_LS64(\thread) | 72 | sdc1 $f18, THREAD_FPR18(\thread) |
73 | sdc1 $f20, THREAD_FPR20_LS64(\thread) | 73 | sdc1 $f20, THREAD_FPR20(\thread) |
74 | sdc1 $f22, THREAD_FPR22_LS64(\thread) | 74 | sdc1 $f22, THREAD_FPR22(\thread) |
75 | sdc1 $f24, THREAD_FPR24_LS64(\thread) | 75 | sdc1 $f24, THREAD_FPR24(\thread) |
76 | sdc1 $f26, THREAD_FPR26_LS64(\thread) | 76 | sdc1 $f26, THREAD_FPR26(\thread) |
77 | sdc1 $f28, THREAD_FPR28_LS64(\thread) | 77 | sdc1 $f28, THREAD_FPR28(\thread) |
78 | sdc1 $f30, THREAD_FPR30_LS64(\thread) | 78 | sdc1 $f30, THREAD_FPR30(\thread) |
79 | sw \tmp, THREAD_FCR31(\thread) | 79 | sw \tmp, THREAD_FCR31(\thread) |
80 | .set pop | 80 | .set pop |
81 | .endm | 81 | .endm |
@@ -84,22 +84,22 @@ | |||
84 | .set push | 84 | .set push |
85 | .set mips64r2 | 85 | .set mips64r2 |
86 | SET_HARDFLOAT | 86 | SET_HARDFLOAT |
87 | sdc1 $f1, THREAD_FPR1_LS64(\thread) | 87 | sdc1 $f1, THREAD_FPR1(\thread) |
88 | sdc1 $f3, THREAD_FPR3_LS64(\thread) | 88 | sdc1 $f3, THREAD_FPR3(\thread) |
89 | sdc1 $f5, THREAD_FPR5_LS64(\thread) | 89 | sdc1 $f5, THREAD_FPR5(\thread) |
90 | sdc1 $f7, THREAD_FPR7_LS64(\thread) | 90 | sdc1 $f7, THREAD_FPR7(\thread) |
91 | sdc1 $f9, THREAD_FPR9_LS64(\thread) | 91 | sdc1 $f9, THREAD_FPR9(\thread) |
92 | sdc1 $f11, THREAD_FPR11_LS64(\thread) | 92 | sdc1 $f11, THREAD_FPR11(\thread) |
93 | sdc1 $f13, THREAD_FPR13_LS64(\thread) | 93 | sdc1 $f13, THREAD_FPR13(\thread) |
94 | sdc1 $f15, THREAD_FPR15_LS64(\thread) | 94 | sdc1 $f15, THREAD_FPR15(\thread) |
95 | sdc1 $f17, THREAD_FPR17_LS64(\thread) | 95 | sdc1 $f17, THREAD_FPR17(\thread) |
96 | sdc1 $f19, THREAD_FPR19_LS64(\thread) | 96 | sdc1 $f19, THREAD_FPR19(\thread) |
97 | sdc1 $f21, THREAD_FPR21_LS64(\thread) | 97 | sdc1 $f21, THREAD_FPR21(\thread) |
98 | sdc1 $f23, THREAD_FPR23_LS64(\thread) | 98 | sdc1 $f23, THREAD_FPR23(\thread) |
99 | sdc1 $f25, THREAD_FPR25_LS64(\thread) | 99 | sdc1 $f25, THREAD_FPR25(\thread) |
100 | sdc1 $f27, THREAD_FPR27_LS64(\thread) | 100 | sdc1 $f27, THREAD_FPR27(\thread) |
101 | sdc1 $f29, THREAD_FPR29_LS64(\thread) | 101 | sdc1 $f29, THREAD_FPR29(\thread) |
102 | sdc1 $f31, THREAD_FPR31_LS64(\thread) | 102 | sdc1 $f31, THREAD_FPR31(\thread) |
103 | .set pop | 103 | .set pop |
104 | .endm | 104 | .endm |
105 | 105 | ||
@@ -118,22 +118,22 @@ | |||
118 | .set push | 118 | .set push |
119 | SET_HARDFLOAT | 119 | SET_HARDFLOAT |
120 | lw \tmp, THREAD_FCR31(\thread) | 120 | lw \tmp, THREAD_FCR31(\thread) |
121 | ldc1 $f0, THREAD_FPR0_LS64(\thread) | 121 | ldc1 $f0, THREAD_FPR0(\thread) |
122 | ldc1 $f2, THREAD_FPR2_LS64(\thread) | 122 | ldc1 $f2, THREAD_FPR2(\thread) |
123 | ldc1 $f4, THREAD_FPR4_LS64(\thread) | 123 | ldc1 $f4, THREAD_FPR4(\thread) |
124 | ldc1 $f6, THREAD_FPR6_LS64(\thread) | 124 | ldc1 $f6, THREAD_FPR6(\thread) |
125 | ldc1 $f8, THREAD_FPR8_LS64(\thread) | 125 | ldc1 $f8, THREAD_FPR8(\thread) |
126 | ldc1 $f10, THREAD_FPR10_LS64(\thread) | 126 | ldc1 $f10, THREAD_FPR10(\thread) |
127 | ldc1 $f12, THREAD_FPR12_LS64(\thread) | 127 | ldc1 $f12, THREAD_FPR12(\thread) |
128 | ldc1 $f14, THREAD_FPR14_LS64(\thread) | 128 | ldc1 $f14, THREAD_FPR14(\thread) |
129 | ldc1 $f16, THREAD_FPR16_LS64(\thread) | 129 | ldc1 $f16, THREAD_FPR16(\thread) |
130 | ldc1 $f18, THREAD_FPR18_LS64(\thread) | 130 | ldc1 $f18, THREAD_FPR18(\thread) |
131 | ldc1 $f20, THREAD_FPR20_LS64(\thread) | 131 | ldc1 $f20, THREAD_FPR20(\thread) |
132 | ldc1 $f22, THREAD_FPR22_LS64(\thread) | 132 | ldc1 $f22, THREAD_FPR22(\thread) |
133 | ldc1 $f24, THREAD_FPR24_LS64(\thread) | 133 | ldc1 $f24, THREAD_FPR24(\thread) |
134 | ldc1 $f26, THREAD_FPR26_LS64(\thread) | 134 | ldc1 $f26, THREAD_FPR26(\thread) |
135 | ldc1 $f28, THREAD_FPR28_LS64(\thread) | 135 | ldc1 $f28, THREAD_FPR28(\thread) |
136 | ldc1 $f30, THREAD_FPR30_LS64(\thread) | 136 | ldc1 $f30, THREAD_FPR30(\thread) |
137 | ctc1 \tmp, fcr31 | 137 | ctc1 \tmp, fcr31 |
138 | .endm | 138 | .endm |
139 | 139 | ||
@@ -141,22 +141,22 @@ | |||
141 | .set push | 141 | .set push |
142 | .set mips64r2 | 142 | .set mips64r2 |
143 | SET_HARDFLOAT | 143 | SET_HARDFLOAT |
144 | ldc1 $f1, THREAD_FPR1_LS64(\thread) | 144 | ldc1 $f1, THREAD_FPR1(\thread) |
145 | ldc1 $f3, THREAD_FPR3_LS64(\thread) | 145 | ldc1 $f3, THREAD_FPR3(\thread) |
146 | ldc1 $f5, THREAD_FPR5_LS64(\thread) | 146 | ldc1 $f5, THREAD_FPR5(\thread) |
147 | ldc1 $f7, THREAD_FPR7_LS64(\thread) | 147 | ldc1 $f7, THREAD_FPR7(\thread) |
148 | ldc1 $f9, THREAD_FPR9_LS64(\thread) | 148 | ldc1 $f9, THREAD_FPR9(\thread) |
149 | ldc1 $f11, THREAD_FPR11_LS64(\thread) | 149 | ldc1 $f11, THREAD_FPR11(\thread) |
150 | ldc1 $f13, THREAD_FPR13_LS64(\thread) | 150 | ldc1 $f13, THREAD_FPR13(\thread) |
151 | ldc1 $f15, THREAD_FPR15_LS64(\thread) | 151 | ldc1 $f15, THREAD_FPR15(\thread) |
152 | ldc1 $f17, THREAD_FPR17_LS64(\thread) | 152 | ldc1 $f17, THREAD_FPR17(\thread) |
153 | ldc1 $f19, THREAD_FPR19_LS64(\thread) | 153 | ldc1 $f19, THREAD_FPR19(\thread) |
154 | ldc1 $f21, THREAD_FPR21_LS64(\thread) | 154 | ldc1 $f21, THREAD_FPR21(\thread) |
155 | ldc1 $f23, THREAD_FPR23_LS64(\thread) | 155 | ldc1 $f23, THREAD_FPR23(\thread) |
156 | ldc1 $f25, THREAD_FPR25_LS64(\thread) | 156 | ldc1 $f25, THREAD_FPR25(\thread) |
157 | ldc1 $f27, THREAD_FPR27_LS64(\thread) | 157 | ldc1 $f27, THREAD_FPR27(\thread) |
158 | ldc1 $f29, THREAD_FPR29_LS64(\thread) | 158 | ldc1 $f29, THREAD_FPR29(\thread) |
159 | ldc1 $f31, THREAD_FPR31_LS64(\thread) | 159 | ldc1 $f31, THREAD_FPR31(\thread) |
160 | .set pop | 160 | .set pop |
161 | .endm | 161 | .endm |
162 | 162 | ||
@@ -211,6 +211,22 @@ | |||
211 | .endm | 211 | .endm |
212 | 212 | ||
213 | #ifdef TOOLCHAIN_SUPPORTS_MSA | 213 | #ifdef TOOLCHAIN_SUPPORTS_MSA |
214 | .macro _cfcmsa rd, cs | ||
215 | .set push | ||
216 | .set mips32r2 | ||
217 | .set msa | ||
218 | cfcmsa \rd, $\cs | ||
219 | .set pop | ||
220 | .endm | ||
221 | |||
222 | .macro _ctcmsa cd, rs | ||
223 | .set push | ||
224 | .set mips32r2 | ||
225 | .set msa | ||
226 | ctcmsa $\cd, \rs | ||
227 | .set pop | ||
228 | .endm | ||
229 | |||
214 | .macro ld_d wd, off, base | 230 | .macro ld_d wd, off, base |
215 | .set push | 231 | .set push |
216 | .set mips32r2 | 232 | .set mips32r2 |
@@ -227,35 +243,35 @@ | |||
227 | .set pop | 243 | .set pop |
228 | .endm | 244 | .endm |
229 | 245 | ||
230 | .macro copy_u_w rd, ws, n | 246 | .macro copy_u_w ws, n |
231 | .set push | 247 | .set push |
232 | .set mips32r2 | 248 | .set mips32r2 |
233 | .set msa | 249 | .set msa |
234 | copy_u.w \rd, $w\ws[\n] | 250 | copy_u.w $1, $w\ws[\n] |
235 | .set pop | 251 | .set pop |
236 | .endm | 252 | .endm |
237 | 253 | ||
238 | .macro copy_u_d rd, ws, n | 254 | .macro copy_u_d ws, n |
239 | .set push | 255 | .set push |
240 | .set mips64r2 | 256 | .set mips64r2 |
241 | .set msa | 257 | .set msa |
242 | copy_u.d \rd, $w\ws[\n] | 258 | copy_u.d $1, $w\ws[\n] |
243 | .set pop | 259 | .set pop |
244 | .endm | 260 | .endm |
245 | 261 | ||
246 | .macro insert_w wd, n, rs | 262 | .macro insert_w wd, n |
247 | .set push | 263 | .set push |
248 | .set mips32r2 | 264 | .set mips32r2 |
249 | .set msa | 265 | .set msa |
250 | insert.w $w\wd[\n], \rs | 266 | insert.w $w\wd[\n], $1 |
251 | .set pop | 267 | .set pop |
252 | .endm | 268 | .endm |
253 | 269 | ||
254 | .macro insert_d wd, n, rs | 270 | .macro insert_d wd, n |
255 | .set push | 271 | .set push |
256 | .set mips64r2 | 272 | .set mips64r2 |
257 | .set msa | 273 | .set msa |
258 | insert.d $w\wd[\n], \rs | 274 | insert.d $w\wd[\n], $1 |
259 | .set pop | 275 | .set pop |
260 | .endm | 276 | .endm |
261 | #else | 277 | #else |
@@ -283,7 +299,7 @@ | |||
283 | /* | 299 | /* |
284 | * Temporary until all toolchains in use include MSA support. | 300 | * Temporary until all toolchains in use include MSA support. |
285 | */ | 301 | */ |
286 | .macro cfcmsa rd, cs | 302 | .macro _cfcmsa rd, cs |
287 | .set push | 303 | .set push |
288 | .set noat | 304 | .set noat |
289 | SET_HARDFLOAT | 305 | SET_HARDFLOAT |
@@ -293,7 +309,7 @@ | |||
293 | .set pop | 309 | .set pop |
294 | .endm | 310 | .endm |
295 | 311 | ||
296 | .macro ctcmsa cd, rs | 312 | .macro _ctcmsa cd, rs |
297 | .set push | 313 | .set push |
298 | .set noat | 314 | .set noat |
299 | SET_HARDFLOAT | 315 | SET_HARDFLOAT |
@@ -320,44 +336,36 @@ | |||
320 | .set pop | 336 | .set pop |
321 | .endm | 337 | .endm |
322 | 338 | ||
323 | .macro copy_u_w rd, ws, n | 339 | .macro copy_u_w ws, n |
324 | .set push | 340 | .set push |
325 | .set noat | 341 | .set noat |
326 | SET_HARDFLOAT | 342 | SET_HARDFLOAT |
327 | .insn | 343 | .insn |
328 | .word COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11) | 344 | .word COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11) |
329 | /* move triggers an assembler bug... */ | ||
330 | or \rd, $1, zero | ||
331 | .set pop | 345 | .set pop |
332 | .endm | 346 | .endm |
333 | 347 | ||
334 | .macro copy_u_d rd, ws, n | 348 | .macro copy_u_d ws, n |
335 | .set push | 349 | .set push |
336 | .set noat | 350 | .set noat |
337 | SET_HARDFLOAT | 351 | SET_HARDFLOAT |
338 | .insn | 352 | .insn |
339 | .word COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11) | 353 | .word COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11) |
340 | /* move triggers an assembler bug... */ | ||
341 | or \rd, $1, zero | ||
342 | .set pop | 354 | .set pop |
343 | .endm | 355 | .endm |
344 | 356 | ||
345 | .macro insert_w wd, n, rs | 357 | .macro insert_w wd, n |
346 | .set push | 358 | .set push |
347 | .set noat | 359 | .set noat |
348 | SET_HARDFLOAT | 360 | SET_HARDFLOAT |
349 | /* move triggers an assembler bug... */ | ||
350 | or $1, \rs, zero | ||
351 | .word INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6) | 361 | .word INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6) |
352 | .set pop | 362 | .set pop |
353 | .endm | 363 | .endm |
354 | 364 | ||
355 | .macro insert_d wd, n, rs | 365 | .macro insert_d wd, n |
356 | .set push | 366 | .set push |
357 | .set noat | 367 | .set noat |
358 | SET_HARDFLOAT | 368 | SET_HARDFLOAT |
359 | /* move triggers an assembler bug... */ | ||
360 | or $1, \rs, zero | ||
361 | .word INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6) | 369 | .word INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6) |
362 | .set pop | 370 | .set pop |
363 | .endm | 371 | .endm |
@@ -399,7 +407,7 @@ | |||
399 | .set push | 407 | .set push |
400 | .set noat | 408 | .set noat |
401 | SET_HARDFLOAT | 409 | SET_HARDFLOAT |
402 | cfcmsa $1, MSA_CSR | 410 | _cfcmsa $1, MSA_CSR |
403 | sw $1, THREAD_MSA_CSR(\thread) | 411 | sw $1, THREAD_MSA_CSR(\thread) |
404 | .set pop | 412 | .set pop |
405 | .endm | 413 | .endm |
@@ -409,7 +417,7 @@ | |||
409 | .set noat | 417 | .set noat |
410 | SET_HARDFLOAT | 418 | SET_HARDFLOAT |
411 | lw $1, THREAD_MSA_CSR(\thread) | 419 | lw $1, THREAD_MSA_CSR(\thread) |
412 | ctcmsa MSA_CSR, $1 | 420 | _ctcmsa MSA_CSR, $1 |
413 | .set pop | 421 | .set pop |
414 | ld_d 0, THREAD_FPR0, \thread | 422 | ld_d 0, THREAD_FPR0, \thread |
415 | ld_d 1, THREAD_FPR1, \thread | 423 | ld_d 1, THREAD_FPR1, \thread |
@@ -452,9 +460,6 @@ | |||
452 | insert_w \wd, 2 | 460 | insert_w \wd, 2 |
453 | insert_w \wd, 3 | 461 | insert_w \wd, 3 |
454 | #endif | 462 | #endif |
455 | .if 31-\wd | ||
456 | msa_init_upper (\wd+1) | ||
457 | .endif | ||
458 | .endm | 463 | .endm |
459 | 464 | ||
460 | .macro msa_init_all_upper | 465 | .macro msa_init_all_upper |
@@ -463,6 +468,37 @@ | |||
463 | SET_HARDFLOAT | 468 | SET_HARDFLOAT |
464 | not $1, zero | 469 | not $1, zero |
465 | msa_init_upper 0 | 470 | msa_init_upper 0 |
471 | msa_init_upper 1 | ||
472 | msa_init_upper 2 | ||
473 | msa_init_upper 3 | ||
474 | msa_init_upper 4 | ||
475 | msa_init_upper 5 | ||
476 | msa_init_upper 6 | ||
477 | msa_init_upper 7 | ||
478 | msa_init_upper 8 | ||
479 | msa_init_upper 9 | ||
480 | msa_init_upper 10 | ||
481 | msa_init_upper 11 | ||
482 | msa_init_upper 12 | ||
483 | msa_init_upper 13 | ||
484 | msa_init_upper 14 | ||
485 | msa_init_upper 15 | ||
486 | msa_init_upper 16 | ||
487 | msa_init_upper 17 | ||
488 | msa_init_upper 18 | ||
489 | msa_init_upper 19 | ||
490 | msa_init_upper 20 | ||
491 | msa_init_upper 21 | ||
492 | msa_init_upper 22 | ||
493 | msa_init_upper 23 | ||
494 | msa_init_upper 24 | ||
495 | msa_init_upper 25 | ||
496 | msa_init_upper 26 | ||
497 | msa_init_upper 27 | ||
498 | msa_init_upper 28 | ||
499 | msa_init_upper 29 | ||
500 | msa_init_upper 30 | ||
501 | msa_init_upper 31 | ||
466 | .set pop | 502 | .set pop |
467 | .endm | 503 | .endm |
468 | 504 | ||
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index dd083e999b08..b104ad9d655f 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h | |||
@@ -48,6 +48,12 @@ enum fpu_mode { | |||
48 | #define FPU_FR_MASK 0x1 | 48 | #define FPU_FR_MASK 0x1 |
49 | }; | 49 | }; |
50 | 50 | ||
51 | #define __disable_fpu() \ | ||
52 | do { \ | ||
53 | clear_c0_status(ST0_CU1); \ | ||
54 | disable_fpu_hazard(); \ | ||
55 | } while (0) | ||
56 | |||
51 | static inline int __enable_fpu(enum fpu_mode mode) | 57 | static inline int __enable_fpu(enum fpu_mode mode) |
52 | { | 58 | { |
53 | int fr; | 59 | int fr; |
@@ -86,7 +92,12 @@ fr_common: | |||
86 | enable_fpu_hazard(); | 92 | enable_fpu_hazard(); |
87 | 93 | ||
88 | /* check FR has the desired value */ | 94 | /* check FR has the desired value */ |
89 | return (!!(read_c0_status() & ST0_FR) == !!fr) ? 0 : SIGFPE; | 95 | if (!!(read_c0_status() & ST0_FR) == !!fr) |
96 | return 0; | ||
97 | |||
98 | /* unsupported FR value */ | ||
99 | __disable_fpu(); | ||
100 | return SIGFPE; | ||
90 | 101 | ||
91 | default: | 102 | default: |
92 | BUG(); | 103 | BUG(); |
@@ -95,12 +106,6 @@ fr_common: | |||
95 | return SIGFPE; | 106 | return SIGFPE; |
96 | } | 107 | } |
97 | 108 | ||
98 | #define __disable_fpu() \ | ||
99 | do { \ | ||
100 | clear_c0_status(ST0_CU1); \ | ||
101 | disable_fpu_hazard(); \ | ||
102 | } while (0) | ||
103 | |||
104 | #define clear_fpu_owner() clear_thread_flag(TIF_USEDFPU) | 109 | #define clear_fpu_owner() clear_thread_flag(TIF_USEDFPU) |
105 | 110 | ||
106 | static inline int __is_fpu_owner(void) | 111 | static inline int __is_fpu_owner(void) |
@@ -170,6 +175,7 @@ static inline void lose_fpu(int save) | |||
170 | } | 175 | } |
171 | disable_msa(); | 176 | disable_msa(); |
172 | clear_thread_flag(TIF_USEDMSA); | 177 | clear_thread_flag(TIF_USEDMSA); |
178 | __disable_fpu(); | ||
173 | } else if (is_fpu_owner()) { | 179 | } else if (is_fpu_owner()) { |
174 | if (save) | 180 | if (save) |
175 | _save_fp(current); | 181 | _save_fp(current); |
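The reordering above exists so that __enable_fpu() can itself call __disable_fpu() when the requested FR mode does not take effect, instead of leaving CU1 set on the failure path. A compressed, purely illustrative model of that enable/verify/roll-back pattern (hypothetical names, not the kernel helpers) is shown below.

/* Illustrative only: enable a unit, check the requested mode stuck, and
 * disable it again before reporting failure, mirroring the new SIGFPE path. */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_SIGFPE 8

static bool cu1;	/* stand-in for Status.CU1 */
static bool hw_fr;	/* FR value the "hardware" actually implements */

static void demo_disable_fpu(void)
{
	cu1 = false;
}

static int demo_enable_fpu(bool want_fr)
{
	cu1 = true;
	if (hw_fr == want_fr)		/* "check FR has the desired value" */
		return 0;

	demo_disable_fpu();		/* unsupported FR value: don't stay enabled */
	return DEMO_SIGFPE;
}

int main(void)
{
	hw_fr = false;			/* pretend only FR=0 is implemented */
	printf("FR=0: %d (cu1=%d)\n", demo_enable_fpu(false), cu1);
	printf("FR=1: %d (cu1=%d)\n", demo_enable_fpu(true), cu1);
	return 0;
}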
diff --git a/arch/mips/include/asm/kdebug.h b/arch/mips/include/asm/kdebug.h index 6a9af5fcb5d7..cba22ab7ad4d 100644 --- a/arch/mips/include/asm/kdebug.h +++ b/arch/mips/include/asm/kdebug.h | |||
@@ -10,7 +10,8 @@ enum die_val { | |||
10 | DIE_RI, | 10 | DIE_RI, |
11 | DIE_PAGE_FAULT, | 11 | DIE_PAGE_FAULT, |
12 | DIE_BREAK, | 12 | DIE_BREAK, |
13 | DIE_SSTEPBP | 13 | DIE_SSTEPBP, |
14 | DIE_MSAFP | ||
14 | }; | 15 | }; |
15 | 16 | ||
16 | #endif /* _ASM_MIPS_KDEBUG_H */ | 17 | #endif /* _ASM_MIPS_KDEBUG_H */ |
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index ac4fc716062b..4c25823563fe 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h | |||
@@ -21,10 +21,10 @@ | |||
21 | 21 | ||
22 | /* MIPS KVM register ids */ | 22 | /* MIPS KVM register ids */ |
23 | #define MIPS_CP0_32(_R, _S) \ | 23 | #define MIPS_CP0_32(_R, _S) \ |
24 | (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S))) | 24 | (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S))) |
25 | 25 | ||
26 | #define MIPS_CP0_64(_R, _S) \ | 26 | #define MIPS_CP0_64(_R, _S) \ |
27 | (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S))) | 27 | (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S))) |
28 | 28 | ||
29 | #define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0) | 29 | #define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0) |
30 | #define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0) | 30 | #define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0) |
@@ -42,11 +42,14 @@ | |||
42 | #define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0) | 42 | #define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0) |
43 | #define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0) | 43 | #define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0) |
44 | #define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0) | 44 | #define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0) |
45 | #define KVM_REG_MIPS_CP0_PRID MIPS_CP0_32(15, 0) | ||
45 | #define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1) | 46 | #define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1) |
46 | #define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0) | 47 | #define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0) |
47 | #define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1) | 48 | #define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1) |
48 | #define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2) | 49 | #define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2) |
49 | #define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3) | 50 | #define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3) |
51 | #define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4) | ||
52 | #define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5) | ||
50 | #define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7) | 53 | #define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7) |
51 | #define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0) | 54 | #define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0) |
52 | #define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0) | 55 | #define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0) |
@@ -119,6 +122,10 @@ struct kvm_vcpu_stat { | |||
119 | u32 syscall_exits; | 122 | u32 syscall_exits; |
120 | u32 resvd_inst_exits; | 123 | u32 resvd_inst_exits; |
121 | u32 break_inst_exits; | 124 | u32 break_inst_exits; |
125 | u32 trap_inst_exits; | ||
126 | u32 msa_fpe_exits; | ||
127 | u32 fpe_exits; | ||
128 | u32 msa_disabled_exits; | ||
122 | u32 flush_dcache_exits; | 129 | u32 flush_dcache_exits; |
123 | u32 halt_successful_poll; | 130 | u32 halt_successful_poll; |
124 | u32 halt_wakeup; | 131 | u32 halt_wakeup; |
@@ -138,6 +145,10 @@ enum kvm_mips_exit_types { | |||
138 | SYSCALL_EXITS, | 145 | SYSCALL_EXITS, |
139 | RESVD_INST_EXITS, | 146 | RESVD_INST_EXITS, |
140 | BREAK_INST_EXITS, | 147 | BREAK_INST_EXITS, |
148 | TRAP_INST_EXITS, | ||
149 | MSA_FPE_EXITS, | ||
150 | FPE_EXITS, | ||
151 | MSA_DISABLED_EXITS, | ||
141 | FLUSH_DCACHE_EXITS, | 152 | FLUSH_DCACHE_EXITS, |
142 | MAX_KVM_MIPS_EXIT_TYPES | 153 | MAX_KVM_MIPS_EXIT_TYPES |
143 | }; | 154 | }; |
@@ -206,6 +217,8 @@ struct mips_coproc { | |||
206 | #define MIPS_CP0_CONFIG1_SEL 1 | 217 | #define MIPS_CP0_CONFIG1_SEL 1 |
207 | #define MIPS_CP0_CONFIG2_SEL 2 | 218 | #define MIPS_CP0_CONFIG2_SEL 2 |
208 | #define MIPS_CP0_CONFIG3_SEL 3 | 219 | #define MIPS_CP0_CONFIG3_SEL 3 |
220 | #define MIPS_CP0_CONFIG4_SEL 4 | ||
221 | #define MIPS_CP0_CONFIG5_SEL 5 | ||
209 | 222 | ||
210 | /* Config0 register bits */ | 223 | /* Config0 register bits */ |
211 | #define CP0C0_M 31 | 224 | #define CP0C0_M 31 |
@@ -262,31 +275,6 @@ struct mips_coproc { | |||
262 | #define CP0C3_SM 1 | 275 | #define CP0C3_SM 1 |
263 | #define CP0C3_TL 0 | 276 | #define CP0C3_TL 0 |
264 | 277 | ||
265 | /* Have config1, Cacheable, noncoherent, write-back, write allocate*/ | ||
266 | #define MIPS_CONFIG0 \ | ||
267 | ((1 << CP0C0_M) | (0x3 << CP0C0_K0)) | ||
268 | |||
269 | /* Have config2, no coprocessor2 attached, no MDMX support attached, | ||
270 | no performance counters, watch registers present, | ||
271 | no code compression, EJTAG present, no FPU, no watch registers */ | ||
272 | #define MIPS_CONFIG1 \ | ||
273 | ((1 << CP0C1_M) | \ | ||
274 | (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \ | ||
275 | (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \ | ||
276 | (0 << CP0C1_FP)) | ||
277 | |||
278 | /* Have config3, no tertiary/secondary caches implemented */ | ||
279 | #define MIPS_CONFIG2 \ | ||
280 | ((1 << CP0C2_M)) | ||
281 | |||
282 | /* No config4, no DSP ASE, no large physaddr (PABITS), | ||
283 | no external interrupt controller, no vectored interrupts, | ||
284 | no 1kb pages, no SmartMIPS ASE, no trace logic */ | ||
285 | #define MIPS_CONFIG3 \ | ||
286 | ((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \ | ||
287 | (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \ | ||
288 | (0 << CP0C3_SM) | (0 << CP0C3_TL)) | ||
289 | |||
290 | /* MMU types, the first four entries have the same layout as the | 278 | /* MMU types, the first four entries have the same layout as the |
291 | CP0C0_MT field. */ | 279 | CP0C0_MT field. */ |
292 | enum mips_mmu_types { | 280 | enum mips_mmu_types { |
@@ -321,7 +309,9 @@ enum mips_mmu_types { | |||
321 | */ | 309 | */ |
322 | #define T_TRAP 13 /* Trap instruction */ | 310 | #define T_TRAP 13 /* Trap instruction */ |
323 | #define T_VCEI 14 /* Virtual coherency exception */ | 311 | #define T_VCEI 14 /* Virtual coherency exception */ |
312 | #define T_MSAFPE 14 /* MSA floating point exception */ | ||
324 | #define T_FPE 15 /* Floating point exception */ | 313 | #define T_FPE 15 /* Floating point exception */ |
314 | #define T_MSADIS 21 /* MSA disabled exception */ | ||
325 | #define T_WATCH 23 /* Watch address reference */ | 315 | #define T_WATCH 23 /* Watch address reference */ |
326 | #define T_VCED 31 /* Virtual coherency data */ | 316 | #define T_VCED 31 /* Virtual coherency data */ |
327 | 317 | ||
@@ -374,6 +364,9 @@ struct kvm_mips_tlb { | |||
374 | long tlb_lo1; | 364 | long tlb_lo1; |
375 | }; | 365 | }; |
376 | 366 | ||
367 | #define KVM_MIPS_FPU_FPU 0x1 | ||
368 | #define KVM_MIPS_FPU_MSA 0x2 | ||
369 | |||
377 | #define KVM_MIPS_GUEST_TLB_SIZE 64 | 370 | #define KVM_MIPS_GUEST_TLB_SIZE 64 |
378 | struct kvm_vcpu_arch { | 371 | struct kvm_vcpu_arch { |
379 | void *host_ebase, *guest_ebase; | 372 | void *host_ebase, *guest_ebase; |
@@ -395,6 +388,8 @@ struct kvm_vcpu_arch { | |||
395 | 388 | ||
396 | /* FPU State */ | 389 | /* FPU State */ |
397 | struct mips_fpu_struct fpu; | 390 | struct mips_fpu_struct fpu; |
391 | /* Which FPU state is loaded (KVM_MIPS_FPU_*) */ | ||
392 | unsigned int fpu_inuse; | ||
398 | 393 | ||
399 | /* COP0 State */ | 394 | /* COP0 State */ |
400 | struct mips_coproc *cop0; | 395 | struct mips_coproc *cop0; |
@@ -441,6 +436,9 @@ struct kvm_vcpu_arch { | |||
441 | 436 | ||
442 | /* WAIT executed */ | 437 | /* WAIT executed */ |
443 | int wait; | 438 | int wait; |
439 | |||
440 | u8 fpu_enabled; | ||
441 | u8 msa_enabled; | ||
444 | }; | 442 | }; |
445 | 443 | ||
446 | 444 | ||
@@ -482,11 +480,15 @@ struct kvm_vcpu_arch { | |||
482 | #define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1]) | 480 | #define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1]) |
483 | #define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2]) | 481 | #define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2]) |
484 | #define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3]) | 482 | #define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3]) |
483 | #define kvm_read_c0_guest_config4(cop0) (cop0->reg[MIPS_CP0_CONFIG][4]) | ||
484 | #define kvm_read_c0_guest_config5(cop0) (cop0->reg[MIPS_CP0_CONFIG][5]) | ||
485 | #define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7]) | 485 | #define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7]) |
486 | #define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val)) | 486 | #define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val)) |
487 | #define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val)) | 487 | #define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val)) |
488 | #define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val)) | 488 | #define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val)) |
489 | #define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val)) | 489 | #define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val)) |
490 | #define kvm_write_c0_guest_config4(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][4] = (val)) | ||
491 | #define kvm_write_c0_guest_config5(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][5] = (val)) | ||
490 | #define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val)) | 492 | #define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val)) |
491 | #define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0]) | 493 | #define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0]) |
492 | #define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val)) | 494 | #define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val)) |
@@ -567,6 +569,31 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg, | |||
567 | kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \ | 569 | kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \ |
568 | } | 570 | } |
569 | 571 | ||
572 | /* Helpers */ | ||
573 | |||
574 | static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu) | ||
575 | { | ||
576 | return (!__builtin_constant_p(cpu_has_fpu) || cpu_has_fpu) && | ||
577 | vcpu->fpu_enabled; | ||
578 | } | ||
579 | |||
580 | static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu) | ||
581 | { | ||
582 | return kvm_mips_guest_can_have_fpu(vcpu) && | ||
583 | kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP; | ||
584 | } | ||
585 | |||
586 | static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu) | ||
587 | { | ||
588 | return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) && | ||
589 | vcpu->msa_enabled; | ||
590 | } | ||
591 | |||
592 | static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu) | ||
593 | { | ||
594 | return kvm_mips_guest_can_have_msa(vcpu) && | ||
595 | kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA; | ||
596 | } | ||
570 | 597 | ||
571 | struct kvm_mips_callbacks { | 598 | struct kvm_mips_callbacks { |
572 | int (*handle_cop_unusable)(struct kvm_vcpu *vcpu); | 599 | int (*handle_cop_unusable)(struct kvm_vcpu *vcpu); |
@@ -578,6 +605,10 @@ struct kvm_mips_callbacks { | |||
578 | int (*handle_syscall)(struct kvm_vcpu *vcpu); | 605 | int (*handle_syscall)(struct kvm_vcpu *vcpu); |
579 | int (*handle_res_inst)(struct kvm_vcpu *vcpu); | 606 | int (*handle_res_inst)(struct kvm_vcpu *vcpu); |
580 | int (*handle_break)(struct kvm_vcpu *vcpu); | 607 | int (*handle_break)(struct kvm_vcpu *vcpu); |
608 | int (*handle_trap)(struct kvm_vcpu *vcpu); | ||
609 | int (*handle_msa_fpe)(struct kvm_vcpu *vcpu); | ||
610 | int (*handle_fpe)(struct kvm_vcpu *vcpu); | ||
611 | int (*handle_msa_disabled)(struct kvm_vcpu *vcpu); | ||
581 | int (*vm_init)(struct kvm *kvm); | 612 | int (*vm_init)(struct kvm *kvm); |
582 | int (*vcpu_init)(struct kvm_vcpu *vcpu); | 613 | int (*vcpu_init)(struct kvm_vcpu *vcpu); |
583 | int (*vcpu_setup)(struct kvm_vcpu *vcpu); | 614 | int (*vcpu_setup)(struct kvm_vcpu *vcpu); |
@@ -596,6 +627,8 @@ struct kvm_mips_callbacks { | |||
596 | const struct kvm_one_reg *reg, s64 *v); | 627 | const struct kvm_one_reg *reg, s64 *v); |
597 | int (*set_one_reg)(struct kvm_vcpu *vcpu, | 628 | int (*set_one_reg)(struct kvm_vcpu *vcpu, |
598 | const struct kvm_one_reg *reg, s64 v); | 629 | const struct kvm_one_reg *reg, s64 v); |
630 | int (*vcpu_get_regs)(struct kvm_vcpu *vcpu); | ||
631 | int (*vcpu_set_regs)(struct kvm_vcpu *vcpu); | ||
599 | }; | 632 | }; |
600 | extern struct kvm_mips_callbacks *kvm_mips_callbacks; | 633 | extern struct kvm_mips_callbacks *kvm_mips_callbacks; |
601 | int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks); | 634 | int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks); |
@@ -606,6 +639,19 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); | |||
606 | /* Trampoline ASM routine to start running in "Guest" context */ | 639 | /* Trampoline ASM routine to start running in "Guest" context */ |
607 | extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu); | 640 | extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu); |
608 | 641 | ||
642 | /* FPU/MSA context management */ | ||
643 | void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu); | ||
644 | void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu); | ||
645 | void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu); | ||
646 | void __kvm_save_msa(struct kvm_vcpu_arch *vcpu); | ||
647 | void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu); | ||
648 | void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu); | ||
649 | void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu); | ||
650 | void kvm_own_fpu(struct kvm_vcpu *vcpu); | ||
651 | void kvm_own_msa(struct kvm_vcpu *vcpu); | ||
652 | void kvm_drop_fpu(struct kvm_vcpu *vcpu); | ||
653 | void kvm_lose_fpu(struct kvm_vcpu *vcpu); | ||
654 | |||
609 | /* TLB handling */ | 655 | /* TLB handling */ |
610 | uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu); | 656 | uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu); |
611 | 657 | ||
@@ -711,6 +757,26 @@ extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause, | |||
711 | struct kvm_run *run, | 757 | struct kvm_run *run, |
712 | struct kvm_vcpu *vcpu); | 758 | struct kvm_vcpu *vcpu); |
713 | 759 | ||
760 | extern enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause, | ||
761 | uint32_t *opc, | ||
762 | struct kvm_run *run, | ||
763 | struct kvm_vcpu *vcpu); | ||
764 | |||
765 | extern enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause, | ||
766 | uint32_t *opc, | ||
767 | struct kvm_run *run, | ||
768 | struct kvm_vcpu *vcpu); | ||
769 | |||
770 | extern enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause, | ||
771 | uint32_t *opc, | ||
772 | struct kvm_run *run, | ||
773 | struct kvm_vcpu *vcpu); | ||
774 | |||
775 | extern enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause, | ||
776 | uint32_t *opc, | ||
777 | struct kvm_run *run, | ||
778 | struct kvm_vcpu *vcpu); | ||
779 | |||
714 | extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, | 780 | extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, |
715 | struct kvm_run *run); | 781 | struct kvm_run *run); |
716 | 782 | ||
@@ -749,6 +815,11 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, | |||
749 | struct kvm_run *run, | 815 | struct kvm_run *run, |
750 | struct kvm_vcpu *vcpu); | 816 | struct kvm_vcpu *vcpu); |
751 | 817 | ||
818 | unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu); | ||
819 | unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu); | ||
820 | unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu); | ||
821 | unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu); | ||
822 | |||
752 | /* Dynamic binary translation */ | 823 | /* Dynamic binary translation */ |
753 | extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc, | 824 | extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc, |
754 | struct kvm_vcpu *vcpu); | 825 | struct kvm_vcpu *vcpu); |
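As a worked example of the new register-id layout, where bits[31..16] now select the register set (0x10000 for CP0), the sketch below shows how a CP0 (rd, sel) pair packs into a KVM_{GET,SET}_ONE_REG id. The DEMO_* constants are reproduced from linux/kvm.h as best understood and are used here purely for illustration, not as authoritative values.

/* Illustrative only: how MIPS_CP0_32()/MIPS_CP0_64() compose a ONE_REG id
 * under the new numbering. The DEMO_* constants mirror linux/kvm.h values. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_REG_MIPS		0x7000000000000000ULL	/* arch field */
#define DEMO_REG_SIZE_U32	0x0020000000000000ULL
#define DEMO_REG_SIZE_U64	0x0030000000000000ULL
#define DEMO_REG_MIPS_CP0	(DEMO_REG_MIPS | 0x0000000000010000ULL)

static uint64_t demo_cp0_32(unsigned int rd, unsigned int sel)
{
	return DEMO_REG_MIPS_CP0 | DEMO_REG_SIZE_U32 | (8 * rd + sel);
}

static uint64_t demo_cp0_64(unsigned int rd, unsigned int sel)
{
	return DEMO_REG_MIPS_CP0 | DEMO_REG_SIZE_U64 | (8 * rd + sel);
}

int main(void)
{
	/* Status is (12, 0), Config5 is (16, 5), EPC is (14, 0) */
	printf("CP0_STATUS  = %#018llx\n", (unsigned long long)demo_cp0_32(12, 0));
	printf("CP0_CONFIG5 = %#018llx\n", (unsigned long long)demo_cp0_32(16, 5));
	printf("CP0_EPC     = %#018llx\n", (unsigned long long)demo_cp0_64(14, 0));
	return 0;
}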
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index b5dcbee01fd7..9b3b48e21c22 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h | |||
@@ -105,7 +105,7 @@ union fpureg { | |||
105 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | 105 | #ifdef CONFIG_CPU_LITTLE_ENDIAN |
106 | # define FPR_IDX(width, idx) (idx) | 106 | # define FPR_IDX(width, idx) (idx) |
107 | #else | 107 | #else |
108 | # define FPR_IDX(width, idx) ((FPU_REG_WIDTH / (width)) - 1 - (idx)) | 108 | # define FPR_IDX(width, idx) ((idx) ^ ((64 / (width)) - 1)) |
109 | #endif | 109 | #endif |
110 | 110 | ||
111 | #define BUILD_FPR_ACCESS(width) \ | 111 | #define BUILD_FPR_ACCESS(width) \ |
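The new big-endian FPR_IDX() uses an XOR so that sub-64-bit accesses are swapped only with their neighbour inside each 64-bit doubleword, which keeps the mapping confined to the 64-bit FP view even when union fpureg is wider (128-bit when MSA support is built in). A small stand-alone demonstration of the mapping, illustrative only:

/* Illustrative only: the big-endian index mapping of the new FPR_IDX().
 * 32-bit indices swap with their neighbour within each 64-bit doubleword;
 * 64-bit (and wider) indices are left untouched. */
#include <stdio.h>

#define DEMO_FPR_IDX(width, idx)	((idx) ^ ((64 / (width)) - 1))

int main(void)
{
	int idx;

	for (idx = 0; idx < 4; idx++)
		printf("width=32: idx %d -> %d\n", idx, DEMO_FPR_IDX(32, idx));
	for (idx = 0; idx < 2; idx++)
		printf("width=64: idx %d -> %d\n", idx, DEMO_FPR_IDX(64, idx));
	return 0;
}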
diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h index 2c04b6d9ff85..6985eb59b085 100644 --- a/arch/mips/include/uapi/asm/kvm.h +++ b/arch/mips/include/uapi/asm/kvm.h | |||
@@ -36,77 +36,85 @@ struct kvm_regs { | |||
36 | 36 | ||
37 | /* | 37 | /* |
38 | * for KVM_GET_FPU and KVM_SET_FPU | 38 | * for KVM_GET_FPU and KVM_SET_FPU |
39 | * | ||
40 | * If Status[FR] is zero (32-bit FPU), the upper 32-bits of the FPRs | ||
41 | * are zero filled. | ||
42 | */ | 39 | */ |
43 | struct kvm_fpu { | 40 | struct kvm_fpu { |
44 | __u64 fpr[32]; | ||
45 | __u32 fir; | ||
46 | __u32 fccr; | ||
47 | __u32 fexr; | ||
48 | __u32 fenr; | ||
49 | __u32 fcsr; | ||
50 | __u32 pad; | ||
51 | }; | 41 | }; |
52 | 42 | ||
53 | 43 | ||
54 | /* | 44 | /* |
55 | * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access CP0 | 45 | * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various |
56 | * registers. The id field is broken down as follows: | 46 | * registers. The id field is broken down as follows: |
57 | * | 47 | * |
58 | * bits[2..0] - Register 'sel' index. | ||
59 | * bits[7..3] - Register 'rd' index. | ||
60 | * bits[15..8] - Must be zero. | ||
61 | * bits[31..16] - 1 -> CP0 registers. | ||
62 | * bits[51..32] - Must be zero. | ||
63 | * bits[63..52] - As per linux/kvm.h | 48 | * bits[63..52] - As per linux/kvm.h |
49 | * bits[51..32] - Must be zero. | ||
50 | * bits[31..16] - Register set. | ||
51 | * | ||
52 | * Register set = 0: GP registers from kvm_regs (see definitions below). | ||
53 | * | ||
54 | * Register set = 1: CP0 registers. | ||
55 | * bits[15..8] - Must be zero. | ||
56 | * bits[7..3] - Register 'rd' index. | ||
57 | * bits[2..0] - Register 'sel' index. | ||
58 | * | ||
59 | * Register set = 2: KVM specific registers (see definitions below). | ||
60 | * | ||
61 | * Register set = 3: FPU / MSA registers (see definitions below). | ||
64 | * | 62 | * |
65 | * Other register sets may be added in the future. Each set would | 63 | * Other register sets may be added in the future. Each set would |
66 | * have its own identifier in bits[31..16]. | 64 | * have its own identifier in bits[31..16]. |
67 | * | ||
68 | * The registers defined in struct kvm_regs are also accessible, the | ||
69 | * id values for these are below. | ||
70 | */ | 65 | */ |
71 | 66 | ||
72 | #define KVM_REG_MIPS_R0 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0) | 67 | #define KVM_REG_MIPS_GP (KVM_REG_MIPS | 0x0000000000000000ULL) |
73 | #define KVM_REG_MIPS_R1 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 1) | 68 | #define KVM_REG_MIPS_CP0 (KVM_REG_MIPS | 0x0000000000010000ULL) |
74 | #define KVM_REG_MIPS_R2 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 2) | 69 | #define KVM_REG_MIPS_KVM (KVM_REG_MIPS | 0x0000000000020000ULL) |
75 | #define KVM_REG_MIPS_R3 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 3) | 70 | #define KVM_REG_MIPS_FPU (KVM_REG_MIPS | 0x0000000000030000ULL) |
76 | #define KVM_REG_MIPS_R4 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 4) | 71 | |
77 | #define KVM_REG_MIPS_R5 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 5) | 72 | |
78 | #define KVM_REG_MIPS_R6 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 6) | 73 | /* |
79 | #define KVM_REG_MIPS_R7 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 7) | 74 | * KVM_REG_MIPS_GP - General purpose registers from kvm_regs. |
80 | #define KVM_REG_MIPS_R8 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 8) | 75 | */ |
81 | #define KVM_REG_MIPS_R9 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 9) | 76 | |
82 | #define KVM_REG_MIPS_R10 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 10) | 77 | #define KVM_REG_MIPS_R0 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 0) |
83 | #define KVM_REG_MIPS_R11 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 11) | 78 | #define KVM_REG_MIPS_R1 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 1) |
84 | #define KVM_REG_MIPS_R12 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 12) | 79 | #define KVM_REG_MIPS_R2 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 2) |
85 | #define KVM_REG_MIPS_R13 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 13) | 80 | #define KVM_REG_MIPS_R3 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 3) |
86 | #define KVM_REG_MIPS_R14 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 14) | 81 | #define KVM_REG_MIPS_R4 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 4) |
87 | #define KVM_REG_MIPS_R15 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 15) | 82 | #define KVM_REG_MIPS_R5 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 5) |
88 | #define KVM_REG_MIPS_R16 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 16) | 83 | #define KVM_REG_MIPS_R6 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 6) |
89 | #define KVM_REG_MIPS_R17 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 17) | 84 | #define KVM_REG_MIPS_R7 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 7) |
90 | #define KVM_REG_MIPS_R18 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 18) | 85 | #define KVM_REG_MIPS_R8 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 8) |
91 | #define KVM_REG_MIPS_R19 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 19) | 86 | #define KVM_REG_MIPS_R9 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 9) |
92 | #define KVM_REG_MIPS_R20 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 20) | 87 | #define KVM_REG_MIPS_R10 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 10) |
93 | #define KVM_REG_MIPS_R21 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 21) | 88 | #define KVM_REG_MIPS_R11 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 11) |
94 | #define KVM_REG_MIPS_R22 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 22) | 89 | #define KVM_REG_MIPS_R12 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 12) |
95 | #define KVM_REG_MIPS_R23 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 23) | 90 | #define KVM_REG_MIPS_R13 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 13) |
96 | #define KVM_REG_MIPS_R24 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 24) | 91 | #define KVM_REG_MIPS_R14 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 14) |
97 | #define KVM_REG_MIPS_R25 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 25) | 92 | #define KVM_REG_MIPS_R15 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 15) |
98 | #define KVM_REG_MIPS_R26 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 26) | 93 | #define KVM_REG_MIPS_R16 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 16) |
99 | #define KVM_REG_MIPS_R27 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 27) | 94 | #define KVM_REG_MIPS_R17 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 17) |
100 | #define KVM_REG_MIPS_R28 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 28) | 95 | #define KVM_REG_MIPS_R18 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 18) |
101 | #define KVM_REG_MIPS_R29 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 29) | 96 | #define KVM_REG_MIPS_R19 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 19) |
102 | #define KVM_REG_MIPS_R30 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 30) | 97 | #define KVM_REG_MIPS_R20 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 20) |
103 | #define KVM_REG_MIPS_R31 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 31) | 98 | #define KVM_REG_MIPS_R21 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 21) |
104 | 99 | #define KVM_REG_MIPS_R22 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 22) | |
105 | #define KVM_REG_MIPS_HI (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 32) | 100 | #define KVM_REG_MIPS_R23 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 23) |
106 | #define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33) | 101 | #define KVM_REG_MIPS_R24 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 24) |
107 | #define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34) | 102 | #define KVM_REG_MIPS_R25 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 25) |
108 | 103 | #define KVM_REG_MIPS_R26 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 26) | |
109 | /* KVM specific control registers */ | 104 | #define KVM_REG_MIPS_R27 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 27) |
105 | #define KVM_REG_MIPS_R28 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 28) | ||
106 | #define KVM_REG_MIPS_R29 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 29) | ||
107 | #define KVM_REG_MIPS_R30 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 30) | ||
108 | #define KVM_REG_MIPS_R31 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 31) | ||
109 | |||
110 | #define KVM_REG_MIPS_HI (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 32) | ||
111 | #define KVM_REG_MIPS_LO (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 33) | ||
112 | #define KVM_REG_MIPS_PC (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 34) | ||
113 | |||
114 | |||
115 | /* | ||
116 | * KVM_REG_MIPS_KVM - KVM specific control registers. | ||
117 | */ | ||
110 | 118 | ||
111 | /* | 119 | /* |
112 | * CP0_Count control | 120 | * CP0_Count control |
@@ -118,8 +126,7 @@ struct kvm_fpu { | |||
118 | * safely without losing time or guest timer interrupts. | 126 | * safely without losing time or guest timer interrupts. |
119 | * Other: Reserved, do not change. | 127 | * Other: Reserved, do not change. |
120 | */ | 128 | */ |
121 | #define KVM_REG_MIPS_COUNT_CTL (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \ | 129 | #define KVM_REG_MIPS_COUNT_CTL (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 0) |
122 | 0x20000 | 0) | ||
123 | #define KVM_REG_MIPS_COUNT_CTL_DC 0x00000001 | 130 | #define KVM_REG_MIPS_COUNT_CTL_DC 0x00000001 |
124 | 131 | ||
125 | /* | 132 | /* |
@@ -131,15 +138,46 @@ struct kvm_fpu { | |||
131 | * emulated. | 138 | * emulated. |
132 | * Modifications to times in the future are rejected. | 139 | * Modifications to times in the future are rejected. |
133 | */ | 140 | */ |
134 | #define KVM_REG_MIPS_COUNT_RESUME (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \ | 141 | #define KVM_REG_MIPS_COUNT_RESUME (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 1) |
135 | 0x20000 | 1) | ||
136 | /* | 142 | /* |
137 | * CP0_Count rate in Hz | 143 | * CP0_Count rate in Hz |
138 | * Specifies the rate of the CP0_Count timer in Hz. Modifications occur without | 144 | * Specifies the rate of the CP0_Count timer in Hz. Modifications occur without |
139 | * discontinuities in CP0_Count. | 145 | * discontinuities in CP0_Count. |
140 | */ | 146 | */ |
141 | #define KVM_REG_MIPS_COUNT_HZ (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \ | 147 | #define KVM_REG_MIPS_COUNT_HZ (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 2) |
142 | 0x20000 | 2) | 148 | |
149 | |||
150 | /* | ||
151 | * KVM_REG_MIPS_FPU - Floating Point and MIPS SIMD Architecture (MSA) registers. | ||
152 | * | ||
153 | * bits[15..8] - Register subset (see definitions below). | ||
154 | * bits[7..5] - Must be zero. | ||
155 | * bits[4..0] - Register number within register subset. | ||
156 | */ | ||
157 | |||
158 | #define KVM_REG_MIPS_FPR (KVM_REG_MIPS_FPU | 0x0000000000000000ULL) | ||
159 | #define KVM_REG_MIPS_FCR (KVM_REG_MIPS_FPU | 0x0000000000000100ULL) | ||
160 | #define KVM_REG_MIPS_MSACR (KVM_REG_MIPS_FPU | 0x0000000000000200ULL) | ||
161 | |||
162 | /* | ||
163 | * KVM_REG_MIPS_FPR - Floating point / Vector registers. | ||
164 | */ | ||
165 | #define KVM_REG_MIPS_FPR_32(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U32 | (n)) | ||
166 | #define KVM_REG_MIPS_FPR_64(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U64 | (n)) | ||
167 | #define KVM_REG_MIPS_VEC_128(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U128 | (n)) | ||
168 | |||
169 | /* | ||
170 | * KVM_REG_MIPS_FCR - Floating point control registers. | ||
171 | */ | ||
172 | #define KVM_REG_MIPS_FCR_IR (KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 | 0) | ||
173 | #define KVM_REG_MIPS_FCR_CSR (KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 | 31) | ||
174 | |||
175 | /* | ||
176 | * KVM_REG_MIPS_MSACR - MIPS SIMD Architecture (MSA) control registers. | ||
177 | */ | ||
178 | #define KVM_REG_MIPS_MSA_IR (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 | 0) | ||
179 | #define KVM_REG_MIPS_MSA_CSR (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 | 1) | ||
180 | |||
143 | 181 | ||
144 | /* | 182 | /* |
145 | * KVM MIPS specific structures and definitions | 183 | * KVM MIPS specific structures and definitions |
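
With the register-set encoding above, userspace reaches guest FPU and MSA state through the ordinary KVM_GET_ONE_REG/KVM_SET_ONE_REG interface. A hedged userspace sketch, assuming a vcpu fd obtained elsewhere, the updated asm/kvm.h from this series installed, and the vcpu's FPU/MSA features enabled (otherwise kvm_mips_get_reg() returns -EINVAL, as seen later in mips.c):

/*
 * Read the guest FCSR, the even double $f2, and the 128-bit vector $w2.
 * "vcpu_fd" is assumed to come from KVM_CREATE_VM/KVM_CREATE_VCPU; error
 * handling is abbreviated.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_reg(int vcpu_fd, uint64_t id, void *buf)
{
	struct kvm_one_reg reg = {
		.id	= id,
		.addr	= (uintptr_t)buf,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

int dump_fp_state(int vcpu_fd)
{
	uint32_t fcsr;
	uint64_t fpr2;
	uint64_t vec2[2];	/* 16-byte buffer for a U128 register */

	if (get_reg(vcpu_fd, KVM_REG_MIPS_FCR_CSR, &fcsr) ||
	    get_reg(vcpu_fd, KVM_REG_MIPS_FPR_64(2), &fpr2) ||
	    get_reg(vcpu_fd, KVM_REG_MIPS_VEC_128(2), vec2))
		return -1;

	printf("FCSR=%08x $f2=%016llx $w2 halves=%016llx %016llx\n",
	       (unsigned)fcsr, (unsigned long long)fpr2,
	       (unsigned long long)vec2[0], (unsigned long long)vec2[1]);
	return 0;
}

The U128 id simply points reg.addr at a 16-byte buffer; kvm_mips_get_reg() fills both 64-bit halves with a single copy_to_user().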
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index 750d67ac41e9..e59fd7cfac9e 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c | |||
@@ -167,72 +167,6 @@ void output_thread_fpu_defines(void) | |||
167 | OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]); | 167 | OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]); |
168 | OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]); | 168 | OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]); |
169 | 169 | ||
170 | /* the least significant 64 bits of each FP register */ | ||
171 | OFFSET(THREAD_FPR0_LS64, task_struct, | ||
172 | thread.fpu.fpr[0].val64[FPR_IDX(64, 0)]); | ||
173 | OFFSET(THREAD_FPR1_LS64, task_struct, | ||
174 | thread.fpu.fpr[1].val64[FPR_IDX(64, 0)]); | ||
175 | OFFSET(THREAD_FPR2_LS64, task_struct, | ||
176 | thread.fpu.fpr[2].val64[FPR_IDX(64, 0)]); | ||
177 | OFFSET(THREAD_FPR3_LS64, task_struct, | ||
178 | thread.fpu.fpr[3].val64[FPR_IDX(64, 0)]); | ||
179 | OFFSET(THREAD_FPR4_LS64, task_struct, | ||
180 | thread.fpu.fpr[4].val64[FPR_IDX(64, 0)]); | ||
181 | OFFSET(THREAD_FPR5_LS64, task_struct, | ||
182 | thread.fpu.fpr[5].val64[FPR_IDX(64, 0)]); | ||
183 | OFFSET(THREAD_FPR6_LS64, task_struct, | ||
184 | thread.fpu.fpr[6].val64[FPR_IDX(64, 0)]); | ||
185 | OFFSET(THREAD_FPR7_LS64, task_struct, | ||
186 | thread.fpu.fpr[7].val64[FPR_IDX(64, 0)]); | ||
187 | OFFSET(THREAD_FPR8_LS64, task_struct, | ||
188 | thread.fpu.fpr[8].val64[FPR_IDX(64, 0)]); | ||
189 | OFFSET(THREAD_FPR9_LS64, task_struct, | ||
190 | thread.fpu.fpr[9].val64[FPR_IDX(64, 0)]); | ||
191 | OFFSET(THREAD_FPR10_LS64, task_struct, | ||
192 | thread.fpu.fpr[10].val64[FPR_IDX(64, 0)]); | ||
193 | OFFSET(THREAD_FPR11_LS64, task_struct, | ||
194 | thread.fpu.fpr[11].val64[FPR_IDX(64, 0)]); | ||
195 | OFFSET(THREAD_FPR12_LS64, task_struct, | ||
196 | thread.fpu.fpr[12].val64[FPR_IDX(64, 0)]); | ||
197 | OFFSET(THREAD_FPR13_LS64, task_struct, | ||
198 | thread.fpu.fpr[13].val64[FPR_IDX(64, 0)]); | ||
199 | OFFSET(THREAD_FPR14_LS64, task_struct, | ||
200 | thread.fpu.fpr[14].val64[FPR_IDX(64, 0)]); | ||
201 | OFFSET(THREAD_FPR15_LS64, task_struct, | ||
202 | thread.fpu.fpr[15].val64[FPR_IDX(64, 0)]); | ||
203 | OFFSET(THREAD_FPR16_LS64, task_struct, | ||
204 | thread.fpu.fpr[16].val64[FPR_IDX(64, 0)]); | ||
205 | OFFSET(THREAD_FPR17_LS64, task_struct, | ||
206 | thread.fpu.fpr[17].val64[FPR_IDX(64, 0)]); | ||
207 | OFFSET(THREAD_FPR18_LS64, task_struct, | ||
208 | thread.fpu.fpr[18].val64[FPR_IDX(64, 0)]); | ||
209 | OFFSET(THREAD_FPR19_LS64, task_struct, | ||
210 | thread.fpu.fpr[19].val64[FPR_IDX(64, 0)]); | ||
211 | OFFSET(THREAD_FPR20_LS64, task_struct, | ||
212 | thread.fpu.fpr[20].val64[FPR_IDX(64, 0)]); | ||
213 | OFFSET(THREAD_FPR21_LS64, task_struct, | ||
214 | thread.fpu.fpr[21].val64[FPR_IDX(64, 0)]); | ||
215 | OFFSET(THREAD_FPR22_LS64, task_struct, | ||
216 | thread.fpu.fpr[22].val64[FPR_IDX(64, 0)]); | ||
217 | OFFSET(THREAD_FPR23_LS64, task_struct, | ||
218 | thread.fpu.fpr[23].val64[FPR_IDX(64, 0)]); | ||
219 | OFFSET(THREAD_FPR24_LS64, task_struct, | ||
220 | thread.fpu.fpr[24].val64[FPR_IDX(64, 0)]); | ||
221 | OFFSET(THREAD_FPR25_LS64, task_struct, | ||
222 | thread.fpu.fpr[25].val64[FPR_IDX(64, 0)]); | ||
223 | OFFSET(THREAD_FPR26_LS64, task_struct, | ||
224 | thread.fpu.fpr[26].val64[FPR_IDX(64, 0)]); | ||
225 | OFFSET(THREAD_FPR27_LS64, task_struct, | ||
226 | thread.fpu.fpr[27].val64[FPR_IDX(64, 0)]); | ||
227 | OFFSET(THREAD_FPR28_LS64, task_struct, | ||
228 | thread.fpu.fpr[28].val64[FPR_IDX(64, 0)]); | ||
229 | OFFSET(THREAD_FPR29_LS64, task_struct, | ||
230 | thread.fpu.fpr[29].val64[FPR_IDX(64, 0)]); | ||
231 | OFFSET(THREAD_FPR30_LS64, task_struct, | ||
232 | thread.fpu.fpr[30].val64[FPR_IDX(64, 0)]); | ||
233 | OFFSET(THREAD_FPR31_LS64, task_struct, | ||
234 | thread.fpu.fpr[31].val64[FPR_IDX(64, 0)]); | ||
235 | |||
236 | OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31); | 170 | OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31); |
237 | OFFSET(THREAD_MSA_CSR, task_struct, thread.fpu.msacsr); | 171 | OFFSET(THREAD_MSA_CSR, task_struct, thread.fpu.msacsr); |
238 | BLANK(); | 172 | BLANK(); |
@@ -470,6 +404,45 @@ void output_kvm_defines(void) | |||
470 | OFFSET(VCPU_LO, kvm_vcpu_arch, lo); | 404 | OFFSET(VCPU_LO, kvm_vcpu_arch, lo); |
471 | OFFSET(VCPU_HI, kvm_vcpu_arch, hi); | 405 | OFFSET(VCPU_HI, kvm_vcpu_arch, hi); |
472 | OFFSET(VCPU_PC, kvm_vcpu_arch, pc); | 406 | OFFSET(VCPU_PC, kvm_vcpu_arch, pc); |
407 | BLANK(); | ||
408 | |||
409 | OFFSET(VCPU_FPR0, kvm_vcpu_arch, fpu.fpr[0]); | ||
410 | OFFSET(VCPU_FPR1, kvm_vcpu_arch, fpu.fpr[1]); | ||
411 | OFFSET(VCPU_FPR2, kvm_vcpu_arch, fpu.fpr[2]); | ||
412 | OFFSET(VCPU_FPR3, kvm_vcpu_arch, fpu.fpr[3]); | ||
413 | OFFSET(VCPU_FPR4, kvm_vcpu_arch, fpu.fpr[4]); | ||
414 | OFFSET(VCPU_FPR5, kvm_vcpu_arch, fpu.fpr[5]); | ||
415 | OFFSET(VCPU_FPR6, kvm_vcpu_arch, fpu.fpr[6]); | ||
416 | OFFSET(VCPU_FPR7, kvm_vcpu_arch, fpu.fpr[7]); | ||
417 | OFFSET(VCPU_FPR8, kvm_vcpu_arch, fpu.fpr[8]); | ||
418 | OFFSET(VCPU_FPR9, kvm_vcpu_arch, fpu.fpr[9]); | ||
419 | OFFSET(VCPU_FPR10, kvm_vcpu_arch, fpu.fpr[10]); | ||
420 | OFFSET(VCPU_FPR11, kvm_vcpu_arch, fpu.fpr[11]); | ||
421 | OFFSET(VCPU_FPR12, kvm_vcpu_arch, fpu.fpr[12]); | ||
422 | OFFSET(VCPU_FPR13, kvm_vcpu_arch, fpu.fpr[13]); | ||
423 | OFFSET(VCPU_FPR14, kvm_vcpu_arch, fpu.fpr[14]); | ||
424 | OFFSET(VCPU_FPR15, kvm_vcpu_arch, fpu.fpr[15]); | ||
425 | OFFSET(VCPU_FPR16, kvm_vcpu_arch, fpu.fpr[16]); | ||
426 | OFFSET(VCPU_FPR17, kvm_vcpu_arch, fpu.fpr[17]); | ||
427 | OFFSET(VCPU_FPR18, kvm_vcpu_arch, fpu.fpr[18]); | ||
428 | OFFSET(VCPU_FPR19, kvm_vcpu_arch, fpu.fpr[19]); | ||
429 | OFFSET(VCPU_FPR20, kvm_vcpu_arch, fpu.fpr[20]); | ||
430 | OFFSET(VCPU_FPR21, kvm_vcpu_arch, fpu.fpr[21]); | ||
431 | OFFSET(VCPU_FPR22, kvm_vcpu_arch, fpu.fpr[22]); | ||
432 | OFFSET(VCPU_FPR23, kvm_vcpu_arch, fpu.fpr[23]); | ||
433 | OFFSET(VCPU_FPR24, kvm_vcpu_arch, fpu.fpr[24]); | ||
434 | OFFSET(VCPU_FPR25, kvm_vcpu_arch, fpu.fpr[25]); | ||
435 | OFFSET(VCPU_FPR26, kvm_vcpu_arch, fpu.fpr[26]); | ||
436 | OFFSET(VCPU_FPR27, kvm_vcpu_arch, fpu.fpr[27]); | ||
437 | OFFSET(VCPU_FPR28, kvm_vcpu_arch, fpu.fpr[28]); | ||
438 | OFFSET(VCPU_FPR29, kvm_vcpu_arch, fpu.fpr[29]); | ||
439 | OFFSET(VCPU_FPR30, kvm_vcpu_arch, fpu.fpr[30]); | ||
440 | OFFSET(VCPU_FPR31, kvm_vcpu_arch, fpu.fpr[31]); | ||
441 | |||
442 | OFFSET(VCPU_FCR31, kvm_vcpu_arch, fpu.fcr31); | ||
443 | OFFSET(VCPU_MSA_CSR, kvm_vcpu_arch, fpu.msacsr); | ||
444 | BLANK(); | ||
445 | |||
473 | OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0); | 446 | OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0); |
474 | OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid); | 447 | OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid); |
475 | OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid); | 448 | OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid); |
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index 2ebaabe3af15..af42e7003f12 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S | |||
@@ -360,12 +360,15 @@ NESTED(nmi_handler, PT_SIZE, sp) | |||
360 | .set mips1 | 360 | .set mips1 |
361 | SET_HARDFLOAT | 361 | SET_HARDFLOAT |
362 | cfc1 a1, fcr31 | 362 | cfc1 a1, fcr31 |
363 | li a2, ~(0x3f << 12) | ||
364 | and a2, a1 | ||
365 | ctc1 a2, fcr31 | ||
366 | .set pop | 363 | .set pop |
367 | TRACE_IRQS_ON | 364 | CLI |
368 | STI | 365 | TRACE_IRQS_OFF |
366 | .endm | ||
367 | |||
368 | .macro __build_clear_msa_fpe | ||
369 | _cfcmsa a1, MSA_CSR | ||
370 | CLI | ||
371 | TRACE_IRQS_OFF | ||
369 | .endm | 372 | .endm |
370 | 373 | ||
371 | .macro __build_clear_ade | 374 | .macro __build_clear_ade |
@@ -426,7 +429,7 @@ NESTED(nmi_handler, PT_SIZE, sp) | |||
426 | BUILD_HANDLER cpu cpu sti silent /* #11 */ | 429 | BUILD_HANDLER cpu cpu sti silent /* #11 */ |
427 | BUILD_HANDLER ov ov sti silent /* #12 */ | 430 | BUILD_HANDLER ov ov sti silent /* #12 */ |
428 | BUILD_HANDLER tr tr sti silent /* #13 */ | 431 | BUILD_HANDLER tr tr sti silent /* #13 */ |
429 | BUILD_HANDLER msa_fpe msa_fpe sti silent /* #14 */ | 432 | BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent /* #14 */ |
430 | BUILD_HANDLER fpe fpe fpe silent /* #15 */ | 433 | BUILD_HANDLER fpe fpe fpe silent /* #15 */ |
431 | BUILD_HANDLER ftlb ftlb none silent /* #16 */ | 434 | BUILD_HANDLER ftlb ftlb none silent /* #16 */ |
432 | BUILD_HANDLER msa msa sti silent /* #21 */ | 435 | BUILD_HANDLER msa msa sti silent /* #21 */ |
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 510452812594..7da6e324dd35 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c | |||
@@ -46,6 +46,26 @@ | |||
46 | #define CREATE_TRACE_POINTS | 46 | #define CREATE_TRACE_POINTS |
47 | #include <trace/events/syscalls.h> | 47 | #include <trace/events/syscalls.h> |
48 | 48 | ||
49 | static void init_fp_ctx(struct task_struct *target) | ||
50 | { | ||
51 | /* If FP has been used then the target already has context */ | ||
52 | if (tsk_used_math(target)) | ||
53 | return; | ||
54 | |||
55 | /* Begin with data registers set to all 1s... */ | ||
56 | memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr)); | ||
57 | |||
58 | /* ...and FCSR zeroed */ | ||
59 | target->thread.fpu.fcr31 = 0; | ||
60 | |||
61 | /* | ||
62 | * Record that the target has "used" math, such that the context | ||
63 | * just initialised, and any modifications made by the caller, | ||
64 | * aren't discarded. | ||
65 | */ | ||
66 | set_stopped_child_used_math(target); | ||
67 | } | ||
68 | |||
49 | /* | 69 | /* |
50 | * Called by kernel/ptrace.c when detaching.. | 70 | * Called by kernel/ptrace.c when detaching.. |
51 | * | 71 | * |
@@ -142,6 +162,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data) | |||
142 | if (!access_ok(VERIFY_READ, data, 33 * 8)) | 162 | if (!access_ok(VERIFY_READ, data, 33 * 8)) |
143 | return -EIO; | 163 | return -EIO; |
144 | 164 | ||
165 | init_fp_ctx(child); | ||
145 | fregs = get_fpu_regs(child); | 166 | fregs = get_fpu_regs(child); |
146 | 167 | ||
147 | for (i = 0; i < 32; i++) { | 168 | for (i = 0; i < 32; i++) { |
@@ -439,6 +460,8 @@ static int fpr_set(struct task_struct *target, | |||
439 | 460 | ||
440 | /* XXX fcr31 */ | 461 | /* XXX fcr31 */ |
441 | 462 | ||
463 | init_fp_ctx(target); | ||
464 | |||
442 | if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t)) | 465 | if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t)) |
443 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, | 466 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
444 | &target->thread.fpu, | 467 | &target->thread.fpu, |
@@ -660,12 +683,7 @@ long arch_ptrace(struct task_struct *child, long request, | |||
660 | case FPR_BASE ... FPR_BASE + 31: { | 683 | case FPR_BASE ... FPR_BASE + 31: { |
661 | union fpureg *fregs = get_fpu_regs(child); | 684 | union fpureg *fregs = get_fpu_regs(child); |
662 | 685 | ||
663 | if (!tsk_used_math(child)) { | 686 | init_fp_ctx(child); |
664 | /* FP not yet used */ | ||
665 | memset(&child->thread.fpu, ~0, | ||
666 | sizeof(child->thread.fpu)); | ||
667 | child->thread.fpu.fcr31 = 0; | ||
668 | } | ||
669 | #ifdef CONFIG_32BIT | 687 | #ifdef CONFIG_32BIT |
670 | if (test_thread_flag(TIF_32BIT_FPREGS)) { | 688 | if (test_thread_flag(TIF_32BIT_FPREGS)) { |
671 | /* | 689 | /* |
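
init_fp_ctx() means a tracer can now seed FP state in a child that has never executed an FP instruction, and the values are kept instead of being discarded when the context is first initialised. A hedged tracer-side sketch, assuming glibc's PTRACE_POKEUSER spelling and FPR_BASE from the MIPS asm/ptrace.h:

/*
 * Write $f0 of a stopped, already-traced child.  With init_fp_ctx() the
 * value survives even if the child has never touched the FPU.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace.h>		/* FPR_BASE on MIPS */

int poke_f0(pid_t pid, unsigned long value)
{
	if (ptrace(PTRACE_POKEUSER, pid, (void *)(long)(FPR_BASE + 0),
		   (void *)value) == -1) {
		perror("PTRACE_POKEUSER");
		return -1;
	}
	return 0;
}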
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 676c5030a953..1d88af26ba82 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S | |||
@@ -34,7 +34,6 @@ | |||
34 | .endm | 34 | .endm |
35 | 35 | ||
36 | .set noreorder | 36 | .set noreorder |
37 | .set MIPS_ISA_ARCH_LEVEL_RAW | ||
38 | 37 | ||
39 | LEAF(_save_fp_context) | 38 | LEAF(_save_fp_context) |
40 | .set push | 39 | .set push |
@@ -103,6 +102,7 @@ LEAF(_save_fp_context) | |||
103 | /* Save 32-bit process floating point context */ | 102 | /* Save 32-bit process floating point context */ |
104 | LEAF(_save_fp_context32) | 103 | LEAF(_save_fp_context32) |
105 | .set push | 104 | .set push |
105 | .set MIPS_ISA_ARCH_LEVEL_RAW | ||
106 | SET_HARDFLOAT | 106 | SET_HARDFLOAT |
107 | cfc1 t1, fcr31 | 107 | cfc1 t1, fcr31 |
108 | 108 | ||
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 33984c04b60b..5b4d711f878d 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -701,6 +701,13 @@ asmlinkage void do_ov(struct pt_regs *regs) | |||
701 | 701 | ||
702 | int process_fpemu_return(int sig, void __user *fault_addr) | 702 | int process_fpemu_return(int sig, void __user *fault_addr) |
703 | { | 703 | { |
704 | /* | ||
705 | * We can't allow the emulated instruction to leave any of the cause | ||
706 | * bits set in FCSR. If they were then the kernel would take an FP | ||
707 | * exception when restoring FP context. | ||
708 | */ | ||
709 | current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; | ||
710 | |||
704 | if (sig == SIGSEGV || sig == SIGBUS) { | 711 | if (sig == SIGSEGV || sig == SIGBUS) { |
705 | struct siginfo si = {0}; | 712 | struct siginfo si = {0}; |
706 | si.si_addr = fault_addr; | 713 | si.si_addr = fault_addr; |
@@ -781,6 +788,11 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) | |||
781 | if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), | 788 | if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), |
782 | SIGFPE) == NOTIFY_STOP) | 789 | SIGFPE) == NOTIFY_STOP) |
783 | goto out; | 790 | goto out; |
791 | |||
792 | /* Clear FCSR.Cause before enabling interrupts */ | ||
793 | write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X); | ||
794 | local_irq_enable(); | ||
795 | |||
784 | die_if_kernel("FP exception in kernel code", regs); | 796 | die_if_kernel("FP exception in kernel code", regs); |
785 | 797 | ||
786 | if (fcr31 & FPU_CSR_UNI_X) { | 798 | if (fcr31 & FPU_CSR_UNI_X) { |
@@ -804,18 +816,12 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) | |||
804 | sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, | 816 | sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, |
805 | &fault_addr); | 817 | &fault_addr); |
806 | 818 | ||
807 | /* | 819 | /* If something went wrong, signal */ |
808 | * We can't allow the emulated instruction to leave any of | 820 | process_fpemu_return(sig, fault_addr); |
809 | * the cause bit set in $fcr31. | ||
810 | */ | ||
811 | current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; | ||
812 | 821 | ||
813 | /* Restore the hardware register state */ | 822 | /* Restore the hardware register state */ |
814 | own_fpu(1); /* Using the FPU again. */ | 823 | own_fpu(1); /* Using the FPU again. */ |
815 | 824 | ||
816 | /* If something went wrong, signal */ | ||
817 | process_fpemu_return(sig, fault_addr); | ||
818 | |||
819 | goto out; | 825 | goto out; |
820 | } else if (fcr31 & FPU_CSR_INV_X) | 826 | } else if (fcr31 & FPU_CSR_INV_X) |
821 | info.si_code = FPE_FLTINV; | 827 | info.si_code = FPE_FLTINV; |
@@ -1392,13 +1398,22 @@ out: | |||
1392 | exception_exit(prev_state); | 1398 | exception_exit(prev_state); |
1393 | } | 1399 | } |
1394 | 1400 | ||
1395 | asmlinkage void do_msa_fpe(struct pt_regs *regs) | 1401 | asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr) |
1396 | { | 1402 | { |
1397 | enum ctx_state prev_state; | 1403 | enum ctx_state prev_state; |
1398 | 1404 | ||
1399 | prev_state = exception_enter(); | 1405 | prev_state = exception_enter(); |
1406 | if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0, | ||
1407 | regs_to_trapnr(regs), SIGFPE) == NOTIFY_STOP) | ||
1408 | goto out; | ||
1409 | |||
1410 | /* Clear MSACSR.Cause before enabling interrupts */ | ||
1411 | write_msa_csr(msacsr & ~MSA_CSR_CAUSEF); | ||
1412 | local_irq_enable(); | ||
1413 | |||
1400 | die_if_kernel("do_msa_fpe invoked from kernel context!", regs); | 1414 | die_if_kernel("do_msa_fpe invoked from kernel context!", regs); |
1401 | force_sig(SIGFPE, current); | 1415 | force_sig(SIGFPE, current); |
1416 | out: | ||
1402 | exception_exit(prev_state); | 1417 | exception_exit(prev_state); |
1403 | } | 1418 | } |
1404 | 1419 | ||
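
process_fpemu_return() now clears the FCSR cause bits in one place so that restoring the emulated context can never immediately re-raise the exception the emulator just handled. A stand-alone sketch of the invariant being enforced; the mask values mirror FPU_CSR_ALL_X/FPU_CSR_ALL_E from asm/mipsregs.h (an assumption of this sketch):

/*
 * If any FCSR cause bit (17..12) is left set while the matching enable
 * bit (11..7) is set - or the unimplemented-op cause is set at all -
 * writing that FCSR back with ctc1 traps immediately.
 */
#include <stdio.h>
#include <stdint.h>

#define FPU_CSR_ALL_X	0x0003f000	/* cause bits, incl. unimplemented op */
#define FPU_CSR_ALL_E	0x00000f80	/* enable bits */

static int fcsr_would_trap(uint32_t fcsr)
{
	uint32_t cause  = (fcsr & FPU_CSR_ALL_X) >> 12;
	uint32_t enable = (fcsr & FPU_CSR_ALL_E) >> 7;

	return (cause >> 5) || (cause & enable);
}

int main(void)
{
	uint32_t fcsr = 0x00010800;	/* invalid-op cause set, invalid-op enabled */

	printf("before: would trap = %d\n", fcsr_would_trap(fcsr));
	fcsr &= ~FPU_CSR_ALL_X;		/* what process_fpemu_return() now does */
	printf("after:  would trap = %d\n", fcsr_would_trap(fcsr));
	return 0;
}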
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile index 401fe027c261..637ebbebd549 100644 --- a/arch/mips/kvm/Makefile +++ b/arch/mips/kvm/Makefile | |||
@@ -1,13 +1,15 @@ | |||
1 | # Makefile for KVM support for MIPS | 1 | # Makefile for KVM support for MIPS |
2 | # | 2 | # |
3 | 3 | ||
4 | common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) | 4 | common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) |
5 | 5 | ||
6 | EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm | 6 | EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm |
7 | 7 | ||
8 | kvm-objs := $(common-objs) mips.o emulate.o locore.o \ | 8 | common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o |
9 | |||
10 | kvm-objs := $(common-objs-y) mips.o emulate.o locore.o \ | ||
9 | interrupt.o stats.o commpage.o \ | 11 | interrupt.o stats.o commpage.o \ |
10 | dyntrans.o trap_emul.o | 12 | dyntrans.o trap_emul.o fpu.o |
11 | 13 | ||
12 | obj-$(CONFIG_KVM) += kvm.o | 14 | obj-$(CONFIG_KVM) += kvm.o |
13 | obj-y += callback.o tlb.o | 15 | obj-y += callback.o tlb.o |
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index fb3e8dfd1ff6..6230f376a44e 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c | |||
@@ -884,6 +884,84 @@ enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) | |||
884 | return EMULATE_DONE; | 884 | return EMULATE_DONE; |
885 | } | 885 | } |
886 | 886 | ||
887 | /** | ||
888 | * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1 | ||
889 | * @vcpu: Virtual CPU. | ||
890 | * | ||
891 | * Finds the mask of bits which are writable in the guest's Config1 CP0 | ||
892 | * register, by userland (currently read-only to the guest). | ||
893 | */ | ||
894 | unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu) | ||
895 | { | ||
896 | unsigned int mask = 0; | ||
897 | |||
898 | /* Permit FPU to be present if FPU is supported */ | ||
899 | if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) | ||
900 | mask |= MIPS_CONF1_FP; | ||
901 | |||
902 | return mask; | ||
903 | } | ||
904 | |||
905 | /** | ||
906 | * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3 | ||
907 | * @vcpu: Virtual CPU. | ||
908 | * | ||
909 | * Finds the mask of bits which are writable in the guest's Config3 CP0 | ||
910 | * register, by userland (currently read-only to the guest). | ||
911 | */ | ||
912 | unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu) | ||
913 | { | ||
914 | /* Config4 is optional */ | ||
915 | unsigned int mask = MIPS_CONF_M; | ||
916 | |||
917 | /* Permit MSA to be present if MSA is supported */ | ||
918 | if (kvm_mips_guest_can_have_msa(&vcpu->arch)) | ||
919 | mask |= MIPS_CONF3_MSA; | ||
920 | |||
921 | return mask; | ||
922 | } | ||
923 | |||
924 | /** | ||
925 | * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4 | ||
926 | * @vcpu: Virtual CPU. | ||
927 | * | ||
928 | * Finds the mask of bits which are writable in the guest's Config4 CP0 | ||
929 | * register, by userland (currently read-only to the guest). | ||
930 | */ | ||
931 | unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu) | ||
932 | { | ||
933 | /* Config5 is optional */ | ||
934 | return MIPS_CONF_M; | ||
935 | } | ||
936 | |||
937 | /** | ||
938 | * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5 | ||
939 | * @vcpu: Virtual CPU. | ||
940 | * | ||
941 | * Finds the mask of bits which are writable in the guest's Config5 CP0 | ||
942 | * register, by the guest itself. | ||
943 | */ | ||
944 | unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu) | ||
945 | { | ||
946 | unsigned int mask = 0; | ||
947 | |||
948 | /* Permit MSAEn changes if MSA supported and enabled */ | ||
949 | if (kvm_mips_guest_has_msa(&vcpu->arch)) | ||
950 | mask |= MIPS_CONF5_MSAEN; | ||
951 | |||
952 | /* | ||
953 | * Permit guest FPU mode changes if FPU is enabled and the relevant | ||
954 | * feature exists according to FIR register. | ||
955 | */ | ||
956 | if (kvm_mips_guest_has_fpu(&vcpu->arch)) { | ||
957 | if (cpu_has_fre) | ||
958 | mask |= MIPS_CONF5_FRE; | ||
959 | /* We don't support UFR or UFE */ | ||
960 | } | ||
961 | |||
962 | return mask; | ||
963 | } | ||
964 | |||
887 | enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, | 965 | enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, |
888 | uint32_t cause, struct kvm_run *run, | 966 | uint32_t cause, struct kvm_run *run, |
889 | struct kvm_vcpu *vcpu) | 967 | struct kvm_vcpu *vcpu) |
@@ -1021,18 +1099,114 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, | |||
1021 | kvm_mips_write_compare(vcpu, | 1099 | kvm_mips_write_compare(vcpu, |
1022 | vcpu->arch.gprs[rt]); | 1100 | vcpu->arch.gprs[rt]); |
1023 | } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { | 1101 | } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { |
1024 | kvm_write_c0_guest_status(cop0, | 1102 | unsigned int old_val, val, change; |
1025 | vcpu->arch.gprs[rt]); | 1103 | |
1104 | old_val = kvm_read_c0_guest_status(cop0); | ||
1105 | val = vcpu->arch.gprs[rt]; | ||
1106 | change = val ^ old_val; | ||
1107 | |||
1108 | /* Make sure that the NMI bit is never set */ | ||
1109 | val &= ~ST0_NMI; | ||
1110 | |||
1111 | /* | ||
1112 | * Don't allow CU1 or FR to be set unless FPU | ||
1113 | * capability enabled and exists in guest | ||
1114 | * configuration. | ||
1115 | */ | ||
1116 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
1117 | val &= ~(ST0_CU1 | ST0_FR); | ||
1118 | |||
1119 | /* | ||
1120 | * Also don't allow FR to be set if host doesn't | ||
1121 | * support it. | ||
1122 | */ | ||
1123 | if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64)) | ||
1124 | val &= ~ST0_FR; | ||
1125 | |||
1126 | |||
1127 | /* Handle changes in FPU mode */ | ||
1128 | preempt_disable(); | ||
1129 | |||
1130 | /* | ||
1131 | * FPU and Vector register state is made | ||
1132 | * UNPREDICTABLE by a change of FR, so don't | ||
1133 | * even bother saving it. | ||
1134 | */ | ||
1135 | if (change & ST0_FR) | ||
1136 | kvm_drop_fpu(vcpu); | ||
1137 | |||
1138 | /* | ||
1139 | * If MSA state is already live, it is undefined | ||
1140 | * how it interacts with FR=0 FPU state, and we | ||
1141 | * don't want to hit reserved instruction | ||
1142 | * exceptions trying to save the MSA state later | ||
1143 | * when CU=1 && FR=1, so play it safe and save | ||
1144 | * it first. | ||
1145 | */ | ||
1146 | if (change & ST0_CU1 && !(val & ST0_FR) && | ||
1147 | vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) | ||
1148 | kvm_lose_fpu(vcpu); | ||
1149 | |||
1026 | /* | 1150 | /* |
1027 | * Make sure that CU1 and NMI bits are | 1151 | * Propagate CU1 (FPU enable) changes |
1028 | * never set | 1152 | * immediately if the FPU context is already |
1153 | * loaded. When disabling we leave the context | ||
1154 | * loaded so it can be quickly enabled again in | ||
1155 | * the near future. | ||
1029 | */ | 1156 | */ |
1030 | kvm_clear_c0_guest_status(cop0, | 1157 | if (change & ST0_CU1 && |
1031 | (ST0_CU1 | ST0_NMI)); | 1158 | vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) |
1159 | change_c0_status(ST0_CU1, val); | ||
1160 | |||
1161 | preempt_enable(); | ||
1162 | |||
1163 | kvm_write_c0_guest_status(cop0, val); | ||
1032 | 1164 | ||
1033 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | 1165 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS |
1034 | kvm_mips_trans_mtc0(inst, opc, vcpu); | 1166 | /* |
1167 | * If FPU present, we need CU1/FR bits to take | ||
1168 | * effect fairly soon. | ||
1169 | */ | ||
1170 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
1171 | kvm_mips_trans_mtc0(inst, opc, vcpu); | ||
1035 | #endif | 1172 | #endif |
1173 | } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) { | ||
1174 | unsigned int old_val, val, change, wrmask; | ||
1175 | |||
1176 | old_val = kvm_read_c0_guest_config5(cop0); | ||
1177 | val = vcpu->arch.gprs[rt]; | ||
1178 | |||
1179 | /* Only a few bits are writable in Config5 */ | ||
1180 | wrmask = kvm_mips_config5_wrmask(vcpu); | ||
1181 | change = (val ^ old_val) & wrmask; | ||
1182 | val = old_val ^ change; | ||
1183 | |||
1184 | |||
1185 | /* Handle changes in FPU/MSA modes */ | ||
1186 | preempt_disable(); | ||
1187 | |||
1188 | /* | ||
1189 | * Propagate FRE changes immediately if the FPU | ||
1190 | * context is already loaded. | ||
1191 | */ | ||
1192 | if (change & MIPS_CONF5_FRE && | ||
1193 | vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) | ||
1194 | change_c0_config5(MIPS_CONF5_FRE, val); | ||
1195 | |||
1196 | /* | ||
1197 | * Propagate MSAEn changes immediately if the | ||
1198 | * MSA context is already loaded. When disabling | ||
1199 | * we leave the context loaded so it can be | ||
1200 | * quickly enabled again in the near future. | ||
1201 | */ | ||
1202 | if (change & MIPS_CONF5_MSAEN && | ||
1203 | vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) | ||
1204 | change_c0_config5(MIPS_CONF5_MSAEN, | ||
1205 | val); | ||
1206 | |||
1207 | preempt_enable(); | ||
1208 | |||
1209 | kvm_write_c0_guest_config5(cop0, val); | ||
1036 | } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { | 1210 | } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { |
1037 | uint32_t old_cause, new_cause; | 1211 | uint32_t old_cause, new_cause; |
1038 | 1212 | ||
@@ -1970,6 +2144,146 @@ enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause, | |||
1970 | return er; | 2144 | return er; |
1971 | } | 2145 | } |
1972 | 2146 | ||
2147 | enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause, | ||
2148 | uint32_t *opc, | ||
2149 | struct kvm_run *run, | ||
2150 | struct kvm_vcpu *vcpu) | ||
2151 | { | ||
2152 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2153 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
2154 | enum emulation_result er = EMULATE_DONE; | ||
2155 | |||
2156 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | ||
2157 | /* save old pc */ | ||
2158 | kvm_write_c0_guest_epc(cop0, arch->pc); | ||
2159 | kvm_set_c0_guest_status(cop0, ST0_EXL); | ||
2160 | |||
2161 | if (cause & CAUSEF_BD) | ||
2162 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | ||
2163 | else | ||
2164 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | ||
2165 | |||
2166 | kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc); | ||
2167 | |||
2168 | kvm_change_c0_guest_cause(cop0, (0xff), | ||
2169 | (T_TRAP << CAUSEB_EXCCODE)); | ||
2170 | |||
2171 | /* Set PC to the exception entry point */ | ||
2172 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | ||
2173 | |||
2174 | } else { | ||
2175 | kvm_err("Trying to deliver TRAP when EXL is already set\n"); | ||
2176 | er = EMULATE_FAIL; | ||
2177 | } | ||
2178 | |||
2179 | return er; | ||
2180 | } | ||
2181 | |||
2182 | enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause, | ||
2183 | uint32_t *opc, | ||
2184 | struct kvm_run *run, | ||
2185 | struct kvm_vcpu *vcpu) | ||
2186 | { | ||
2187 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2188 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
2189 | enum emulation_result er = EMULATE_DONE; | ||
2190 | |||
2191 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | ||
2192 | /* save old pc */ | ||
2193 | kvm_write_c0_guest_epc(cop0, arch->pc); | ||
2194 | kvm_set_c0_guest_status(cop0, ST0_EXL); | ||
2195 | |||
2196 | if (cause & CAUSEF_BD) | ||
2197 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | ||
2198 | else | ||
2199 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | ||
2200 | |||
2201 | kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc); | ||
2202 | |||
2203 | kvm_change_c0_guest_cause(cop0, (0xff), | ||
2204 | (T_MSAFPE << CAUSEB_EXCCODE)); | ||
2205 | |||
2206 | /* Set PC to the exception entry point */ | ||
2207 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | ||
2208 | |||
2209 | } else { | ||
2210 | kvm_err("Trying to deliver MSAFPE when EXL is already set\n"); | ||
2211 | er = EMULATE_FAIL; | ||
2212 | } | ||
2213 | |||
2214 | return er; | ||
2215 | } | ||
2216 | |||
2217 | enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause, | ||
2218 | uint32_t *opc, | ||
2219 | struct kvm_run *run, | ||
2220 | struct kvm_vcpu *vcpu) | ||
2221 | { | ||
2222 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2223 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
2224 | enum emulation_result er = EMULATE_DONE; | ||
2225 | |||
2226 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | ||
2227 | /* save old pc */ | ||
2228 | kvm_write_c0_guest_epc(cop0, arch->pc); | ||
2229 | kvm_set_c0_guest_status(cop0, ST0_EXL); | ||
2230 | |||
2231 | if (cause & CAUSEF_BD) | ||
2232 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | ||
2233 | else | ||
2234 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | ||
2235 | |||
2236 | kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc); | ||
2237 | |||
2238 | kvm_change_c0_guest_cause(cop0, (0xff), | ||
2239 | (T_FPE << CAUSEB_EXCCODE)); | ||
2240 | |||
2241 | /* Set PC to the exception entry point */ | ||
2242 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | ||
2243 | |||
2244 | } else { | ||
2245 | kvm_err("Trying to deliver FPE when EXL is already set\n"); | ||
2246 | er = EMULATE_FAIL; | ||
2247 | } | ||
2248 | |||
2249 | return er; | ||
2250 | } | ||
2251 | |||
2252 | enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause, | ||
2253 | uint32_t *opc, | ||
2254 | struct kvm_run *run, | ||
2255 | struct kvm_vcpu *vcpu) | ||
2256 | { | ||
2257 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2258 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
2259 | enum emulation_result er = EMULATE_DONE; | ||
2260 | |||
2261 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | ||
2262 | /* save old pc */ | ||
2263 | kvm_write_c0_guest_epc(cop0, arch->pc); | ||
2264 | kvm_set_c0_guest_status(cop0, ST0_EXL); | ||
2265 | |||
2266 | if (cause & CAUSEF_BD) | ||
2267 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | ||
2268 | else | ||
2269 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | ||
2270 | |||
2271 | kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc); | ||
2272 | |||
2273 | kvm_change_c0_guest_cause(cop0, (0xff), | ||
2274 | (T_MSADIS << CAUSEB_EXCCODE)); | ||
2275 | |||
2276 | /* Set PC to the exception entry point */ | ||
2277 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | ||
2278 | |||
2279 | } else { | ||
2280 | kvm_err("Trying to deliver MSADIS when EXL is already set\n"); | ||
2281 | er = EMULATE_FAIL; | ||
2282 | } | ||
2283 | |||
2284 | return er; | ||
2285 | } | ||
2286 | |||
1973 | /* ll/sc, rdhwr, sync emulation */ | 2287 | /* ll/sc, rdhwr, sync emulation */ |
1974 | 2288 | ||
1975 | #define OPCODE 0xfc000000 | 2289 | #define OPCODE 0xfc000000 |
@@ -2176,6 +2490,10 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause, | |||
2176 | case T_SYSCALL: | 2490 | case T_SYSCALL: |
2177 | case T_BREAK: | 2491 | case T_BREAK: |
2178 | case T_RES_INST: | 2492 | case T_RES_INST: |
2493 | case T_TRAP: | ||
2494 | case T_MSAFPE: | ||
2495 | case T_FPE: | ||
2496 | case T_MSADIS: | ||
2179 | break; | 2497 | break; |
2180 | 2498 | ||
2181 | case T_COP_UNUSABLE: | 2499 | case T_COP_UNUSABLE: |
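
Guest writes to Status and Config5 above are filtered through a writable-bit mask with an XOR read-modify-write before being committed. The pattern in isolation, using hypothetical register values:

/*
 * Only bits present in "wrmask" may change; everything else keeps its
 * old value - the same merge used for the guest Config5 write above.
 */
#include <stdio.h>

static unsigned int masked_write(unsigned int old_val, unsigned int val,
				 unsigned int wrmask)
{
	unsigned int change = (val ^ old_val) & wrmask;

	return old_val ^ change;
}

int main(void)
{
	/*
	 * The guest tries to set bits 0x0c000120, but only 0x08000100
	 * (MSAEn | FRE) is writable.
	 */
	unsigned int new_val = masked_write(0x00000002, 0x0c000120, 0x08000100);

	printf("committed value: 0x%08x\n", new_val);	/* 0x08000102 */
	return 0;
}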
diff --git a/arch/mips/kvm/fpu.S b/arch/mips/kvm/fpu.S new file mode 100644 index 000000000000..531fbf5131c0 --- /dev/null +++ b/arch/mips/kvm/fpu.S | |||
@@ -0,0 +1,122 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * FPU context handling code for KVM. | ||
7 | * | ||
8 | * Copyright (C) 2015 Imagination Technologies Ltd. | ||
9 | */ | ||
10 | |||
11 | #include <asm/asm.h> | ||
12 | #include <asm/asm-offsets.h> | ||
13 | #include <asm/fpregdef.h> | ||
14 | #include <asm/mipsregs.h> | ||
15 | #include <asm/regdef.h> | ||
16 | |||
17 | .set noreorder | ||
18 | .set noat | ||
19 | |||
20 | LEAF(__kvm_save_fpu) | ||
21 | .set push | ||
22 | .set mips64r2 | ||
23 | SET_HARDFLOAT | ||
24 | mfc0 t0, CP0_STATUS | ||
25 | sll t0, t0, 5 # is Status.FR set? | ||
26 | bgez t0, 1f # no: skip odd doubles | ||
27 | nop | ||
28 | sdc1 $f1, VCPU_FPR1(a0) | ||
29 | sdc1 $f3, VCPU_FPR3(a0) | ||
30 | sdc1 $f5, VCPU_FPR5(a0) | ||
31 | sdc1 $f7, VCPU_FPR7(a0) | ||
32 | sdc1 $f9, VCPU_FPR9(a0) | ||
33 | sdc1 $f11, VCPU_FPR11(a0) | ||
34 | sdc1 $f13, VCPU_FPR13(a0) | ||
35 | sdc1 $f15, VCPU_FPR15(a0) | ||
36 | sdc1 $f17, VCPU_FPR17(a0) | ||
37 | sdc1 $f19, VCPU_FPR19(a0) | ||
38 | sdc1 $f21, VCPU_FPR21(a0) | ||
39 | sdc1 $f23, VCPU_FPR23(a0) | ||
40 | sdc1 $f25, VCPU_FPR25(a0) | ||
41 | sdc1 $f27, VCPU_FPR27(a0) | ||
42 | sdc1 $f29, VCPU_FPR29(a0) | ||
43 | sdc1 $f31, VCPU_FPR31(a0) | ||
44 | 1: sdc1 $f0, VCPU_FPR0(a0) | ||
45 | sdc1 $f2, VCPU_FPR2(a0) | ||
46 | sdc1 $f4, VCPU_FPR4(a0) | ||
47 | sdc1 $f6, VCPU_FPR6(a0) | ||
48 | sdc1 $f8, VCPU_FPR8(a0) | ||
49 | sdc1 $f10, VCPU_FPR10(a0) | ||
50 | sdc1 $f12, VCPU_FPR12(a0) | ||
51 | sdc1 $f14, VCPU_FPR14(a0) | ||
52 | sdc1 $f16, VCPU_FPR16(a0) | ||
53 | sdc1 $f18, VCPU_FPR18(a0) | ||
54 | sdc1 $f20, VCPU_FPR20(a0) | ||
55 | sdc1 $f22, VCPU_FPR22(a0) | ||
56 | sdc1 $f24, VCPU_FPR24(a0) | ||
57 | sdc1 $f26, VCPU_FPR26(a0) | ||
58 | sdc1 $f28, VCPU_FPR28(a0) | ||
59 | jr ra | ||
60 | sdc1 $f30, VCPU_FPR30(a0) | ||
61 | .set pop | ||
62 | END(__kvm_save_fpu) | ||
63 | |||
64 | LEAF(__kvm_restore_fpu) | ||
65 | .set push | ||
66 | .set mips64r2 | ||
67 | SET_HARDFLOAT | ||
68 | mfc0 t0, CP0_STATUS | ||
69 | sll t0, t0, 5 # is Status.FR set? | ||
70 | bgez t0, 1f # no: skip odd doubles | ||
71 | nop | ||
72 | ldc1 $f1, VCPU_FPR1(a0) | ||
73 | ldc1 $f3, VCPU_FPR3(a0) | ||
74 | ldc1 $f5, VCPU_FPR5(a0) | ||
75 | ldc1 $f7, VCPU_FPR7(a0) | ||
76 | ldc1 $f9, VCPU_FPR9(a0) | ||
77 | ldc1 $f11, VCPU_FPR11(a0) | ||
78 | ldc1 $f13, VCPU_FPR13(a0) | ||
79 | ldc1 $f15, VCPU_FPR15(a0) | ||
80 | ldc1 $f17, VCPU_FPR17(a0) | ||
81 | ldc1 $f19, VCPU_FPR19(a0) | ||
82 | ldc1 $f21, VCPU_FPR21(a0) | ||
83 | ldc1 $f23, VCPU_FPR23(a0) | ||
84 | ldc1 $f25, VCPU_FPR25(a0) | ||
85 | ldc1 $f27, VCPU_FPR27(a0) | ||
86 | ldc1 $f29, VCPU_FPR29(a0) | ||
87 | ldc1 $f31, VCPU_FPR31(a0) | ||
88 | 1: ldc1 $f0, VCPU_FPR0(a0) | ||
89 | ldc1 $f2, VCPU_FPR2(a0) | ||
90 | ldc1 $f4, VCPU_FPR4(a0) | ||
91 | ldc1 $f6, VCPU_FPR6(a0) | ||
92 | ldc1 $f8, VCPU_FPR8(a0) | ||
93 | ldc1 $f10, VCPU_FPR10(a0) | ||
94 | ldc1 $f12, VCPU_FPR12(a0) | ||
95 | ldc1 $f14, VCPU_FPR14(a0) | ||
96 | ldc1 $f16, VCPU_FPR16(a0) | ||
97 | ldc1 $f18, VCPU_FPR18(a0) | ||
98 | ldc1 $f20, VCPU_FPR20(a0) | ||
99 | ldc1 $f22, VCPU_FPR22(a0) | ||
100 | ldc1 $f24, VCPU_FPR24(a0) | ||
101 | ldc1 $f26, VCPU_FPR26(a0) | ||
102 | ldc1 $f28, VCPU_FPR28(a0) | ||
103 | jr ra | ||
104 | ldc1 $f30, VCPU_FPR30(a0) | ||
105 | .set pop | ||
106 | END(__kvm_restore_fpu) | ||
107 | |||
108 | LEAF(__kvm_restore_fcsr) | ||
109 | .set push | ||
110 | SET_HARDFLOAT | ||
111 | lw t0, VCPU_FCR31(a0) | ||
112 | /* | ||
113 | * The ctc1 must stay at this offset in __kvm_restore_fcsr. | ||
114 | * See kvm_mips_csr_die_notify() which handles t0 containing a value | ||
115 | * which triggers an FP Exception, which must be stepped over and | ||
116 | * ignored since the set cause bits must remain there for the guest. | ||
117 | */ | ||
118 | ctc1 t0, fcr31 | ||
119 | jr ra | ||
120 | nop | ||
121 | .set pop | ||
122 | END(__kvm_restore_fcsr) | ||
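
__kvm_save_fpu/__kvm_restore_fpu test Status.FR by shifting it into the sign bit (the sll by 5 moves bit 26 to bit 31) so a single bgez skips the odd-numbered doubles, which are only architecturally present when FR=1. The same test rendered in C, assuming ST0_FR is bit 26 as in asm/mipsregs.h:

/*
 * C rendering of the "sll t0, status, 5; bgez t0, skip" test used above:
 * after shifting left by 5, bit 26 (FR) becomes the sign bit, so a
 * non-negative result means FR is clear and the odd doubles don't exist.
 */
#include <stdio.h>
#include <stdint.h>

#define ST0_FR	(1u << 26)

static int has_odd_doubles(uint32_t status)
{
	return (int32_t)(status << 5) < 0;
}

int main(void)
{
	printf("FR=0 -> odd doubles: %d\n", has_odd_doubles(0));
	printf("FR=1 -> odd doubles: %d\n", has_odd_doubles(ST0_FR));
	return 0;
}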
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S index 4a68b176d6e4..c567240386a0 100644 --- a/arch/mips/kvm/locore.S +++ b/arch/mips/kvm/locore.S | |||
@@ -36,6 +36,8 @@ | |||
36 | #define PT_HOST_USERLOCAL PT_EPC | 36 | #define PT_HOST_USERLOCAL PT_EPC |
37 | 37 | ||
38 | #define CP0_DDATA_LO $28,3 | 38 | #define CP0_DDATA_LO $28,3 |
39 | #define CP0_CONFIG3 $16,3 | ||
40 | #define CP0_CONFIG5 $16,5 | ||
39 | #define CP0_EBASE $15,1 | 41 | #define CP0_EBASE $15,1 |
40 | 42 | ||
41 | #define CP0_INTCTL $12,1 | 43 | #define CP0_INTCTL $12,1 |
@@ -353,6 +355,42 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) | |||
353 | LONG_L k0, VCPU_HOST_EBASE(k1) | 355 | LONG_L k0, VCPU_HOST_EBASE(k1) |
354 | mtc0 k0,CP0_EBASE | 356 | mtc0 k0,CP0_EBASE |
355 | 357 | ||
358 | /* | ||
359 | * If FPU is enabled, save FCR31 and clear it so that later ctc1's don't | ||
360 | * trigger FPE for pending exceptions. | ||
361 | */ | ||
362 | .set at | ||
363 | and v1, v0, ST0_CU1 | ||
364 | beqz v1, 1f | ||
365 | nop | ||
366 | .set push | ||
367 | SET_HARDFLOAT | ||
368 | cfc1 t0, fcr31 | ||
369 | sw t0, VCPU_FCR31(k1) | ||
370 | ctc1 zero,fcr31 | ||
371 | .set pop | ||
372 | .set noat | ||
373 | 1: | ||
374 | |||
375 | #ifdef CONFIG_CPU_HAS_MSA | ||
376 | /* | ||
377 | * If MSA is enabled, save MSACSR and clear it so that later | ||
378 | * instructions don't trigger MSAFPE for pending exceptions. | ||
379 | */ | ||
380 | mfc0 t0, CP0_CONFIG3 | ||
381 | ext t0, t0, 28, 1 /* MIPS_CONF3_MSAP */ | ||
382 | beqz t0, 1f | ||
383 | nop | ||
384 | mfc0 t0, CP0_CONFIG5 | ||
385 | ext t0, t0, 27, 1 /* MIPS_CONF5_MSAEN */ | ||
386 | beqz t0, 1f | ||
387 | nop | ||
388 | _cfcmsa t0, MSA_CSR | ||
389 | sw t0, VCPU_MSA_CSR(k1) | ||
390 | _ctcmsa MSA_CSR, zero | ||
391 | 1: | ||
392 | #endif | ||
393 | |||
356 | /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ | 394 | /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ |
357 | .set at | 395 | .set at |
358 | and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE) | 396 | and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE) |
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index c9eccf5df912..bb68e8d520e8 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
14 | #include <linux/kdebug.h> | ||
14 | #include <linux/module.h> | 15 | #include <linux/module.h> |
15 | #include <linux/vmalloc.h> | 16 | #include <linux/vmalloc.h> |
16 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
@@ -48,6 +49,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
48 | { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU }, | 49 | { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU }, |
49 | { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU }, | 50 | { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU }, |
50 | { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU }, | 51 | { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU }, |
52 | { "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU }, | ||
53 | { "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU }, | ||
54 | { "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU }, | ||
55 | { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU }, | ||
51 | { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU }, | 56 | { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU }, |
52 | { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU }, | 57 | { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU }, |
53 | { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU }, | 58 | { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU }, |
@@ -504,10 +509,13 @@ static u64 kvm_mips_get_one_regs[] = { | |||
504 | KVM_REG_MIPS_CP0_STATUS, | 509 | KVM_REG_MIPS_CP0_STATUS, |
505 | KVM_REG_MIPS_CP0_CAUSE, | 510 | KVM_REG_MIPS_CP0_CAUSE, |
506 | KVM_REG_MIPS_CP0_EPC, | 511 | KVM_REG_MIPS_CP0_EPC, |
512 | KVM_REG_MIPS_CP0_PRID, | ||
507 | KVM_REG_MIPS_CP0_CONFIG, | 513 | KVM_REG_MIPS_CP0_CONFIG, |
508 | KVM_REG_MIPS_CP0_CONFIG1, | 514 | KVM_REG_MIPS_CP0_CONFIG1, |
509 | KVM_REG_MIPS_CP0_CONFIG2, | 515 | KVM_REG_MIPS_CP0_CONFIG2, |
510 | KVM_REG_MIPS_CP0_CONFIG3, | 516 | KVM_REG_MIPS_CP0_CONFIG3, |
517 | KVM_REG_MIPS_CP0_CONFIG4, | ||
518 | KVM_REG_MIPS_CP0_CONFIG5, | ||
511 | KVM_REG_MIPS_CP0_CONFIG7, | 519 | KVM_REG_MIPS_CP0_CONFIG7, |
512 | KVM_REG_MIPS_CP0_ERROREPC, | 520 | KVM_REG_MIPS_CP0_ERROREPC, |
513 | 521 | ||
@@ -520,10 +528,14 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, | |||
520 | const struct kvm_one_reg *reg) | 528 | const struct kvm_one_reg *reg) |
521 | { | 529 | { |
522 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 530 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
531 | struct mips_fpu_struct *fpu = &vcpu->arch.fpu; | ||
523 | int ret; | 532 | int ret; |
524 | s64 v; | 533 | s64 v; |
534 | s64 vs[2]; | ||
535 | unsigned int idx; | ||
525 | 536 | ||
526 | switch (reg->id) { | 537 | switch (reg->id) { |
538 | /* General purpose registers */ | ||
527 | case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31: | 539 | case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31: |
528 | v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; | 540 | v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; |
529 | break; | 541 | break; |
@@ -537,6 +549,67 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, | |||
537 | v = (long)vcpu->arch.pc; | 549 | v = (long)vcpu->arch.pc; |
538 | break; | 550 | break; |
539 | 551 | ||
552 | /* Floating point registers */ | ||
553 | case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31): | ||
554 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
555 | return -EINVAL; | ||
556 | idx = reg->id - KVM_REG_MIPS_FPR_32(0); | ||
557 | /* Odd singles in top of even double when FR=0 */ | ||
558 | if (kvm_read_c0_guest_status(cop0) & ST0_FR) | ||
559 | v = get_fpr32(&fpu->fpr[idx], 0); | ||
560 | else | ||
561 | v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1); | ||
562 | break; | ||
563 | case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31): | ||
564 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
565 | return -EINVAL; | ||
566 | idx = reg->id - KVM_REG_MIPS_FPR_64(0); | ||
567 | /* Can't access odd doubles in FR=0 mode */ | ||
568 | if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR)) | ||
569 | return -EINVAL; | ||
570 | v = get_fpr64(&fpu->fpr[idx], 0); | ||
571 | break; | ||
572 | case KVM_REG_MIPS_FCR_IR: | ||
573 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
574 | return -EINVAL; | ||
575 | v = boot_cpu_data.fpu_id; | ||
576 | break; | ||
577 | case KVM_REG_MIPS_FCR_CSR: | ||
578 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
579 | return -EINVAL; | ||
580 | v = fpu->fcr31; | ||
581 | break; | ||
582 | |||
583 | /* MIPS SIMD Architecture (MSA) registers */ | ||
584 | case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31): | ||
585 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | ||
586 | return -EINVAL; | ||
587 | /* Can't access MSA registers in FR=0 mode */ | ||
588 | if (!(kvm_read_c0_guest_status(cop0) & ST0_FR)) | ||
589 | return -EINVAL; | ||
590 | idx = reg->id - KVM_REG_MIPS_VEC_128(0); | ||
591 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
592 | /* least significant byte first */ | ||
593 | vs[0] = get_fpr64(&fpu->fpr[idx], 0); | ||
594 | vs[1] = get_fpr64(&fpu->fpr[idx], 1); | ||
595 | #else | ||
596 | /* most significant byte first */ | ||
597 | vs[0] = get_fpr64(&fpu->fpr[idx], 1); | ||
598 | vs[1] = get_fpr64(&fpu->fpr[idx], 0); | ||
599 | #endif | ||
600 | break; | ||
601 | case KVM_REG_MIPS_MSA_IR: | ||
602 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | ||
603 | return -EINVAL; | ||
604 | v = boot_cpu_data.msa_id; | ||
605 | break; | ||
606 | case KVM_REG_MIPS_MSA_CSR: | ||
607 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | ||
608 | return -EINVAL; | ||
609 | v = fpu->msacsr; | ||
610 | break; | ||
611 | |||
612 | /* Co-processor 0 registers */ | ||
540 | case KVM_REG_MIPS_CP0_INDEX: | 613 | case KVM_REG_MIPS_CP0_INDEX: |
541 | v = (long)kvm_read_c0_guest_index(cop0); | 614 | v = (long)kvm_read_c0_guest_index(cop0); |
542 | break; | 615 | break; |
@@ -573,8 +646,8 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, | |||
573 | case KVM_REG_MIPS_CP0_EPC: | 646 | case KVM_REG_MIPS_CP0_EPC: |
574 | v = (long)kvm_read_c0_guest_epc(cop0); | 647 | v = (long)kvm_read_c0_guest_epc(cop0); |
575 | break; | 648 | break; |
576 | case KVM_REG_MIPS_CP0_ERROREPC: | 649 | case KVM_REG_MIPS_CP0_PRID: |
577 | v = (long)kvm_read_c0_guest_errorepc(cop0); | 650 | v = (long)kvm_read_c0_guest_prid(cop0); |
578 | break; | 651 | break; |
579 | case KVM_REG_MIPS_CP0_CONFIG: | 652 | case KVM_REG_MIPS_CP0_CONFIG: |
580 | v = (long)kvm_read_c0_guest_config(cop0); | 653 | v = (long)kvm_read_c0_guest_config(cop0); |
@@ -588,9 +661,18 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, | |||
588 | case KVM_REG_MIPS_CP0_CONFIG3: | 661 | case KVM_REG_MIPS_CP0_CONFIG3: |
589 | v = (long)kvm_read_c0_guest_config3(cop0); | 662 | v = (long)kvm_read_c0_guest_config3(cop0); |
590 | break; | 663 | break; |
664 | case KVM_REG_MIPS_CP0_CONFIG4: | ||
665 | v = (long)kvm_read_c0_guest_config4(cop0); | ||
666 | break; | ||
667 | case KVM_REG_MIPS_CP0_CONFIG5: | ||
668 | v = (long)kvm_read_c0_guest_config5(cop0); | ||
669 | break; | ||
591 | case KVM_REG_MIPS_CP0_CONFIG7: | 670 | case KVM_REG_MIPS_CP0_CONFIG7: |
592 | v = (long)kvm_read_c0_guest_config7(cop0); | 671 | v = (long)kvm_read_c0_guest_config7(cop0); |
593 | break; | 672 | break; |
673 | case KVM_REG_MIPS_CP0_ERROREPC: | ||
674 | v = (long)kvm_read_c0_guest_errorepc(cop0); | ||
675 | break; | ||
594 | /* registers to be handled specially */ | 676 | /* registers to be handled specially */ |
595 | case KVM_REG_MIPS_CP0_COUNT: | 677 | case KVM_REG_MIPS_CP0_COUNT: |
596 | case KVM_REG_MIPS_COUNT_CTL: | 678 | case KVM_REG_MIPS_COUNT_CTL: |
@@ -612,6 +694,10 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, | |||
612 | u32 v32 = (u32)v; | 694 | u32 v32 = (u32)v; |
613 | 695 | ||
614 | return put_user(v32, uaddr32); | 696 | return put_user(v32, uaddr32); |
697 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { | ||
698 | void __user *uaddr = (void __user *)(long)reg->addr; | ||
699 | |||
700 | return copy_to_user(uaddr, vs, 16); | ||
615 | } else { | 701 | } else { |
616 | return -EINVAL; | 702 | return -EINVAL; |
617 | } | 703 | } |
@@ -621,7 +707,10 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, | |||
621 | const struct kvm_one_reg *reg) | 707 | const struct kvm_one_reg *reg) |
622 | { | 708 | { |
623 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 709 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
624 | u64 v; | 710 | struct mips_fpu_struct *fpu = &vcpu->arch.fpu; |
711 | s64 v; | ||
712 | s64 vs[2]; | ||
713 | unsigned int idx; | ||
625 | 714 | ||
626 | if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { | 715 | if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { |
627 | u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; | 716 | u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; |
@@ -635,11 +724,16 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, | |||
635 | if (get_user(v32, uaddr32) != 0) | 724 | if (get_user(v32, uaddr32) != 0) |
636 | return -EFAULT; | 725 | return -EFAULT; |
637 | v = (s64)v32; | 726 | v = (s64)v32; |
727 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { | ||
728 | void __user *uaddr = (void __user *)(long)reg->addr; | ||
729 | |||
730 | return copy_from_user(vs, uaddr, 16); | ||
638 | } else { | 731 | } else { |
639 | return -EINVAL; | 732 | return -EINVAL; |
640 | } | 733 | } |
641 | 734 | ||
642 | switch (reg->id) { | 735 | switch (reg->id) { |
736 | /* General purpose registers */ | ||
643 | case KVM_REG_MIPS_R0: | 737 | case KVM_REG_MIPS_R0: |
644 | /* Silently ignore requests to set $0 */ | 738 | /* Silently ignore requests to set $0 */ |
645 | break; | 739 | break; |
@@ -656,6 +750,64 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, | |||
656 | vcpu->arch.pc = v; | 750 | vcpu->arch.pc = v; |
657 | break; | 751 | break; |
658 | 752 | ||
753 | /* Floating point registers */ | ||
754 | case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31): | ||
755 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
756 | return -EINVAL; | ||
757 | idx = reg->id - KVM_REG_MIPS_FPR_32(0); | ||
758 | /* Odd singles in top of even double when FR=0 */ | ||
759 | if (kvm_read_c0_guest_status(cop0) & ST0_FR) | ||
760 | set_fpr32(&fpu->fpr[idx], 0, v); | ||
761 | else | ||
762 | set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v); | ||
763 | break; | ||
764 | case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31): | ||
765 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
766 | return -EINVAL; | ||
767 | idx = reg->id - KVM_REG_MIPS_FPR_64(0); | ||
768 | /* Can't access odd doubles in FR=0 mode */ | ||
769 | if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR)) | ||
770 | return -EINVAL; | ||
771 | set_fpr64(&fpu->fpr[idx], 0, v); | ||
772 | break; | ||
773 | case KVM_REG_MIPS_FCR_IR: | ||
774 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
775 | return -EINVAL; | ||
776 | /* Read-only */ | ||
777 | break; | ||
778 | case KVM_REG_MIPS_FCR_CSR: | ||
779 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
780 | return -EINVAL; | ||
781 | fpu->fcr31 = v; | ||
782 | break; | ||
783 | |||
784 | /* MIPS SIMD Architecture (MSA) registers */ | ||
785 | case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31): | ||
786 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | ||
787 | return -EINVAL; | ||
788 | idx = reg->id - KVM_REG_MIPS_VEC_128(0); | ||
789 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
790 | /* least significant byte first */ | ||
791 | set_fpr64(&fpu->fpr[idx], 0, vs[0]); | ||
792 | set_fpr64(&fpu->fpr[idx], 1, vs[1]); | ||
793 | #else | ||
794 | /* most significant byte first */ | ||
795 | set_fpr64(&fpu->fpr[idx], 1, vs[0]); | ||
796 | set_fpr64(&fpu->fpr[idx], 0, vs[1]); | ||
797 | #endif | ||
798 | break; | ||
799 | case KVM_REG_MIPS_MSA_IR: | ||
800 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | ||
801 | return -EINVAL; | ||
802 | /* Read-only */ | ||
803 | break; | ||
804 | case KVM_REG_MIPS_MSA_CSR: | ||
805 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | ||
806 | return -EINVAL; | ||
807 | fpu->msacsr = v; | ||
808 | break; | ||
809 | |||
810 | /* Co-processor 0 registers */ | ||
659 | case KVM_REG_MIPS_CP0_INDEX: | 811 | case KVM_REG_MIPS_CP0_INDEX: |
660 | kvm_write_c0_guest_index(cop0, v); | 812 | kvm_write_c0_guest_index(cop0, v); |
661 | break; | 813 | break; |
@@ -686,6 +838,9 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, | |||
686 | case KVM_REG_MIPS_CP0_EPC: | 838 | case KVM_REG_MIPS_CP0_EPC: |
687 | kvm_write_c0_guest_epc(cop0, v); | 839 | kvm_write_c0_guest_epc(cop0, v); |
688 | break; | 840 | break; |
841 | case KVM_REG_MIPS_CP0_PRID: | ||
842 | kvm_write_c0_guest_prid(cop0, v); | ||
843 | break; | ||
689 | case KVM_REG_MIPS_CP0_ERROREPC: | 844 | case KVM_REG_MIPS_CP0_ERROREPC: |
690 | kvm_write_c0_guest_errorepc(cop0, v); | 845 | kvm_write_c0_guest_errorepc(cop0, v); |
691 | break; | 846 | break; |
@@ -693,6 +848,12 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, | |||
693 | case KVM_REG_MIPS_CP0_COUNT: | 848 | case KVM_REG_MIPS_CP0_COUNT: |
694 | case KVM_REG_MIPS_CP0_COMPARE: | 849 | case KVM_REG_MIPS_CP0_COMPARE: |
695 | case KVM_REG_MIPS_CP0_CAUSE: | 850 | case KVM_REG_MIPS_CP0_CAUSE: |
851 | case KVM_REG_MIPS_CP0_CONFIG: | ||
852 | case KVM_REG_MIPS_CP0_CONFIG1: | ||
853 | case KVM_REG_MIPS_CP0_CONFIG2: | ||
854 | case KVM_REG_MIPS_CP0_CONFIG3: | ||
855 | case KVM_REG_MIPS_CP0_CONFIG4: | ||
856 | case KVM_REG_MIPS_CP0_CONFIG5: | ||
696 | case KVM_REG_MIPS_COUNT_CTL: | 857 | case KVM_REG_MIPS_COUNT_CTL: |
697 | case KVM_REG_MIPS_COUNT_RESUME: | 858 | case KVM_REG_MIPS_COUNT_RESUME: |
698 | case KVM_REG_MIPS_COUNT_HZ: | 859 | case KVM_REG_MIPS_COUNT_HZ: |
@@ -703,6 +864,33 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, | |||
703 | return 0; | 864 | return 0; |
704 | } | 865 | } |
705 | 866 | ||
867 | static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, | ||
868 | struct kvm_enable_cap *cap) | ||
869 | { | ||
870 | int r = 0; | ||
871 | |||
872 | if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap)) | ||
873 | return -EINVAL; | ||
874 | if (cap->flags) | ||
875 | return -EINVAL; | ||
876 | if (cap->args[0]) | ||
877 | return -EINVAL; | ||
878 | |||
879 | switch (cap->cap) { | ||
880 | case KVM_CAP_MIPS_FPU: | ||
881 | vcpu->arch.fpu_enabled = true; | ||
882 | break; | ||
883 | case KVM_CAP_MIPS_MSA: | ||
884 | vcpu->arch.msa_enabled = true; | ||
885 | break; | ||
886 | default: | ||
887 | r = -EINVAL; | ||
888 | break; | ||
889 | } | ||
890 | |||
891 | return r; | ||
892 | } | ||
893 | |||
706 | long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, | 894 | long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, |
707 | unsigned long arg) | 895 | unsigned long arg) |
708 | { | 896 | { |
@@ -760,6 +948,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, | |||
760 | r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); | 948 | r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); |
761 | break; | 949 | break; |
762 | } | 950 | } |
951 | case KVM_ENABLE_CAP: { | ||
952 | struct kvm_enable_cap cap; | ||
953 | |||
954 | r = -EFAULT; | ||
955 | if (copy_from_user(&cap, argp, sizeof(cap))) | ||
956 | goto out; | ||
957 | r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); | ||
958 | break; | ||
959 | } | ||
763 | default: | 960 | default: |
764 | r = -ENOIOCTLCMD; | 961 | r = -ENOIOCTLCMD; |
765 | } | 962 | } |
@@ -868,11 +1065,30 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
868 | 1065 | ||
869 | switch (ext) { | 1066 | switch (ext) { |
870 | case KVM_CAP_ONE_REG: | 1067 | case KVM_CAP_ONE_REG: |
1068 | case KVM_CAP_ENABLE_CAP: | ||
871 | r = 1; | 1069 | r = 1; |
872 | break; | 1070 | break; |
873 | case KVM_CAP_COALESCED_MMIO: | 1071 | case KVM_CAP_COALESCED_MMIO: |
874 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; | 1072 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; |
875 | break; | 1073 | break; |
1074 | case KVM_CAP_MIPS_FPU: | ||
1075 | r = !!cpu_has_fpu; | ||
1076 | break; | ||
1077 | case KVM_CAP_MIPS_MSA: | ||
1078 | /* | ||
1079 | * We don't support MSA vector partitioning yet: | ||
1080 | * 1) It would require explicit support which can't be tested | ||
1081 | * yet due to lack of support in current hardware. | ||
1082 | * 2) It extends the state that would need to be saved/restored | ||
1083 | * by e.g. QEMU for migration. | ||
1084 | * | ||
1085 | * When vector partitioning hardware becomes available, support | ||
1086 | * could be added by requiring a flag when enabling | ||
1087 | * KVM_CAP_MIPS_MSA capability to indicate that userland knows | ||
1088 | * to save/restore the appropriate extra state. | ||
1089 | */ | ||
1090 | r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF); | ||
1091 | break; | ||
876 | default: | 1092 | default: |
877 | r = 0; | 1093 | r = 0; |
878 | break; | 1094 | break; |
@@ -1119,6 +1335,30 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
1119 | ret = kvm_mips_callbacks->handle_break(vcpu); | 1335 | ret = kvm_mips_callbacks->handle_break(vcpu); |
1120 | break; | 1336 | break; |
1121 | 1337 | ||
1338 | case T_TRAP: | ||
1339 | ++vcpu->stat.trap_inst_exits; | ||
1340 | trace_kvm_exit(vcpu, TRAP_INST_EXITS); | ||
1341 | ret = kvm_mips_callbacks->handle_trap(vcpu); | ||
1342 | break; | ||
1343 | |||
1344 | case T_MSAFPE: | ||
1345 | ++vcpu->stat.msa_fpe_exits; | ||
1346 | trace_kvm_exit(vcpu, MSA_FPE_EXITS); | ||
1347 | ret = kvm_mips_callbacks->handle_msa_fpe(vcpu); | ||
1348 | break; | ||
1349 | |||
1350 | case T_FPE: | ||
1351 | ++vcpu->stat.fpe_exits; | ||
1352 | trace_kvm_exit(vcpu, FPE_EXITS); | ||
1353 | ret = kvm_mips_callbacks->handle_fpe(vcpu); | ||
1354 | break; | ||
1355 | |||
1356 | case T_MSADIS: | ||
1357 | ++vcpu->stat.msa_disabled_exits; | ||
1358 | trace_kvm_exit(vcpu, MSA_DISABLED_EXITS); | ||
1359 | ret = kvm_mips_callbacks->handle_msa_disabled(vcpu); | ||
1360 | break; | ||
1361 | |||
1122 | default: | 1362 | default: |
1123 | kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", | 1363 | kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", |
1124 | exccode, opc, kvm_get_inst(opc, vcpu), badvaddr, | 1364 | exccode, opc, kvm_get_inst(opc, vcpu), badvaddr, |
@@ -1146,12 +1386,233 @@ skip_emul: | |||
1146 | } | 1386 | } |
1147 | } | 1387 | } |
1148 | 1388 | ||
1389 | if (ret == RESUME_GUEST) { | ||
1390 | /* | ||
1391 | * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context | ||
1392 | * is live), restore FCR31 / MSACSR. | ||
1393 | * | ||
1394 | * This should be before returning to the guest exception | ||
1395 | * vector, as it may well cause an [MSA] FP exception if there | ||
1396 | * are pending exception bits unmasked. (see | ||
1397 | * kvm_mips_csr_die_notifier() for how that is handled). | ||
1398 | */ | ||
1399 | if (kvm_mips_guest_has_fpu(&vcpu->arch) && | ||
1400 | read_c0_status() & ST0_CU1) | ||
1401 | __kvm_restore_fcsr(&vcpu->arch); | ||
1402 | |||
1403 | if (kvm_mips_guest_has_msa(&vcpu->arch) && | ||
1404 | read_c0_config5() & MIPS_CONF5_MSAEN) | ||
1405 | __kvm_restore_msacsr(&vcpu->arch); | ||
1406 | } | ||
1407 | |||
1149 | /* Disable HTW before returning to guest or host */ | 1408 | /* Disable HTW before returning to guest or host */ |
1150 | htw_stop(); | 1409 | htw_stop(); |
1151 | 1410 | ||
1152 | return ret; | 1411 | return ret; |
1153 | } | 1412 | } |
1154 | 1413 | ||
1414 | /* Enable FPU for guest and restore context */ | ||
1415 | void kvm_own_fpu(struct kvm_vcpu *vcpu) | ||
1416 | { | ||
1417 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
1418 | unsigned int sr, cfg5; | ||
1419 | |||
1420 | preempt_disable(); | ||
1421 | |||
1422 | sr = kvm_read_c0_guest_status(cop0); | ||
1423 | |||
1424 | /* | ||
1425 | * If MSA state is already live, it is undefined how it interacts with | ||
1426 | * FR=0 FPU state, and we don't want to hit reserved instruction | ||
1427 | * exceptions trying to save the MSA state later when CU=1 && FR=1, so | ||
1428 | * play it safe and save it first. | ||
1429 | * | ||
1430 | * In theory we shouldn't ever hit this case since kvm_lose_fpu() should | ||
1431 | * get called when guest CU1 is set, however we can't trust the guest | ||
1432 | * not to clobber the status register directly via the commpage. | ||
1433 | */ | ||
1434 | if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) && | ||
1435 | vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) | ||
1436 | kvm_lose_fpu(vcpu); | ||
1437 | |||
1438 | /* | ||
1439 | * Enable FPU for guest | ||
1440 | * We set FR and FRE according to guest context | ||
1441 | */ | ||
1442 | change_c0_status(ST0_CU1 | ST0_FR, sr); | ||
1443 | if (cpu_has_fre) { | ||
1444 | cfg5 = kvm_read_c0_guest_config5(cop0); | ||
1445 | change_c0_config5(MIPS_CONF5_FRE, cfg5); | ||
1446 | } | ||
1447 | enable_fpu_hazard(); | ||
1448 | |||
1449 | /* If guest FPU state not active, restore it now */ | ||
1450 | if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) { | ||
1451 | __kvm_restore_fpu(&vcpu->arch); | ||
1452 | vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU; | ||
1453 | } | ||
1454 | |||
1455 | preempt_enable(); | ||
1456 | } | ||
1457 | |||
1458 | #ifdef CONFIG_CPU_HAS_MSA | ||
1459 | /* Enable MSA for guest and restore context */ | ||
1460 | void kvm_own_msa(struct kvm_vcpu *vcpu) | ||
1461 | { | ||
1462 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
1463 | unsigned int sr, cfg5; | ||
1464 | |||
1465 | preempt_disable(); | ||
1466 | |||
1467 | /* | ||
1468 | * Enable FPU if enabled in guest, since we're restoring FPU context | ||
1469 | * anyway. We set FR and FRE according to guest context. | ||
1470 | */ | ||
1471 | if (kvm_mips_guest_has_fpu(&vcpu->arch)) { | ||
1472 | sr = kvm_read_c0_guest_status(cop0); | ||
1473 | |||
1474 | /* | ||
1475 | * If FR=0 FPU state is already live, it is undefined how it | ||
1476 | * interacts with MSA state, so play it safe and save it first. | ||
1477 | */ | ||
1478 | if (!(sr & ST0_FR) && | ||
1479 | (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | | ||
1480 | KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU) | ||
1481 | kvm_lose_fpu(vcpu); | ||
1482 | |||
1483 | change_c0_status(ST0_CU1 | ST0_FR, sr); | ||
1484 | if (sr & ST0_CU1 && cpu_has_fre) { | ||
1485 | cfg5 = kvm_read_c0_guest_config5(cop0); | ||
1486 | change_c0_config5(MIPS_CONF5_FRE, cfg5); | ||
1487 | } | ||
1488 | } | ||
1489 | |||
1490 | /* Enable MSA for guest */ | ||
1491 | set_c0_config5(MIPS_CONF5_MSAEN); | ||
1492 | enable_fpu_hazard(); | ||
1493 | |||
1494 | switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) { | ||
1495 | case KVM_MIPS_FPU_FPU: | ||
1496 | /* | ||
1497 | * Guest FPU state already loaded, only restore upper MSA state | ||
1498 | */ | ||
1499 | __kvm_restore_msa_upper(&vcpu->arch); | ||
1500 | vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA; | ||
1501 | break; | ||
1502 | case 0: | ||
1503 | /* Neither FPU nor MSA already active, restore full MSA state */ | ||
1504 | __kvm_restore_msa(&vcpu->arch); | ||
1505 | vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA; | ||
1506 | if (kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
1507 | vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU; | ||
1508 | break; | ||
1509 | default: | ||
1510 | break; | ||
1511 | } | ||
1512 | |||
1513 | preempt_enable(); | ||
1514 | } | ||
1515 | #endif | ||
1516 | |||
1517 | /* Drop FPU & MSA without saving it */ | ||
1518 | void kvm_drop_fpu(struct kvm_vcpu *vcpu) | ||
1519 | { | ||
1520 | preempt_disable(); | ||
1521 | if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) { | ||
1522 | disable_msa(); | ||
1523 | vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA; | ||
1524 | } | ||
1525 | if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { | ||
1526 | clear_c0_status(ST0_CU1 | ST0_FR); | ||
1527 | vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU; | ||
1528 | } | ||
1529 | preempt_enable(); | ||
1530 | } | ||
1531 | |||
1532 | /* Save and disable FPU & MSA */ | ||
1533 | void kvm_lose_fpu(struct kvm_vcpu *vcpu) | ||
1534 | { | ||
1535 | /* | ||
1536 | * FPU & MSA get disabled in root context (hardware) when it is disabled | ||
1537 | * in guest context (software), but the register state in the hardware | ||
1538 | * may still be in use. This is why we explicitly re-enable the hardware | ||
1539 | * before saving. | ||
1540 | */ | ||
1541 | |||
1542 | preempt_disable(); | ||
1543 | if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) { | ||
1544 | set_c0_config5(MIPS_CONF5_MSAEN); | ||
1545 | enable_fpu_hazard(); | ||
1546 | |||
1547 | __kvm_save_msa(&vcpu->arch); | ||
1548 | |||
1549 | /* Disable MSA & FPU */ | ||
1550 | disable_msa(); | ||
1551 | if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) | ||
1552 | clear_c0_status(ST0_CU1 | ST0_FR); | ||
1553 | vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA); | ||
1554 | } else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { | ||
1555 | set_c0_status(ST0_CU1); | ||
1556 | enable_fpu_hazard(); | ||
1557 | |||
1558 | __kvm_save_fpu(&vcpu->arch); | ||
1559 | vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU; | ||
1560 | |||
1561 | /* Disable FPU */ | ||
1562 | clear_c0_status(ST0_CU1 | ST0_FR); | ||
1563 | } | ||
1564 | preempt_enable(); | ||
1565 | } | ||
1566 | |||
1567 | /* | ||
1568 | * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are | ||
1569 | * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP | ||
1570 | * exception if cause bits are set in the value being written. | ||
1571 | */ | ||
1572 | static int kvm_mips_csr_die_notify(struct notifier_block *self, | ||
1573 | unsigned long cmd, void *ptr) | ||
1574 | { | ||
1575 | struct die_args *args = (struct die_args *)ptr; | ||
1576 | struct pt_regs *regs = args->regs; | ||
1577 | unsigned long pc; | ||
1578 | |||
1579 | /* Only interested in FPE and MSAFPE */ | ||
1580 | if (cmd != DIE_FP && cmd != DIE_MSAFP) | ||
1581 | return NOTIFY_DONE; | ||
1582 | |||
1583 | /* Return immediately if guest context isn't active */ | ||
1584 | if (!(current->flags & PF_VCPU)) | ||
1585 | return NOTIFY_DONE; | ||
1586 | |||
1587 | /* Should never get here from user mode */ | ||
1588 | BUG_ON(user_mode(regs)); | ||
1589 | |||
1590 | pc = instruction_pointer(regs); | ||
1591 | switch (cmd) { | ||
1592 | case DIE_FP: | ||
1593 | /* match 2nd instruction in __kvm_restore_fcsr */ | ||
1594 | if (pc != (unsigned long)&__kvm_restore_fcsr + 4) | ||
1595 | return NOTIFY_DONE; | ||
1596 | break; | ||
1597 | case DIE_MSAFP: | ||
1598 | /* match 2nd/3rd instruction in __kvm_restore_msacsr */ | ||
1599 | if (!cpu_has_msa || | ||
1600 | pc < (unsigned long)&__kvm_restore_msacsr + 4 || | ||
1601 | pc > (unsigned long)&__kvm_restore_msacsr + 8) | ||
1602 | return NOTIFY_DONE; | ||
1603 | break; | ||
1604 | } | ||
1605 | |||
1606 | /* Move PC forward a little and continue executing */ | ||
1607 | instruction_pointer(regs) += 4; | ||
1608 | |||
1609 | return NOTIFY_STOP; | ||
1610 | } | ||
1611 | |||
1612 | static struct notifier_block kvm_mips_csr_die_notifier = { | ||
1613 | .notifier_call = kvm_mips_csr_die_notify, | ||
1614 | }; | ||
1615 | |||
1155 | int __init kvm_mips_init(void) | 1616 | int __init kvm_mips_init(void) |
1156 | { | 1617 | { |
1157 | int ret; | 1618 | int ret; |
@@ -1161,6 +1622,8 @@ int __init kvm_mips_init(void) | |||
1161 | if (ret) | 1622 | if (ret) |
1162 | return ret; | 1623 | return ret; |
1163 | 1624 | ||
1625 | register_die_notifier(&kvm_mips_csr_die_notifier); | ||
1626 | |||
1164 | /* | 1627 | /* |
1165 | * On MIPS, kernel modules are executed from "mapped space", which | 1628 | * On MIPS, kernel modules are executed from "mapped space", which |
1166 | * requires TLBs. The TLB handling code is statically linked with | 1629 | * requires TLBs. The TLB handling code is statically linked with |
@@ -1173,7 +1636,6 @@ int __init kvm_mips_init(void) | |||
1173 | kvm_mips_release_pfn_clean = kvm_release_pfn_clean; | 1636 | kvm_mips_release_pfn_clean = kvm_release_pfn_clean; |
1174 | kvm_mips_is_error_pfn = is_error_pfn; | 1637 | kvm_mips_is_error_pfn = is_error_pfn; |
1175 | 1638 | ||
1176 | pr_info("KVM/MIPS Initialized\n"); | ||
1177 | return 0; | 1639 | return 0; |
1178 | } | 1640 | } |
1179 | 1641 | ||
@@ -1185,7 +1647,7 @@ void __exit kvm_mips_exit(void) | |||
1185 | kvm_mips_release_pfn_clean = NULL; | 1647 | kvm_mips_release_pfn_clean = NULL; |
1186 | kvm_mips_is_error_pfn = NULL; | 1648 | kvm_mips_is_error_pfn = NULL; |
1187 | 1649 | ||
1188 | pr_info("KVM/MIPS unloaded\n"); | 1650 | unregister_die_notifier(&kvm_mips_csr_die_notifier); |
1189 | } | 1651 | } |
1190 | 1652 | ||
1191 | module_init(kvm_mips_init); | 1653 | module_init(kvm_mips_init); |
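The KVM_ENABLE_CAP vcpu ioctl wired up above is how userspace opts a guest into the new FPU/MSA state handling, after first probing the capability with KVM_CHECK_EXTENSION. A minimal userspace sketch, assuming vcpu_fd is a vcpu file descriptor obtained from KVM_CREATE_VCPU (enable_guest_fpu is an illustrative helper name, not part of any API):

  #include <linux/kvm.h>
  #include <sys/ioctl.h>
  #include <stdio.h>

  static int enable_guest_fpu(int vcpu_fd)
  {
          struct kvm_enable_cap cap = {
                  .cap = KVM_CAP_MIPS_FPU,
                  /* flags and args[0] must stay zero, see the checks above */
          };

          if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0) {
                  perror("KVM_ENABLE_CAP(KVM_CAP_MIPS_FPU)");
                  return -1;
          }
          return 0;
  }

Enabling MSA works the same way with KVM_CAP_MIPS_MSA; as the kvm_vm_ioctl_check_extension() hunk above notes, the host refuses it when the MSA implementation supports vector partitioning (MSA_IR_WRPF).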
diff --git a/arch/mips/kvm/msa.S b/arch/mips/kvm/msa.S new file mode 100644 index 000000000000..d02f0c6cc2cc --- /dev/null +++ b/arch/mips/kvm/msa.S | |||
@@ -0,0 +1,161 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * MIPS SIMD Architecture (MSA) context handling code for KVM. | ||
7 | * | ||
8 | * Copyright (C) 2015 Imagination Technologies Ltd. | ||
9 | */ | ||
10 | |||
11 | #include <asm/asm.h> | ||
12 | #include <asm/asm-offsets.h> | ||
13 | #include <asm/asmmacro.h> | ||
14 | #include <asm/regdef.h> | ||
15 | |||
16 | .set noreorder | ||
17 | .set noat | ||
18 | |||
19 | LEAF(__kvm_save_msa) | ||
20 | st_d 0, VCPU_FPR0, a0 | ||
21 | st_d 1, VCPU_FPR1, a0 | ||
22 | st_d 2, VCPU_FPR2, a0 | ||
23 | st_d 3, VCPU_FPR3, a0 | ||
24 | st_d 4, VCPU_FPR4, a0 | ||
25 | st_d 5, VCPU_FPR5, a0 | ||
26 | st_d 6, VCPU_FPR6, a0 | ||
27 | st_d 7, VCPU_FPR7, a0 | ||
28 | st_d 8, VCPU_FPR8, a0 | ||
29 | st_d 9, VCPU_FPR9, a0 | ||
30 | st_d 10, VCPU_FPR10, a0 | ||
31 | st_d 11, VCPU_FPR11, a0 | ||
32 | st_d 12, VCPU_FPR12, a0 | ||
33 | st_d 13, VCPU_FPR13, a0 | ||
34 | st_d 14, VCPU_FPR14, a0 | ||
35 | st_d 15, VCPU_FPR15, a0 | ||
36 | st_d 16, VCPU_FPR16, a0 | ||
37 | st_d 17, VCPU_FPR17, a0 | ||
38 | st_d 18, VCPU_FPR18, a0 | ||
39 | st_d 19, VCPU_FPR19, a0 | ||
40 | st_d 20, VCPU_FPR20, a0 | ||
41 | st_d 21, VCPU_FPR21, a0 | ||
42 | st_d 22, VCPU_FPR22, a0 | ||
43 | st_d 23, VCPU_FPR23, a0 | ||
44 | st_d 24, VCPU_FPR24, a0 | ||
45 | st_d 25, VCPU_FPR25, a0 | ||
46 | st_d 26, VCPU_FPR26, a0 | ||
47 | st_d 27, VCPU_FPR27, a0 | ||
48 | st_d 28, VCPU_FPR28, a0 | ||
49 | st_d 29, VCPU_FPR29, a0 | ||
50 | st_d 30, VCPU_FPR30, a0 | ||
51 | st_d 31, VCPU_FPR31, a0 | ||
52 | jr ra | ||
53 | nop | ||
54 | END(__kvm_save_msa) | ||
55 | |||
56 | LEAF(__kvm_restore_msa) | ||
57 | ld_d 0, VCPU_FPR0, a0 | ||
58 | ld_d 1, VCPU_FPR1, a0 | ||
59 | ld_d 2, VCPU_FPR2, a0 | ||
60 | ld_d 3, VCPU_FPR3, a0 | ||
61 | ld_d 4, VCPU_FPR4, a0 | ||
62 | ld_d 5, VCPU_FPR5, a0 | ||
63 | ld_d 6, VCPU_FPR6, a0 | ||
64 | ld_d 7, VCPU_FPR7, a0 | ||
65 | ld_d 8, VCPU_FPR8, a0 | ||
66 | ld_d 9, VCPU_FPR9, a0 | ||
67 | ld_d 10, VCPU_FPR10, a0 | ||
68 | ld_d 11, VCPU_FPR11, a0 | ||
69 | ld_d 12, VCPU_FPR12, a0 | ||
70 | ld_d 13, VCPU_FPR13, a0 | ||
71 | ld_d 14, VCPU_FPR14, a0 | ||
72 | ld_d 15, VCPU_FPR15, a0 | ||
73 | ld_d 16, VCPU_FPR16, a0 | ||
74 | ld_d 17, VCPU_FPR17, a0 | ||
75 | ld_d 18, VCPU_FPR18, a0 | ||
76 | ld_d 19, VCPU_FPR19, a0 | ||
77 | ld_d 20, VCPU_FPR20, a0 | ||
78 | ld_d 21, VCPU_FPR21, a0 | ||
79 | ld_d 22, VCPU_FPR22, a0 | ||
80 | ld_d 23, VCPU_FPR23, a0 | ||
81 | ld_d 24, VCPU_FPR24, a0 | ||
82 | ld_d 25, VCPU_FPR25, a0 | ||
83 | ld_d 26, VCPU_FPR26, a0 | ||
84 | ld_d 27, VCPU_FPR27, a0 | ||
85 | ld_d 28, VCPU_FPR28, a0 | ||
86 | ld_d 29, VCPU_FPR29, a0 | ||
87 | ld_d 30, VCPU_FPR30, a0 | ||
88 | ld_d 31, VCPU_FPR31, a0 | ||
89 | jr ra | ||
90 | nop | ||
91 | END(__kvm_restore_msa) | ||
92 | |||
93 | .macro kvm_restore_msa_upper wr, off, base | ||
94 | .set push | ||
95 | .set noat | ||
96 | #ifdef CONFIG_64BIT | ||
97 | ld $1, \off(\base) | ||
98 | insert_d \wr, 1 | ||
99 | #elif defined(CONFIG_CPU_LITTLE_ENDIAN) | ||
100 | lw $1, \off(\base) | ||
101 | insert_w \wr, 2 | ||
102 | lw $1, (\off+4)(\base) | ||
103 | insert_w \wr, 3 | ||
104 | #else /* CONFIG_CPU_BIG_ENDIAN */ | ||
105 | lw $1, (\off+4)(\base) | ||
106 | insert_w \wr, 2 | ||
107 | lw $1, \off(\base) | ||
108 | insert_w \wr, 3 | ||
109 | #endif | ||
110 | .set pop | ||
111 | .endm | ||
112 | |||
113 | LEAF(__kvm_restore_msa_upper) | ||
114 | kvm_restore_msa_upper 0, VCPU_FPR0 +8, a0 | ||
115 | kvm_restore_msa_upper 1, VCPU_FPR1 +8, a0 | ||
116 | kvm_restore_msa_upper 2, VCPU_FPR2 +8, a0 | ||
117 | kvm_restore_msa_upper 3, VCPU_FPR3 +8, a0 | ||
118 | kvm_restore_msa_upper 4, VCPU_FPR4 +8, a0 | ||
119 | kvm_restore_msa_upper 5, VCPU_FPR5 +8, a0 | ||
120 | kvm_restore_msa_upper 6, VCPU_FPR6 +8, a0 | ||
121 | kvm_restore_msa_upper 7, VCPU_FPR7 +8, a0 | ||
122 | kvm_restore_msa_upper 8, VCPU_FPR8 +8, a0 | ||
123 | kvm_restore_msa_upper 9, VCPU_FPR9 +8, a0 | ||
124 | kvm_restore_msa_upper 10, VCPU_FPR10+8, a0 | ||
125 | kvm_restore_msa_upper 11, VCPU_FPR11+8, a0 | ||
126 | kvm_restore_msa_upper 12, VCPU_FPR12+8, a0 | ||
127 | kvm_restore_msa_upper 13, VCPU_FPR13+8, a0 | ||
128 | kvm_restore_msa_upper 14, VCPU_FPR14+8, a0 | ||
129 | kvm_restore_msa_upper 15, VCPU_FPR15+8, a0 | ||
130 | kvm_restore_msa_upper 16, VCPU_FPR16+8, a0 | ||
131 | kvm_restore_msa_upper 17, VCPU_FPR17+8, a0 | ||
132 | kvm_restore_msa_upper 18, VCPU_FPR18+8, a0 | ||
133 | kvm_restore_msa_upper 19, VCPU_FPR19+8, a0 | ||
134 | kvm_restore_msa_upper 20, VCPU_FPR20+8, a0 | ||
135 | kvm_restore_msa_upper 21, VCPU_FPR21+8, a0 | ||
136 | kvm_restore_msa_upper 22, VCPU_FPR22+8, a0 | ||
137 | kvm_restore_msa_upper 23, VCPU_FPR23+8, a0 | ||
138 | kvm_restore_msa_upper 24, VCPU_FPR24+8, a0 | ||
139 | kvm_restore_msa_upper 25, VCPU_FPR25+8, a0 | ||
140 | kvm_restore_msa_upper 26, VCPU_FPR26+8, a0 | ||
141 | kvm_restore_msa_upper 27, VCPU_FPR27+8, a0 | ||
142 | kvm_restore_msa_upper 28, VCPU_FPR28+8, a0 | ||
143 | kvm_restore_msa_upper 29, VCPU_FPR29+8, a0 | ||
144 | kvm_restore_msa_upper 30, VCPU_FPR30+8, a0 | ||
145 | kvm_restore_msa_upper 31, VCPU_FPR31+8, a0 | ||
146 | jr ra | ||
147 | nop | ||
148 | END(__kvm_restore_msa_upper) | ||
149 | |||
150 | LEAF(__kvm_restore_msacsr) | ||
151 | lw t0, VCPU_MSA_CSR(a0) | ||
152 | /* | ||
153 | * The ctcmsa must stay at this offset in __kvm_restore_msacsr. | ||
154 | * See kvm_mips_csr_die_notify() which handles t0 containing a value | ||
155 | * which triggers an MSA FP Exception, which must be stepped over and | ||
156 | * ignored since the set cause bits must remain there for the guest. | ||
157 | */ | ||
158 | _ctcmsa MSA_CSR, t0 | ||
159 | jr ra | ||
160 | nop | ||
161 | END(__kvm_restore_msacsr) | ||
diff --git a/arch/mips/kvm/stats.c b/arch/mips/kvm/stats.c index a74d6024c5ad..888bb67070ac 100644 --- a/arch/mips/kvm/stats.c +++ b/arch/mips/kvm/stats.c | |||
@@ -25,6 +25,10 @@ char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = { | |||
25 | "System Call", | 25 | "System Call", |
26 | "Reserved Inst", | 26 | "Reserved Inst", |
27 | "Break Inst", | 27 | "Break Inst", |
28 | "Trap Inst", | ||
29 | "MSA FPE", | ||
30 | "FPE", | ||
31 | "MSA Disabled", | ||
28 | "D-Cache Flushes", | 32 | "D-Cache Flushes", |
29 | }; | 33 | }; |
30 | 34 | ||
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c index b6beb0e07b1b..aed0ac2a4972 100644 --- a/arch/mips/kvm/tlb.c +++ b/arch/mips/kvm/tlb.c | |||
@@ -733,6 +733,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
733 | } | 733 | } |
734 | } | 734 | } |
735 | 735 | ||
736 | /* restore guest state to registers */ | ||
737 | kvm_mips_callbacks->vcpu_set_regs(vcpu); | ||
738 | |||
736 | local_irq_restore(flags); | 739 | local_irq_restore(flags); |
737 | 740 | ||
738 | } | 741 | } |
@@ -751,6 +754,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | |||
751 | vcpu->arch.preempt_entryhi = read_c0_entryhi(); | 754 | vcpu->arch.preempt_entryhi = read_c0_entryhi(); |
752 | vcpu->arch.last_sched_cpu = cpu; | 755 | vcpu->arch.last_sched_cpu = cpu; |
753 | 756 | ||
757 | /* save guest state in registers */ | ||
758 | kvm_mips_callbacks->vcpu_get_regs(vcpu); | ||
759 | |||
754 | if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) & | 760 | if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) & |
755 | ASID_VERSION_MASK)) { | 761 | ASID_VERSION_MASK)) { |
756 | kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__, | 762 | kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__, |
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c index fd7257b70e65..d836ed5b0bc7 100644 --- a/arch/mips/kvm/trap_emul.c +++ b/arch/mips/kvm/trap_emul.c | |||
@@ -39,16 +39,30 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) | |||
39 | 39 | ||
40 | static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) | 40 | static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) |
41 | { | 41 | { |
42 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
42 | struct kvm_run *run = vcpu->run; | 43 | struct kvm_run *run = vcpu->run; |
43 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | 44 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; |
44 | unsigned long cause = vcpu->arch.host_cp0_cause; | 45 | unsigned long cause = vcpu->arch.host_cp0_cause; |
45 | enum emulation_result er = EMULATE_DONE; | 46 | enum emulation_result er = EMULATE_DONE; |
46 | int ret = RESUME_GUEST; | 47 | int ret = RESUME_GUEST; |
47 | 48 | ||
48 | if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) | 49 | if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) { |
49 | er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); | 50 | /* FPU Unusable */ |
50 | else | 51 | if (!kvm_mips_guest_has_fpu(&vcpu->arch) || |
52 | (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) { | ||
53 | /* | ||
54 | * Unusable/no FPU in guest: | ||
55 | * deliver guest COP1 Unusable Exception | ||
56 | */ | ||
57 | er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); | ||
58 | } else { | ||
59 | /* Restore FPU state */ | ||
60 | kvm_own_fpu(vcpu); | ||
61 | er = EMULATE_DONE; | ||
62 | } | ||
63 | } else { | ||
51 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); | 64 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); |
65 | } | ||
52 | 66 | ||
53 | switch (er) { | 67 | switch (er) { |
54 | case EMULATE_DONE: | 68 | case EMULATE_DONE: |
@@ -330,6 +344,107 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) | |||
330 | return ret; | 344 | return ret; |
331 | } | 345 | } |
332 | 346 | ||
347 | static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu) | ||
348 | { | ||
349 | struct kvm_run *run = vcpu->run; | ||
350 | uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; | ||
351 | unsigned long cause = vcpu->arch.host_cp0_cause; | ||
352 | enum emulation_result er = EMULATE_DONE; | ||
353 | int ret = RESUME_GUEST; | ||
354 | |||
355 | er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu); | ||
356 | if (er == EMULATE_DONE) { | ||
357 | ret = RESUME_GUEST; | ||
358 | } else { | ||
359 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
360 | ret = RESUME_HOST; | ||
361 | } | ||
362 | return ret; | ||
363 | } | ||
364 | |||
365 | static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu) | ||
366 | { | ||
367 | struct kvm_run *run = vcpu->run; | ||
368 | uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; | ||
369 | unsigned long cause = vcpu->arch.host_cp0_cause; | ||
370 | enum emulation_result er = EMULATE_DONE; | ||
371 | int ret = RESUME_GUEST; | ||
372 | |||
373 | er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu); | ||
374 | if (er == EMULATE_DONE) { | ||
375 | ret = RESUME_GUEST; | ||
376 | } else { | ||
377 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
378 | ret = RESUME_HOST; | ||
379 | } | ||
380 | return ret; | ||
381 | } | ||
382 | |||
383 | static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu) | ||
384 | { | ||
385 | struct kvm_run *run = vcpu->run; | ||
386 | uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; | ||
387 | unsigned long cause = vcpu->arch.host_cp0_cause; | ||
388 | enum emulation_result er = EMULATE_DONE; | ||
389 | int ret = RESUME_GUEST; | ||
390 | |||
391 | er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu); | ||
392 | if (er == EMULATE_DONE) { | ||
393 | ret = RESUME_GUEST; | ||
394 | } else { | ||
395 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
396 | ret = RESUME_HOST; | ||
397 | } | ||
398 | return ret; | ||
399 | } | ||
400 | |||
401 | /** | ||
402 | * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root. | ||
403 | * @vcpu: Virtual CPU context. | ||
404 | * | ||
405 | * Handle when the guest attempts to use MSA when it is disabled. | ||
406 | */ | ||
407 | static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu) | ||
408 | { | ||
409 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
410 | struct kvm_run *run = vcpu->run; | ||
411 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | ||
412 | unsigned long cause = vcpu->arch.host_cp0_cause; | ||
413 | enum emulation_result er = EMULATE_DONE; | ||
414 | int ret = RESUME_GUEST; | ||
415 | |||
416 | if (!kvm_mips_guest_has_msa(&vcpu->arch) || | ||
417 | (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) { | ||
418 | /* | ||
419 | * No MSA in guest, or FPU enabled and not in FR=1 mode, | ||
420 | * guest reserved instruction exception | ||
421 | */ | ||
422 | er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); | ||
423 | } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) { | ||
424 | /* MSA disabled by guest, guest MSA disabled exception */ | ||
425 | er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu); | ||
426 | } else { | ||
427 | /* Restore MSA/FPU state */ | ||
428 | kvm_own_msa(vcpu); | ||
429 | er = EMULATE_DONE; | ||
430 | } | ||
431 | |||
432 | switch (er) { | ||
433 | case EMULATE_DONE: | ||
434 | ret = RESUME_GUEST; | ||
435 | break; | ||
436 | |||
437 | case EMULATE_FAIL: | ||
438 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
439 | ret = RESUME_HOST; | ||
440 | break; | ||
441 | |||
442 | default: | ||
443 | BUG(); | ||
444 | } | ||
445 | return ret; | ||
446 | } | ||
447 | |||
333 | static int kvm_trap_emul_vm_init(struct kvm *kvm) | 448 | static int kvm_trap_emul_vm_init(struct kvm *kvm) |
334 | { | 449 | { |
335 | return 0; | 450 | return 0; |
@@ -351,8 +466,9 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) | |||
351 | * guest will come up as expected, for now we simulate a MIPS 24kc | 466 | * guest will come up as expected, for now we simulate a MIPS 24kc |
352 | */ | 467 | */ |
353 | kvm_write_c0_guest_prid(cop0, 0x00019300); | 468 | kvm_write_c0_guest_prid(cop0, 0x00019300); |
354 | kvm_write_c0_guest_config(cop0, | 469 | /* Have config1, Cacheable, noncoherent, write-back, write allocate */ |
355 | MIPS_CONFIG0 | (0x1 << CP0C0_AR) | | 470 | kvm_write_c0_guest_config(cop0, MIPS_CONF_M | (0x3 << CP0C0_K0) | |
471 | (0x1 << CP0C0_AR) | | ||
356 | (MMU_TYPE_R4000 << CP0C0_MT)); | 472 | (MMU_TYPE_R4000 << CP0C0_MT)); |
357 | 473 | ||
358 | /* Read the cache characteristics from the host Config1 Register */ | 474 | /* Read the cache characteristics from the host Config1 Register */ |
@@ -368,10 +484,18 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) | |||
368 | (1 << CP0C1_WR) | (1 << CP0C1_CA)); | 484 | (1 << CP0C1_WR) | (1 << CP0C1_CA)); |
369 | kvm_write_c0_guest_config1(cop0, config1); | 485 | kvm_write_c0_guest_config1(cop0, config1); |
370 | 486 | ||
371 | kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2); | 487 | /* Have config3, no tertiary/secondary caches implemented */ |
372 | /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */ | 488 | kvm_write_c0_guest_config2(cop0, MIPS_CONF_M); |
373 | kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) | | 489 | /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */ |
374 | (1 << CP0C3_ULRI)); | 490 | |
491 | /* Have config4, UserLocal */ | ||
492 | kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI); | ||
493 | |||
494 | /* Have config5 */ | ||
495 | kvm_write_c0_guest_config4(cop0, MIPS_CONF_M); | ||
496 | |||
497 | /* No config6 */ | ||
498 | kvm_write_c0_guest_config5(cop0, 0); | ||
375 | 499 | ||
376 | /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ | 500 | /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ |
377 | kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); | 501 | kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); |
@@ -416,6 +540,7 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, | |||
416 | { | 540 | { |
417 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 541 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
418 | int ret = 0; | 542 | int ret = 0; |
543 | unsigned int cur, change; | ||
419 | 544 | ||
420 | switch (reg->id) { | 545 | switch (reg->id) { |
421 | case KVM_REG_MIPS_CP0_COUNT: | 546 | case KVM_REG_MIPS_CP0_COUNT: |
@@ -444,6 +569,44 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, | |||
444 | kvm_write_c0_guest_cause(cop0, v); | 569 | kvm_write_c0_guest_cause(cop0, v); |
445 | } | 570 | } |
446 | break; | 571 | break; |
572 | case KVM_REG_MIPS_CP0_CONFIG: | ||
573 | /* read-only for now */ | ||
574 | break; | ||
575 | case KVM_REG_MIPS_CP0_CONFIG1: | ||
576 | cur = kvm_read_c0_guest_config1(cop0); | ||
577 | change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu); | ||
578 | if (change) { | ||
579 | v = cur ^ change; | ||
580 | kvm_write_c0_guest_config1(cop0, v); | ||
581 | } | ||
582 | break; | ||
583 | case KVM_REG_MIPS_CP0_CONFIG2: | ||
584 | /* read-only for now */ | ||
585 | break; | ||
586 | case KVM_REG_MIPS_CP0_CONFIG3: | ||
587 | cur = kvm_read_c0_guest_config3(cop0); | ||
588 | change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu); | ||
589 | if (change) { | ||
590 | v = cur ^ change; | ||
591 | kvm_write_c0_guest_config3(cop0, v); | ||
592 | } | ||
593 | break; | ||
594 | case KVM_REG_MIPS_CP0_CONFIG4: | ||
595 | cur = kvm_read_c0_guest_config4(cop0); | ||
596 | change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu); | ||
597 | if (change) { | ||
598 | v = cur ^ change; | ||
599 | kvm_write_c0_guest_config4(cop0, v); | ||
600 | } | ||
601 | break; | ||
602 | case KVM_REG_MIPS_CP0_CONFIG5: | ||
603 | cur = kvm_read_c0_guest_config5(cop0); | ||
604 | change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu); | ||
605 | if (change) { | ||
606 | v = cur ^ change; | ||
607 | kvm_write_c0_guest_config5(cop0, v); | ||
608 | } | ||
609 | break; | ||
447 | case KVM_REG_MIPS_COUNT_CTL: | 610 | case KVM_REG_MIPS_COUNT_CTL: |
448 | ret = kvm_mips_set_count_ctl(vcpu, v); | 611 | ret = kvm_mips_set_count_ctl(vcpu, v); |
449 | break; | 612 | break; |
@@ -459,6 +622,18 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, | |||
459 | return ret; | 622 | return ret; |
460 | } | 623 | } |
461 | 624 | ||
625 | static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu) | ||
626 | { | ||
627 | kvm_lose_fpu(vcpu); | ||
628 | |||
629 | return 0; | ||
630 | } | ||
631 | |||
632 | static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu) | ||
633 | { | ||
634 | return 0; | ||
635 | } | ||
636 | |||
462 | static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { | 637 | static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { |
463 | /* exit handlers */ | 638 | /* exit handlers */ |
464 | .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable, | 639 | .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable, |
@@ -470,6 +645,10 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { | |||
470 | .handle_syscall = kvm_trap_emul_handle_syscall, | 645 | .handle_syscall = kvm_trap_emul_handle_syscall, |
471 | .handle_res_inst = kvm_trap_emul_handle_res_inst, | 646 | .handle_res_inst = kvm_trap_emul_handle_res_inst, |
472 | .handle_break = kvm_trap_emul_handle_break, | 647 | .handle_break = kvm_trap_emul_handle_break, |
648 | .handle_trap = kvm_trap_emul_handle_trap, | ||
649 | .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe, | ||
650 | .handle_fpe = kvm_trap_emul_handle_fpe, | ||
651 | .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled, | ||
473 | 652 | ||
474 | .vm_init = kvm_trap_emul_vm_init, | 653 | .vm_init = kvm_trap_emul_vm_init, |
475 | .vcpu_init = kvm_trap_emul_vcpu_init, | 654 | .vcpu_init = kvm_trap_emul_vcpu_init, |
@@ -483,6 +662,8 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { | |||
483 | .irq_clear = kvm_mips_irq_clear_cb, | 662 | .irq_clear = kvm_mips_irq_clear_cb, |
484 | .get_one_reg = kvm_trap_emul_get_one_reg, | 663 | .get_one_reg = kvm_trap_emul_get_one_reg, |
485 | .set_one_reg = kvm_trap_emul_set_one_reg, | 664 | .set_one_reg = kvm_trap_emul_set_one_reg, |
665 | .vcpu_get_regs = kvm_trap_emul_vcpu_get_regs, | ||
666 | .vcpu_set_regs = kvm_trap_emul_vcpu_set_regs, | ||
486 | }; | 667 | }; |
487 | 668 | ||
488 | int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) | 669 | int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) |
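Every Config register case added to kvm_trap_emul_set_one_reg() above uses the same masked read-modify-write: only bits that both differ from the current guest value and are set in the per-register writable mask (kvm_mips_config*_wrmask()) get flipped. A small self-contained demonstration of the pattern (not kernel code; the mask value is invented purely for illustration):

  #include <stdint.h>
  #include <stdio.h>

  /*
   * change = (cur ^ v) & wrmask picks the bits that differ AND are writable;
   * cur ^ change then flips exactly those bits and leaves the rest alone.
   */
  static uint32_t masked_write(uint32_t cur, uint32_t v, uint32_t wrmask)
  {
          uint32_t change = (cur ^ v) & wrmask;

          return cur ^ change;
  }

  int main(void)
  {
          /* Pretend only bit 0 of the register is writable. */
          uint32_t cur = 0x80000000, v = 0xffffffff, wrmask = 0x00000001;

          printf("0x%08x\n", masked_write(cur, v, wrmask));  /* 0x80000001 */
          return 0;
  }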
diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c index 39b3a8f816f2..6249cdc834d1 100644 --- a/arch/powerpc/kvm/mpic.c +++ b/arch/powerpc/kvm/mpic.c | |||
@@ -34,7 +34,7 @@ | |||
34 | #include <asm/kvm_para.h> | 34 | #include <asm/kvm_para.h> |
35 | #include <asm/kvm_host.h> | 35 | #include <asm/kvm_host.h> |
36 | #include <asm/kvm_ppc.h> | 36 | #include <asm/kvm_ppc.h> |
37 | #include "iodev.h" | 37 | #include <kvm/iodev.h> |
38 | 38 | ||
39 | #define MAX_CPU 32 | 39 | #define MAX_CPU 32 |
40 | #define MAX_SRC 256 | 40 | #define MAX_SRC 256 |
@@ -289,11 +289,6 @@ static inline void IRQ_resetbit(struct irq_queue *q, int n_IRQ) | |||
289 | clear_bit(n_IRQ, q->queue); | 289 | clear_bit(n_IRQ, q->queue); |
290 | } | 290 | } |
291 | 291 | ||
292 | static inline int IRQ_testbit(struct irq_queue *q, int n_IRQ) | ||
293 | { | ||
294 | return test_bit(n_IRQ, q->queue); | ||
295 | } | ||
296 | |||
297 | static void IRQ_check(struct openpic *opp, struct irq_queue *q) | 292 | static void IRQ_check(struct openpic *opp, struct irq_queue *q) |
298 | { | 293 | { |
299 | int irq = -1; | 294 | int irq = -1; |
@@ -1374,8 +1369,9 @@ static int kvm_mpic_write_internal(struct openpic *opp, gpa_t addr, u32 val) | |||
1374 | return -ENXIO; | 1369 | return -ENXIO; |
1375 | } | 1370 | } |
1376 | 1371 | ||
1377 | static int kvm_mpic_read(struct kvm_io_device *this, gpa_t addr, | 1372 | static int kvm_mpic_read(struct kvm_vcpu *vcpu, |
1378 | int len, void *ptr) | 1373 | struct kvm_io_device *this, |
1374 | gpa_t addr, int len, void *ptr) | ||
1379 | { | 1375 | { |
1380 | struct openpic *opp = container_of(this, struct openpic, mmio); | 1376 | struct openpic *opp = container_of(this, struct openpic, mmio); |
1381 | int ret; | 1377 | int ret; |
@@ -1415,8 +1411,9 @@ static int kvm_mpic_read(struct kvm_io_device *this, gpa_t addr, | |||
1415 | return ret; | 1411 | return ret; |
1416 | } | 1412 | } |
1417 | 1413 | ||
1418 | static int kvm_mpic_write(struct kvm_io_device *this, gpa_t addr, | 1414 | static int kvm_mpic_write(struct kvm_vcpu *vcpu, |
1419 | int len, const void *ptr) | 1415 | struct kvm_io_device *this, |
1416 | gpa_t addr, int len, const void *ptr) | ||
1420 | { | 1417 | { |
1421 | struct openpic *opp = container_of(this, struct openpic, mmio); | 1418 | struct openpic *opp = container_of(this, struct openpic, mmio); |
1422 | int ret; | 1419 | int ret; |
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 27c0face86f4..24bfe401373e 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
@@ -807,7 +807,7 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
807 | 807 | ||
808 | idx = srcu_read_lock(&vcpu->kvm->srcu); | 808 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
809 | 809 | ||
810 | ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr, | 810 | ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, |
811 | bytes, &run->mmio.data); | 811 | bytes, &run->mmio.data); |
812 | 812 | ||
813 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | 813 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
@@ -880,7 +880,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
880 | 880 | ||
881 | idx = srcu_read_lock(&vcpu->kvm->srcu); | 881 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
882 | 882 | ||
883 | ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr, | 883 | ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, |
884 | bytes, &run->mmio.data); | 884 | bytes, &run->mmio.data); |
885 | 885 | ||
886 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | 886 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index f407bbf5ee94..d01fc588b5c3 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h | |||
@@ -172,7 +172,9 @@ struct kvm_s390_sie_block { | |||
172 | __u32 fac; /* 0x01a0 */ | 172 | __u32 fac; /* 0x01a0 */ |
173 | __u8 reserved1a4[20]; /* 0x01a4 */ | 173 | __u8 reserved1a4[20]; /* 0x01a4 */ |
174 | __u64 cbrlo; /* 0x01b8 */ | 174 | __u64 cbrlo; /* 0x01b8 */ |
175 | __u8 reserved1c0[30]; /* 0x01c0 */ | 175 | __u8 reserved1c0[8]; /* 0x01c0 */ |
176 | __u32 ecd; /* 0x01c8 */ | ||
177 | __u8 reserved1cc[18]; /* 0x01cc */ | ||
176 | __u64 pp; /* 0x01de */ | 178 | __u64 pp; /* 0x01de */ |
177 | __u8 reserved1e6[2]; /* 0x01e6 */ | 179 | __u8 reserved1e6[2]; /* 0x01e6 */ |
178 | __u64 itdba; /* 0x01e8 */ | 180 | __u64 itdba; /* 0x01e8 */ |
@@ -183,11 +185,17 @@ struct kvm_s390_itdb { | |||
183 | __u8 data[256]; | 185 | __u8 data[256]; |
184 | } __packed; | 186 | } __packed; |
185 | 187 | ||
188 | struct kvm_s390_vregs { | ||
189 | __vector128 vrs[32]; | ||
190 | __u8 reserved200[512]; /* for future vector expansion */ | ||
191 | } __packed; | ||
192 | |||
186 | struct sie_page { | 193 | struct sie_page { |
187 | struct kvm_s390_sie_block sie_block; | 194 | struct kvm_s390_sie_block sie_block; |
188 | __u8 reserved200[1024]; /* 0x0200 */ | 195 | __u8 reserved200[1024]; /* 0x0200 */ |
189 | struct kvm_s390_itdb itdb; /* 0x0600 */ | 196 | struct kvm_s390_itdb itdb; /* 0x0600 */ |
190 | __u8 reserved700[2304]; /* 0x0700 */ | 197 | __u8 reserved700[1280]; /* 0x0700 */ |
198 | struct kvm_s390_vregs vregs; /* 0x0c00 */ | ||
191 | } __packed; | 199 | } __packed; |
192 | 200 | ||
193 | struct kvm_vcpu_stat { | 201 | struct kvm_vcpu_stat { |
@@ -238,6 +246,7 @@ struct kvm_vcpu_stat { | |||
238 | u32 instruction_sigp_stop; | 246 | u32 instruction_sigp_stop; |
239 | u32 instruction_sigp_stop_store_status; | 247 | u32 instruction_sigp_stop_store_status; |
240 | u32 instruction_sigp_store_status; | 248 | u32 instruction_sigp_store_status; |
249 | u32 instruction_sigp_store_adtl_status; | ||
241 | u32 instruction_sigp_arch; | 250 | u32 instruction_sigp_arch; |
242 | u32 instruction_sigp_prefix; | 251 | u32 instruction_sigp_prefix; |
243 | u32 instruction_sigp_restart; | 252 | u32 instruction_sigp_restart; |
@@ -270,6 +279,7 @@ struct kvm_vcpu_stat { | |||
270 | #define PGM_SPECIAL_OPERATION 0x13 | 279 | #define PGM_SPECIAL_OPERATION 0x13 |
271 | #define PGM_OPERAND 0x15 | 280 | #define PGM_OPERAND 0x15 |
272 | #define PGM_TRACE_TABEL 0x16 | 281 | #define PGM_TRACE_TABEL 0x16 |
282 | #define PGM_VECTOR_PROCESSING 0x1b | ||
273 | #define PGM_SPACE_SWITCH 0x1c | 283 | #define PGM_SPACE_SWITCH 0x1c |
274 | #define PGM_HFP_SQUARE_ROOT 0x1d | 284 | #define PGM_HFP_SQUARE_ROOT 0x1d |
275 | #define PGM_PC_TRANSLATION_SPEC 0x1f | 285 | #define PGM_PC_TRANSLATION_SPEC 0x1f |
@@ -334,6 +344,11 @@ enum irq_types { | |||
334 | IRQ_PEND_COUNT | 344 | IRQ_PEND_COUNT |
335 | }; | 345 | }; |
336 | 346 | ||
347 | /* We have 2M for virtio device descriptor pages. Smallest amount of | ||
348 | * memory per page is 24 bytes (1 queue), so (2048*1024) / 24 = 87381 | ||
349 | */ | ||
350 | #define KVM_S390_MAX_VIRTIO_IRQS 87381 | ||
351 | |||
337 | /* | 352 | /* |
338 | * Repressible (non-floating) machine check interrupts | 353 | * Repressible (non-floating) machine check interrupts |
339 | * subclass bits in MCIC | 354 | * subclass bits in MCIC |
@@ -411,13 +426,32 @@ struct kvm_s390_local_interrupt { | |||
411 | unsigned long pending_irqs; | 426 | unsigned long pending_irqs; |
412 | }; | 427 | }; |
413 | 428 | ||
429 | #define FIRQ_LIST_IO_ISC_0 0 | ||
430 | #define FIRQ_LIST_IO_ISC_1 1 | ||
431 | #define FIRQ_LIST_IO_ISC_2 2 | ||
432 | #define FIRQ_LIST_IO_ISC_3 3 | ||
433 | #define FIRQ_LIST_IO_ISC_4 4 | ||
434 | #define FIRQ_LIST_IO_ISC_5 5 | ||
435 | #define FIRQ_LIST_IO_ISC_6 6 | ||
436 | #define FIRQ_LIST_IO_ISC_7 7 | ||
437 | #define FIRQ_LIST_PFAULT 8 | ||
438 | #define FIRQ_LIST_VIRTIO 9 | ||
439 | #define FIRQ_LIST_COUNT 10 | ||
440 | #define FIRQ_CNTR_IO 0 | ||
441 | #define FIRQ_CNTR_SERVICE 1 | ||
442 | #define FIRQ_CNTR_VIRTIO 2 | ||
443 | #define FIRQ_CNTR_PFAULT 3 | ||
444 | #define FIRQ_MAX_COUNT 4 | ||
445 | |||
414 | struct kvm_s390_float_interrupt { | 446 | struct kvm_s390_float_interrupt { |
447 | unsigned long pending_irqs; | ||
415 | spinlock_t lock; | 448 | spinlock_t lock; |
416 | struct list_head list; | 449 | struct list_head lists[FIRQ_LIST_COUNT]; |
417 | atomic_t active; | 450 | int counters[FIRQ_MAX_COUNT]; |
451 | struct kvm_s390_mchk_info mchk; | ||
452 | struct kvm_s390_ext_info srv_signal; | ||
418 | int next_rr_cpu; | 453 | int next_rr_cpu; |
419 | unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)]; | 454 | unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)]; |
420 | unsigned int irq_count; | ||
421 | }; | 455 | }; |
422 | 456 | ||
423 | struct kvm_hw_wp_info_arch { | 457 | struct kvm_hw_wp_info_arch { |
@@ -465,6 +499,7 @@ struct kvm_vcpu_arch { | |||
465 | s390_fp_regs host_fpregs; | 499 | s390_fp_regs host_fpregs; |
466 | unsigned int host_acrs[NUM_ACRS]; | 500 | unsigned int host_acrs[NUM_ACRS]; |
467 | s390_fp_regs guest_fpregs; | 501 | s390_fp_regs guest_fpregs; |
502 | struct kvm_s390_vregs *host_vregs; | ||
468 | struct kvm_s390_local_interrupt local_int; | 503 | struct kvm_s390_local_interrupt local_int; |
469 | struct hrtimer ckc_timer; | 504 | struct hrtimer ckc_timer; |
470 | struct kvm_s390_pgm_info pgm; | 505 | struct kvm_s390_pgm_info pgm; |
@@ -553,6 +588,7 @@ struct kvm_arch{ | |||
553 | int use_cmma; | 588 | int use_cmma; |
554 | int user_cpu_state_ctrl; | 589 | int user_cpu_state_ctrl; |
555 | int user_sigp; | 590 | int user_sigp; |
591 | int user_stsi; | ||
556 | struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS]; | 592 | struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS]; |
557 | wait_queue_head_t ipte_wq; | 593 | wait_queue_head_t ipte_wq; |
558 | int ipte_lock_count; | 594 | int ipte_lock_count; |
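The layout changes above are pure offset arithmetic: in kvm_s390_sie_block the old 30 reserved bytes at 0x01c0 become 8 reserved bytes, a new 4-byte ecd field at 0x01c8 and 18 reserved bytes at 0x01cc, still ending at 0x01de where pp already sat; in sie_page the gap after the ITDB shrinks from 2304 to 1280 bytes so the new vector register save area lands at 0x0c00. A throwaway compile-time check of those sums (a sketch, not kernel code):

  #include <assert.h>

  int main(void)
  {
          /* sie_block: 0x1c0 + 8 + 4 + 18 == 0x1de, the offset of pp */
          static_assert(0x1c0 + 8 + 4 + 18 == 0x1de, "ecd split");
          /* sie_page: 0x200 sie_block + 1024 + 256 itdb + 1280 == 0xc00 */
          static_assert(0x200 + 1024 + 256 + 1280 == 0xc00, "vregs at 0xc00");
          return 0;
  }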
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 9c77e60b9a26..ef1a5fcc6c66 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h | |||
@@ -150,6 +150,7 @@ struct kvm_guest_debug_arch { | |||
150 | #define KVM_SYNC_CRS (1UL << 3) | 150 | #define KVM_SYNC_CRS (1UL << 3) |
151 | #define KVM_SYNC_ARCH0 (1UL << 4) | 151 | #define KVM_SYNC_ARCH0 (1UL << 4) |
152 | #define KVM_SYNC_PFAULT (1UL << 5) | 152 | #define KVM_SYNC_PFAULT (1UL << 5) |
153 | #define KVM_SYNC_VRS (1UL << 6) | ||
153 | /* definition of registers in kvm_run */ | 154 | /* definition of registers in kvm_run */ |
154 | struct kvm_sync_regs { | 155 | struct kvm_sync_regs { |
155 | __u64 prefix; /* prefix register */ | 156 | __u64 prefix; /* prefix register */ |
@@ -164,6 +165,9 @@ struct kvm_sync_regs { | |||
164 | __u64 pft; /* pfault token [PFAULT] */ | 165 | __u64 pft; /* pfault token [PFAULT] */ |
165 | __u64 pfs; /* pfault select [PFAULT] */ | 166 | __u64 pfs; /* pfault select [PFAULT] */ |
166 | __u64 pfc; /* pfault compare [PFAULT] */ | 167 | __u64 pfc; /* pfault compare [PFAULT] */ |
168 | __u64 vrs[32][2]; /* vector registers */ | ||
169 | __u8 reserved[512]; /* for future vector expansion */ | ||
170 | __u32 fpc; /* only valid with vector registers */ | ||
167 | }; | 171 | }; |
168 | 172 | ||
169 | #define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1) | 173 | #define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1) |
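With KVM_SYNC_VRS added to the sync-regs flags, a userspace VMM can read the guest vector registers straight out of the shared kvm_run area after KVM_RUN rather than through a separate ioctl. A rough sketch, assuming run points at the mmap()ed struct kvm_run of the vcpu and fetch_guest_vrs is an illustrative helper, not an existing API:

  #include <linux/kvm.h>
  #include <string.h>

  static void fetch_guest_vrs(const struct kvm_run *run,
                              __u64 vrs_out[32][2], __u32 *fpc_out)
  {
          /* The kernel only fills vrs/fpc when it sets this valid bit. */
          if (!(run->kvm_valid_regs & KVM_SYNC_VRS))
                  return;

          /* vrs[i][0] and vrs[i][1] together hold 128-bit vector register i. */
          memcpy(vrs_out, run->s.regs.vrs, sizeof(run->s.regs.vrs));
          *fpc_out = run->s.regs.fpc;     /* fpc is only valid with VRS */
  }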
diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h index d4096fdfc6ab..ee69c0854c88 100644 --- a/arch/s390/include/uapi/asm/sie.h +++ b/arch/s390/include/uapi/asm/sie.h | |||
@@ -230,7 +230,7 @@ | |||
230 | * and returns a key, which can be used to find a mnemonic name | 230 | * and returns a key, which can be used to find a mnemonic name |
231 | * of the instruction in the icpt_insn_codes table. | 231 | * of the instruction in the icpt_insn_codes table. |
232 | */ | 232 | */ |
233 | #define icpt_insn_decoder(insn) \ | 233 | #define icpt_insn_decoder(insn) ( \ |
234 | INSN_DECODE_IPA0(0x01, insn, 48, 0xff) \ | 234 | INSN_DECODE_IPA0(0x01, insn, 48, 0xff) \ |
235 | INSN_DECODE_IPA0(0xaa, insn, 48, 0x0f) \ | 235 | INSN_DECODE_IPA0(0xaa, insn, 48, 0x0f) \ |
236 | INSN_DECODE_IPA0(0xb2, insn, 48, 0xff) \ | 236 | INSN_DECODE_IPA0(0xb2, insn, 48, 0xff) \ |
@@ -239,6 +239,6 @@ | |||
239 | INSN_DECODE_IPA0(0xe5, insn, 48, 0xff) \ | 239 | INSN_DECODE_IPA0(0xe5, insn, 48, 0xff) \ |
240 | INSN_DECODE_IPA0(0xeb, insn, 16, 0xff) \ | 240 | INSN_DECODE_IPA0(0xeb, insn, 16, 0xff) \ |
241 | INSN_DECODE_IPA0(0xc8, insn, 48, 0x0f) \ | 241 | INSN_DECODE_IPA0(0xc8, insn, 48, 0x0f) \ |
242 | INSN_DECODE(insn) | 242 | INSN_DECODE(insn)) |
243 | 243 | ||
244 | #endif /* _UAPI_ASM_S390_SIE_H */ | 244 | #endif /* _UAPI_ASM_S390_SIE_H */ |
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index e07e91605353..8dc4db10d160 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c | |||
@@ -171,6 +171,7 @@ int main(void) | |||
171 | #else /* CONFIG_32BIT */ | 171 | #else /* CONFIG_32BIT */ |
172 | DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code)); | 172 | DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code)); |
173 | DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address)); | 173 | DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address)); |
174 | DEFINE(__LC_VX_SAVE_AREA_ADDR, offsetof(struct _lowcore, vector_save_area_addr)); | ||
174 | DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2)); | 175 | DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2)); |
175 | DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area)); | 176 | DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area)); |
176 | DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste)); | 177 | DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste)); |
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 9254afff250c..fc7ec95848c3 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c | |||
@@ -77,7 +77,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu) | |||
77 | 77 | ||
78 | if (vcpu->run->s.regs.gprs[rx] & 7) | 78 | if (vcpu->run->s.regs.gprs[rx] & 7) |
79 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 79 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
80 | rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm)); | 80 | rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm)); |
81 | if (rc) | 81 | if (rc) |
82 | return kvm_s390_inject_prog_cond(vcpu, rc); | 82 | return kvm_s390_inject_prog_cond(vcpu, rc); |
83 | if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258) | 83 | if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258) |
@@ -213,7 +213,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu) | |||
213 | * - gpr 3 contains the virtqueue index (passed as datamatch) | 213 | * - gpr 3 contains the virtqueue index (passed as datamatch) |
214 | * - gpr 4 contains the index on the bus (optionally) | 214 | * - gpr 4 contains the index on the bus (optionally) |
215 | */ | 215 | */ |
216 | ret = kvm_io_bus_write_cookie(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS, | 216 | ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS, |
217 | vcpu->run->s.regs.gprs[2] & 0xffffffff, | 217 | vcpu->run->s.regs.gprs[2] & 0xffffffff, |
218 | 8, &vcpu->run->s.regs.gprs[3], | 218 | 8, &vcpu->run->s.regs.gprs[3], |
219 | vcpu->run->s.regs.gprs[4]); | 219 | vcpu->run->s.regs.gprs[4]); |
@@ -230,7 +230,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu) | |||
230 | 230 | ||
231 | int kvm_s390_handle_diag(struct kvm_vcpu *vcpu) | 231 | int kvm_s390_handle_diag(struct kvm_vcpu *vcpu) |
232 | { | 232 | { |
233 | int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff; | 233 | int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff; |
234 | 234 | ||
235 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 235 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
236 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 236 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index 267523cac6de..a7559f7207df 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <asm/pgtable.h> | 10 | #include <asm/pgtable.h> |
11 | #include "kvm-s390.h" | 11 | #include "kvm-s390.h" |
12 | #include "gaccess.h" | 12 | #include "gaccess.h" |
13 | #include <asm/switch_to.h> | ||
13 | 14 | ||
14 | union asce { | 15 | union asce { |
15 | unsigned long val; | 16 | unsigned long val; |
@@ -207,6 +208,54 @@ union raddress { | |||
207 | unsigned long pfra : 52; /* Page-Frame Real Address */ | 208 | unsigned long pfra : 52; /* Page-Frame Real Address */ |
208 | }; | 209 | }; |
209 | 210 | ||
211 | union alet { | ||
212 | u32 val; | ||
213 | struct { | ||
214 | u32 reserved : 7; | ||
215 | u32 p : 1; | ||
216 | u32 alesn : 8; | ||
217 | u32 alen : 16; | ||
218 | }; | ||
219 | }; | ||
220 | |||
221 | union ald { | ||
222 | u32 val; | ||
223 | struct { | ||
224 | u32 : 1; | ||
225 | u32 alo : 24; | ||
226 | u32 all : 7; | ||
227 | }; | ||
228 | }; | ||
229 | |||
230 | struct ale { | ||
231 | unsigned long i : 1; /* ALEN-Invalid Bit */ | ||
232 | unsigned long : 5; | ||
233 | unsigned long fo : 1; /* Fetch-Only Bit */ | ||
234 | unsigned long p : 1; /* Private Bit */ | ||
235 | unsigned long alesn : 8; /* Access-List-Entry Sequence Number */ | ||
236 | unsigned long aleax : 16; /* Access-List-Entry Authorization Index */ | ||
237 | unsigned long : 32; | ||
238 | unsigned long : 1; | ||
239 | unsigned long asteo : 25; /* ASN-Second-Table-Entry Origin */ | ||
240 | unsigned long : 6; | ||
241 | unsigned long astesn : 32; /* ASTE Sequence Number */ | ||
242 | } __packed; | ||
243 | |||
244 | struct aste { | ||
245 | unsigned long i : 1; /* ASX-Invalid Bit */ | ||
246 | unsigned long ato : 29; /* Authority-Table Origin */ | ||
247 | unsigned long : 1; | ||
248 | unsigned long b : 1; /* Base-Space Bit */ | ||
249 | unsigned long ax : 16; /* Authorization Index */ | ||
250 | unsigned long atl : 12; /* Authority-Table Length */ | ||
251 | unsigned long : 2; | ||
252 | unsigned long ca : 1; /* Controlled-ASN Bit */ | ||
253 | unsigned long ra : 1; /* Reusable-ASN Bit */ | ||
254 | unsigned long asce : 64; /* Address-Space-Control Element */ | ||
255 | unsigned long ald : 32; | ||
256 | unsigned long astesn : 32; | ||
257 | /* .. more fields there */ | ||
258 | } __packed; | ||
210 | 259 | ||
211 | int ipte_lock_held(struct kvm_vcpu *vcpu) | 260 | int ipte_lock_held(struct kvm_vcpu *vcpu) |
212 | { | 261 | { |
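The new unions and structs mirror the architected access-register translation tables (ALET, access-list designation, access-list entry, ASN-second-table entry) using big-endian bit-fields. As a portable illustration of the ALET split (reserved:7, p:1, alesn:8, alen:16), here are the same fields extracted with explicit shifts on an arbitrary example value:

#include <stdio.h>
#include <stdint.h>

/*
 * Field split of an ALET word, done with shifts/masks so the sketch runs
 * anywhere; the kernel relies on big-endian bit-fields instead.
 */
int main(void)
{
	uint32_t alet = 0x01230042;	/* arbitrary example value */

	uint32_t reserved = alet >> 25;
	uint32_t p        = (alet >> 24) & 0x1;
	uint32_t alesn    = (alet >> 16) & 0xff;
	uint32_t alen     = alet & 0xffff;

	printf("reserved=%u p=%u alesn=%u alen=%u\n", reserved, p, alesn, alen);
	return 0;
}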
@@ -307,15 +356,157 @@ void ipte_unlock(struct kvm_vcpu *vcpu) | |||
307 | ipte_unlock_simple(vcpu); | 356 | ipte_unlock_simple(vcpu); |
308 | } | 357 | } |
309 | 358 | ||
310 | static unsigned long get_vcpu_asce(struct kvm_vcpu *vcpu) | 359 | static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar, |
360 | int write) | ||
361 | { | ||
362 | union alet alet; | ||
363 | struct ale ale; | ||
364 | struct aste aste; | ||
365 | unsigned long ald_addr, authority_table_addr; | ||
366 | union ald ald; | ||
367 | int eax, rc; | ||
368 | u8 authority_table; | ||
369 | |||
370 | if (ar >= NUM_ACRS) | ||
371 | return -EINVAL; | ||
372 | |||
373 | save_access_regs(vcpu->run->s.regs.acrs); | ||
374 | alet.val = vcpu->run->s.regs.acrs[ar]; | ||
375 | |||
376 | if (ar == 0 || alet.val == 0) { | ||
377 | asce->val = vcpu->arch.sie_block->gcr[1]; | ||
378 | return 0; | ||
379 | } else if (alet.val == 1) { | ||
380 | asce->val = vcpu->arch.sie_block->gcr[7]; | ||
381 | return 0; | ||
382 | } | ||
383 | |||
384 | if (alet.reserved) | ||
385 | return PGM_ALET_SPECIFICATION; | ||
386 | |||
387 | if (alet.p) | ||
388 | ald_addr = vcpu->arch.sie_block->gcr[5]; | ||
389 | else | ||
390 | ald_addr = vcpu->arch.sie_block->gcr[2]; | ||
391 | ald_addr &= 0x7fffffc0; | ||
392 | |||
393 | rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald)); | ||
394 | if (rc) | ||
395 | return rc; | ||
396 | |||
397 | if (alet.alen / 8 > ald.all) | ||
398 | return PGM_ALEN_TRANSLATION; | ||
399 | |||
400 | if (0x7fffffff - ald.alo * 128 < alet.alen * 16) | ||
401 | return PGM_ADDRESSING; | ||
402 | |||
403 | rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale, | ||
404 | sizeof(struct ale)); | ||
405 | if (rc) | ||
406 | return rc; | ||
407 | |||
408 | if (ale.i == 1) | ||
409 | return PGM_ALEN_TRANSLATION; | ||
410 | if (ale.alesn != alet.alesn) | ||
411 | return PGM_ALE_SEQUENCE; | ||
412 | |||
413 | rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste)); | ||
414 | if (rc) | ||
415 | return rc; | ||
416 | |||
417 | if (aste.i) | ||
418 | return PGM_ASTE_VALIDITY; | ||
419 | if (aste.astesn != ale.astesn) | ||
420 | return PGM_ASTE_SEQUENCE; | ||
421 | |||
422 | if (ale.p == 1) { | ||
423 | eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff; | ||
424 | if (ale.aleax != eax) { | ||
425 | if (eax / 16 > aste.atl) | ||
426 | return PGM_EXTENDED_AUTHORITY; | ||
427 | |||
428 | authority_table_addr = aste.ato * 4 + eax / 4; | ||
429 | |||
430 | rc = read_guest_real(vcpu, authority_table_addr, | ||
431 | &authority_table, | ||
432 | sizeof(u8)); | ||
433 | if (rc) | ||
434 | return rc; | ||
435 | |||
436 | if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0) | ||
437 | return PGM_EXTENDED_AUTHORITY; | ||
438 | } | ||
439 | } | ||
440 | |||
441 | if (ale.fo == 1 && write) | ||
442 | return PGM_PROTECTION; | ||
443 | |||
444 | asce->val = aste.asce; | ||
445 | return 0; | ||
446 | } | ||
447 | |||
448 | struct trans_exc_code_bits { | ||
449 | unsigned long addr : 52; /* Translation-exception Address */ | ||
450 | unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */ | ||
451 | unsigned long : 6; | ||
452 | unsigned long b60 : 1; | ||
453 | unsigned long b61 : 1; | ||
454 | unsigned long as : 2; /* ASCE Identifier */ | ||
455 | }; | ||
456 | |||
457 | enum { | ||
458 | FSI_UNKNOWN = 0, /* Unknown whether fetch or store */ | ||
459 | FSI_STORE = 1, /* Exception was due to store operation */ | ||
460 | FSI_FETCH = 2 /* Exception was due to fetch operation */ | ||
461 | }; | ||
462 | |||
463 | static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce, | ||
464 | ar_t ar, int write) | ||
311 | { | 465 | { |
466 | int rc; | ||
467 | psw_t *psw = &vcpu->arch.sie_block->gpsw; | ||
468 | struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; | ||
469 | struct trans_exc_code_bits *tec_bits; | ||
470 | |||
471 | memset(pgm, 0, sizeof(*pgm)); | ||
472 | tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; | ||
473 | tec_bits->fsi = write ? FSI_STORE : FSI_FETCH; | ||
474 | tec_bits->as = psw_bits(*psw).as; | ||
475 | |||
476 | if (!psw_bits(*psw).t) { | ||
477 | asce->val = 0; | ||
478 | asce->r = 1; | ||
479 | return 0; | ||
480 | } | ||
481 | |||
312 | switch (psw_bits(vcpu->arch.sie_block->gpsw).as) { | 482 | switch (psw_bits(vcpu->arch.sie_block->gpsw).as) { |
313 | case PSW_AS_PRIMARY: | 483 | case PSW_AS_PRIMARY: |
314 | return vcpu->arch.sie_block->gcr[1]; | 484 | asce->val = vcpu->arch.sie_block->gcr[1]; |
485 | return 0; | ||
315 | case PSW_AS_SECONDARY: | 486 | case PSW_AS_SECONDARY: |
316 | return vcpu->arch.sie_block->gcr[7]; | 487 | asce->val = vcpu->arch.sie_block->gcr[7]; |
488 | return 0; | ||
317 | case PSW_AS_HOME: | 489 | case PSW_AS_HOME: |
318 | return vcpu->arch.sie_block->gcr[13]; | 490 | asce->val = vcpu->arch.sie_block->gcr[13]; |
491 | return 0; | ||
492 | case PSW_AS_ACCREG: | ||
493 | rc = ar_translation(vcpu, asce, ar, write); | ||
494 | switch (rc) { | ||
495 | case PGM_ALEN_TRANSLATION: | ||
496 | case PGM_ALE_SEQUENCE: | ||
497 | case PGM_ASTE_VALIDITY: | ||
498 | case PGM_ASTE_SEQUENCE: | ||
499 | case PGM_EXTENDED_AUTHORITY: | ||
500 | vcpu->arch.pgm.exc_access_id = ar; | ||
501 | break; | ||
502 | case PGM_PROTECTION: | ||
503 | tec_bits->b60 = 1; | ||
504 | tec_bits->b61 = 1; | ||
505 | break; | ||
506 | } | ||
507 | if (rc > 0) | ||
508 | pgm->code = rc; | ||
509 | return rc; | ||
319 | } | 510 | } |
320 | return 0; | 511 | return 0; |
321 | } | 512 | } |
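get_vcpu_asce() now returns the effective ASCE through an out parameter: CR1, CR7 or CR13 depending on the PSW address-space control, or the result of ALET translation in access-register mode, while also pre-filling the translation-exception code. A stripped-down sketch of that selection order, using hypothetical enum values and a fake CPU struct instead of the SIE block:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins; the kernel uses PSW_AS_* and gcr[] from the SIE block. */
enum { AS_PRIMARY, AS_SECONDARY, AS_ACCREG, AS_HOME };

struct demo_cpu {
	uint64_t cr[16];	/* control registers */
	int as;			/* PSW address-space control */
	int dat;		/* PSW DAT bit */
};

/* Fills *asce following the same selection order as get_vcpu_asce(). */
static int select_asce(const struct demo_cpu *cpu, uint64_t *asce)
{
	if (!cpu->dat) {	/* DAT off: the kernel flags this via the ASCE real-space bit */
		*asce = 0;
		return 0;
	}
	switch (cpu->as) {
	case AS_PRIMARY:   *asce = cpu->cr[1];  return 0;
	case AS_SECONDARY: *asce = cpu->cr[7];  return 0;
	case AS_HOME:      *asce = cpu->cr[13]; return 0;
	case AS_ACCREG:    /* would run ALET translation here (ar_translation()) */
		return -1;
	}
	return -1;
}

int main(void)
{
	struct demo_cpu cpu = { .as = AS_PRIMARY, .dat = 1 };
	uint64_t asce;

	cpu.cr[1] = 0x12345000;	/* invented ASCE value */
	if (!select_asce(&cpu, &asce))
		printf("effective asce = 0x%llx\n", (unsigned long long)asce);
	return 0;
}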
@@ -330,10 +521,11 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val) | |||
330 | * @vcpu: virtual cpu | 521 | * @vcpu: virtual cpu |
331 | * @gva: guest virtual address | 522 | * @gva: guest virtual address |
332 | * @gpa: points to where guest physical (absolute) address should be stored | 523 | * @gpa: points to where guest physical (absolute) address should be stored |
524 | * @asce: effective asce | ||
333 | * @write: indicates if access is a write access | 525 | * @write: indicates if access is a write access |
334 | * | 526 | * |
335 | * Translate a guest virtual address into a guest absolute address by means | 527 | * Translate a guest virtual address into a guest absolute address by means |
336 | * of dynamic address translation as specified by the architecuture. | 528 | * of dynamic address translation as specified by the architecture. |
337 | * If the resulting absolute address is not available in the configuration | 529 | * If the resulting absolute address is not available in the configuration |
338 | * an addressing exception is indicated and @gpa will not be changed. | 530 | * an addressing exception is indicated and @gpa will not be changed. |
339 | * | 531 | * |
@@ -345,7 +537,8 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val) | |||
345 | * by the architecture | 537 | * by the architecture |
346 | */ | 538 | */ |
347 | static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva, | 539 | static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva, |
348 | unsigned long *gpa, int write) | 540 | unsigned long *gpa, const union asce asce, |
541 | int write) | ||
349 | { | 542 | { |
350 | union vaddress vaddr = {.addr = gva}; | 543 | union vaddress vaddr = {.addr = gva}; |
351 | union raddress raddr = {.addr = gva}; | 544 | union raddress raddr = {.addr = gva}; |
@@ -354,12 +547,10 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva, | |||
354 | union ctlreg0 ctlreg0; | 547 | union ctlreg0 ctlreg0; |
355 | unsigned long ptr; | 548 | unsigned long ptr; |
356 | int edat1, edat2; | 549 | int edat1, edat2; |
357 | union asce asce; | ||
358 | 550 | ||
359 | ctlreg0.val = vcpu->arch.sie_block->gcr[0]; | 551 | ctlreg0.val = vcpu->arch.sie_block->gcr[0]; |
360 | edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8); | 552 | edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8); |
361 | edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78); | 553 | edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78); |
362 | asce.val = get_vcpu_asce(vcpu); | ||
363 | if (asce.r) | 554 | if (asce.r) |
364 | goto real_address; | 555 | goto real_address; |
365 | ptr = asce.origin * 4096; | 556 | ptr = asce.origin * 4096; |
@@ -506,48 +697,30 @@ static inline int is_low_address(unsigned long ga) | |||
506 | return (ga & ~0x11fful) == 0; | 697 | return (ga & ~0x11fful) == 0; |
507 | } | 698 | } |
508 | 699 | ||
509 | static int low_address_protection_enabled(struct kvm_vcpu *vcpu) | 700 | static int low_address_protection_enabled(struct kvm_vcpu *vcpu, |
701 | const union asce asce) | ||
510 | { | 702 | { |
511 | union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]}; | 703 | union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]}; |
512 | psw_t *psw = &vcpu->arch.sie_block->gpsw; | 704 | psw_t *psw = &vcpu->arch.sie_block->gpsw; |
513 | union asce asce; | ||
514 | 705 | ||
515 | if (!ctlreg0.lap) | 706 | if (!ctlreg0.lap) |
516 | return 0; | 707 | return 0; |
517 | asce.val = get_vcpu_asce(vcpu); | ||
518 | if (psw_bits(*psw).t && asce.p) | 708 | if (psw_bits(*psw).t && asce.p) |
519 | return 0; | 709 | return 0; |
520 | return 1; | 710 | return 1; |
521 | } | 711 | } |
522 | 712 | ||
523 | struct trans_exc_code_bits { | ||
524 | unsigned long addr : 52; /* Translation-exception Address */ | ||
525 | unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */ | ||
526 | unsigned long : 7; | ||
527 | unsigned long b61 : 1; | ||
528 | unsigned long as : 2; /* ASCE Identifier */ | ||
529 | }; | ||
530 | |||
531 | enum { | ||
532 | FSI_UNKNOWN = 0, /* Unknown wether fetch or store */ | ||
533 | FSI_STORE = 1, /* Exception was due to store operation */ | ||
534 | FSI_FETCH = 2 /* Exception was due to fetch operation */ | ||
535 | }; | ||
536 | |||
537 | static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, | 713 | static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, |
538 | unsigned long *pages, unsigned long nr_pages, | 714 | unsigned long *pages, unsigned long nr_pages, |
539 | int write) | 715 | const union asce asce, int write) |
540 | { | 716 | { |
541 | struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; | 717 | struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; |
542 | psw_t *psw = &vcpu->arch.sie_block->gpsw; | 718 | psw_t *psw = &vcpu->arch.sie_block->gpsw; |
543 | struct trans_exc_code_bits *tec_bits; | 719 | struct trans_exc_code_bits *tec_bits; |
544 | int lap_enabled, rc; | 720 | int lap_enabled, rc; |
545 | 721 | ||
546 | memset(pgm, 0, sizeof(*pgm)); | ||
547 | tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; | 722 | tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; |
548 | tec_bits->fsi = write ? FSI_STORE : FSI_FETCH; | 723 | lap_enabled = low_address_protection_enabled(vcpu, asce); |
549 | tec_bits->as = psw_bits(*psw).as; | ||
550 | lap_enabled = low_address_protection_enabled(vcpu); | ||
551 | while (nr_pages) { | 724 | while (nr_pages) { |
552 | ga = kvm_s390_logical_to_effective(vcpu, ga); | 725 | ga = kvm_s390_logical_to_effective(vcpu, ga); |
553 | tec_bits->addr = ga >> PAGE_SHIFT; | 726 | tec_bits->addr = ga >> PAGE_SHIFT; |
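low_address_protection_enabled() keeps relying on is_low_address(), whose single mask test covers both architected low-address ranges. A small sketch that reuses the same expression and probes a few sample addresses to show which ones it flags (0x0-0x1ff and 0x1000-0x11ff):

#include <stdio.h>

/* Same mask check as is_low_address() in gaccess.c; sample values are illustrative. */
static int is_low_address(unsigned long ga)
{
	return (ga & ~0x11fful) == 0;
}

int main(void)
{
	unsigned long samples[] = { 0x0, 0x1ff, 0x200, 0xfff, 0x1000, 0x11ff, 0x1200 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%04lx -> %s\n", samples[i],
		       is_low_address(samples[i]) ? "low" : "not low");
	return 0;
}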
@@ -557,7 +730,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, | |||
557 | } | 730 | } |
558 | ga &= PAGE_MASK; | 731 | ga &= PAGE_MASK; |
559 | if (psw_bits(*psw).t) { | 732 | if (psw_bits(*psw).t) { |
560 | rc = guest_translate(vcpu, ga, pages, write); | 733 | rc = guest_translate(vcpu, ga, pages, asce, write); |
561 | if (rc < 0) | 734 | if (rc < 0) |
562 | return rc; | 735 | return rc; |
563 | if (rc == PGM_PROTECTION) | 736 | if (rc == PGM_PROTECTION) |
@@ -578,7 +751,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, | |||
578 | return 0; | 751 | return 0; |
579 | } | 752 | } |
580 | 753 | ||
581 | int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, | 754 | int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, |
582 | unsigned long len, int write) | 755 | unsigned long len, int write) |
583 | { | 756 | { |
584 | psw_t *psw = &vcpu->arch.sie_block->gpsw; | 757 | psw_t *psw = &vcpu->arch.sie_block->gpsw; |
@@ -591,20 +764,19 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, | |||
591 | 764 | ||
592 | if (!len) | 765 | if (!len) |
593 | return 0; | 766 | return 0; |
594 | /* Access register mode is not supported yet. */ | 767 | rc = get_vcpu_asce(vcpu, &asce, ar, write); |
595 | if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG) | 768 | if (rc) |
596 | return -EOPNOTSUPP; | 769 | return rc; |
597 | nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1; | 770 | nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1; |
598 | pages = pages_array; | 771 | pages = pages_array; |
599 | if (nr_pages > ARRAY_SIZE(pages_array)) | 772 | if (nr_pages > ARRAY_SIZE(pages_array)) |
600 | pages = vmalloc(nr_pages * sizeof(unsigned long)); | 773 | pages = vmalloc(nr_pages * sizeof(unsigned long)); |
601 | if (!pages) | 774 | if (!pages) |
602 | return -ENOMEM; | 775 | return -ENOMEM; |
603 | asce.val = get_vcpu_asce(vcpu); | ||
604 | need_ipte_lock = psw_bits(*psw).t && !asce.r; | 776 | need_ipte_lock = psw_bits(*psw).t && !asce.r; |
605 | if (need_ipte_lock) | 777 | if (need_ipte_lock) |
606 | ipte_lock(vcpu); | 778 | ipte_lock(vcpu); |
607 | rc = guest_page_range(vcpu, ga, pages, nr_pages, write); | 779 | rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write); |
608 | for (idx = 0; idx < nr_pages && !rc; idx++) { | 780 | for (idx = 0; idx < nr_pages && !rc; idx++) { |
609 | gpa = *(pages + idx) + (ga & ~PAGE_MASK); | 781 | gpa = *(pages + idx) + (ga & ~PAGE_MASK); |
610 | _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len); | 782 | _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len); |
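access_guest() still sizes its translation array from the starting offset and length before walking the pages; only the ASCE handling changed. The same page-count arithmetic, exercised on a few illustrative address/length pairs (the real function returns early for len == 0, which this formula does not handle):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Offset into the first page plus length, rounded up to whole pages. */
static unsigned long nr_pages_for(unsigned long ga, unsigned long len)
{
	return (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
}

int main(void)
{
	printf("%lu\n", nr_pages_for(0x1000, 16));	/* 1: fits in one page */
	printf("%lu\n", nr_pages_for(0x1ff8, 16));	/* 2: crosses a page boundary */
	printf("%lu\n", nr_pages_for(0x1000, 8192));	/* 2: exactly two pages */
	return 0;
}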
@@ -652,7 +824,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, | |||
652 | * Note: The IPTE lock is not taken during this function, so the caller | 824 | * Note: The IPTE lock is not taken during this function, so the caller |
653 | * has to take care of this. | 825 | * has to take care of this. |
654 | */ | 826 | */ |
655 | int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, | 827 | int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, |
656 | unsigned long *gpa, int write) | 828 | unsigned long *gpa, int write) |
657 | { | 829 | { |
658 | struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; | 830 | struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; |
@@ -661,26 +833,21 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, | |||
661 | union asce asce; | 833 | union asce asce; |
662 | int rc; | 834 | int rc; |
663 | 835 | ||
664 | /* Access register mode is not supported yet. */ | ||
665 | if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG) | ||
666 | return -EOPNOTSUPP; | ||
667 | |||
668 | gva = kvm_s390_logical_to_effective(vcpu, gva); | 836 | gva = kvm_s390_logical_to_effective(vcpu, gva); |
669 | memset(pgm, 0, sizeof(*pgm)); | ||
670 | tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code; | 837 | tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code; |
671 | tec->as = psw_bits(*psw).as; | 838 | rc = get_vcpu_asce(vcpu, &asce, ar, write); |
672 | tec->fsi = write ? FSI_STORE : FSI_FETCH; | ||
673 | tec->addr = gva >> PAGE_SHIFT; | 839 | tec->addr = gva >> PAGE_SHIFT; |
674 | if (is_low_address(gva) && low_address_protection_enabled(vcpu)) { | 840 | if (rc) |
841 | return rc; | ||
842 | if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) { | ||
675 | if (write) { | 843 | if (write) { |
676 | rc = pgm->code = PGM_PROTECTION; | 844 | rc = pgm->code = PGM_PROTECTION; |
677 | return rc; | 845 | return rc; |
678 | } | 846 | } |
679 | } | 847 | } |
680 | 848 | ||
681 | asce.val = get_vcpu_asce(vcpu); | ||
682 | if (psw_bits(*psw).t && !asce.r) { /* Use DAT? */ | 849 | if (psw_bits(*psw).t && !asce.r) { /* Use DAT? */ |
683 | rc = guest_translate(vcpu, gva, gpa, write); | 850 | rc = guest_translate(vcpu, gva, gpa, asce, write); |
684 | if (rc > 0) { | 851 | if (rc > 0) { |
685 | if (rc == PGM_PROTECTION) | 852 | if (rc == PGM_PROTECTION) |
686 | tec->b61 = 1; | 853 | tec->b61 = 1; |
@@ -697,28 +864,51 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, | |||
697 | } | 864 | } |
698 | 865 | ||
699 | /** | 866 | /** |
700 | * kvm_s390_check_low_addr_protection - check for low-address protection | 867 | * check_gva_range - test a range of guest virtual addresses for accessibility |
701 | * @ga: Guest address | 868 | */ |
869 | int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, | ||
870 | unsigned long length, int is_write) | ||
871 | { | ||
872 | unsigned long gpa; | ||
873 | unsigned long currlen; | ||
874 | int rc = 0; | ||
875 | |||
876 | ipte_lock(vcpu); | ||
877 | while (length > 0 && !rc) { | ||
878 | currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE)); | ||
879 | rc = guest_translate_address(vcpu, gva, ar, &gpa, is_write); | ||
880 | gva += currlen; | ||
881 | length -= currlen; | ||
882 | } | ||
883 | ipte_unlock(vcpu); | ||
884 | |||
885 | return rc; | ||
886 | } | ||
887 | |||
888 | /** | ||
889 | * kvm_s390_check_low_addr_prot_real - check for low-address protection | ||
890 | * @gra: Guest real address | ||
702 | * | 891 | * |
703 | * Checks whether an address is subject to low-address protection and set | 892 | * Checks whether an address is subject to low-address protection and set |
704 | * up vcpu->arch.pgm accordingly if necessary. | 893 | * up vcpu->arch.pgm accordingly if necessary. |
705 | * | 894 | * |
706 | * Return: 0 if no protection exception, or PGM_PROTECTION if protected. | 895 | * Return: 0 if no protection exception, or PGM_PROTECTION if protected. |
707 | */ | 896 | */ |
708 | int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga) | 897 | int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra) |
709 | { | 898 | { |
710 | struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; | 899 | struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; |
711 | psw_t *psw = &vcpu->arch.sie_block->gpsw; | 900 | psw_t *psw = &vcpu->arch.sie_block->gpsw; |
712 | struct trans_exc_code_bits *tec_bits; | 901 | struct trans_exc_code_bits *tec_bits; |
902 | union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]}; | ||
713 | 903 | ||
714 | if (!is_low_address(ga) || !low_address_protection_enabled(vcpu)) | 904 | if (!ctlreg0.lap || !is_low_address(gra)) |
715 | return 0; | 905 | return 0; |
716 | 906 | ||
717 | memset(pgm, 0, sizeof(*pgm)); | 907 | memset(pgm, 0, sizeof(*pgm)); |
718 | tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; | 908 | tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; |
719 | tec_bits->fsi = FSI_STORE; | 909 | tec_bits->fsi = FSI_STORE; |
720 | tec_bits->as = psw_bits(*psw).as; | 910 | tec_bits->as = psw_bits(*psw).as; |
721 | tec_bits->addr = ga >> PAGE_SHIFT; | 911 | tec_bits->addr = gra >> PAGE_SHIFT; |
722 | pgm->code = PGM_PROTECTION; | 912 | pgm->code = PGM_PROTECTION; |
723 | 913 | ||
724 | return pgm->code; | 914 | return pgm->code; |
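check_gva_range() walks the range one page at a time so each guest_translate_address() call stays within a single page. The chunking loop in isolation, with an example start address and length:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Each step covers at most the remainder of the current page. */
int main(void)
{
	unsigned long gva = 0x10f80, length = 0x2100;	/* example values */

	while (length > 0) {
		unsigned long currlen = length;
		unsigned long room = PAGE_SIZE - (gva % PAGE_SIZE);

		if (currlen > room)
			currlen = room;
		printf("translate 0x%lx..0x%lx\n", gva, gva + currlen - 1);
		gva += currlen;
		length -= currlen;
	}
	return 0;
}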
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index 0149cf15058a..ef03726cc661 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h | |||
@@ -156,9 +156,11 @@ int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data, | |||
156 | } | 156 | } |
157 | 157 | ||
158 | int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, | 158 | int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, |
159 | unsigned long *gpa, int write); | 159 | ar_t ar, unsigned long *gpa, int write); |
160 | int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, | ||
161 | unsigned long length, int is_write); | ||
160 | 162 | ||
161 | int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, | 163 | int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, |
162 | unsigned long len, int write); | 164 | unsigned long len, int write); |
163 | 165 | ||
164 | int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, | 166 | int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, |
@@ -168,6 +170,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, | |||
168 | * write_guest - copy data from kernel space to guest space | 170 | * write_guest - copy data from kernel space to guest space |
169 | * @vcpu: virtual cpu | 171 | * @vcpu: virtual cpu |
170 | * @ga: guest address | 172 | * @ga: guest address |
173 | * @ar: access register | ||
171 | * @data: source address in kernel space | 174 | * @data: source address in kernel space |
172 | * @len: number of bytes to copy | 175 | * @len: number of bytes to copy |
173 | * | 176 | * |
@@ -176,8 +179,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, | |||
176 | * If DAT is off data will be copied to guest real or absolute memory. | 179 | * If DAT is off data will be copied to guest real or absolute memory. |
177 | * If DAT is on data will be copied to the address space as specified by | 180 | * If DAT is on data will be copied to the address space as specified by |
178 | * the address space bits of the PSW: | 181 | * the address space bits of the PSW: |
179 | * Primary, secondory or home space (access register mode is currently not | 182 | * Primary, secondary, home space or access register mode. |
180 | * implemented). | ||
181 | * The addressing mode of the PSW is also inspected, so that address wrap | 183 | * The addressing mode of the PSW is also inspected, so that address wrap |
182 | * around is taken into account for 24-, 31- and 64-bit addressing mode, | 184 | * around is taken into account for 24-, 31- and 64-bit addressing mode, |
183 | * if the to be copied data crosses page boundaries in guest address space. | 185 | * if the to be copied data crosses page boundaries in guest address space. |
@@ -210,16 +212,17 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, | |||
210 | * if data has been changed in guest space in case of an exception. | 212 | * if data has been changed in guest space in case of an exception. |
211 | */ | 213 | */ |
212 | static inline __must_check | 214 | static inline __must_check |
213 | int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, | 215 | int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, |
214 | unsigned long len) | 216 | unsigned long len) |
215 | { | 217 | { |
216 | return access_guest(vcpu, ga, data, len, 1); | 218 | return access_guest(vcpu, ga, ar, data, len, 1); |
217 | } | 219 | } |
218 | 220 | ||
219 | /** | 221 | /** |
220 | * read_guest - copy data from guest space to kernel space | 222 | * read_guest - copy data from guest space to kernel space |
221 | * @vcpu: virtual cpu | 223 | * @vcpu: virtual cpu |
222 | * @ga: guest address | 224 | * @ga: guest address |
225 | * @ar: access register | ||
223 | * @data: destination address in kernel space | 226 | * @data: destination address in kernel space |
224 | * @len: number of bytes to copy | 227 | * @len: number of bytes to copy |
225 | * | 228 | * |
@@ -229,10 +232,10 @@ int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, | |||
229 | * data will be copied from guest space to kernel space. | 232 | * data will be copied from guest space to kernel space. |
230 | */ | 233 | */ |
231 | static inline __must_check | 234 | static inline __must_check |
232 | int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, | 235 | int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, |
233 | unsigned long len) | 236 | unsigned long len) |
234 | { | 237 | { |
235 | return access_guest(vcpu, ga, data, len, 0); | 238 | return access_guest(vcpu, ga, ar, data, len, 0); |
236 | } | 239 | } |
237 | 240 | ||
238 | /** | 241 | /** |
@@ -330,6 +333,6 @@ int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data, | |||
330 | void ipte_lock(struct kvm_vcpu *vcpu); | 333 | void ipte_lock(struct kvm_vcpu *vcpu); |
331 | void ipte_unlock(struct kvm_vcpu *vcpu); | 334 | void ipte_unlock(struct kvm_vcpu *vcpu); |
332 | int ipte_lock_held(struct kvm_vcpu *vcpu); | 335 | int ipte_lock_held(struct kvm_vcpu *vcpu); |
333 | int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga); | 336 | int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra); |
334 | 337 | ||
335 | #endif /* __KVM_S390_GACCESS_H */ | 338 | #endif /* __KVM_S390_GACCESS_H */ |
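The header keeps read_guest() and write_guest() as thin wrappers that only differ in the write flag they pass to access_guest(), now with the access register added to both signatures. A self-contained sketch of that wrapper shape against a fake guest buffer, purely to show the calling pattern (no real vcpu, ASCE handling or fault injection):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static unsigned char fake_guest[4096];	/* stand-in for guest memory */

static int access_demo(unsigned long ga, uint8_t ar, void *data,
		       unsigned long len, int write)
{
	(void)ar;	/* would steer access-register translation in the kernel */
	if (ga + len > sizeof(fake_guest))
		return -1;
	if (write)
		memcpy(fake_guest + ga, data, len);
	else
		memcpy(data, fake_guest + ga, len);
	return 0;
}

static int write_demo(unsigned long ga, uint8_t ar, void *data, unsigned long len)
{
	return access_demo(ga, ar, data, len, 1);
}

static int read_demo(unsigned long ga, uint8_t ar, void *data, unsigned long len)
{
	return access_demo(ga, ar, data, len, 0);
}

int main(void)
{
	uint32_t v = 0x12345678, back = 0;

	write_demo(0x100, 2, &v, sizeof(v));
	read_demo(0x100, 2, &back, sizeof(back));
	printf("0x%x\n", back);
	return 0;
}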
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c index 3e8d4092ce30..e97b3455d7e6 100644 --- a/arch/s390/kvm/guestdbg.c +++ b/arch/s390/kvm/guestdbg.c | |||
@@ -191,8 +191,8 @@ static int __import_wp_info(struct kvm_vcpu *vcpu, | |||
191 | if (!wp_info->old_data) | 191 | if (!wp_info->old_data) |
192 | return -ENOMEM; | 192 | return -ENOMEM; |
193 | /* try to backup the original value */ | 193 | /* try to backup the original value */ |
194 | ret = read_guest(vcpu, wp_info->phys_addr, wp_info->old_data, | 194 | ret = read_guest_abs(vcpu, wp_info->phys_addr, wp_info->old_data, |
195 | wp_info->len); | 195 | wp_info->len); |
196 | if (ret) { | 196 | if (ret) { |
197 | kfree(wp_info->old_data); | 197 | kfree(wp_info->old_data); |
198 | wp_info->old_data = NULL; | 198 | wp_info->old_data = NULL; |
@@ -362,8 +362,8 @@ static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu) | |||
362 | continue; | 362 | continue; |
363 | 363 | ||
364 | /* refetch the wp data and compare it to the old value */ | 364 | /* refetch the wp data and compare it to the old value */ |
365 | if (!read_guest(vcpu, wp_info->phys_addr, temp, | 365 | if (!read_guest_abs(vcpu, wp_info->phys_addr, temp, |
366 | wp_info->len)) { | 366 | wp_info->len)) { |
367 | if (memcmp(temp, wp_info->old_data, wp_info->len)) { | 367 | if (memcmp(temp, wp_info->old_data, wp_info->len)) { |
368 | kfree(temp); | 368 | kfree(temp); |
369 | return wp_info; | 369 | return wp_info; |
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index bebd2157edd0..9e3779e3e496 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c | |||
@@ -165,6 +165,7 @@ static void __extract_prog_irq(struct kvm_vcpu *vcpu, | |||
165 | pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn; | 165 | pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn; |
166 | pgm_info->mon_code = vcpu->arch.sie_block->tecmc; | 166 | pgm_info->mon_code = vcpu->arch.sie_block->tecmc; |
167 | break; | 167 | break; |
168 | case PGM_VECTOR_PROCESSING: | ||
168 | case PGM_DATA: | 169 | case PGM_DATA: |
169 | pgm_info->data_exc_code = vcpu->arch.sie_block->dxc; | 170 | pgm_info->data_exc_code = vcpu->arch.sie_block->dxc; |
170 | break; | 171 | break; |
@@ -319,7 +320,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu) | |||
319 | 320 | ||
320 | /* Make sure that the source is paged-in */ | 321 | /* Make sure that the source is paged-in */ |
321 | rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2], | 322 | rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2], |
322 | &srcaddr, 0); | 323 | reg2, &srcaddr, 0); |
323 | if (rc) | 324 | if (rc) |
324 | return kvm_s390_inject_prog_cond(vcpu, rc); | 325 | return kvm_s390_inject_prog_cond(vcpu, rc); |
325 | rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0); | 326 | rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0); |
@@ -328,7 +329,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu) | |||
328 | 329 | ||
329 | /* Make sure that the destination is paged-in */ | 330 | /* Make sure that the destination is paged-in */ |
330 | rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1], | 331 | rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1], |
331 | &dstaddr, 1); | 332 | reg1, &dstaddr, 1); |
332 | if (rc) | 333 | if (rc) |
333 | return kvm_s390_inject_prog_cond(vcpu, rc); | 334 | return kvm_s390_inject_prog_cond(vcpu, rc); |
334 | rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1); | 335 | rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1); |
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 073b5f387d1d..9de47265ef73 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * handling kvm guest interrupts | 2 | * handling kvm guest interrupts |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2008,2014 | 4 | * Copyright IBM Corp. 2008, 2015 |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License (version 2 only) | 7 | * it under the terms of the GNU General Public License (version 2 only) |
@@ -17,9 +17,12 @@ | |||
17 | #include <linux/signal.h> | 17 | #include <linux/signal.h> |
18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
19 | #include <linux/bitmap.h> | 19 | #include <linux/bitmap.h> |
20 | #include <linux/vmalloc.h> | ||
20 | #include <asm/asm-offsets.h> | 21 | #include <asm/asm-offsets.h> |
22 | #include <asm/dis.h> | ||
21 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
22 | #include <asm/sclp.h> | 24 | #include <asm/sclp.h> |
25 | #include <asm/isc.h> | ||
23 | #include "kvm-s390.h" | 26 | #include "kvm-s390.h" |
24 | #include "gaccess.h" | 27 | #include "gaccess.h" |
25 | #include "trace-s390.h" | 28 | #include "trace-s390.h" |
@@ -32,11 +35,6 @@ | |||
32 | #define PFAULT_DONE 0x0680 | 35 | #define PFAULT_DONE 0x0680 |
33 | #define VIRTIO_PARAM 0x0d00 | 36 | #define VIRTIO_PARAM 0x0d00 |
34 | 37 | ||
35 | static int is_ioint(u64 type) | ||
36 | { | ||
37 | return ((type & 0xfffe0000u) != 0xfffe0000u); | ||
38 | } | ||
39 | |||
40 | int psw_extint_disabled(struct kvm_vcpu *vcpu) | 38 | int psw_extint_disabled(struct kvm_vcpu *vcpu) |
41 | { | 39 | { |
42 | return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT); | 40 | return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT); |
@@ -72,70 +70,45 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) | |||
72 | return 1; | 70 | return 1; |
73 | } | 71 | } |
74 | 72 | ||
75 | static u64 int_word_to_isc_bits(u32 int_word) | 73 | static int ckc_irq_pending(struct kvm_vcpu *vcpu) |
74 | { | ||
75 | if (!(vcpu->arch.sie_block->ckc < | ||
76 | get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) | ||
77 | return 0; | ||
78 | return ckc_interrupts_enabled(vcpu); | ||
79 | } | ||
80 | |||
81 | static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu) | ||
82 | { | ||
83 | return !psw_extint_disabled(vcpu) && | ||
84 | (vcpu->arch.sie_block->gcr[0] & 0x400ul); | ||
85 | } | ||
86 | |||
87 | static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu) | ||
88 | { | ||
89 | return (vcpu->arch.sie_block->cputm >> 63) && | ||
90 | cpu_timer_interrupts_enabled(vcpu); | ||
91 | } | ||
92 | |||
93 | static inline int is_ioirq(unsigned long irq_type) | ||
76 | { | 94 | { |
77 | u8 isc = (int_word & 0x38000000) >> 27; | 95 | return ((irq_type >= IRQ_PEND_IO_ISC_0) && |
96 | (irq_type <= IRQ_PEND_IO_ISC_7)); | ||
97 | } | ||
78 | 98 | ||
99 | static uint64_t isc_to_isc_bits(int isc) | ||
100 | { | ||
79 | return (0x80 >> isc) << 24; | 101 | return (0x80 >> isc) << 24; |
80 | } | 102 | } |
81 | 103 | ||
82 | static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu, | 104 | static inline u8 int_word_to_isc(u32 int_word) |
83 | struct kvm_s390_interrupt_info *inti) | ||
84 | { | 105 | { |
85 | switch (inti->type) { | 106 | return (int_word & 0x38000000) >> 27; |
86 | case KVM_S390_INT_EXTERNAL_CALL: | 107 | } |
87 | if (psw_extint_disabled(vcpu)) | 108 | |
88 | return 0; | 109 | static inline unsigned long pending_floating_irqs(struct kvm_vcpu *vcpu) |
89 | if (vcpu->arch.sie_block->gcr[0] & 0x2000ul) | 110 | { |
90 | return 1; | 111 | return vcpu->kvm->arch.float_int.pending_irqs; |
91 | return 0; | ||
92 | case KVM_S390_INT_EMERGENCY: | ||
93 | if (psw_extint_disabled(vcpu)) | ||
94 | return 0; | ||
95 | if (vcpu->arch.sie_block->gcr[0] & 0x4000ul) | ||
96 | return 1; | ||
97 | return 0; | ||
98 | case KVM_S390_INT_CLOCK_COMP: | ||
99 | return ckc_interrupts_enabled(vcpu); | ||
100 | case KVM_S390_INT_CPU_TIMER: | ||
101 | if (psw_extint_disabled(vcpu)) | ||
102 | return 0; | ||
103 | if (vcpu->arch.sie_block->gcr[0] & 0x400ul) | ||
104 | return 1; | ||
105 | return 0; | ||
106 | case KVM_S390_INT_SERVICE: | ||
107 | case KVM_S390_INT_PFAULT_INIT: | ||
108 | case KVM_S390_INT_PFAULT_DONE: | ||
109 | case KVM_S390_INT_VIRTIO: | ||
110 | if (psw_extint_disabled(vcpu)) | ||
111 | return 0; | ||
112 | if (vcpu->arch.sie_block->gcr[0] & 0x200ul) | ||
113 | return 1; | ||
114 | return 0; | ||
115 | case KVM_S390_PROGRAM_INT: | ||
116 | case KVM_S390_SIGP_STOP: | ||
117 | case KVM_S390_SIGP_SET_PREFIX: | ||
118 | case KVM_S390_RESTART: | ||
119 | return 1; | ||
120 | case KVM_S390_MCHK: | ||
121 | if (psw_mchk_disabled(vcpu)) | ||
122 | return 0; | ||
123 | if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14) | ||
124 | return 1; | ||
125 | return 0; | ||
126 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: | ||
127 | if (psw_ioint_disabled(vcpu)) | ||
128 | return 0; | ||
129 | if (vcpu->arch.sie_block->gcr[6] & | ||
130 | int_word_to_isc_bits(inti->io.io_int_word)) | ||
131 | return 1; | ||
132 | return 0; | ||
133 | default: | ||
134 | printk(KERN_WARNING "illegal interrupt type %llx\n", | ||
135 | inti->type); | ||
136 | BUG(); | ||
137 | } | ||
138 | return 0; | ||
139 | } | 112 | } |
140 | 113 | ||
141 | static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu) | 114 | static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu) |
@@ -143,12 +116,31 @@ static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu) | |||
143 | return vcpu->arch.local_int.pending_irqs; | 116 | return vcpu->arch.local_int.pending_irqs; |
144 | } | 117 | } |
145 | 118 | ||
146 | static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu) | 119 | static unsigned long disable_iscs(struct kvm_vcpu *vcpu, |
120 | unsigned long active_mask) | ||
121 | { | ||
122 | int i; | ||
123 | |||
124 | for (i = 0; i <= MAX_ISC; i++) | ||
125 | if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i))) | ||
126 | active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i)); | ||
127 | |||
128 | return active_mask; | ||
129 | } | ||
130 | |||
131 | static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu) | ||
147 | { | 132 | { |
148 | unsigned long active_mask = pending_local_irqs(vcpu); | 133 | unsigned long active_mask; |
134 | |||
135 | active_mask = pending_local_irqs(vcpu); | ||
136 | active_mask |= pending_floating_irqs(vcpu); | ||
149 | 137 | ||
150 | if (psw_extint_disabled(vcpu)) | 138 | if (psw_extint_disabled(vcpu)) |
151 | active_mask &= ~IRQ_PEND_EXT_MASK; | 139 | active_mask &= ~IRQ_PEND_EXT_MASK; |
140 | if (psw_ioint_disabled(vcpu)) | ||
141 | active_mask &= ~IRQ_PEND_IO_MASK; | ||
142 | else | ||
143 | active_mask = disable_iscs(vcpu, active_mask); | ||
152 | if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul)) | 144 | if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul)) |
153 | __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask); | 145 | __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask); |
154 | if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul)) | 146 | if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul)) |
@@ -157,8 +149,13 @@ static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu) | |||
157 | __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask); | 149 | __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask); |
158 | if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul)) | 150 | if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul)) |
159 | __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask); | 151 | __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask); |
152 | if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) | ||
153 | __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask); | ||
160 | if (psw_mchk_disabled(vcpu)) | 154 | if (psw_mchk_disabled(vcpu)) |
161 | active_mask &= ~IRQ_PEND_MCHK_MASK; | 155 | active_mask &= ~IRQ_PEND_MCHK_MASK; |
156 | if (!(vcpu->arch.sie_block->gcr[14] & | ||
157 | vcpu->kvm->arch.float_int.mchk.cr14)) | ||
158 | __clear_bit(IRQ_PEND_MCHK_REP, &active_mask); | ||
162 | 159 | ||
163 | /* | 160 | /* |
164 | * STOP irqs will never be actively delivered. They are triggered via | 161 | * STOP irqs will never be actively delivered. They are triggered via |
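deliverable_irqs() now also folds in floating interrupts and strips the I/O bits whose ISC is disabled in CR6 via disable_iscs(). The same filtering idea on a toy pending mask; the bit position and register value below are invented for illustration and do not reproduce the kernel's enum:

#include <stdio.h>

#define MAX_ISC            7
#define IRQ_PEND_IO_ISC_0  24	/* hypothetical bit position, illustration only */

static unsigned long isc_to_isc_bits(int isc)
{
	return (0x80UL >> isc) << 24;
}

int main(void)
{
	unsigned long pending = (1UL << (IRQ_PEND_IO_ISC_0 + 1)) |
				(1UL << (IRQ_PEND_IO_ISC_0 + 5));
	unsigned long cr6 = isc_to_isc_bits(5);	/* only ISC 5 enabled */
	unsigned long active = pending;

	/* Clear pending I/O bits whose ISC is not enabled in CR6. */
	for (int i = 0; i <= MAX_ISC; i++)
		if (!(cr6 & isc_to_isc_bits(i)))
			active &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i));

	printf("pending=0x%lx active=0x%lx\n", pending, active);
	return 0;
}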
@@ -200,6 +197,16 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) | |||
200 | atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags); | 197 | atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags); |
201 | } | 198 | } |
202 | 199 | ||
200 | static void set_intercept_indicators_io(struct kvm_vcpu *vcpu) | ||
201 | { | ||
202 | if (!(pending_floating_irqs(vcpu) & IRQ_PEND_IO_MASK)) | ||
203 | return; | ||
204 | else if (psw_ioint_disabled(vcpu)) | ||
205 | __set_cpuflag(vcpu, CPUSTAT_IO_INT); | ||
206 | else | ||
207 | vcpu->arch.sie_block->lctl |= LCTL_CR6; | ||
208 | } | ||
209 | |||
203 | static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu) | 210 | static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu) |
204 | { | 211 | { |
205 | if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK)) | 212 | if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK)) |
@@ -226,47 +233,17 @@ static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu) | |||
226 | __set_cpuflag(vcpu, CPUSTAT_STOP_INT); | 233 | __set_cpuflag(vcpu, CPUSTAT_STOP_INT); |
227 | } | 234 | } |
228 | 235 | ||
229 | /* Set interception request for non-deliverable local interrupts */ | 236 | /* Set interception request for non-deliverable interrupts */ |
230 | static void set_intercept_indicators_local(struct kvm_vcpu *vcpu) | 237 | static void set_intercept_indicators(struct kvm_vcpu *vcpu) |
231 | { | 238 | { |
239 | set_intercept_indicators_io(vcpu); | ||
232 | set_intercept_indicators_ext(vcpu); | 240 | set_intercept_indicators_ext(vcpu); |
233 | set_intercept_indicators_mchk(vcpu); | 241 | set_intercept_indicators_mchk(vcpu); |
234 | set_intercept_indicators_stop(vcpu); | 242 | set_intercept_indicators_stop(vcpu); |
235 | } | 243 | } |
236 | 244 | ||
237 | static void __set_intercept_indicator(struct kvm_vcpu *vcpu, | ||
238 | struct kvm_s390_interrupt_info *inti) | ||
239 | { | ||
240 | switch (inti->type) { | ||
241 | case KVM_S390_INT_SERVICE: | ||
242 | case KVM_S390_INT_PFAULT_DONE: | ||
243 | case KVM_S390_INT_VIRTIO: | ||
244 | if (psw_extint_disabled(vcpu)) | ||
245 | __set_cpuflag(vcpu, CPUSTAT_EXT_INT); | ||
246 | else | ||
247 | vcpu->arch.sie_block->lctl |= LCTL_CR0; | ||
248 | break; | ||
249 | case KVM_S390_MCHK: | ||
250 | if (psw_mchk_disabled(vcpu)) | ||
251 | vcpu->arch.sie_block->ictl |= ICTL_LPSW; | ||
252 | else | ||
253 | vcpu->arch.sie_block->lctl |= LCTL_CR14; | ||
254 | break; | ||
255 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: | ||
256 | if (psw_ioint_disabled(vcpu)) | ||
257 | __set_cpuflag(vcpu, CPUSTAT_IO_INT); | ||
258 | else | ||
259 | vcpu->arch.sie_block->lctl |= LCTL_CR6; | ||
260 | break; | ||
261 | default: | ||
262 | BUG(); | ||
263 | } | ||
264 | } | ||
265 | |||
266 | static u16 get_ilc(struct kvm_vcpu *vcpu) | 245 | static u16 get_ilc(struct kvm_vcpu *vcpu) |
267 | { | 246 | { |
268 | const unsigned short table[] = { 2, 4, 4, 6 }; | ||
269 | |||
270 | switch (vcpu->arch.sie_block->icptcode) { | 247 | switch (vcpu->arch.sie_block->icptcode) { |
271 | case ICPT_INST: | 248 | case ICPT_INST: |
272 | case ICPT_INSTPROGI: | 249 | case ICPT_INSTPROGI: |
@@ -274,7 +251,7 @@ static u16 get_ilc(struct kvm_vcpu *vcpu) | |||
274 | case ICPT_PARTEXEC: | 251 | case ICPT_PARTEXEC: |
275 | case ICPT_IOINST: | 252 | case ICPT_IOINST: |
276 | /* last instruction only stored for these icptcodes */ | 253 | /* last instruction only stored for these icptcodes */ |
277 | return table[vcpu->arch.sie_block->ipa >> 14]; | 254 | return insn_length(vcpu->arch.sie_block->ipa >> 8); |
278 | case ICPT_PROGI: | 255 | case ICPT_PROGI: |
279 | return vcpu->arch.sie_block->pgmilc; | 256 | return vcpu->arch.sie_block->pgmilc; |
280 | default: | 257 | default: |
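get_ilc() now derives the instruction length from the opcode via insn_length() instead of the local { 2, 4, 4, 6 } table; both encode the architected rule that the first two opcode bits determine the length. That mapping, spelled out with a few sample opcodes:

#include <stdio.h>
#include <stdint.h>

/* First two opcode bits: 00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes. */
static int demo_insn_length(uint8_t opcode)
{
	switch (opcode >> 6) {
	case 0:  return 2;
	case 1:
	case 2:  return 4;
	default: return 6;
	}
}

int main(void)
{
	printf("0x04 -> %d\n", demo_insn_length(0x04));	/* SPM, 2 bytes */
	printf("0x58 -> %d\n", demo_insn_length(0x58));	/* L, 4 bytes */
	printf("0xe3 -> %d\n", demo_insn_length(0xe3));	/* RXY family, 6 bytes */
	return 0;
}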
@@ -350,38 +327,72 @@ static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu) | |||
350 | 327 | ||
351 | static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu) | 328 | static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu) |
352 | { | 329 | { |
330 | struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; | ||
353 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 331 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
354 | struct kvm_s390_mchk_info mchk; | 332 | struct kvm_s390_mchk_info mchk = {}; |
355 | int rc; | 333 | unsigned long adtl_status_addr; |
334 | int deliver = 0; | ||
335 | int rc = 0; | ||
356 | 336 | ||
337 | spin_lock(&fi->lock); | ||
357 | spin_lock(&li->lock); | 338 | spin_lock(&li->lock); |
358 | mchk = li->irq.mchk; | 339 | if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) || |
340 | test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) { | ||
341 | /* | ||
342 | * If there was an exigent machine check pending, then any | ||
343 | * repressible machine checks that might have been pending | ||
344 | * are indicated along with it, so always clear bits for | ||
345 | * repressible and exigent interrupts | ||
346 | */ | ||
347 | mchk = li->irq.mchk; | ||
348 | clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs); | ||
349 | clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs); | ||
350 | memset(&li->irq.mchk, 0, sizeof(mchk)); | ||
351 | deliver = 1; | ||
352 | } | ||
359 | /* | 353 | /* |
360 | * If there was an exigent machine check pending, then any repressible | 354 | * We indicate floating repressible conditions along with |
361 | * machine checks that might have been pending are indicated along | 355 | * other pending conditions. Channel Report Pending and Channel |
362 | * with it, so always clear both bits | 356 | * Subsystem damage are the only two and and are indicated by |
357 | * bits in mcic and masked in cr14. | ||
363 | */ | 358 | */ |
364 | clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs); | 359 | if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) { |
365 | clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs); | 360 | mchk.mcic |= fi->mchk.mcic; |
366 | memset(&li->irq.mchk, 0, sizeof(mchk)); | 361 | mchk.cr14 |= fi->mchk.cr14; |
362 | memset(&fi->mchk, 0, sizeof(mchk)); | ||
363 | deliver = 1; | ||
364 | } | ||
367 | spin_unlock(&li->lock); | 365 | spin_unlock(&li->lock); |
366 | spin_unlock(&fi->lock); | ||
368 | 367 | ||
369 | VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", | 368 | if (deliver) { |
370 | mchk.mcic); | 369 | VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", |
371 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK, | 370 | mchk.mcic); |
372 | mchk.cr14, mchk.mcic); | 371 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, |
373 | 372 | KVM_S390_MCHK, | |
374 | rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED); | 373 | mchk.cr14, mchk.mcic); |
375 | rc |= put_guest_lc(vcpu, mchk.mcic, | 374 | |
376 | (u64 __user *) __LC_MCCK_CODE); | 375 | rc = kvm_s390_vcpu_store_status(vcpu, |
377 | rc |= put_guest_lc(vcpu, mchk.failing_storage_address, | 376 | KVM_S390_STORE_STATUS_PREFIXED); |
378 | (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR); | 377 | rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR, |
379 | rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, | 378 | &adtl_status_addr, |
380 | &mchk.fixed_logout, sizeof(mchk.fixed_logout)); | 379 | sizeof(unsigned long)); |
381 | rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, | 380 | rc |= kvm_s390_vcpu_store_adtl_status(vcpu, |
382 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 381 | adtl_status_addr); |
383 | rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, | 382 | rc |= put_guest_lc(vcpu, mchk.mcic, |
384 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 383 | (u64 __user *) __LC_MCCK_CODE); |
384 | rc |= put_guest_lc(vcpu, mchk.failing_storage_address, | ||
385 | (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR); | ||
386 | rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, | ||
387 | &mchk.fixed_logout, | ||
388 | sizeof(mchk.fixed_logout)); | ||
389 | rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, | ||
390 | &vcpu->arch.sie_block->gpsw, | ||
391 | sizeof(psw_t)); | ||
392 | rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, | ||
393 | &vcpu->arch.sie_block->gpsw, | ||
394 | sizeof(psw_t)); | ||
395 | } | ||
385 | return rc ? -EFAULT : 0; | 396 | return rc ? -EFAULT : 0; |
386 | } | 397 | } |
387 | 398 | ||
@@ -484,7 +495,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) | |||
484 | { | 495 | { |
485 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 496 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
486 | struct kvm_s390_pgm_info pgm_info; | 497 | struct kvm_s390_pgm_info pgm_info; |
487 | int rc = 0; | 498 | int rc = 0, nullifying = false; |
488 | u16 ilc = get_ilc(vcpu); | 499 | u16 ilc = get_ilc(vcpu); |
489 | 500 | ||
490 | spin_lock(&li->lock); | 501 | spin_lock(&li->lock); |
@@ -509,6 +520,8 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) | |||
509 | case PGM_LX_TRANSLATION: | 520 | case PGM_LX_TRANSLATION: |
510 | case PGM_PRIMARY_AUTHORITY: | 521 | case PGM_PRIMARY_AUTHORITY: |
511 | case PGM_SECONDARY_AUTHORITY: | 522 | case PGM_SECONDARY_AUTHORITY: |
523 | nullifying = true; | ||
524 | /* fall through */ | ||
512 | case PGM_SPACE_SWITCH: | 525 | case PGM_SPACE_SWITCH: |
513 | rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, | 526 | rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, |
514 | (u64 *)__LC_TRANS_EXC_CODE); | 527 | (u64 *)__LC_TRANS_EXC_CODE); |
@@ -521,6 +534,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) | |||
521 | case PGM_EXTENDED_AUTHORITY: | 534 | case PGM_EXTENDED_AUTHORITY: |
522 | rc = put_guest_lc(vcpu, pgm_info.exc_access_id, | 535 | rc = put_guest_lc(vcpu, pgm_info.exc_access_id, |
523 | (u8 *)__LC_EXC_ACCESS_ID); | 536 | (u8 *)__LC_EXC_ACCESS_ID); |
537 | nullifying = true; | ||
524 | break; | 538 | break; |
525 | case PGM_ASCE_TYPE: | 539 | case PGM_ASCE_TYPE: |
526 | case PGM_PAGE_TRANSLATION: | 540 | case PGM_PAGE_TRANSLATION: |
@@ -534,6 +548,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) | |||
534 | (u8 *)__LC_EXC_ACCESS_ID); | 548 | (u8 *)__LC_EXC_ACCESS_ID); |
535 | rc |= put_guest_lc(vcpu, pgm_info.op_access_id, | 549 | rc |= put_guest_lc(vcpu, pgm_info.op_access_id, |
536 | (u8 *)__LC_OP_ACCESS_ID); | 550 | (u8 *)__LC_OP_ACCESS_ID); |
551 | nullifying = true; | ||
537 | break; | 552 | break; |
538 | case PGM_MONITOR: | 553 | case PGM_MONITOR: |
539 | rc = put_guest_lc(vcpu, pgm_info.mon_class_nr, | 554 | rc = put_guest_lc(vcpu, pgm_info.mon_class_nr, |
@@ -541,6 +556,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) | |||
541 | rc |= put_guest_lc(vcpu, pgm_info.mon_code, | 556 | rc |= put_guest_lc(vcpu, pgm_info.mon_code, |
542 | (u64 *)__LC_MON_CODE); | 557 | (u64 *)__LC_MON_CODE); |
543 | break; | 558 | break; |
559 | case PGM_VECTOR_PROCESSING: | ||
544 | case PGM_DATA: | 560 | case PGM_DATA: |
545 | rc = put_guest_lc(vcpu, pgm_info.data_exc_code, | 561 | rc = put_guest_lc(vcpu, pgm_info.data_exc_code, |
546 | (u32 *)__LC_DATA_EXC_CODE); | 562 | (u32 *)__LC_DATA_EXC_CODE); |
@@ -551,6 +567,15 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) | |||
551 | rc |= put_guest_lc(vcpu, pgm_info.exc_access_id, | 567 | rc |= put_guest_lc(vcpu, pgm_info.exc_access_id, |
552 | (u8 *)__LC_EXC_ACCESS_ID); | 568 | (u8 *)__LC_EXC_ACCESS_ID); |
553 | break; | 569 | break; |
570 | case PGM_STACK_FULL: | ||
571 | case PGM_STACK_EMPTY: | ||
572 | case PGM_STACK_SPECIFICATION: | ||
573 | case PGM_STACK_TYPE: | ||
574 | case PGM_STACK_OPERATION: | ||
575 | case PGM_TRACE_TABEL: | ||
576 | case PGM_CRYPTO_OPERATION: | ||
577 | nullifying = true; | ||
578 | break; | ||
554 | } | 579 | } |
555 | 580 | ||
556 | if (pgm_info.code & PGM_PER) { | 581 | if (pgm_info.code & PGM_PER) { |
@@ -564,7 +589,12 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) | |||
564 | (u8 *) __LC_PER_ACCESS_ID); | 589 | (u8 *) __LC_PER_ACCESS_ID); |
565 | } | 590 | } |
566 | 591 | ||
592 | if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST) | ||
593 | kvm_s390_rewind_psw(vcpu, ilc); | ||
594 | |||
567 | rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC); | 595 | rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC); |
596 | rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea, | ||
597 | (u64 *) __LC_LAST_BREAK); | ||
568 | rc |= put_guest_lc(vcpu, pgm_info.code, | 598 | rc |= put_guest_lc(vcpu, pgm_info.code, |
569 | (u16 *)__LC_PGM_INT_CODE); | 599 | (u16 *)__LC_PGM_INT_CODE); |
570 | rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW, | 600 | rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW, |
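For nullifying program exceptions taken on an instruction intercept, the PSW is now rewound by the instruction length so the guest re-executes the faulting instruction. A deliberately simplified sketch of that rewind, assuming 64-bit addressing and ignoring the 24/31-bit wrap the real kvm_s390_rewind_psw() has to respect:

#include <stdio.h>
#include <stdint.h>

/* Step the instruction address back by the ILC (simplified, 64-bit only). */
static uint64_t rewind_addr(uint64_t psw_addr, uint16_t ilc)
{
	return psw_addr - ilc;
}

int main(void)
{
	uint64_t next = 0x20004;	/* address after a 4-byte instruction (example) */

	printf("re-execute at 0x%llx\n",
	       (unsigned long long)rewind_addr(next, 4));
	return 0;
}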
@@ -574,16 +604,27 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) | |||
574 | return rc ? -EFAULT : 0; | 604 | return rc ? -EFAULT : 0; |
575 | } | 605 | } |
576 | 606 | ||
577 | static int __must_check __deliver_service(struct kvm_vcpu *vcpu, | 607 | static int __must_check __deliver_service(struct kvm_vcpu *vcpu) |
578 | struct kvm_s390_interrupt_info *inti) | ||
579 | { | 608 | { |
580 | int rc; | 609 | struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; |
610 | struct kvm_s390_ext_info ext; | ||
611 | int rc = 0; | ||
612 | |||
613 | spin_lock(&fi->lock); | ||
614 | if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) { | ||
615 | spin_unlock(&fi->lock); | ||
616 | return 0; | ||
617 | } | ||
618 | ext = fi->srv_signal; | ||
619 | memset(&fi->srv_signal, 0, sizeof(ext)); | ||
620 | clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs); | ||
621 | spin_unlock(&fi->lock); | ||
581 | 622 | ||
582 | VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", | 623 | VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", |
583 | inti->ext.ext_params); | 624 | ext.ext_params); |
584 | vcpu->stat.deliver_service_signal++; | 625 | vcpu->stat.deliver_service_signal++; |
585 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | 626 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE, |
586 | inti->ext.ext_params, 0); | 627 | ext.ext_params, 0); |
587 | 628 | ||
588 | rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE); | 629 | rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE); |
589 | rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); | 630 | rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); |
@@ -591,106 +632,146 @@ static int __must_check __deliver_service(struct kvm_vcpu *vcpu, | |||
591 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 632 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
592 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | 633 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
593 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 634 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
594 | rc |= put_guest_lc(vcpu, inti->ext.ext_params, | 635 | rc |= put_guest_lc(vcpu, ext.ext_params, |
595 | (u32 *)__LC_EXT_PARAMS); | 636 | (u32 *)__LC_EXT_PARAMS); |
637 | |||
596 | return rc ? -EFAULT : 0; | 638 | return rc ? -EFAULT : 0; |
597 | } | 639 | } |
598 | 640 | ||
599 | static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu, | 641 | static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu) |
600 | struct kvm_s390_interrupt_info *inti) | ||
601 | { | 642 | { |
602 | int rc; | 643 | struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; |
644 | struct kvm_s390_interrupt_info *inti; | ||
645 | int rc = 0; | ||
603 | 646 | ||
604 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, | 647 | spin_lock(&fi->lock); |
605 | KVM_S390_INT_PFAULT_DONE, 0, | 648 | inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT], |
606 | inti->ext.ext_params2); | 649 | struct kvm_s390_interrupt_info, |
650 | list); | ||
651 | if (inti) { | ||
652 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, | ||
653 | KVM_S390_INT_PFAULT_DONE, 0, | ||
654 | inti->ext.ext_params2); | ||
655 | list_del(&inti->list); | ||
656 | fi->counters[FIRQ_CNTR_PFAULT] -= 1; | ||
657 | } | ||
658 | if (list_empty(&fi->lists[FIRQ_LIST_PFAULT])) | ||
659 | clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs); | ||
660 | spin_unlock(&fi->lock); | ||
607 | 661 | ||
608 | rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE); | 662 | if (inti) { |
609 | rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR); | 663 | rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, |
610 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, | 664 | (u16 *)__LC_EXT_INT_CODE); |
611 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 665 | rc |= put_guest_lc(vcpu, PFAULT_DONE, |
612 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | 666 | (u16 *)__LC_EXT_CPU_ADDR); |
613 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 667 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
614 | rc |= put_guest_lc(vcpu, inti->ext.ext_params2, | 668 | &vcpu->arch.sie_block->gpsw, |
615 | (u64 *)__LC_EXT_PARAMS2); | 669 | sizeof(psw_t)); |
670 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | ||
671 | &vcpu->arch.sie_block->gpsw, | ||
672 | sizeof(psw_t)); | ||
673 | rc |= put_guest_lc(vcpu, inti->ext.ext_params2, | ||
674 | (u64 *)__LC_EXT_PARAMS2); | ||
675 | kfree(inti); | ||
676 | } | ||
616 | return rc ? -EFAULT : 0; | 677 | return rc ? -EFAULT : 0; |
617 | } | 678 | } |
618 | 679 | ||
619 | static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu, | 680 | static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu) |
620 | struct kvm_s390_interrupt_info *inti) | ||
621 | { | 681 | { |
622 | int rc; | 682 | struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; |
683 | struct kvm_s390_interrupt_info *inti; | ||
684 | int rc = 0; | ||
623 | 685 | ||
624 | VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx", | 686 | spin_lock(&fi->lock); |
625 | inti->ext.ext_params, inti->ext.ext_params2); | 687 | inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO], |
626 | vcpu->stat.deliver_virtio_interrupt++; | 688 | struct kvm_s390_interrupt_info, |
627 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | 689 | list); |
628 | inti->ext.ext_params, | 690 | if (inti) { |
629 | inti->ext.ext_params2); | 691 | VCPU_EVENT(vcpu, 4, |
692 | "interrupt: virtio parm:%x,parm64:%llx", | ||
693 | inti->ext.ext_params, inti->ext.ext_params2); | ||
694 | vcpu->stat.deliver_virtio_interrupt++; | ||
695 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, | ||
696 | inti->type, | ||
697 | inti->ext.ext_params, | ||
698 | inti->ext.ext_params2); | ||
699 | list_del(&inti->list); | ||
700 | fi->counters[FIRQ_CNTR_VIRTIO] -= 1; | ||
701 | } | ||
702 | if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO])) | ||
703 | clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs); | ||
704 | spin_unlock(&fi->lock); | ||
630 | 705 | ||
631 | rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE); | 706 | if (inti) { |
632 | rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR); | 707 | rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, |
633 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, | 708 | (u16 *)__LC_EXT_INT_CODE); |
634 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 709 | rc |= put_guest_lc(vcpu, VIRTIO_PARAM, |
635 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | 710 | (u16 *)__LC_EXT_CPU_ADDR); |
636 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 711 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
637 | rc |= put_guest_lc(vcpu, inti->ext.ext_params, | 712 | &vcpu->arch.sie_block->gpsw, |
638 | (u32 *)__LC_EXT_PARAMS); | 713 | sizeof(psw_t)); |
639 | rc |= put_guest_lc(vcpu, inti->ext.ext_params2, | 714 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
640 | (u64 *)__LC_EXT_PARAMS2); | 715 | &vcpu->arch.sie_block->gpsw, |
716 | sizeof(psw_t)); | ||
717 | rc |= put_guest_lc(vcpu, inti->ext.ext_params, | ||
718 | (u32 *)__LC_EXT_PARAMS); | ||
719 | rc |= put_guest_lc(vcpu, inti->ext.ext_params2, | ||
720 | (u64 *)__LC_EXT_PARAMS2); | ||
721 | kfree(inti); | ||
722 | } | ||
641 | return rc ? -EFAULT : 0; | 723 | return rc ? -EFAULT : 0; |
642 | } | 724 | } |
643 | 725 | ||
644 | static int __must_check __deliver_io(struct kvm_vcpu *vcpu, | 726 | static int __must_check __deliver_io(struct kvm_vcpu *vcpu, |
645 | struct kvm_s390_interrupt_info *inti) | 727 | unsigned long irq_type) |
646 | { | 728 | { |
647 | int rc; | 729 | struct list_head *isc_list; |
730 | struct kvm_s390_float_interrupt *fi; | ||
731 | struct kvm_s390_interrupt_info *inti = NULL; | ||
732 | int rc = 0; | ||
648 | 733 | ||
649 | VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type); | 734 | fi = &vcpu->kvm->arch.float_int; |
650 | vcpu->stat.deliver_io_int++; | ||
651 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | ||
652 | ((__u32)inti->io.subchannel_id << 16) | | ||
653 | inti->io.subchannel_nr, | ||
654 | ((__u64)inti->io.io_int_parm << 32) | | ||
655 | inti->io.io_int_word); | ||
656 | |||
657 | rc = put_guest_lc(vcpu, inti->io.subchannel_id, | ||
658 | (u16 *)__LC_SUBCHANNEL_ID); | ||
659 | rc |= put_guest_lc(vcpu, inti->io.subchannel_nr, | ||
660 | (u16 *)__LC_SUBCHANNEL_NR); | ||
661 | rc |= put_guest_lc(vcpu, inti->io.io_int_parm, | ||
662 | (u32 *)__LC_IO_INT_PARM); | ||
663 | rc |= put_guest_lc(vcpu, inti->io.io_int_word, | ||
664 | (u32 *)__LC_IO_INT_WORD); | ||
665 | rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW, | ||
666 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
667 | rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW, | ||
668 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
669 | return rc ? -EFAULT : 0; | ||
670 | } | ||
671 | 735 | ||
672 | static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu, | 736 | spin_lock(&fi->lock); |
673 | struct kvm_s390_interrupt_info *inti) | 737 | isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0]; |
674 | { | 738 | inti = list_first_entry_or_null(isc_list, |
675 | struct kvm_s390_mchk_info *mchk = &inti->mchk; | 739 | struct kvm_s390_interrupt_info, |
676 | int rc; | 740 | list); |
741 | if (inti) { | ||
742 | VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type); | ||
743 | vcpu->stat.deliver_io_int++; | ||
744 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, | ||
745 | inti->type, | ||
746 | ((__u32)inti->io.subchannel_id << 16) | | ||
747 | inti->io.subchannel_nr, | ||
748 | ((__u64)inti->io.io_int_parm << 32) | | ||
749 | inti->io.io_int_word); | ||
750 | list_del(&inti->list); | ||
751 | fi->counters[FIRQ_CNTR_IO] -= 1; | ||
752 | } | ||
753 | if (list_empty(isc_list)) | ||
754 | clear_bit(irq_type, &fi->pending_irqs); | ||
755 | spin_unlock(&fi->lock); | ||
756 | |||
757 | if (inti) { | ||
758 | rc = put_guest_lc(vcpu, inti->io.subchannel_id, | ||
759 | (u16 *)__LC_SUBCHANNEL_ID); | ||
760 | rc |= put_guest_lc(vcpu, inti->io.subchannel_nr, | ||
761 | (u16 *)__LC_SUBCHANNEL_NR); | ||
762 | rc |= put_guest_lc(vcpu, inti->io.io_int_parm, | ||
763 | (u32 *)__LC_IO_INT_PARM); | ||
764 | rc |= put_guest_lc(vcpu, inti->io.io_int_word, | ||
765 | (u32 *)__LC_IO_INT_WORD); | ||
766 | rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW, | ||
767 | &vcpu->arch.sie_block->gpsw, | ||
768 | sizeof(psw_t)); | ||
769 | rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW, | ||
770 | &vcpu->arch.sie_block->gpsw, | ||
771 | sizeof(psw_t)); | ||
772 | kfree(inti); | ||
773 | } | ||
677 | 774 | ||
678 | VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", | ||
679 | mchk->mcic); | ||
680 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK, | ||
681 | mchk->cr14, mchk->mcic); | ||
682 | |||
683 | rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED); | ||
684 | rc |= put_guest_lc(vcpu, mchk->mcic, | ||
685 | (u64 __user *) __LC_MCCK_CODE); | ||
686 | rc |= put_guest_lc(vcpu, mchk->failing_storage_address, | ||
687 | (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR); | ||
688 | rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, | ||
689 | &mchk->fixed_logout, sizeof(mchk->fixed_logout)); | ||
690 | rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, | ||
691 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
692 | rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, | ||
693 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
694 | return rc ? -EFAULT : 0; | 775 | return rc ? -EFAULT : 0; |
695 | } | 776 | } |
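A note on the index arithmetic in __deliver_io() above: the delivery side picks the list as fi->lists[irq_type - IRQ_PEND_IO_ISC_0], while the injection side (__inject_io(), further down in this patch) enqueues on fi->lists[FIRQ_LIST_IO_ISC_0 + isc] and sets bit IRQ_PEND_IO_ISC_0 + isc. The two meet only if the eight ISC entries are contiguous and in the same order in both enumerations, which implicitly assumes FIRQ_LIST_IO_ISC_0 is the first slot of the list array. A toy check of that assumption (the enum values below are placeholders, not the real values from kvm_host.h):

#include <assert.h>

enum { IRQ_PEND_IO_ISC_0 = 16 };	/* placeholder base of the pending bits */
enum { FIRQ_LIST_IO_ISC_0 = 0 };	/* placeholder base of the list array  */

int main(void)
{
	for (int isc = 0; isc <= 7; isc++) {
		unsigned long irq_type = IRQ_PEND_IO_ISC_0 + isc;  /* set at injection */
		int list = (int)(irq_type - IRQ_PEND_IO_ISC_0);     /* used at delivery */

		assert(list == FIRQ_LIST_IO_ISC_0 + isc);
	}
	return 0;
}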
696 | 777 | ||
@@ -698,6 +779,7 @@ typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu); | |||
698 | 779 | ||
699 | static const deliver_irq_t deliver_irq_funcs[] = { | 780 | static const deliver_irq_t deliver_irq_funcs[] = { |
700 | [IRQ_PEND_MCHK_EX] = __deliver_machine_check, | 781 | [IRQ_PEND_MCHK_EX] = __deliver_machine_check, |
782 | [IRQ_PEND_MCHK_REP] = __deliver_machine_check, | ||
701 | [IRQ_PEND_PROG] = __deliver_prog, | 783 | [IRQ_PEND_PROG] = __deliver_prog, |
702 | [IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal, | 784 | [IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal, |
703 | [IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call, | 785 | [IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call, |
@@ -706,36 +788,11 @@ static const deliver_irq_t deliver_irq_funcs[] = { | |||
706 | [IRQ_PEND_RESTART] = __deliver_restart, | 788 | [IRQ_PEND_RESTART] = __deliver_restart, |
707 | [IRQ_PEND_SET_PREFIX] = __deliver_set_prefix, | 789 | [IRQ_PEND_SET_PREFIX] = __deliver_set_prefix, |
708 | [IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init, | 790 | [IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init, |
791 | [IRQ_PEND_EXT_SERVICE] = __deliver_service, | ||
792 | [IRQ_PEND_PFAULT_DONE] = __deliver_pfault_done, | ||
793 | [IRQ_PEND_VIRTIO] = __deliver_virtio, | ||
709 | }; | 794 | }; |
710 | 795 | ||
711 | static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu, | ||
712 | struct kvm_s390_interrupt_info *inti) | ||
713 | { | ||
714 | int rc; | ||
715 | |||
716 | switch (inti->type) { | ||
717 | case KVM_S390_INT_SERVICE: | ||
718 | rc = __deliver_service(vcpu, inti); | ||
719 | break; | ||
720 | case KVM_S390_INT_PFAULT_DONE: | ||
721 | rc = __deliver_pfault_done(vcpu, inti); | ||
722 | break; | ||
723 | case KVM_S390_INT_VIRTIO: | ||
724 | rc = __deliver_virtio(vcpu, inti); | ||
725 | break; | ||
726 | case KVM_S390_MCHK: | ||
727 | rc = __deliver_mchk_floating(vcpu, inti); | ||
728 | break; | ||
729 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: | ||
730 | rc = __deliver_io(vcpu, inti); | ||
731 | break; | ||
732 | default: | ||
733 | BUG(); | ||
734 | } | ||
735 | |||
736 | return rc; | ||
737 | } | ||
738 | |||
739 | /* Check whether an external call is pending (deliverable or not) */ | 796 | /* Check whether an external call is pending (deliverable or not) */ |
740 | int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu) | 797 | int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu) |
741 | { | 798 | { |
@@ -751,21 +808,9 @@ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu) | |||
751 | 808 | ||
752 | int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop) | 809 | int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop) |
753 | { | 810 | { |
754 | struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; | ||
755 | struct kvm_s390_interrupt_info *inti; | ||
756 | int rc; | 811 | int rc; |
757 | 812 | ||
758 | rc = !!deliverable_local_irqs(vcpu); | 813 | rc = !!deliverable_irqs(vcpu); |
759 | |||
760 | if ((!rc) && atomic_read(&fi->active)) { | ||
761 | spin_lock(&fi->lock); | ||
762 | list_for_each_entry(inti, &fi->list, list) | ||
763 | if (__interrupt_is_deliverable(vcpu, inti)) { | ||
764 | rc = 1; | ||
765 | break; | ||
766 | } | ||
767 | spin_unlock(&fi->lock); | ||
768 | } | ||
769 | 814 | ||
770 | if (!rc && kvm_cpu_has_pending_timer(vcpu)) | 815 | if (!rc && kvm_cpu_has_pending_timer(vcpu)) |
771 | rc = 1; | 816 | rc = 1; |
@@ -784,12 +829,7 @@ int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop) | |||
784 | 829 | ||
785 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | 830 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) |
786 | { | 831 | { |
787 | if (!(vcpu->arch.sie_block->ckc < | 832 | return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu); |
788 | get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) | ||
789 | return 0; | ||
790 | if (!ckc_interrupts_enabled(vcpu)) | ||
791 | return 0; | ||
792 | return 1; | ||
793 | } | 833 | } |
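kvm_cpu_has_pending_timer() now simply ORs two helpers whose bodies are outside this hunk. The clock-comparator half can be read off the open-coded check it replaces (removed on the left): pending when the guest's clock comparator is already behind the TOD clock adjusted by the guest epoch, and clock-comparator interrupts are enabled. A plausible shape for ckc_irq_pending(), assuming it keeps the removed logic unchanged (kernel-internal, not standalone):

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
	/* same condition the old kvm_cpu_has_pending_timer() open-coded */
	if (!(vcpu->arch.sie_block->ckc <
	      get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
		return 0;
	return ckc_interrupts_enabled(vcpu);
}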
794 | 834 | ||
795 | int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) | 835 | int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) |
@@ -884,60 +924,45 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) | |||
884 | int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) | 924 | int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) |
885 | { | 925 | { |
886 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 926 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
887 | struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; | ||
888 | struct kvm_s390_interrupt_info *n, *inti = NULL; | ||
889 | deliver_irq_t func; | 927 | deliver_irq_t func; |
890 | int deliver; | ||
891 | int rc = 0; | 928 | int rc = 0; |
892 | unsigned long irq_type; | 929 | unsigned long irq_type; |
893 | unsigned long deliverable_irqs; | 930 | unsigned long irqs; |
894 | 931 | ||
895 | __reset_intercept_indicators(vcpu); | 932 | __reset_intercept_indicators(vcpu); |
896 | 933 | ||
897 | /* pending ckc conditions might have been invalidated */ | 934 | /* pending ckc conditions might have been invalidated */ |
898 | clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); | 935 | clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); |
899 | if (kvm_cpu_has_pending_timer(vcpu)) | 936 | if (ckc_irq_pending(vcpu)) |
900 | set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); | 937 | set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); |
901 | 938 | ||
939 | /* pending cpu timer conditions might have been invalidated */ | ||
940 | clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); | ||
941 | if (cpu_timer_irq_pending(vcpu)) | ||
942 | set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); | ||
943 | |||
902 | do { | 944 | do { |
903 | deliverable_irqs = deliverable_local_irqs(vcpu); | 945 | irqs = deliverable_irqs(vcpu); |
904 | /* bits are in the order of interrupt priority */ | 946 | /* bits are in the order of interrupt priority */ |
905 | irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT); | 947 | irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT); |
906 | if (irq_type == IRQ_PEND_COUNT) | 948 | if (irq_type == IRQ_PEND_COUNT) |
907 | break; | 949 | break; |
908 | func = deliver_irq_funcs[irq_type]; | 950 | if (is_ioirq(irq_type)) { |
909 | if (!func) { | 951 | rc = __deliver_io(vcpu, irq_type); |
910 | WARN_ON_ONCE(func == NULL); | 952 | } else { |
911 | clear_bit(irq_type, &li->pending_irqs); | 953 | func = deliver_irq_funcs[irq_type]; |
912 | continue; | 954 | if (!func) { |
955 | WARN_ON_ONCE(func == NULL); | ||
956 | clear_bit(irq_type, &li->pending_irqs); | ||
957 | continue; | ||
958 | } | ||
959 | rc = func(vcpu); | ||
913 | } | 960 | } |
914 | rc = func(vcpu); | 961 | if (rc) |
915 | } while (!rc && irq_type != IRQ_PEND_COUNT); | 962 | break; |
963 | } while (!rc); | ||
916 | 964 | ||
917 | set_intercept_indicators_local(vcpu); | 965 | set_intercept_indicators(vcpu); |
918 | |||
919 | if (!rc && atomic_read(&fi->active)) { | ||
920 | do { | ||
921 | deliver = 0; | ||
922 | spin_lock(&fi->lock); | ||
923 | list_for_each_entry_safe(inti, n, &fi->list, list) { | ||
924 | if (__interrupt_is_deliverable(vcpu, inti)) { | ||
925 | list_del(&inti->list); | ||
926 | fi->irq_count--; | ||
927 | deliver = 1; | ||
928 | break; | ||
929 | } | ||
930 | __set_intercept_indicator(vcpu, inti); | ||
931 | } | ||
932 | if (list_empty(&fi->list)) | ||
933 | atomic_set(&fi->active, 0); | ||
934 | spin_unlock(&fi->lock); | ||
935 | if (deliver) { | ||
936 | rc = __deliver_floating_interrupt(vcpu, inti); | ||
937 | kfree(inti); | ||
938 | } | ||
939 | } while (!rc && deliver); | ||
940 | } | ||
941 | 966 | ||
942 | return rc; | 967 | return rc; |
943 | } | 968 | } |
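The rewritten delivery loop above leans on the ordering of the IRQ_PEND_* bits: because they are declared in priority order, find_first_bit() over the deliverable mask always yields the most urgent pending type, and I/O types are forked off to __deliver_io() while everything else goes through the function table. A standalone toy of the priority-by-bit-position idea (the enum values are made up, only the ordering matters):

#include <stdio.h>

/* Toy priority order: bit 0 is the most urgent, mirroring how the
 * IRQ_PEND_* enumeration is arranged ("bits are in the order of
 * interrupt priority"). */
enum { PEND_MCHK = 0, PEND_PROG = 1, PEND_EXT = 2, PEND_IO = 3, PEND_COUNT = 4 };

static unsigned long find_first_bit_toy(unsigned long word, unsigned long size)
{
	return word ? (unsigned long)__builtin_ctzl(word) : size;
}

int main(void)
{
	unsigned long pending = (1UL << PEND_IO) | (1UL << PEND_PROG);
	unsigned long type;

	while ((type = find_first_bit_toy(pending, PEND_COUNT)) != PEND_COUNT) {
		printf("deliver type %lu\n", type);	/* 1 (PROG) first, then 3 (IO) */
		pending &= ~(1UL << type);		/* the real loop re-reads deliverable_irqs() */
	}
	return 0;
}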
@@ -1172,80 +1197,182 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu) | |||
1172 | return 0; | 1197 | return 0; |
1173 | } | 1198 | } |
1174 | 1199 | ||
1200 | static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm, | ||
1201 | int isc, u32 schid) | ||
1202 | { | ||
1203 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; | ||
1204 | struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc]; | ||
1205 | struct kvm_s390_interrupt_info *iter; | ||
1206 | u16 id = (schid & 0xffff0000U) >> 16; | ||
1207 | u16 nr = schid & 0x0000ffffU; | ||
1175 | 1208 | ||
1209 | spin_lock(&fi->lock); | ||
1210 | list_for_each_entry(iter, isc_list, list) { | ||
1211 | if (schid && (id != iter->io.subchannel_id || | ||
1212 | nr != iter->io.subchannel_nr)) | ||
1213 | continue; | ||
1214 | /* found an appropriate entry */ | ||
1215 | list_del_init(&iter->list); | ||
1216 | fi->counters[FIRQ_CNTR_IO] -= 1; | ||
1217 | if (list_empty(isc_list)) | ||
1218 | clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs); | ||
1219 | spin_unlock(&fi->lock); | ||
1220 | return iter; | ||
1221 | } | ||
1222 | spin_unlock(&fi->lock); | ||
1223 | return NULL; | ||
1224 | } | ||
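get_io_int() above takes the subchannel as a packed u32: the subchannel id sits in the upper halfword and the subchannel number in the lower halfword, mirroring the two masks in the code. A standalone sketch of that packing:

#include <stdint.h>
#include <stdio.h>

void split_schid(uint32_t schid, uint16_t *id, uint16_t *nr)
{
	*id = (uint16_t)((schid & 0xffff0000U) >> 16);
	*nr = (uint16_t)(schid & 0x0000ffffU);
}

int main(void)
{
	uint16_t id, nr;

	split_schid(0x00010003U, &id, &nr);	/* hypothetical subchannel 1/3 */
	printf("id=%u nr=%u\n", id, nr);	/* id=1 nr=3 */
	return 0;
}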
1225 | |||
1226 | /* | ||
1227 | * Dequeue and return an I/O interrupt matching any of the interruption | ||
1228 | * subclasses as designated by the isc mask in cr6 and the schid (if != 0). | ||
1229 | */ | ||
1176 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, | 1230 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, |
1177 | u64 cr6, u64 schid) | 1231 | u64 isc_mask, u32 schid) |
1232 | { | ||
1233 | struct kvm_s390_interrupt_info *inti = NULL; | ||
1234 | int isc; | ||
1235 | |||
1236 | for (isc = 0; isc <= MAX_ISC && !inti; isc++) { | ||
1237 | if (isc_mask & isc_to_isc_bits(isc)) | ||
1238 | inti = get_io_int(kvm, isc, schid); | ||
1239 | } | ||
1240 | return inti; | ||
1241 | } | ||
1242 | |||
1243 | #define SCCB_MASK 0xFFFFFFF8 | ||
1244 | #define SCCB_EVENT_PENDING 0x3 | ||
1245 | |||
1246 | static int __inject_service(struct kvm *kvm, | ||
1247 | struct kvm_s390_interrupt_info *inti) | ||
1248 | { | ||
1249 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; | ||
1250 | |||
1251 | spin_lock(&fi->lock); | ||
1252 | fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING; | ||
1253 | /* | ||
1254 | * Early versions of the QEMU s390 bios will inject several | ||
1255 | * service interrupts one after the other without handling a | ||
1256 | * condition code indicating busy. | ||
1257 | * We silently ignore those superfluous sccb values. | ||
1258 | * A future version of QEMU will take care of serializing | ||
1259 | * servc requests. | ||
1260 | */ | ||
1261 | if (fi->srv_signal.ext_params & SCCB_MASK) | ||
1262 | goto out; | ||
1263 | fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK; | ||
1264 | set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs); | ||
1265 | out: | ||
1266 | spin_unlock(&fi->lock); | ||
1267 | kfree(inti); | ||
1268 | return 0; | ||
1269 | } | ||
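Unlike the list-backed interrupt types, the service signal is a single accumulating slot. The sketch below mirrors the merge rule of __inject_service() above: event-pending bits always accumulate, but a new SCCB address arriving while one is still undelivered is silently dropped, which is the early-QEMU-bios workaround described in the comment.

#include <stdint.h>

#define SCCB_MASK          0xFFFFFFF8u
#define SCCB_EVENT_PENDING 0x3u

void merge_service_signal(uint32_t *srv_ext_params, uint32_t new_ext_params)
{
	*srv_ext_params |= new_ext_params & SCCB_EVENT_PENDING;
	if (*srv_ext_params & SCCB_MASK)
		return;				/* an SCCB is already queued */
	*srv_ext_params |= new_ext_params & SCCB_MASK;
}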
1270 | |||
1271 | static int __inject_virtio(struct kvm *kvm, | ||
1272 | struct kvm_s390_interrupt_info *inti) | ||
1273 | { | ||
1274 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; | ||
1275 | |||
1276 | spin_lock(&fi->lock); | ||
1277 | if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) { | ||
1278 | spin_unlock(&fi->lock); | ||
1279 | return -EBUSY; | ||
1280 | } | ||
1281 | fi->counters[FIRQ_CNTR_VIRTIO] += 1; | ||
1282 | list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]); | ||
1283 | set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs); | ||
1284 | spin_unlock(&fi->lock); | ||
1285 | return 0; | ||
1286 | } | ||
1287 | |||
1288 | static int __inject_pfault_done(struct kvm *kvm, | ||
1289 | struct kvm_s390_interrupt_info *inti) | ||
1290 | { | ||
1291 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; | ||
1292 | |||
1293 | spin_lock(&fi->lock); | ||
1294 | if (fi->counters[FIRQ_CNTR_PFAULT] >= | ||
1295 | (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) { | ||
1296 | spin_unlock(&fi->lock); | ||
1297 | return -EBUSY; | ||
1298 | } | ||
1299 | fi->counters[FIRQ_CNTR_PFAULT] += 1; | ||
1300 | list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]); | ||
1301 | set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs); | ||
1302 | spin_unlock(&fi->lock); | ||
1303 | return 0; | ||
1304 | } | ||
1305 | |||
1306 | #define CR_PENDING_SUBCLASS 28 | ||
1307 | static int __inject_float_mchk(struct kvm *kvm, | ||
1308 | struct kvm_s390_interrupt_info *inti) | ||
1309 | { | ||
1310 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; | ||
1311 | |||
1312 | spin_lock(&fi->lock); | ||
1313 | fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS); | ||
1314 | fi->mchk.mcic |= inti->mchk.mcic; | ||
1315 | set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs); | ||
1316 | spin_unlock(&fi->lock); | ||
1317 | kfree(inti); | ||
1318 | return 0; | ||
1319 | } | ||
1320 | |||
1321 | static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) | ||
1178 | { | 1322 | { |
1179 | struct kvm_s390_float_interrupt *fi; | 1323 | struct kvm_s390_float_interrupt *fi; |
1180 | struct kvm_s390_interrupt_info *inti, *iter; | 1324 | struct list_head *list; |
1325 | int isc; | ||
1181 | 1326 | ||
1182 | if ((!schid && !cr6) || (schid && cr6)) | ||
1183 | return NULL; | ||
1184 | fi = &kvm->arch.float_int; | 1327 | fi = &kvm->arch.float_int; |
1185 | spin_lock(&fi->lock); | 1328 | spin_lock(&fi->lock); |
1186 | inti = NULL; | 1329 | if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) { |
1187 | list_for_each_entry(iter, &fi->list, list) { | 1330 | spin_unlock(&fi->lock); |
1188 | if (!is_ioint(iter->type)) | 1331 | return -EBUSY; |
1189 | continue; | ||
1190 | if (cr6 && | ||
1191 | ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0)) | ||
1192 | continue; | ||
1193 | if (schid) { | ||
1194 | if (((schid & 0x00000000ffff0000) >> 16) != | ||
1195 | iter->io.subchannel_id) | ||
1196 | continue; | ||
1197 | if ((schid & 0x000000000000ffff) != | ||
1198 | iter->io.subchannel_nr) | ||
1199 | continue; | ||
1200 | } | ||
1201 | inti = iter; | ||
1202 | break; | ||
1203 | } | ||
1204 | if (inti) { | ||
1205 | list_del_init(&inti->list); | ||
1206 | fi->irq_count--; | ||
1207 | } | 1332 | } |
1208 | if (list_empty(&fi->list)) | 1333 | fi->counters[FIRQ_CNTR_IO] += 1; |
1209 | atomic_set(&fi->active, 0); | 1334 | |
1335 | isc = int_word_to_isc(inti->io.io_int_word); | ||
1336 | list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc]; | ||
1337 | list_add_tail(&inti->list, list); | ||
1338 | set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs); | ||
1210 | spin_unlock(&fi->lock); | 1339 | spin_unlock(&fi->lock); |
1211 | return inti; | 1340 | return 0; |
1212 | } | 1341 | } |
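__inject_io() routes each interrupt to a per-ISC list by extracting the interruption subclass from the I/O interruption word. The bit position used below (bits 2-4, mask 0x38000000) is an assumption about what int_word_to_isc() computes; the counter bound and list routing are as in the code above.

#include <stdint.h>
#include <stdio.h>

/* Assumption: the ISC occupies bits 2-4 of the I/O interruption word. */
static int int_word_to_isc_sketch(uint32_t io_int_word)
{
	return (int)((io_int_word & 0x38000000u) >> 27);
}

int main(void)
{
	uint32_t io_int_word = 0x18000000u;		/* hypothetical word, ISC 3 */
	int isc = int_word_to_isc_sketch(io_int_word);

	/* __inject_io() appends to fi->lists[FIRQ_LIST_IO_ISC_0 + isc] and
	 * sets pending bit IRQ_PEND_IO_ISC_0 + isc, so delivery later finds
	 * the entry via the matching bit. */
	printf("queued on ISC list %d\n", isc);		/* prints 3 */
	return 0;
}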
1213 | 1342 | ||
1214 | static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) | 1343 | static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) |
1215 | { | 1344 | { |
1216 | struct kvm_s390_local_interrupt *li; | 1345 | struct kvm_s390_local_interrupt *li; |
1217 | struct kvm_s390_float_interrupt *fi; | 1346 | struct kvm_s390_float_interrupt *fi; |
1218 | struct kvm_s390_interrupt_info *iter; | ||
1219 | struct kvm_vcpu *dst_vcpu = NULL; | 1347 | struct kvm_vcpu *dst_vcpu = NULL; |
1220 | int sigcpu; | 1348 | int sigcpu; |
1221 | int rc = 0; | 1349 | u64 type = READ_ONCE(inti->type); |
1350 | int rc; | ||
1222 | 1351 | ||
1223 | fi = &kvm->arch.float_int; | 1352 | fi = &kvm->arch.float_int; |
1224 | spin_lock(&fi->lock); | 1353 | |
1225 | if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) { | 1354 | switch (type) { |
1355 | case KVM_S390_MCHK: | ||
1356 | rc = __inject_float_mchk(kvm, inti); | ||
1357 | break; | ||
1358 | case KVM_S390_INT_VIRTIO: | ||
1359 | rc = __inject_virtio(kvm, inti); | ||
1360 | break; | ||
1361 | case KVM_S390_INT_SERVICE: | ||
1362 | rc = __inject_service(kvm, inti); | ||
1363 | break; | ||
1364 | case KVM_S390_INT_PFAULT_DONE: | ||
1365 | rc = __inject_pfault_done(kvm, inti); | ||
1366 | break; | ||
1367 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: | ||
1368 | rc = __inject_io(kvm, inti); | ||
1369 | break; | ||
1370 | default: | ||
1226 | rc = -EINVAL; | 1371 | rc = -EINVAL; |
1227 | goto unlock_fi; | ||
1228 | } | 1372 | } |
1229 | fi->irq_count++; | 1373 | if (rc) |
1230 | if (!is_ioint(inti->type)) { | 1374 | return rc; |
1231 | list_add_tail(&inti->list, &fi->list); | ||
1232 | } else { | ||
1233 | u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word); | ||
1234 | 1375 | ||
1235 | /* Keep I/O interrupts sorted in isc order. */ | ||
1236 | list_for_each_entry(iter, &fi->list, list) { | ||
1237 | if (!is_ioint(iter->type)) | ||
1238 | continue; | ||
1239 | if (int_word_to_isc_bits(iter->io.io_int_word) | ||
1240 | <= isc_bits) | ||
1241 | continue; | ||
1242 | break; | ||
1243 | } | ||
1244 | list_add_tail(&inti->list, &iter->list); | ||
1245 | } | ||
1246 | atomic_set(&fi->active, 1); | ||
1247 | if (atomic_read(&kvm->online_vcpus) == 0) | ||
1248 | goto unlock_fi; | ||
1249 | sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS); | 1376 | sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS); |
1250 | if (sigcpu == KVM_MAX_VCPUS) { | 1377 | if (sigcpu == KVM_MAX_VCPUS) { |
1251 | do { | 1378 | do { |
@@ -1257,7 +1384,7 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) | |||
1257 | dst_vcpu = kvm_get_vcpu(kvm, sigcpu); | 1384 | dst_vcpu = kvm_get_vcpu(kvm, sigcpu); |
1258 | li = &dst_vcpu->arch.local_int; | 1385 | li = &dst_vcpu->arch.local_int; |
1259 | spin_lock(&li->lock); | 1386 | spin_lock(&li->lock); |
1260 | switch (inti->type) { | 1387 | switch (type) { |
1261 | case KVM_S390_MCHK: | 1388 | case KVM_S390_MCHK: |
1262 | atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); | 1389 | atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); |
1263 | break; | 1390 | break; |
@@ -1270,9 +1397,8 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) | |||
1270 | } | 1397 | } |
1271 | spin_unlock(&li->lock); | 1398 | spin_unlock(&li->lock); |
1272 | kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu)); | 1399 | kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu)); |
1273 | unlock_fi: | 1400 | return 0; |
1274 | spin_unlock(&fi->lock); | 1401 | |
1275 | return rc; | ||
1276 | } | 1402 | } |
1277 | 1403 | ||
1278 | int kvm_s390_inject_vm(struct kvm *kvm, | 1404 | int kvm_s390_inject_vm(struct kvm *kvm, |
@@ -1332,10 +1458,10 @@ int kvm_s390_inject_vm(struct kvm *kvm, | |||
1332 | return rc; | 1458 | return rc; |
1333 | } | 1459 | } |
1334 | 1460 | ||
1335 | void kvm_s390_reinject_io_int(struct kvm *kvm, | 1461 | int kvm_s390_reinject_io_int(struct kvm *kvm, |
1336 | struct kvm_s390_interrupt_info *inti) | 1462 | struct kvm_s390_interrupt_info *inti) |
1337 | { | 1463 | { |
1338 | __inject_vm(kvm, inti); | 1464 | return __inject_vm(kvm, inti); |
1339 | } | 1465 | } |
1340 | 1466 | ||
1341 | int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, | 1467 | int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, |
@@ -1388,12 +1514,10 @@ void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu) | |||
1388 | spin_unlock(&li->lock); | 1514 | spin_unlock(&li->lock); |
1389 | } | 1515 | } |
1390 | 1516 | ||
1391 | int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) | 1517 | static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) |
1392 | { | 1518 | { |
1393 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | ||
1394 | int rc; | 1519 | int rc; |
1395 | 1520 | ||
1396 | spin_lock(&li->lock); | ||
1397 | switch (irq->type) { | 1521 | switch (irq->type) { |
1398 | case KVM_S390_PROGRAM_INT: | 1522 | case KVM_S390_PROGRAM_INT: |
1399 | VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)", | 1523 | VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)", |
@@ -1433,83 +1557,130 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) | |||
1433 | default: | 1557 | default: |
1434 | rc = -EINVAL; | 1558 | rc = -EINVAL; |
1435 | } | 1559 | } |
1560 | |||
1561 | return rc; | ||
1562 | } | ||
1563 | |||
1564 | int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) | ||
1565 | { | ||
1566 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | ||
1567 | int rc; | ||
1568 | |||
1569 | spin_lock(&li->lock); | ||
1570 | rc = do_inject_vcpu(vcpu, irq); | ||
1436 | spin_unlock(&li->lock); | 1571 | spin_unlock(&li->lock); |
1437 | if (!rc) | 1572 | if (!rc) |
1438 | kvm_s390_vcpu_wakeup(vcpu); | 1573 | kvm_s390_vcpu_wakeup(vcpu); |
1439 | return rc; | 1574 | return rc; |
1440 | } | 1575 | } |
1441 | 1576 | ||
1442 | void kvm_s390_clear_float_irqs(struct kvm *kvm) | 1577 | static inline void clear_irq_list(struct list_head *_list) |
1443 | { | 1578 | { |
1444 | struct kvm_s390_float_interrupt *fi; | 1579 | struct kvm_s390_interrupt_info *inti, *n; |
1445 | struct kvm_s390_interrupt_info *n, *inti = NULL; | ||
1446 | 1580 | ||
1447 | fi = &kvm->arch.float_int; | 1581 | list_for_each_entry_safe(inti, n, _list, list) { |
1448 | spin_lock(&fi->lock); | ||
1449 | list_for_each_entry_safe(inti, n, &fi->list, list) { | ||
1450 | list_del(&inti->list); | 1582 | list_del(&inti->list); |
1451 | kfree(inti); | 1583 | kfree(inti); |
1452 | } | 1584 | } |
1453 | fi->irq_count = 0; | ||
1454 | atomic_set(&fi->active, 0); | ||
1455 | spin_unlock(&fi->lock); | ||
1456 | } | 1585 | } |
1457 | 1586 | ||
1458 | static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti, | 1587 | static void inti_to_irq(struct kvm_s390_interrupt_info *inti, |
1459 | u8 *addr) | 1588 | struct kvm_s390_irq *irq) |
1460 | { | 1589 | { |
1461 | struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr; | 1590 | irq->type = inti->type; |
1462 | struct kvm_s390_irq irq = {0}; | ||
1463 | |||
1464 | irq.type = inti->type; | ||
1465 | switch (inti->type) { | 1591 | switch (inti->type) { |
1466 | case KVM_S390_INT_PFAULT_INIT: | 1592 | case KVM_S390_INT_PFAULT_INIT: |
1467 | case KVM_S390_INT_PFAULT_DONE: | 1593 | case KVM_S390_INT_PFAULT_DONE: |
1468 | case KVM_S390_INT_VIRTIO: | 1594 | case KVM_S390_INT_VIRTIO: |
1469 | case KVM_S390_INT_SERVICE: | 1595 | irq->u.ext = inti->ext; |
1470 | irq.u.ext = inti->ext; | ||
1471 | break; | 1596 | break; |
1472 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: | 1597 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: |
1473 | irq.u.io = inti->io; | 1598 | irq->u.io = inti->io; |
1474 | break; | 1599 | break; |
1475 | case KVM_S390_MCHK: | ||
1476 | irq.u.mchk = inti->mchk; | ||
1477 | break; | ||
1478 | default: | ||
1479 | return -EINVAL; | ||
1480 | } | 1600 | } |
1601 | } | ||
1481 | 1602 | ||
1482 | if (copy_to_user(uptr, &irq, sizeof(irq))) | 1603 | void kvm_s390_clear_float_irqs(struct kvm *kvm) |
1483 | return -EFAULT; | 1604 | { |
1605 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; | ||
1606 | int i; | ||
1484 | 1607 | ||
1485 | return 0; | 1608 | spin_lock(&fi->lock); |
1486 | } | 1609 | for (i = 0; i < FIRQ_LIST_COUNT; i++) |
1610 | clear_irq_list(&fi->lists[i]); | ||
1611 | for (i = 0; i < FIRQ_MAX_COUNT; i++) | ||
1612 | fi->counters[i] = 0; | ||
1613 | spin_unlock(&fi->lock); | ||
1614 | }; | ||
1487 | 1615 | ||
1488 | static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len) | 1616 | static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len) |
1489 | { | 1617 | { |
1490 | struct kvm_s390_interrupt_info *inti; | 1618 | struct kvm_s390_interrupt_info *inti; |
1491 | struct kvm_s390_float_interrupt *fi; | 1619 | struct kvm_s390_float_interrupt *fi; |
1620 | struct kvm_s390_irq *buf; | ||
1621 | struct kvm_s390_irq *irq; | ||
1622 | int max_irqs; | ||
1492 | int ret = 0; | 1623 | int ret = 0; |
1493 | int n = 0; | 1624 | int n = 0; |
1625 | int i; | ||
1626 | |||
1627 | if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0) | ||
1628 | return -EINVAL; | ||
1629 | |||
1630 | /* | ||
1631 | * We are already using -ENOMEM to signal | ||
1632 | * userspace it may retry with a bigger buffer, | ||
1633 | * so we need to use something else for this case | ||
1634 | */ | ||
1635 | buf = vzalloc(len); | ||
1636 | if (!buf) | ||
1637 | return -ENOBUFS; | ||
1638 | |||
1639 | max_irqs = len / sizeof(struct kvm_s390_irq); | ||
1494 | 1640 | ||
1495 | fi = &kvm->arch.float_int; | 1641 | fi = &kvm->arch.float_int; |
1496 | spin_lock(&fi->lock); | 1642 | spin_lock(&fi->lock); |
1497 | 1643 | for (i = 0; i < FIRQ_LIST_COUNT; i++) { | |
1498 | list_for_each_entry(inti, &fi->list, list) { | 1644 | list_for_each_entry(inti, &fi->lists[i], list) { |
1499 | if (len < sizeof(struct kvm_s390_irq)) { | 1645 | if (n == max_irqs) { |
1646 | /* signal userspace to try again */ | ||
1647 | ret = -ENOMEM; | ||
1648 | goto out; | ||
1649 | } | ||
1650 | inti_to_irq(inti, &buf[n]); | ||
1651 | n++; | ||
1652 | } | ||
1653 | } | ||
1654 | if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) { | ||
1655 | if (n == max_irqs) { | ||
1500 | /* signal userspace to try again */ | 1656 | /* signal userspace to try again */ |
1501 | ret = -ENOMEM; | 1657 | ret = -ENOMEM; |
1502 | break; | 1658 | goto out; |
1503 | } | 1659 | } |
1504 | ret = copy_irq_to_user(inti, buf); | 1660 | irq = (struct kvm_s390_irq *) &buf[n]; |
1505 | if (ret) | 1661 | irq->type = KVM_S390_INT_SERVICE; |
1506 | break; | 1662 | irq->u.ext = fi->srv_signal; |
1507 | buf += sizeof(struct kvm_s390_irq); | ||
1508 | len -= sizeof(struct kvm_s390_irq); | ||
1509 | n++; | 1663 | n++; |
1510 | } | 1664 | } |
1665 | if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) { | ||
1666 | if (n == max_irqs) { | ||
1667 | /* signal userspace to try again */ | ||
1668 | ret = -ENOMEM; | ||
1669 | goto out; | ||
1670 | } | ||
1671 | irq = (struct kvm_s390_irq *) &buf[n]; | ||
1672 | irq->type = KVM_S390_MCHK; | ||
1673 | irq->u.mchk = fi->mchk; | ||
1674 | n++; | ||
1675 | } | ||
1511 | 1676 | ||
1677 | out: | ||
1512 | spin_unlock(&fi->lock); | 1678 | spin_unlock(&fi->lock); |
1679 | if (!ret && n > 0) { | ||
1680 | if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n)) | ||
1681 | ret = -EFAULT; | ||
1682 | } | ||
1683 | vfree(buf); | ||
1513 | 1684 | ||
1514 | return ret < 0 ? ret : n; | 1685 | return ret < 0 ? ret : n; |
1515 | } | 1686 | } |
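Userspace side of the contract implemented by get_all_floating_irqs(), as a hedged sketch: -ENOMEM from the FLIC attribute means "buffer too small, retry with a bigger one", -ENOBUFS means the kernel itself could not allocate, and lengths above KVM_S390_FLIC_MAX_BUFFER are rejected. flic_fd is assumed to be an already-created KVM_DEV_TYPE_FLIC device fd, and the positive ioctl return is taken to be the number of interrupts copied.

#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int fetch_all_float_irqs(int flic_fd, struct kvm_s390_irq **out, int *out_n)
{
	size_t len = 64 * sizeof(struct kvm_s390_irq);
	struct kvm_s390_irq *buf = NULL;
	int n;

	for (;;) {
		struct kvm_device_attr attr = {
			.group = KVM_DEV_FLIC_GET_ALL_IRQS,
			.attr  = len,
		};
		struct kvm_s390_irq *tmp = realloc(buf, len);

		if (!tmp) {
			free(buf);
			return -ENOMEM;
		}
		buf = tmp;
		attr.addr = (unsigned long)buf;

		n = ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr);
		if (n >= 0)
			break;
		if (errno != ENOMEM) {		/* e.g. -EINVAL once len exceeds the kernel's cap */
			free(buf);
			return -errno;
		}
		len *= 2;			/* kernel asked us to retry with more room */
	}
	*out = buf;
	*out_n = n;
	return 0;
}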
@@ -1520,7 +1691,7 @@ static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | |||
1520 | 1691 | ||
1521 | switch (attr->group) { | 1692 | switch (attr->group) { |
1522 | case KVM_DEV_FLIC_GET_ALL_IRQS: | 1693 | case KVM_DEV_FLIC_GET_ALL_IRQS: |
1523 | r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr, | 1694 | r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr, |
1524 | attr->attr); | 1695 | attr->attr); |
1525 | break; | 1696 | break; |
1526 | default: | 1697 | default: |
@@ -1952,3 +2123,143 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, | |||
1952 | { | 2123 | { |
1953 | return -EINVAL; | 2124 | return -EINVAL; |
1954 | } | 2125 | } |
2126 | |||
2127 | int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len) | ||
2128 | { | ||
2129 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | ||
2130 | struct kvm_s390_irq *buf; | ||
2131 | int r = 0; | ||
2132 | int n; | ||
2133 | |||
2134 | buf = vmalloc(len); | ||
2135 | if (!buf) | ||
2136 | return -ENOMEM; | ||
2137 | |||
2138 | if (copy_from_user((void *) buf, irqstate, len)) { | ||
2139 | r = -EFAULT; | ||
2140 | goto out_free; | ||
2141 | } | ||
2142 | |||
2143 | /* | ||
2144 | * Don't allow setting the interrupt state | ||
2145 | * when there are already interrupts pending | ||
2146 | */ | ||
2147 | spin_lock(&li->lock); | ||
2148 | if (li->pending_irqs) { | ||
2149 | r = -EBUSY; | ||
2150 | goto out_unlock; | ||
2151 | } | ||
2152 | |||
2153 | for (n = 0; n < len / sizeof(*buf); n++) { | ||
2154 | r = do_inject_vcpu(vcpu, &buf[n]); | ||
2155 | if (r) | ||
2156 | break; | ||
2157 | } | ||
2158 | |||
2159 | out_unlock: | ||
2160 | spin_unlock(&li->lock); | ||
2161 | out_free: | ||
2162 | vfree(buf); | ||
2163 | |||
2164 | return r; | ||
2165 | } | ||
2166 | |||
2167 | static void store_local_irq(struct kvm_s390_local_interrupt *li, | ||
2168 | struct kvm_s390_irq *irq, | ||
2169 | unsigned long irq_type) | ||
2170 | { | ||
2171 | switch (irq_type) { | ||
2172 | case IRQ_PEND_MCHK_EX: | ||
2173 | case IRQ_PEND_MCHK_REP: | ||
2174 | irq->type = KVM_S390_MCHK; | ||
2175 | irq->u.mchk = li->irq.mchk; | ||
2176 | break; | ||
2177 | case IRQ_PEND_PROG: | ||
2178 | irq->type = KVM_S390_PROGRAM_INT; | ||
2179 | irq->u.pgm = li->irq.pgm; | ||
2180 | break; | ||
2181 | case IRQ_PEND_PFAULT_INIT: | ||
2182 | irq->type = KVM_S390_INT_PFAULT_INIT; | ||
2183 | irq->u.ext = li->irq.ext; | ||
2184 | break; | ||
2185 | case IRQ_PEND_EXT_EXTERNAL: | ||
2186 | irq->type = KVM_S390_INT_EXTERNAL_CALL; | ||
2187 | irq->u.extcall = li->irq.extcall; | ||
2188 | break; | ||
2189 | case IRQ_PEND_EXT_CLOCK_COMP: | ||
2190 | irq->type = KVM_S390_INT_CLOCK_COMP; | ||
2191 | break; | ||
2192 | case IRQ_PEND_EXT_CPU_TIMER: | ||
2193 | irq->type = KVM_S390_INT_CPU_TIMER; | ||
2194 | break; | ||
2195 | case IRQ_PEND_SIGP_STOP: | ||
2196 | irq->type = KVM_S390_SIGP_STOP; | ||
2197 | irq->u.stop = li->irq.stop; | ||
2198 | break; | ||
2199 | case IRQ_PEND_RESTART: | ||
2200 | irq->type = KVM_S390_RESTART; | ||
2201 | break; | ||
2202 | case IRQ_PEND_SET_PREFIX: | ||
2203 | irq->type = KVM_S390_SIGP_SET_PREFIX; | ||
2204 | irq->u.prefix = li->irq.prefix; | ||
2205 | break; | ||
2206 | } | ||
2207 | } | ||
2208 | |||
2209 | int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len) | ||
2210 | { | ||
2211 | uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl; | ||
2212 | unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)]; | ||
2213 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | ||
2214 | unsigned long pending_irqs; | ||
2215 | struct kvm_s390_irq irq; | ||
2216 | unsigned long irq_type; | ||
2217 | int cpuaddr; | ||
2218 | int n = 0; | ||
2219 | |||
2220 | spin_lock(&li->lock); | ||
2221 | pending_irqs = li->pending_irqs; | ||
2222 | memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending, | ||
2223 | sizeof(sigp_emerg_pending)); | ||
2224 | spin_unlock(&li->lock); | ||
2225 | |||
2226 | for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) { | ||
2227 | memset(&irq, 0, sizeof(irq)); | ||
2228 | if (irq_type == IRQ_PEND_EXT_EMERGENCY) | ||
2229 | continue; | ||
2230 | if (n + sizeof(irq) > len) | ||
2231 | return -ENOBUFS; | ||
2232 | store_local_irq(&vcpu->arch.local_int, &irq, irq_type); | ||
2233 | if (copy_to_user(&buf[n], &irq, sizeof(irq))) | ||
2234 | return -EFAULT; | ||
2235 | n += sizeof(irq); | ||
2236 | } | ||
2237 | |||
2238 | if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) { | ||
2239 | for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) { | ||
2240 | memset(&irq, 0, sizeof(irq)); | ||
2241 | if (n + sizeof(irq) > len) | ||
2242 | return -ENOBUFS; | ||
2243 | irq.type = KVM_S390_INT_EMERGENCY; | ||
2244 | irq.u.emerg.code = cpuaddr; | ||
2245 | if (copy_to_user(&buf[n], &irq, sizeof(irq))) | ||
2246 | return -EFAULT; | ||
2247 | n += sizeof(irq); | ||
2248 | } | ||
2249 | } | ||
2250 | |||
2251 | if ((sigp_ctrl & SIGP_CTRL_C) && | ||
2252 | (atomic_read(&vcpu->arch.sie_block->cpuflags) & | ||
2253 | CPUSTAT_ECALL_PEND)) { | ||
2254 | if (n + sizeof(irq) > len) | ||
2255 | return -ENOBUFS; | ||
2256 | memset(&irq, 0, sizeof(irq)); | ||
2257 | irq.type = KVM_S390_INT_EXTERNAL_CALL; | ||
2258 | irq.u.extcall.code = sigp_ctrl & SIGP_CTRL_SCN_MASK; | ||
2259 | if (copy_to_user(&buf[n], &irq, sizeof(irq))) | ||
2260 | return -EFAULT; | ||
2261 | n += sizeof(irq); | ||
2262 | } | ||
2263 | |||
2264 | return n; | ||
2265 | } | ||
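These two vcpu-local functions back the new KVM_S390_SET_IRQ_STATE and KVM_S390_GET_IRQ_STATE vcpu ioctls (advertised via KVM_CAP_S390_IRQ_STATE in the kvm-s390.c hunk below). A hedged userspace sketch of the save/restore flow: the buffer holds raw struct kvm_s390_irq entries, the GET ioctl's positive return is taken to be the number of bytes copied and -ENOBUFS asks for a bigger buffer, and SET only succeeds while the target vcpu has no interrupts pending.

#include <sys/ioctl.h>
#include <linux/kvm.h>

int save_vcpu_irqs(int vcpu_fd, void *buf, int len)
{
	struct kvm_s390_irq_state irq_state = {
		.buf = (unsigned long)buf,
		.len = len,
	};

	return ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &irq_state);
}

int restore_vcpu_irqs(int vcpu_fd, void *buf, int len)
{
	struct kvm_s390_irq_state irq_state = {
		.buf = (unsigned long)buf,
		.len = len,
	};

	return ioctl(vcpu_fd, KVM_S390_SET_IRQ_STATE, &irq_state);
}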
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 19e17bd7aec0..afa2bd750ffc 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -25,11 +25,13 @@ | |||
25 | #include <linux/random.h> | 25 | #include <linux/random.h> |
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/timer.h> | 27 | #include <linux/timer.h> |
28 | #include <linux/vmalloc.h> | ||
28 | #include <asm/asm-offsets.h> | 29 | #include <asm/asm-offsets.h> |
29 | #include <asm/lowcore.h> | 30 | #include <asm/lowcore.h> |
30 | #include <asm/pgtable.h> | 31 | #include <asm/pgtable.h> |
31 | #include <asm/nmi.h> | 32 | #include <asm/nmi.h> |
32 | #include <asm/switch_to.h> | 33 | #include <asm/switch_to.h> |
34 | #include <asm/isc.h> | ||
33 | #include <asm/sclp.h> | 35 | #include <asm/sclp.h> |
34 | #include "kvm-s390.h" | 36 | #include "kvm-s390.h" |
35 | #include "gaccess.h" | 37 | #include "gaccess.h" |
@@ -38,6 +40,11 @@ | |||
38 | #include "trace.h" | 40 | #include "trace.h" |
39 | #include "trace-s390.h" | 41 | #include "trace-s390.h" |
40 | 42 | ||
43 | #define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */ | ||
44 | #define LOCAL_IRQS 32 | ||
45 | #define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \ | ||
46 | (KVM_MAX_VCPUS + LOCAL_IRQS)) | ||
47 | |||
41 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU | 48 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU |
42 | 49 | ||
43 | struct kvm_stats_debugfs_item debugfs_entries[] = { | 50 | struct kvm_stats_debugfs_item debugfs_entries[] = { |
@@ -87,6 +94,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
87 | { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) }, | 94 | { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) }, |
88 | { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) }, | 95 | { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) }, |
89 | { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) }, | 96 | { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) }, |
97 | { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) }, | ||
90 | { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) }, | 98 | { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) }, |
91 | { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) }, | 99 | { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) }, |
92 | { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) }, | 100 | { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) }, |
@@ -101,8 +109,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
101 | 109 | ||
102 | /* upper facilities limit for kvm */ | 110 | /* upper facilities limit for kvm */ |
103 | unsigned long kvm_s390_fac_list_mask[] = { | 111 | unsigned long kvm_s390_fac_list_mask[] = { |
104 | 0xff82fffbf4fc2000UL, | 112 | 0xffe6fffbfcfdfc40UL, |
105 | 0x005c000000000000UL, | 113 | 0x205c800000000000UL, |
106 | }; | 114 | }; |
107 | 115 | ||
108 | unsigned long kvm_s390_fac_list_mask_size(void) | 116 | unsigned long kvm_s390_fac_list_mask_size(void) |
@@ -171,9 +179,16 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
171 | case KVM_CAP_S390_IRQCHIP: | 179 | case KVM_CAP_S390_IRQCHIP: |
172 | case KVM_CAP_VM_ATTRIBUTES: | 180 | case KVM_CAP_VM_ATTRIBUTES: |
173 | case KVM_CAP_MP_STATE: | 181 | case KVM_CAP_MP_STATE: |
182 | case KVM_CAP_S390_INJECT_IRQ: | ||
174 | case KVM_CAP_S390_USER_SIGP: | 183 | case KVM_CAP_S390_USER_SIGP: |
184 | case KVM_CAP_S390_USER_STSI: | ||
185 | case KVM_CAP_S390_SKEYS: | ||
186 | case KVM_CAP_S390_IRQ_STATE: | ||
175 | r = 1; | 187 | r = 1; |
176 | break; | 188 | break; |
189 | case KVM_CAP_S390_MEM_OP: | ||
190 | r = MEM_OP_MAX_SIZE; | ||
191 | break; | ||
177 | case KVM_CAP_NR_VCPUS: | 192 | case KVM_CAP_NR_VCPUS: |
178 | case KVM_CAP_MAX_VCPUS: | 193 | case KVM_CAP_MAX_VCPUS: |
179 | r = KVM_MAX_VCPUS; | 194 | r = KVM_MAX_VCPUS; |
@@ -184,6 +199,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
184 | case KVM_CAP_S390_COW: | 199 | case KVM_CAP_S390_COW: |
185 | r = MACHINE_HAS_ESOP; | 200 | r = MACHINE_HAS_ESOP; |
186 | break; | 201 | break; |
202 | case KVM_CAP_S390_VECTOR_REGISTERS: | ||
203 | r = MACHINE_HAS_VX; | ||
204 | break; | ||
187 | default: | 205 | default: |
188 | r = 0; | 206 | r = 0; |
189 | } | 207 | } |
@@ -264,6 +282,18 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) | |||
264 | kvm->arch.user_sigp = 1; | 282 | kvm->arch.user_sigp = 1; |
265 | r = 0; | 283 | r = 0; |
266 | break; | 284 | break; |
285 | case KVM_CAP_S390_VECTOR_REGISTERS: | ||
286 | if (MACHINE_HAS_VX) { | ||
287 | set_kvm_facility(kvm->arch.model.fac->mask, 129); | ||
288 | set_kvm_facility(kvm->arch.model.fac->list, 129); | ||
289 | r = 0; | ||
290 | } else | ||
291 | r = -EINVAL; | ||
292 | break; | ||
293 | case KVM_CAP_S390_USER_STSI: | ||
294 | kvm->arch.user_stsi = 1; | ||
295 | r = 0; | ||
296 | break; | ||
267 | default: | 297 | default: |
268 | r = -EINVAL; | 298 | r = -EINVAL; |
269 | break; | 299 | break; |
@@ -708,6 +738,108 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) | |||
708 | return ret; | 738 | return ret; |
709 | } | 739 | } |
710 | 740 | ||
741 | static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) | ||
742 | { | ||
743 | uint8_t *keys; | ||
744 | uint64_t hva; | ||
745 | unsigned long curkey; | ||
746 | int i, r = 0; | ||
747 | |||
748 | if (args->flags != 0) | ||
749 | return -EINVAL; | ||
750 | |||
751 | /* Is this guest using storage keys? */ | ||
752 | if (!mm_use_skey(current->mm)) | ||
753 | return KVM_S390_GET_SKEYS_NONE; | ||
754 | |||
755 | /* Enforce sane limit on memory allocation */ | ||
756 | if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) | ||
757 | return -EINVAL; | ||
758 | |||
759 | keys = kmalloc_array(args->count, sizeof(uint8_t), | ||
760 | GFP_KERNEL | __GFP_NOWARN); | ||
761 | if (!keys) | ||
762 | keys = vmalloc(sizeof(uint8_t) * args->count); | ||
763 | if (!keys) | ||
764 | return -ENOMEM; | ||
765 | |||
766 | for (i = 0; i < args->count; i++) { | ||
767 | hva = gfn_to_hva(kvm, args->start_gfn + i); | ||
768 | if (kvm_is_error_hva(hva)) { | ||
769 | r = -EFAULT; | ||
770 | goto out; | ||
771 | } | ||
772 | |||
773 | curkey = get_guest_storage_key(current->mm, hva); | ||
774 | if (IS_ERR_VALUE(curkey)) { | ||
775 | r = curkey; | ||
776 | goto out; | ||
777 | } | ||
778 | keys[i] = curkey; | ||
779 | } | ||
780 | |||
781 | r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys, | ||
782 | sizeof(uint8_t) * args->count); | ||
783 | if (r) | ||
784 | r = -EFAULT; | ||
785 | out: | ||
786 | kvfree(keys); | ||
787 | return r; | ||
788 | } | ||
789 | |||
790 | static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) | ||
791 | { | ||
792 | uint8_t *keys; | ||
793 | uint64_t hva; | ||
794 | int i, r = 0; | ||
795 | |||
796 | if (args->flags != 0) | ||
797 | return -EINVAL; | ||
798 | |||
799 | /* Enforce sane limit on memory allocation */ | ||
800 | if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) | ||
801 | return -EINVAL; | ||
802 | |||
803 | keys = kmalloc_array(args->count, sizeof(uint8_t), | ||
804 | GFP_KERNEL | __GFP_NOWARN); | ||
805 | if (!keys) | ||
806 | keys = vmalloc(sizeof(uint8_t) * args->count); | ||
807 | if (!keys) | ||
808 | return -ENOMEM; | ||
809 | |||
810 | r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr, | ||
811 | sizeof(uint8_t) * args->count); | ||
812 | if (r) { | ||
813 | r = -EFAULT; | ||
814 | goto out; | ||
815 | } | ||
816 | |||
817 | /* Enable storage key handling for the guest */ | ||
818 | s390_enable_skey(); | ||
819 | |||
820 | for (i = 0; i < args->count; i++) { | ||
821 | hva = gfn_to_hva(kvm, args->start_gfn + i); | ||
822 | if (kvm_is_error_hva(hva)) { | ||
823 | r = -EFAULT; | ||
824 | goto out; | ||
825 | } | ||
826 | |||
827 | /* Lowest order bit is reserved */ | ||
828 | if (keys[i] & 0x01) { | ||
829 | r = -EINVAL; | ||
830 | goto out; | ||
831 | } | ||
832 | |||
833 | r = set_guest_storage_key(current->mm, hva, | ||
834 | (unsigned long)keys[i], 0); | ||
835 | if (r) | ||
836 | goto out; | ||
837 | } | ||
838 | out: | ||
839 | kvfree(keys); | ||
840 | return r; | ||
841 | } | ||
842 | |||
711 | long kvm_arch_vm_ioctl(struct file *filp, | 843 | long kvm_arch_vm_ioctl(struct file *filp, |
712 | unsigned int ioctl, unsigned long arg) | 844 | unsigned int ioctl, unsigned long arg) |
713 | { | 845 | { |
@@ -767,6 +899,26 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
767 | r = kvm_s390_vm_has_attr(kvm, &attr); | 899 | r = kvm_s390_vm_has_attr(kvm, &attr); |
768 | break; | 900 | break; |
769 | } | 901 | } |
902 | case KVM_S390_GET_SKEYS: { | ||
903 | struct kvm_s390_skeys args; | ||
904 | |||
905 | r = -EFAULT; | ||
906 | if (copy_from_user(&args, argp, | ||
907 | sizeof(struct kvm_s390_skeys))) | ||
908 | break; | ||
909 | r = kvm_s390_get_skeys(kvm, &args); | ||
910 | break; | ||
911 | } | ||
912 | case KVM_S390_SET_SKEYS: { | ||
913 | struct kvm_s390_skeys args; | ||
914 | |||
915 | r = -EFAULT; | ||
916 | if (copy_from_user(&args, argp, | ||
917 | sizeof(struct kvm_s390_skeys))) | ||
918 | break; | ||
919 | r = kvm_s390_set_skeys(kvm, &args); | ||
920 | break; | ||
921 | } | ||
770 | default: | 922 | default: |
771 | r = -ENOTTY; | 923 | r = -ENOTTY; |
772 | } | 924 | } |
@@ -887,7 +1039,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
887 | 1039 | ||
888 | kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long)); | 1040 | kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long)); |
889 | if (!kvm->arch.dbf) | 1041 | if (!kvm->arch.dbf) |
890 | goto out_nodbf; | 1042 | goto out_err; |
891 | 1043 | ||
892 | /* | 1044 | /* |
893 | * The architectural maximum amount of facilities is 16 kbit. To store | 1045 | * The architectural maximum amount of facilities is 16 kbit. To store |
@@ -899,7 +1051,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
899 | kvm->arch.model.fac = | 1051 | kvm->arch.model.fac = |
900 | (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | 1052 | (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); |
901 | if (!kvm->arch.model.fac) | 1053 | if (!kvm->arch.model.fac) |
902 | goto out_nofac; | 1054 | goto out_err; |
903 | 1055 | ||
904 | /* Populate the facility mask initially. */ | 1056 | /* Populate the facility mask initially. */ |
905 | memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list, | 1057 | memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list, |
@@ -919,10 +1071,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
919 | kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; | 1071 | kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; |
920 | 1072 | ||
921 | if (kvm_s390_crypto_init(kvm) < 0) | 1073 | if (kvm_s390_crypto_init(kvm) < 0) |
922 | goto out_crypto; | 1074 | goto out_err; |
923 | 1075 | ||
924 | spin_lock_init(&kvm->arch.float_int.lock); | 1076 | spin_lock_init(&kvm->arch.float_int.lock); |
925 | INIT_LIST_HEAD(&kvm->arch.float_int.list); | 1077 | for (i = 0; i < FIRQ_LIST_COUNT; i++) |
1078 | INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); | ||
926 | init_waitqueue_head(&kvm->arch.ipte_wq); | 1079 | init_waitqueue_head(&kvm->arch.ipte_wq); |
927 | mutex_init(&kvm->arch.ipte_mutex); | 1080 | mutex_init(&kvm->arch.ipte_mutex); |
928 | 1081 | ||
@@ -934,7 +1087,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
934 | } else { | 1087 | } else { |
935 | kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1); | 1088 | kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1); |
936 | if (!kvm->arch.gmap) | 1089 | if (!kvm->arch.gmap) |
937 | goto out_nogmap; | 1090 | goto out_err; |
938 | kvm->arch.gmap->private = kvm; | 1091 | kvm->arch.gmap->private = kvm; |
939 | kvm->arch.gmap->pfault_enabled = 0; | 1092 | kvm->arch.gmap->pfault_enabled = 0; |
940 | } | 1093 | } |
@@ -946,15 +1099,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
946 | spin_lock_init(&kvm->arch.start_stop_lock); | 1099 | spin_lock_init(&kvm->arch.start_stop_lock); |
947 | 1100 | ||
948 | return 0; | 1101 | return 0; |
949 | out_nogmap: | 1102 | out_err: |
950 | kfree(kvm->arch.crypto.crycb); | 1103 | kfree(kvm->arch.crypto.crycb); |
951 | out_crypto: | ||
952 | free_page((unsigned long)kvm->arch.model.fac); | 1104 | free_page((unsigned long)kvm->arch.model.fac); |
953 | out_nofac: | ||
954 | debug_unregister(kvm->arch.dbf); | 1105 | debug_unregister(kvm->arch.dbf); |
955 | out_nodbf: | ||
956 | free_page((unsigned long)(kvm->arch.sca)); | 1106 | free_page((unsigned long)(kvm->arch.sca)); |
957 | out_err: | ||
958 | return rc; | 1107 | return rc; |
959 | } | 1108 | } |
960 | 1109 | ||
@@ -1034,6 +1183,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
1034 | KVM_SYNC_CRS | | 1183 | KVM_SYNC_CRS | |
1035 | KVM_SYNC_ARCH0 | | 1184 | KVM_SYNC_ARCH0 | |
1036 | KVM_SYNC_PFAULT; | 1185 | KVM_SYNC_PFAULT; |
1186 | if (test_kvm_facility(vcpu->kvm, 129)) | ||
1187 | vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS; | ||
1037 | 1188 | ||
1038 | if (kvm_is_ucontrol(vcpu->kvm)) | 1189 | if (kvm_is_ucontrol(vcpu->kvm)) |
1039 | return __kvm_ucontrol_vcpu_init(vcpu); | 1190 | return __kvm_ucontrol_vcpu_init(vcpu); |
@@ -1044,10 +1195,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
1044 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 1195 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
1045 | { | 1196 | { |
1046 | save_fp_ctl(&vcpu->arch.host_fpregs.fpc); | 1197 | save_fp_ctl(&vcpu->arch.host_fpregs.fpc); |
1047 | save_fp_regs(vcpu->arch.host_fpregs.fprs); | 1198 | if (test_kvm_facility(vcpu->kvm, 129)) |
1199 | save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); | ||
1200 | else | ||
1201 | save_fp_regs(vcpu->arch.host_fpregs.fprs); | ||
1048 | save_access_regs(vcpu->arch.host_acrs); | 1202 | save_access_regs(vcpu->arch.host_acrs); |
1049 | restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); | 1203 | if (test_kvm_facility(vcpu->kvm, 129)) { |
1050 | restore_fp_regs(vcpu->arch.guest_fpregs.fprs); | 1204 | restore_fp_ctl(&vcpu->run->s.regs.fpc); |
1205 | restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); | ||
1206 | } else { | ||
1207 | restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); | ||
1208 | restore_fp_regs(vcpu->arch.guest_fpregs.fprs); | ||
1209 | } | ||
1051 | restore_access_regs(vcpu->run->s.regs.acrs); | 1210 | restore_access_regs(vcpu->run->s.regs.acrs); |
1052 | gmap_enable(vcpu->arch.gmap); | 1211 | gmap_enable(vcpu->arch.gmap); |
1053 | atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); | 1212 | atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); |
@@ -1057,11 +1216,19 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | |||
1057 | { | 1216 | { |
1058 | atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); | 1217 | atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); |
1059 | gmap_disable(vcpu->arch.gmap); | 1218 | gmap_disable(vcpu->arch.gmap); |
1060 | save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); | 1219 | if (test_kvm_facility(vcpu->kvm, 129)) { |
1061 | save_fp_regs(vcpu->arch.guest_fpregs.fprs); | 1220 | save_fp_ctl(&vcpu->run->s.regs.fpc); |
1221 | save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); | ||
1222 | } else { | ||
1223 | save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); | ||
1224 | save_fp_regs(vcpu->arch.guest_fpregs.fprs); | ||
1225 | } | ||
1062 | save_access_regs(vcpu->run->s.regs.acrs); | 1226 | save_access_regs(vcpu->run->s.regs.acrs); |
1063 | restore_fp_ctl(&vcpu->arch.host_fpregs.fpc); | 1227 | restore_fp_ctl(&vcpu->arch.host_fpregs.fpc); |
1064 | restore_fp_regs(vcpu->arch.host_fpregs.fprs); | 1228 | if (test_kvm_facility(vcpu->kvm, 129)) |
1229 | restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); | ||
1230 | else | ||
1231 | restore_fp_regs(vcpu->arch.host_fpregs.fprs); | ||
1065 | restore_access_regs(vcpu->arch.host_acrs); | 1232 | restore_access_regs(vcpu->arch.host_acrs); |
1066 | } | 1233 | } |
1067 | 1234 | ||
@@ -1129,6 +1296,15 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu) | |||
1129 | return 0; | 1296 | return 0; |
1130 | } | 1297 | } |
1131 | 1298 | ||
1299 | static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu) | ||
1300 | { | ||
1301 | struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; | ||
1302 | |||
1303 | vcpu->arch.cpu_id = model->cpu_id; | ||
1304 | vcpu->arch.sie_block->ibc = model->ibc; | ||
1305 | vcpu->arch.sie_block->fac = (int) (long) model->fac->list; | ||
1306 | } | ||
1307 | |||
1132 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | 1308 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) |
1133 | { | 1309 | { |
1134 | int rc = 0; | 1310 | int rc = 0; |
@@ -1137,6 +1313,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
1137 | CPUSTAT_SM | | 1313 | CPUSTAT_SM | |
1138 | CPUSTAT_STOPPED | | 1314 | CPUSTAT_STOPPED | |
1139 | CPUSTAT_GED); | 1315 | CPUSTAT_GED); |
1316 | kvm_s390_vcpu_setup_model(vcpu); | ||
1317 | |||
1140 | vcpu->arch.sie_block->ecb = 6; | 1318 | vcpu->arch.sie_block->ecb = 6; |
1141 | if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73)) | 1319 | if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73)) |
1142 | vcpu->arch.sie_block->ecb |= 0x10; | 1320 | vcpu->arch.sie_block->ecb |= 0x10; |
@@ -1147,8 +1325,11 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
1147 | vcpu->arch.sie_block->eca |= 1; | 1325 | vcpu->arch.sie_block->eca |= 1; |
1148 | if (sclp_has_sigpif()) | 1326 | if (sclp_has_sigpif()) |
1149 | vcpu->arch.sie_block->eca |= 0x10000000U; | 1327 | vcpu->arch.sie_block->eca |= 0x10000000U; |
1150 | vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE | | 1328 | if (test_kvm_facility(vcpu->kvm, 129)) { |
1151 | ICTL_TPROT; | 1329 | vcpu->arch.sie_block->eca |= 0x00020000; |
1330 | vcpu->arch.sie_block->ecd |= 0x20000000; | ||
1331 | } | ||
1332 | vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; | ||
1152 | 1333 | ||
1153 | if (kvm_s390_cmma_enabled(vcpu->kvm)) { | 1334 | if (kvm_s390_cmma_enabled(vcpu->kvm)) { |
1154 | rc = kvm_s390_vcpu_setup_cmma(vcpu); | 1335 | rc = kvm_s390_vcpu_setup_cmma(vcpu); |
@@ -1158,11 +1339,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
1158 | hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 1339 | hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
1159 | vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; | 1340 | vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; |
1160 | 1341 | ||
1161 | mutex_lock(&vcpu->kvm->lock); | ||
1162 | vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id; | ||
1163 | vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc; | ||
1164 | mutex_unlock(&vcpu->kvm->lock); | ||
1165 | |||
1166 | kvm_s390_vcpu_crypto_setup(vcpu); | 1342 | kvm_s390_vcpu_crypto_setup(vcpu); |
1167 | 1343 | ||
1168 | return rc; | 1344 | return rc; |
@@ -1190,6 +1366,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | |||
1190 | 1366 | ||
1191 | vcpu->arch.sie_block = &sie_page->sie_block; | 1367 | vcpu->arch.sie_block = &sie_page->sie_block; |
1192 | vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; | 1368 | vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; |
1369 | vcpu->arch.host_vregs = &sie_page->vregs; | ||
1193 | 1370 | ||
1194 | vcpu->arch.sie_block->icpua = id; | 1371 | vcpu->arch.sie_block->icpua = id; |
1195 | if (!kvm_is_ucontrol(kvm)) { | 1372 | if (!kvm_is_ucontrol(kvm)) { |
@@ -1205,7 +1382,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | |||
1205 | vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; | 1382 | vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; |
1206 | set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); | 1383 | set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); |
1207 | } | 1384 | } |
1208 | vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list; | ||
1209 | 1385 | ||
1210 | spin_lock_init(&vcpu->arch.local_int.lock); | 1386 | spin_lock_init(&vcpu->arch.local_int.lock); |
1211 | vcpu->arch.local_int.float_int = &kvm->arch.float_int; | 1387 | vcpu->arch.local_int.float_int = &kvm->arch.float_int; |
@@ -1725,6 +1901,31 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu) | |||
1725 | return 0; | 1901 | return 0; |
1726 | } | 1902 | } |
1727 | 1903 | ||
1904 | static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu) | ||
1905 | { | ||
1906 | psw_t *psw = &vcpu->arch.sie_block->gpsw; | ||
1907 | u8 opcode; | ||
1908 | int rc; | ||
1909 | |||
1910 | VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); | ||
1911 | trace_kvm_s390_sie_fault(vcpu); | ||
1912 | |||
1913 | /* | ||
1914 | * We want to inject an addressing exception, which is defined as a | ||
1915 | * suppressing or terminating exception. However, since we came here | ||
1916 | * by a DAT access exception, the PSW still points to the faulting | ||
1917 | * instruction since DAT exceptions are nullifying. So we've got | ||
1918 | * to look up the current opcode to get the length of the instruction | ||
1919 | * to be able to forward the PSW. | ||
1920 | */ | ||
1921 | rc = read_guest(vcpu, psw->addr, 0, &opcode, 1); | ||
1922 | if (rc) | ||
1923 | return kvm_s390_inject_prog_cond(vcpu, rc); | ||
1924 | psw->addr = __rewind_psw(*psw, -insn_length(opcode)); | ||
1925 | |||
1926 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | ||
1927 | } | ||
1928 | |||
1728 | static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) | 1929 | static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) |
1729 | { | 1930 | { |
1730 | int rc = -1; | 1931 | int rc = -1; |
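
The new vcpu_post_run_fault_in_sie() helper only works because the length of an s390 instruction can be derived from its first opcode byte: the two leftmost bits encode 2, 4 or 6 bytes. A minimal sketch of such a decoder, under the assumption that the in-tree insn_length() helper behaves this way:

    /* 00xxxxxx -> 2 bytes, 01xxxxxx/10xxxxxx -> 4 bytes, 11xxxxxx -> 6 bytes */
    static inline int insn_length_sketch(unsigned char code)
    {
            return ((((int) code + 64) >> 7) + 1) << 1;
    }

Passing the negated length to __rewind_psw() then steps the PSW forward over the instruction that took the DAT exception, so the addressing exception is delivered with the PSW already past it, as the comment in the hunk describes.
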
@@ -1756,11 +1957,8 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) | |||
1756 | } | 1957 | } |
1757 | } | 1958 | } |
1758 | 1959 | ||
1759 | if (rc == -1) { | 1960 | if (rc == -1) |
1760 | VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); | 1961 | rc = vcpu_post_run_fault_in_sie(vcpu); |
1761 | trace_kvm_s390_sie_fault(vcpu); | ||
1762 | rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | ||
1763 | } | ||
1764 | 1962 | ||
1765 | memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); | 1963 | memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); |
1766 | 1964 | ||
@@ -1976,6 +2174,35 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) | |||
1976 | return kvm_s390_store_status_unloaded(vcpu, addr); | 2174 | return kvm_s390_store_status_unloaded(vcpu, addr); |
1977 | } | 2175 | } |
1978 | 2176 | ||
2177 | /* | ||
2178 | * store additional status at address | ||
2179 | */ | ||
2180 | int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu, | ||
2181 | unsigned long gpa) | ||
2182 | { | ||
2183 | /* Only bits 0-53 are used for address formation */ | ||
2184 | if (!(gpa & ~0x3ff)) | ||
2185 | return 0; | ||
2186 | |||
2187 | return write_guest_abs(vcpu, gpa & ~0x3ff, | ||
2188 | (void *)&vcpu->run->s.regs.vrs, 512); | ||
2189 | } | ||
2190 | |||
2191 | int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr) | ||
2192 | { | ||
2193 | if (!test_kvm_facility(vcpu->kvm, 129)) | ||
2194 | return 0; | ||
2195 | |||
2196 | /* | ||
2197 | * The guest VXRS are in the host VXRs due to the lazy | ||
2198 | * copying in vcpu load/put. Let's update our copies before we save | ||
2199 | * it into the save area. | ||
2200 | */ | ||
2201 | save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); | ||
2202 | |||
2203 | return kvm_s390_store_adtl_status_unloaded(vcpu, addr); | ||
2204 | } | ||
2205 | |||
1979 | static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) | 2206 | static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) |
1980 | { | 2207 | { |
1981 | kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); | 2208 | kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); |
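
As a quick size check on the store above, assuming the additional-status area holds exactly the 32 vector registers of 16 bytes each (the macro names are illustrative only):

    #define ADTL_STATUS_VRS  32
    #define ADTL_VR_BYTES    16
    /* 32 * 16 = 512 bytes, the length passed to write_guest_abs() above */
    _Static_assert(ADTL_STATUS_VRS * ADTL_VR_BYTES == 512, "additional status size");

Masking the address with ~0x3ff keeps bits 0-53 of the 64-bit address, i.e. the area is 1024-byte aligned; an address whose significant bits are all zero therefore makes the function return without storing anything.
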
@@ -2100,6 +2327,65 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, | |||
2100 | return r; | 2327 | return r; |
2101 | } | 2328 | } |
2102 | 2329 | ||
2330 | static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu, | ||
2331 | struct kvm_s390_mem_op *mop) | ||
2332 | { | ||
2333 | void __user *uaddr = (void __user *)mop->buf; | ||
2334 | void *tmpbuf = NULL; | ||
2335 | int r, srcu_idx; | ||
2336 | const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION | ||
2337 | | KVM_S390_MEMOP_F_CHECK_ONLY; | ||
2338 | |||
2339 | if (mop->flags & ~supported_flags) | ||
2340 | return -EINVAL; | ||
2341 | |||
2342 | if (mop->size > MEM_OP_MAX_SIZE) | ||
2343 | return -E2BIG; | ||
2344 | |||
2345 | if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { | ||
2346 | tmpbuf = vmalloc(mop->size); | ||
2347 | if (!tmpbuf) | ||
2348 | return -ENOMEM; | ||
2349 | } | ||
2350 | |||
2351 | srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
2352 | |||
2353 | switch (mop->op) { | ||
2354 | case KVM_S390_MEMOP_LOGICAL_READ: | ||
2355 | if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { | ||
2356 | r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false); | ||
2357 | break; | ||
2358 | } | ||
2359 | r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); | ||
2360 | if (r == 0) { | ||
2361 | if (copy_to_user(uaddr, tmpbuf, mop->size)) | ||
2362 | r = -EFAULT; | ||
2363 | } | ||
2364 | break; | ||
2365 | case KVM_S390_MEMOP_LOGICAL_WRITE: | ||
2366 | if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { | ||
2367 | r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true); | ||
2368 | break; | ||
2369 | } | ||
2370 | if (copy_from_user(tmpbuf, uaddr, mop->size)) { | ||
2371 | r = -EFAULT; | ||
2372 | break; | ||
2373 | } | ||
2374 | r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); | ||
2375 | break; | ||
2376 | default: | ||
2377 | r = -EINVAL; | ||
2378 | } | ||
2379 | |||
2380 | srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); | ||
2381 | |||
2382 | if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0) | ||
2383 | kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); | ||
2384 | |||
2385 | vfree(tmpbuf); | ||
2386 | return r; | ||
2387 | } | ||
2388 | |||
2103 | long kvm_arch_vcpu_ioctl(struct file *filp, | 2389 | long kvm_arch_vcpu_ioctl(struct file *filp, |
2104 | unsigned int ioctl, unsigned long arg) | 2390 | unsigned int ioctl, unsigned long arg) |
2105 | { | 2391 | { |
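
For reference, a hedged userspace sketch of driving the new ioctl; the field names follow the kvm_s390_mem_op uapi structure this series introduces, and read_guest_mem() plus the constants used are illustrative rather than taken from any particular VMM:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* read 256 bytes from guest logical address 0x1000 via the vcpu fd */
    static int read_guest_mem(int vcpu_fd, void *dst)
    {
            struct kvm_s390_mem_op op;

            memset(&op, 0, sizeof(op));            /* reserved fields must be zero */
            op.gaddr = 0x1000;                     /* guest logical address */
            op.size  = 256;                        /* bounded by MEM_OP_MAX_SIZE in the kernel */
            op.op    = KVM_S390_MEMOP_LOGICAL_READ;
            op.buf   = (unsigned long)dst;         /* userspace buffer */
            op.ar    = 0;                          /* access register number */

            return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
    }

A return value of 0 means the access succeeded, a positive value is the program interruption code the access would raise (only injected into the guest when KVM_S390_MEMOP_F_INJECT_EXCEPTION is set), and a negative value is an ordinary errno.
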
@@ -2109,6 +2395,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
2109 | long r; | 2395 | long r; |
2110 | 2396 | ||
2111 | switch (ioctl) { | 2397 | switch (ioctl) { |
2398 | case KVM_S390_IRQ: { | ||
2399 | struct kvm_s390_irq s390irq; | ||
2400 | |||
2401 | r = -EFAULT; | ||
2402 | if (copy_from_user(&s390irq, argp, sizeof(s390irq))) | ||
2403 | break; | ||
2404 | r = kvm_s390_inject_vcpu(vcpu, &s390irq); | ||
2405 | break; | ||
2406 | } | ||
2112 | case KVM_S390_INTERRUPT: { | 2407 | case KVM_S390_INTERRUPT: { |
2113 | struct kvm_s390_interrupt s390int; | 2408 | struct kvm_s390_interrupt s390int; |
2114 | struct kvm_s390_irq s390irq; | 2409 | struct kvm_s390_irq s390irq; |
@@ -2199,6 +2494,47 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
2199 | r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); | 2494 | r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); |
2200 | break; | 2495 | break; |
2201 | } | 2496 | } |
2497 | case KVM_S390_MEM_OP: { | ||
2498 | struct kvm_s390_mem_op mem_op; | ||
2499 | |||
2500 | if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0) | ||
2501 | r = kvm_s390_guest_mem_op(vcpu, &mem_op); | ||
2502 | else | ||
2503 | r = -EFAULT; | ||
2504 | break; | ||
2505 | } | ||
2506 | case KVM_S390_SET_IRQ_STATE: { | ||
2507 | struct kvm_s390_irq_state irq_state; | ||
2508 | |||
2509 | r = -EFAULT; | ||
2510 | if (copy_from_user(&irq_state, argp, sizeof(irq_state))) | ||
2511 | break; | ||
2512 | if (irq_state.len > VCPU_IRQS_MAX_BUF || | ||
2513 | irq_state.len == 0 || | ||
2514 | irq_state.len % sizeof(struct kvm_s390_irq) > 0) { | ||
2515 | r = -EINVAL; | ||
2516 | break; | ||
2517 | } | ||
2518 | r = kvm_s390_set_irq_state(vcpu, | ||
2519 | (void __user *) irq_state.buf, | ||
2520 | irq_state.len); | ||
2521 | break; | ||
2522 | } | ||
2523 | case KVM_S390_GET_IRQ_STATE: { | ||
2524 | struct kvm_s390_irq_state irq_state; | ||
2525 | |||
2526 | r = -EFAULT; | ||
2527 | if (copy_from_user(&irq_state, argp, sizeof(irq_state))) | ||
2528 | break; | ||
2529 | if (irq_state.len == 0) { | ||
2530 | r = -EINVAL; | ||
2531 | break; | ||
2532 | } | ||
2533 | r = kvm_s390_get_irq_state(vcpu, | ||
2534 | (__u8 __user *) irq_state.buf, | ||
2535 | irq_state.len); | ||
2536 | break; | ||
2537 | } | ||
2202 | default: | 2538 | default: |
2203 | r = -ENOTTY; | 2539 | r = -ENOTTY; |
2204 | } | 2540 | } |
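
A matching userspace sketch for snapshotting the per-vcpu interrupt state, e.g. for migration; the kvm_s390_irq_state layout is assumed from the uapi added with this series and save_irq_state() is an illustrative helper:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static long save_irq_state(int vcpu_fd, struct kvm_s390_irq *irqs, int max_irqs)
    {
            struct kvm_s390_irq_state state = {
                    .buf = (unsigned long)irqs,
                    .len = max_irqs * sizeof(*irqs),   /* buffer size in bytes */
            };

            /* returns the amount of interrupt data copied out, or a negative errno */
            return ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &state);
    }

Note that the SET path above is stricter than the GET path: the length must be non-zero, at most VCPU_IRQS_MAX_BUF, and an exact multiple of sizeof(struct kvm_s390_irq), otherwise the ioctl fails with -EINVAL.
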
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index c34109aa552d..ca108b90ae56 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h | |||
@@ -70,16 +70,22 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix) | |||
70 | kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); | 70 | kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); |
71 | } | 71 | } |
72 | 72 | ||
73 | static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu) | 73 | typedef u8 __bitwise ar_t; |
74 | |||
75 | static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar) | ||
74 | { | 76 | { |
75 | u32 base2 = vcpu->arch.sie_block->ipb >> 28; | 77 | u32 base2 = vcpu->arch.sie_block->ipb >> 28; |
76 | u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); | 78 | u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); |
77 | 79 | ||
80 | if (ar) | ||
81 | *ar = base2; | ||
82 | |||
78 | return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; | 83 | return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; |
79 | } | 84 | } |
80 | 85 | ||
81 | static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu, | 86 | static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu, |
82 | u64 *address1, u64 *address2) | 87 | u64 *address1, u64 *address2, |
88 | ar_t *ar_b1, ar_t *ar_b2) | ||
83 | { | 89 | { |
84 | u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28; | 90 | u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28; |
85 | u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16; | 91 | u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16; |
@@ -88,6 +94,11 @@ static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu, | |||
88 | 94 | ||
89 | *address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1; | 95 | *address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1; |
90 | *address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; | 96 | *address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; |
97 | |||
98 | if (ar_b1) | ||
99 | *ar_b1 = base1; | ||
100 | if (ar_b2) | ||
101 | *ar_b2 = base2; | ||
91 | } | 102 | } |
92 | 103 | ||
93 | static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2) | 104 | static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2) |
@@ -98,7 +109,7 @@ static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2 | |||
98 | *r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16; | 109 | *r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16; |
99 | } | 110 | } |
100 | 111 | ||
101 | static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu) | 112 | static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar) |
102 | { | 113 | { |
103 | u32 base2 = vcpu->arch.sie_block->ipb >> 28; | 114 | u32 base2 = vcpu->arch.sie_block->ipb >> 28; |
104 | u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) + | 115 | u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) + |
@@ -107,14 +118,20 @@ static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu) | |||
107 | if (disp2 & 0x80000) | 118 | if (disp2 & 0x80000) |
108 | disp2+=0xfff00000; | 119 | disp2+=0xfff00000; |
109 | 120 | ||
121 | if (ar) | ||
122 | *ar = base2; | ||
123 | |||
110 | return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2; | 124 | return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2; |
111 | } | 125 | } |
112 | 126 | ||
113 | static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu) | 127 | static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, ar_t *ar) |
114 | { | 128 | { |
115 | u32 base2 = vcpu->arch.sie_block->ipb >> 28; | 129 | u32 base2 = vcpu->arch.sie_block->ipb >> 28; |
116 | u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); | 130 | u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); |
117 | 131 | ||
132 | if (ar) | ||
133 | *ar = base2; | ||
134 | |||
118 | return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; | 135 | return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; |
119 | } | 136 | } |
120 | 137 | ||
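
The point of the new ar_t out-parameters is that, in access-register translation mode, the base register that forms an operand address also names the access register selecting the address space, so the decoders now hand that number through to read_guest()/write_guest(). A worked decode for an S-format instruction, assuming ipb = 0x51230000:

    /*   base2 = ipb >> 28                = 5
     *   disp2 = (ipb & 0x0fff0000) >> 16 = 0x123
     *   operand address = gprs[5] + 0x123, and in access-register mode
     *   access register 5 selects the address space used for translation
     */
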
@@ -125,13 +142,24 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc) | |||
125 | vcpu->arch.sie_block->gpsw.mask |= cc << 44; | 142 | vcpu->arch.sie_block->gpsw.mask |= cc << 44; |
126 | } | 143 | } |
127 | 144 | ||
128 | /* test availability of facility in a kvm intance */ | 145 | /* test availability of facility in a kvm instance */ |
129 | static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) | 146 | static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) |
130 | { | 147 | { |
131 | return __test_facility(nr, kvm->arch.model.fac->mask) && | 148 | return __test_facility(nr, kvm->arch.model.fac->mask) && |
132 | __test_facility(nr, kvm->arch.model.fac->list); | 149 | __test_facility(nr, kvm->arch.model.fac->list); |
133 | } | 150 | } |
134 | 151 | ||
152 | static inline int set_kvm_facility(u64 *fac_list, unsigned long nr) | ||
153 | { | ||
154 | unsigned char *ptr; | ||
155 | |||
156 | if (nr >= MAX_FACILITY_BIT) | ||
157 | return -EINVAL; | ||
158 | ptr = (unsigned char *) fac_list + (nr >> 3); | ||
159 | *ptr |= (0x80UL >> (nr & 7)); | ||
160 | return 0; | ||
161 | } | ||
162 | |||
135 | /* are cpu states controlled by user space */ | 163 | /* are cpu states controlled by user space */ |
136 | static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm) | 164 | static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm) |
137 | { | 165 | { |
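
To make the bit arithmetic in set_kvm_facility() concrete, here is the calculation for facility 129, the vector facility most of this series keys its checks on; the STFLE facility list numbers bits MSB-first within each byte:

    /*   byte index = 129 >> 3          = 16
     *   bit mask   = 0x80 >> (129 & 7) = 0x80 >> 1 = 0x40
     * so set_kvm_facility(list, 129) ORs 0x40 into byte 16 of the list,
     * which is exactly the bit that __test_facility()/test_kvm_facility()
     * examine afterwards.
     */
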
@@ -150,9 +178,9 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | |||
150 | struct kvm_s390_irq *irq); | 178 | struct kvm_s390_irq *irq); |
151 | int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); | 179 | int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); |
152 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, | 180 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, |
153 | u64 cr6, u64 schid); | 181 | u64 isc_mask, u32 schid); |
154 | void kvm_s390_reinject_io_int(struct kvm *kvm, | 182 | int kvm_s390_reinject_io_int(struct kvm *kvm, |
155 | struct kvm_s390_interrupt_info *inti); | 183 | struct kvm_s390_interrupt_info *inti); |
156 | int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked); | 184 | int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked); |
157 | 185 | ||
158 | /* implemented in intercept.c */ | 186 | /* implemented in intercept.c */ |
@@ -177,7 +205,10 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu); | |||
177 | /* implemented in kvm-s390.c */ | 205 | /* implemented in kvm-s390.c */ |
178 | long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable); | 206 | long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable); |
179 | int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); | 207 | int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); |
208 | int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu, | ||
209 | unsigned long addr); | ||
180 | int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr); | 210 | int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr); |
211 | int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr); | ||
181 | void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu); | 212 | void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu); |
182 | void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu); | 213 | void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu); |
183 | void s390_vcpu_block(struct kvm_vcpu *vcpu); | 214 | void s390_vcpu_block(struct kvm_vcpu *vcpu); |
@@ -241,6 +272,10 @@ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu); | |||
241 | extern struct kvm_device_ops kvm_flic_ops; | 272 | extern struct kvm_device_ops kvm_flic_ops; |
242 | int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu); | 273 | int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu); |
243 | void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu); | 274 | void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu); |
275 | int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, | ||
276 | void __user *buf, int len); | ||
277 | int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, | ||
278 | __u8 __user *buf, int len); | ||
244 | 279 | ||
245 | /* implemented in guestdbg.c */ | 280 | /* implemented in guestdbg.c */ |
246 | void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu); | 281 | void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu); |
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 351116939ea2..d22d8ee1ff9d 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
@@ -36,15 +36,16 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) | |||
36 | struct kvm_vcpu *cpup; | 36 | struct kvm_vcpu *cpup; |
37 | s64 hostclk, val; | 37 | s64 hostclk, val; |
38 | int i, rc; | 38 | int i, rc; |
39 | ar_t ar; | ||
39 | u64 op2; | 40 | u64 op2; |
40 | 41 | ||
41 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 42 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
42 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 43 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
43 | 44 | ||
44 | op2 = kvm_s390_get_base_disp_s(vcpu); | 45 | op2 = kvm_s390_get_base_disp_s(vcpu, &ar); |
45 | if (op2 & 7) /* Operand must be on a doubleword boundary */ | 46 | if (op2 & 7) /* Operand must be on a doubleword boundary */ |
46 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 47 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
47 | rc = read_guest(vcpu, op2, &val, sizeof(val)); | 48 | rc = read_guest(vcpu, op2, ar, &val, sizeof(val)); |
48 | if (rc) | 49 | if (rc) |
49 | return kvm_s390_inject_prog_cond(vcpu, rc); | 50 | return kvm_s390_inject_prog_cond(vcpu, rc); |
50 | 51 | ||
@@ -68,20 +69,21 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu) | |||
68 | u64 operand2; | 69 | u64 operand2; |
69 | u32 address; | 70 | u32 address; |
70 | int rc; | 71 | int rc; |
72 | ar_t ar; | ||
71 | 73 | ||
72 | vcpu->stat.instruction_spx++; | 74 | vcpu->stat.instruction_spx++; |
73 | 75 | ||
74 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 76 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
75 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 77 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
76 | 78 | ||
77 | operand2 = kvm_s390_get_base_disp_s(vcpu); | 79 | operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); |
78 | 80 | ||
79 | /* must be word boundary */ | 81 | /* must be word boundary */ |
80 | if (operand2 & 3) | 82 | if (operand2 & 3) |
81 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 83 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
82 | 84 | ||
83 | /* get the value */ | 85 | /* get the value */ |
84 | rc = read_guest(vcpu, operand2, &address, sizeof(address)); | 86 | rc = read_guest(vcpu, operand2, ar, &address, sizeof(address)); |
85 | if (rc) | 87 | if (rc) |
86 | return kvm_s390_inject_prog_cond(vcpu, rc); | 88 | return kvm_s390_inject_prog_cond(vcpu, rc); |
87 | 89 | ||
@@ -107,13 +109,14 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu) | |||
107 | u64 operand2; | 109 | u64 operand2; |
108 | u32 address; | 110 | u32 address; |
109 | int rc; | 111 | int rc; |
112 | ar_t ar; | ||
110 | 113 | ||
111 | vcpu->stat.instruction_stpx++; | 114 | vcpu->stat.instruction_stpx++; |
112 | 115 | ||
113 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 116 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
114 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 117 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
115 | 118 | ||
116 | operand2 = kvm_s390_get_base_disp_s(vcpu); | 119 | operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); |
117 | 120 | ||
118 | /* must be word boundary */ | 121 | /* must be word boundary */ |
119 | if (operand2 & 3) | 122 | if (operand2 & 3) |
@@ -122,7 +125,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu) | |||
122 | address = kvm_s390_get_prefix(vcpu); | 125 | address = kvm_s390_get_prefix(vcpu); |
123 | 126 | ||
124 | /* get the value */ | 127 | /* get the value */ |
125 | rc = write_guest(vcpu, operand2, &address, sizeof(address)); | 128 | rc = write_guest(vcpu, operand2, ar, &address, sizeof(address)); |
126 | if (rc) | 129 | if (rc) |
127 | return kvm_s390_inject_prog_cond(vcpu, rc); | 130 | return kvm_s390_inject_prog_cond(vcpu, rc); |
128 | 131 | ||
@@ -136,18 +139,19 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu) | |||
136 | u16 vcpu_id = vcpu->vcpu_id; | 139 | u16 vcpu_id = vcpu->vcpu_id; |
137 | u64 ga; | 140 | u64 ga; |
138 | int rc; | 141 | int rc; |
142 | ar_t ar; | ||
139 | 143 | ||
140 | vcpu->stat.instruction_stap++; | 144 | vcpu->stat.instruction_stap++; |
141 | 145 | ||
142 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 146 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
143 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 147 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
144 | 148 | ||
145 | ga = kvm_s390_get_base_disp_s(vcpu); | 149 | ga = kvm_s390_get_base_disp_s(vcpu, &ar); |
146 | 150 | ||
147 | if (ga & 1) | 151 | if (ga & 1) |
148 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 152 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
149 | 153 | ||
150 | rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id)); | 154 | rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id)); |
151 | if (rc) | 155 | if (rc) |
152 | return kvm_s390_inject_prog_cond(vcpu, rc); | 156 | return kvm_s390_inject_prog_cond(vcpu, rc); |
153 | 157 | ||
@@ -207,7 +211,7 @@ static int handle_test_block(struct kvm_vcpu *vcpu) | |||
207 | kvm_s390_get_regs_rre(vcpu, NULL, ®2); | 211 | kvm_s390_get_regs_rre(vcpu, NULL, ®2); |
208 | addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; | 212 | addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; |
209 | addr = kvm_s390_logical_to_effective(vcpu, addr); | 213 | addr = kvm_s390_logical_to_effective(vcpu, addr); |
210 | if (kvm_s390_check_low_addr_protection(vcpu, addr)) | 214 | if (kvm_s390_check_low_addr_prot_real(vcpu, addr)) |
211 | return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); | 215 | return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); |
212 | addr = kvm_s390_real_to_abs(vcpu, addr); | 216 | addr = kvm_s390_real_to_abs(vcpu, addr); |
213 | 217 | ||
@@ -229,18 +233,20 @@ static int handle_tpi(struct kvm_vcpu *vcpu) | |||
229 | struct kvm_s390_interrupt_info *inti; | 233 | struct kvm_s390_interrupt_info *inti; |
230 | unsigned long len; | 234 | unsigned long len; |
231 | u32 tpi_data[3]; | 235 | u32 tpi_data[3]; |
232 | int cc, rc; | 236 | int rc; |
233 | u64 addr; | 237 | u64 addr; |
238 | ar_t ar; | ||
234 | 239 | ||
235 | rc = 0; | 240 | addr = kvm_s390_get_base_disp_s(vcpu, &ar); |
236 | addr = kvm_s390_get_base_disp_s(vcpu); | ||
237 | if (addr & 3) | 241 | if (addr & 3) |
238 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 242 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
239 | cc = 0; | 243 | |
240 | inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0); | 244 | inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0); |
241 | if (!inti) | 245 | if (!inti) { |
242 | goto no_interrupt; | 246 | kvm_s390_set_psw_cc(vcpu, 0); |
243 | cc = 1; | 247 | return 0; |
248 | } | ||
249 | |||
244 | tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr; | 250 | tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr; |
245 | tpi_data[1] = inti->io.io_int_parm; | 251 | tpi_data[1] = inti->io.io_int_parm; |
246 | tpi_data[2] = inti->io.io_int_word; | 252 | tpi_data[2] = inti->io.io_int_word; |
@@ -250,40 +256,51 @@ static int handle_tpi(struct kvm_vcpu *vcpu) | |||
250 | * provided area. | 256 | * provided area. |
251 | */ | 257 | */ |
252 | len = sizeof(tpi_data) - 4; | 258 | len = sizeof(tpi_data) - 4; |
253 | rc = write_guest(vcpu, addr, &tpi_data, len); | 259 | rc = write_guest(vcpu, addr, ar, &tpi_data, len); |
254 | if (rc) | 260 | if (rc) { |
255 | return kvm_s390_inject_prog_cond(vcpu, rc); | 261 | rc = kvm_s390_inject_prog_cond(vcpu, rc); |
262 | goto reinject_interrupt; | ||
263 | } | ||
256 | } else { | 264 | } else { |
257 | /* | 265 | /* |
258 | * Store the three-word I/O interruption code into | 266 | * Store the three-word I/O interruption code into |
259 | * the appropriate lowcore area. | 267 | * the appropriate lowcore area. |
260 | */ | 268 | */ |
261 | len = sizeof(tpi_data); | 269 | len = sizeof(tpi_data); |
262 | if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) | 270 | if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) { |
271 | /* failed writes to the low core are not recoverable */ | ||
263 | rc = -EFAULT; | 272 | rc = -EFAULT; |
273 | goto reinject_interrupt; | ||
274 | } | ||
264 | } | 275 | } |
276 | |||
277 | /* irq was successfully handed to the guest */ | ||
278 | kfree(inti); | ||
279 | kvm_s390_set_psw_cc(vcpu, 1); | ||
280 | return 0; | ||
281 | reinject_interrupt: | ||
265 | /* | 282 | /* |
266 | * If we encounter a problem storing the interruption code, the | 283 | * If we encounter a problem storing the interruption code, the |
267 | * instruction is suppressed from the guest's view: reinject the | 284 | * instruction is suppressed from the guest's view: reinject the |
268 | * interrupt. | 285 | * interrupt. |
269 | */ | 286 | */ |
270 | if (!rc) | 287 | if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) { |
271 | kfree(inti); | 288 | kfree(inti); |
272 | else | 289 | rc = -EFAULT; |
273 | kvm_s390_reinject_io_int(vcpu->kvm, inti); | 290 | } |
274 | no_interrupt: | 291 | /* don't set the cc, a pgm irq was injected or we drop to user space */ |
275 | /* Set condition code and we're done. */ | ||
276 | if (!rc) | ||
277 | kvm_s390_set_psw_cc(vcpu, cc); | ||
278 | return rc ? -EFAULT : 0; | 292 | return rc ? -EFAULT : 0; |
279 | } | 293 | } |
280 | 294 | ||
281 | static int handle_tsch(struct kvm_vcpu *vcpu) | 295 | static int handle_tsch(struct kvm_vcpu *vcpu) |
282 | { | 296 | { |
283 | struct kvm_s390_interrupt_info *inti; | 297 | struct kvm_s390_interrupt_info *inti = NULL; |
298 | const u64 isc_mask = 0xffUL << 24; /* all iscs set */ | ||
284 | 299 | ||
285 | inti = kvm_s390_get_io_int(vcpu->kvm, 0, | 300 | /* a valid schid has at least one bit set */ |
286 | vcpu->run->s.regs.gprs[1]); | 301 | if (vcpu->run->s.regs.gprs[1]) |
302 | inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask, | ||
303 | vcpu->run->s.regs.gprs[1]); | ||
287 | 304 | ||
288 | /* | 305 | /* |
289 | * Prepare exit to userspace. | 306 | * Prepare exit to userspace. |
@@ -386,15 +403,16 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) | |||
386 | psw_compat_t new_psw; | 403 | psw_compat_t new_psw; |
387 | u64 addr; | 404 | u64 addr; |
388 | int rc; | 405 | int rc; |
406 | ar_t ar; | ||
389 | 407 | ||
390 | if (gpsw->mask & PSW_MASK_PSTATE) | 408 | if (gpsw->mask & PSW_MASK_PSTATE) |
391 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 409 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
392 | 410 | ||
393 | addr = kvm_s390_get_base_disp_s(vcpu); | 411 | addr = kvm_s390_get_base_disp_s(vcpu, &ar); |
394 | if (addr & 7) | 412 | if (addr & 7) |
395 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 413 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
396 | 414 | ||
397 | rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw)); | 415 | rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw)); |
398 | if (rc) | 416 | if (rc) |
399 | return kvm_s390_inject_prog_cond(vcpu, rc); | 417 | return kvm_s390_inject_prog_cond(vcpu, rc); |
400 | if (!(new_psw.mask & PSW32_MASK_BASE)) | 418 | if (!(new_psw.mask & PSW32_MASK_BASE)) |
@@ -412,14 +430,15 @@ static int handle_lpswe(struct kvm_vcpu *vcpu) | |||
412 | psw_t new_psw; | 430 | psw_t new_psw; |
413 | u64 addr; | 431 | u64 addr; |
414 | int rc; | 432 | int rc; |
433 | ar_t ar; | ||
415 | 434 | ||
416 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 435 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
417 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 436 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
418 | 437 | ||
419 | addr = kvm_s390_get_base_disp_s(vcpu); | 438 | addr = kvm_s390_get_base_disp_s(vcpu, &ar); |
420 | if (addr & 7) | 439 | if (addr & 7) |
421 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 440 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
422 | rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw)); | 441 | rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw)); |
423 | if (rc) | 442 | if (rc) |
424 | return kvm_s390_inject_prog_cond(vcpu, rc); | 443 | return kvm_s390_inject_prog_cond(vcpu, rc); |
425 | vcpu->arch.sie_block->gpsw = new_psw; | 444 | vcpu->arch.sie_block->gpsw = new_psw; |
@@ -433,18 +452,19 @@ static int handle_stidp(struct kvm_vcpu *vcpu) | |||
433 | u64 stidp_data = vcpu->arch.stidp_data; | 452 | u64 stidp_data = vcpu->arch.stidp_data; |
434 | u64 operand2; | 453 | u64 operand2; |
435 | int rc; | 454 | int rc; |
455 | ar_t ar; | ||
436 | 456 | ||
437 | vcpu->stat.instruction_stidp++; | 457 | vcpu->stat.instruction_stidp++; |
438 | 458 | ||
439 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 459 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
440 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 460 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
441 | 461 | ||
442 | operand2 = kvm_s390_get_base_disp_s(vcpu); | 462 | operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); |
443 | 463 | ||
444 | if (operand2 & 7) | 464 | if (operand2 & 7) |
445 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 465 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
446 | 466 | ||
447 | rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data)); | 467 | rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data)); |
448 | if (rc) | 468 | if (rc) |
449 | return kvm_s390_inject_prog_cond(vcpu, rc); | 469 | return kvm_s390_inject_prog_cond(vcpu, rc); |
450 | 470 | ||
@@ -467,6 +487,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem) | |||
467 | for (n = mem->count - 1; n > 0 ; n--) | 487 | for (n = mem->count - 1; n > 0 ; n--) |
468 | memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0])); | 488 | memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0])); |
469 | 489 | ||
490 | memset(&mem->vm[0], 0, sizeof(mem->vm[0])); | ||
470 | mem->vm[0].cpus_total = cpus; | 491 | mem->vm[0].cpus_total = cpus; |
471 | mem->vm[0].cpus_configured = cpus; | 492 | mem->vm[0].cpus_configured = cpus; |
472 | mem->vm[0].cpus_standby = 0; | 493 | mem->vm[0].cpus_standby = 0; |
@@ -478,6 +499,17 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem) | |||
478 | ASCEBC(mem->vm[0].cpi, 16); | 499 | ASCEBC(mem->vm[0].cpi, 16); |
479 | } | 500 | } |
480 | 501 | ||
502 | static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar, | ||
503 | u8 fc, u8 sel1, u16 sel2) | ||
504 | { | ||
505 | vcpu->run->exit_reason = KVM_EXIT_S390_STSI; | ||
506 | vcpu->run->s390_stsi.addr = addr; | ||
507 | vcpu->run->s390_stsi.ar = ar; | ||
508 | vcpu->run->s390_stsi.fc = fc; | ||
509 | vcpu->run->s390_stsi.sel1 = sel1; | ||
510 | vcpu->run->s390_stsi.sel2 = sel2; | ||
511 | } | ||
512 | |||
481 | static int handle_stsi(struct kvm_vcpu *vcpu) | 513 | static int handle_stsi(struct kvm_vcpu *vcpu) |
482 | { | 514 | { |
483 | int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28; | 515 | int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28; |
@@ -486,6 +518,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu) | |||
486 | unsigned long mem = 0; | 518 | unsigned long mem = 0; |
487 | u64 operand2; | 519 | u64 operand2; |
488 | int rc = 0; | 520 | int rc = 0; |
521 | ar_t ar; | ||
489 | 522 | ||
490 | vcpu->stat.instruction_stsi++; | 523 | vcpu->stat.instruction_stsi++; |
491 | VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); | 524 | VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); |
@@ -508,7 +541,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu) | |||
508 | return 0; | 541 | return 0; |
509 | } | 542 | } |
510 | 543 | ||
511 | operand2 = kvm_s390_get_base_disp_s(vcpu); | 544 | operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); |
512 | 545 | ||
513 | if (operand2 & 0xfff) | 546 | if (operand2 & 0xfff) |
514 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 547 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
@@ -532,16 +565,20 @@ static int handle_stsi(struct kvm_vcpu *vcpu) | |||
532 | break; | 565 | break; |
533 | } | 566 | } |
534 | 567 | ||
535 | rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE); | 568 | rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE); |
536 | if (rc) { | 569 | if (rc) { |
537 | rc = kvm_s390_inject_prog_cond(vcpu, rc); | 570 | rc = kvm_s390_inject_prog_cond(vcpu, rc); |
538 | goto out; | 571 | goto out; |
539 | } | 572 | } |
573 | if (vcpu->kvm->arch.user_stsi) { | ||
574 | insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2); | ||
575 | rc = -EREMOTE; | ||
576 | } | ||
540 | trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); | 577 | trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); |
541 | free_page(mem); | 578 | free_page(mem); |
542 | kvm_s390_set_psw_cc(vcpu, 0); | 579 | kvm_s390_set_psw_cc(vcpu, 0); |
543 | vcpu->run->s.regs.gprs[0] = 0; | 580 | vcpu->run->s.regs.gprs[0] = 0; |
544 | return 0; | 581 | return rc; |
545 | out_no_data: | 582 | out_no_data: |
546 | kvm_s390_set_psw_cc(vcpu, 3); | 583 | kvm_s390_set_psw_cc(vcpu, 3); |
547 | out: | 584 | out: |
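
With user_stsi enabled, handle_stsi() still stores the kernel-built SYSIB but then returns -EREMOTE, so the vcpu drops to userspace with exit_reason KVM_EXIT_S390_STSI and the fields filled in by insert_stsi_usr_data(). A hedged sketch of the VMM side, where patch_sysib_322() is a placeholder for whatever the VMM wants to rewrite (for example the VM name in SYSIB 3.2.2):

    #include <linux/kvm.h>

    void patch_sysib_322(unsigned long guest_addr, unsigned char ar);  /* VMM-specific placeholder */

    static void handle_stsi_exit(struct kvm_run *run)
    {
            /* fc/sel1/sel2 identify which SYSIB the guest asked for */
            if (run->s390_stsi.fc == 3 && run->s390_stsi.sel1 == 2 &&
                run->s390_stsi.sel2 == 2)
                    patch_sysib_322(run->s390_stsi.addr, run->s390_stsi.ar);
    }
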
@@ -670,7 +707,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) | |||
670 | } | 707 | } |
671 | 708 | ||
672 | if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { | 709 | if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { |
673 | if (kvm_s390_check_low_addr_protection(vcpu, start)) | 710 | if (kvm_s390_check_low_addr_prot_real(vcpu, start)) |
674 | return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); | 711 | return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); |
675 | } | 712 | } |
676 | 713 | ||
@@ -776,13 +813,14 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) | |||
776 | int reg, rc, nr_regs; | 813 | int reg, rc, nr_regs; |
777 | u32 ctl_array[16]; | 814 | u32 ctl_array[16]; |
778 | u64 ga; | 815 | u64 ga; |
816 | ar_t ar; | ||
779 | 817 | ||
780 | vcpu->stat.instruction_lctl++; | 818 | vcpu->stat.instruction_lctl++; |
781 | 819 | ||
782 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 820 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
783 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 821 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
784 | 822 | ||
785 | ga = kvm_s390_get_base_disp_rs(vcpu); | 823 | ga = kvm_s390_get_base_disp_rs(vcpu, &ar); |
786 | 824 | ||
787 | if (ga & 3) | 825 | if (ga & 3) |
788 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 826 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
@@ -791,7 +829,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) | |||
791 | trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); | 829 | trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); |
792 | 830 | ||
793 | nr_regs = ((reg3 - reg1) & 0xf) + 1; | 831 | nr_regs = ((reg3 - reg1) & 0xf) + 1; |
794 | rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32)); | 832 | rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32)); |
795 | if (rc) | 833 | if (rc) |
796 | return kvm_s390_inject_prog_cond(vcpu, rc); | 834 | return kvm_s390_inject_prog_cond(vcpu, rc); |
797 | reg = reg1; | 835 | reg = reg1; |
@@ -814,13 +852,14 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) | |||
814 | int reg, rc, nr_regs; | 852 | int reg, rc, nr_regs; |
815 | u32 ctl_array[16]; | 853 | u32 ctl_array[16]; |
816 | u64 ga; | 854 | u64 ga; |
855 | ar_t ar; | ||
817 | 856 | ||
818 | vcpu->stat.instruction_stctl++; | 857 | vcpu->stat.instruction_stctl++; |
819 | 858 | ||
820 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 859 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
821 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 860 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
822 | 861 | ||
823 | ga = kvm_s390_get_base_disp_rs(vcpu); | 862 | ga = kvm_s390_get_base_disp_rs(vcpu, &ar); |
824 | 863 | ||
825 | if (ga & 3) | 864 | if (ga & 3) |
826 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 865 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
@@ -836,7 +875,7 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) | |||
836 | break; | 875 | break; |
837 | reg = (reg + 1) % 16; | 876 | reg = (reg + 1) % 16; |
838 | } while (1); | 877 | } while (1); |
839 | rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32)); | 878 | rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32)); |
840 | return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; | 879 | return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; |
841 | } | 880 | } |
842 | 881 | ||
@@ -847,13 +886,14 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) | |||
847 | int reg, rc, nr_regs; | 886 | int reg, rc, nr_regs; |
848 | u64 ctl_array[16]; | 887 | u64 ctl_array[16]; |
849 | u64 ga; | 888 | u64 ga; |
889 | ar_t ar; | ||
850 | 890 | ||
851 | vcpu->stat.instruction_lctlg++; | 891 | vcpu->stat.instruction_lctlg++; |
852 | 892 | ||
853 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 893 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
854 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 894 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
855 | 895 | ||
856 | ga = kvm_s390_get_base_disp_rsy(vcpu); | 896 | ga = kvm_s390_get_base_disp_rsy(vcpu, &ar); |
857 | 897 | ||
858 | if (ga & 7) | 898 | if (ga & 7) |
859 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 899 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
@@ -862,7 +902,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) | |||
862 | trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); | 902 | trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); |
863 | 903 | ||
864 | nr_regs = ((reg3 - reg1) & 0xf) + 1; | 904 | nr_regs = ((reg3 - reg1) & 0xf) + 1; |
865 | rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64)); | 905 | rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64)); |
866 | if (rc) | 906 | if (rc) |
867 | return kvm_s390_inject_prog_cond(vcpu, rc); | 907 | return kvm_s390_inject_prog_cond(vcpu, rc); |
868 | reg = reg1; | 908 | reg = reg1; |
@@ -884,13 +924,14 @@ static int handle_stctg(struct kvm_vcpu *vcpu) | |||
884 | int reg, rc, nr_regs; | 924 | int reg, rc, nr_regs; |
885 | u64 ctl_array[16]; | 925 | u64 ctl_array[16]; |
886 | u64 ga; | 926 | u64 ga; |
927 | ar_t ar; | ||
887 | 928 | ||
888 | vcpu->stat.instruction_stctg++; | 929 | vcpu->stat.instruction_stctg++; |
889 | 930 | ||
890 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 931 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
891 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 932 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
892 | 933 | ||
893 | ga = kvm_s390_get_base_disp_rsy(vcpu); | 934 | ga = kvm_s390_get_base_disp_rsy(vcpu, &ar); |
894 | 935 | ||
895 | if (ga & 7) | 936 | if (ga & 7) |
896 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 937 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
@@ -906,7 +947,7 @@ static int handle_stctg(struct kvm_vcpu *vcpu) | |||
906 | break; | 947 | break; |
907 | reg = (reg + 1) % 16; | 948 | reg = (reg + 1) % 16; |
908 | } while (1); | 949 | } while (1); |
909 | rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64)); | 950 | rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64)); |
910 | return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; | 951 | return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; |
911 | } | 952 | } |
912 | 953 | ||
@@ -931,13 +972,14 @@ static int handle_tprot(struct kvm_vcpu *vcpu) | |||
931 | unsigned long hva, gpa; | 972 | unsigned long hva, gpa; |
932 | int ret = 0, cc = 0; | 973 | int ret = 0, cc = 0; |
933 | bool writable; | 974 | bool writable; |
975 | ar_t ar; | ||
934 | 976 | ||
935 | vcpu->stat.instruction_tprot++; | 977 | vcpu->stat.instruction_tprot++; |
936 | 978 | ||
937 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 979 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
938 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 980 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
939 | 981 | ||
940 | kvm_s390_get_base_disp_sse(vcpu, &address1, &address2); | 982 | kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL); |
941 | 983 | ||
942 | /* we only handle the Linux memory detection case: | 984 | /* we only handle the Linux memory detection case: |
943 | * access key == 0 | 985 | * access key == 0 |
@@ -946,11 +988,11 @@ static int handle_tprot(struct kvm_vcpu *vcpu) | |||
946 | return -EOPNOTSUPP; | 988 | return -EOPNOTSUPP; |
947 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) | 989 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) |
948 | ipte_lock(vcpu); | 990 | ipte_lock(vcpu); |
949 | ret = guest_translate_address(vcpu, address1, &gpa, 1); | 991 | ret = guest_translate_address(vcpu, address1, ar, &gpa, 1); |
950 | if (ret == PGM_PROTECTION) { | 992 | if (ret == PGM_PROTECTION) { |
951 | /* Write protected? Try again with read-only... */ | 993 | /* Write protected? Try again with read-only... */ |
952 | cc = 1; | 994 | cc = 1; |
953 | ret = guest_translate_address(vcpu, address1, &gpa, 0); | 995 | ret = guest_translate_address(vcpu, address1, ar, &gpa, 0); |
954 | } | 996 | } |
955 | if (ret) { | 997 | if (ret) { |
956 | if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) { | 998 | if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) { |
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 23b1e86b2122..72e58bd2bee7 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c | |||
@@ -393,6 +393,9 @@ static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code) | |||
393 | case SIGP_STORE_STATUS_AT_ADDRESS: | 393 | case SIGP_STORE_STATUS_AT_ADDRESS: |
394 | vcpu->stat.instruction_sigp_store_status++; | 394 | vcpu->stat.instruction_sigp_store_status++; |
395 | break; | 395 | break; |
396 | case SIGP_STORE_ADDITIONAL_STATUS: | ||
397 | vcpu->stat.instruction_sigp_store_adtl_status++; | ||
398 | break; | ||
396 | case SIGP_SET_PREFIX: | 399 | case SIGP_SET_PREFIX: |
397 | vcpu->stat.instruction_sigp_prefix++; | 400 | vcpu->stat.instruction_sigp_prefix++; |
398 | break; | 401 | break; |
@@ -431,7 +434,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) | |||
431 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 434 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
432 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 435 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
433 | 436 | ||
434 | order_code = kvm_s390_get_base_disp_rs(vcpu); | 437 | order_code = kvm_s390_get_base_disp_rs(vcpu, NULL); |
435 | if (handle_sigp_order_in_user_space(vcpu, order_code)) | 438 | if (handle_sigp_order_in_user_space(vcpu, order_code)) |
436 | return -EOPNOTSUPP; | 439 | return -EOPNOTSUPP; |
437 | 440 | ||
@@ -473,7 +476,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu) | |||
473 | int r3 = vcpu->arch.sie_block->ipa & 0x000f; | 476 | int r3 = vcpu->arch.sie_block->ipa & 0x000f; |
474 | u16 cpu_addr = vcpu->run->s.regs.gprs[r3]; | 477 | u16 cpu_addr = vcpu->run->s.regs.gprs[r3]; |
475 | struct kvm_vcpu *dest_vcpu; | 478 | struct kvm_vcpu *dest_vcpu; |
476 | u8 order_code = kvm_s390_get_base_disp_rs(vcpu); | 479 | u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL); |
477 | 480 | ||
478 | trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); | 481 | trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); |
479 | 482 | ||
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index a236e39cc385..dea2e7e962e3 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -81,11 +81,6 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) | |||
81 | (base_gfn >> KVM_HPAGE_GFN_SHIFT(level)); | 81 | (base_gfn >> KVM_HPAGE_GFN_SHIFT(level)); |
82 | } | 82 | } |
83 | 83 | ||
84 | #define SELECTOR_TI_MASK (1 << 2) | ||
85 | #define SELECTOR_RPL_MASK 0x03 | ||
86 | |||
87 | #define IOPL_SHIFT 12 | ||
88 | |||
89 | #define KVM_PERMILLE_MMU_PAGES 20 | 84 | #define KVM_PERMILLE_MMU_PAGES 20 |
90 | #define KVM_MIN_ALLOC_MMU_PAGES 64 | 85 | #define KVM_MIN_ALLOC_MMU_PAGES 64 |
91 | #define KVM_MMU_HASH_SHIFT 10 | 86 | #define KVM_MMU_HASH_SHIFT 10 |
@@ -345,6 +340,7 @@ struct kvm_pmu { | |||
345 | enum { | 340 | enum { |
346 | KVM_DEBUGREG_BP_ENABLED = 1, | 341 | KVM_DEBUGREG_BP_ENABLED = 1, |
347 | KVM_DEBUGREG_WONT_EXIT = 2, | 342 | KVM_DEBUGREG_WONT_EXIT = 2, |
343 | KVM_DEBUGREG_RELOAD = 4, | ||
348 | }; | 344 | }; |
349 | 345 | ||
350 | struct kvm_vcpu_arch { | 346 | struct kvm_vcpu_arch { |
@@ -431,6 +427,9 @@ struct kvm_vcpu_arch { | |||
431 | 427 | ||
432 | int cpuid_nent; | 428 | int cpuid_nent; |
433 | struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES]; | 429 | struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES]; |
430 | |||
431 | int maxphyaddr; | ||
432 | |||
434 | /* emulate context */ | 433 | /* emulate context */ |
435 | 434 | ||
436 | struct x86_emulate_ctxt emulate_ctxt; | 435 | struct x86_emulate_ctxt emulate_ctxt; |
@@ -550,11 +549,20 @@ struct kvm_arch_memory_slot { | |||
550 | struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1]; | 549 | struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1]; |
551 | }; | 550 | }; |
552 | 551 | ||
552 | /* | ||
553 | * We use as the mode the number of bits allocated in the LDR for the | ||
554 | * logical processor ID. It happens that these are all powers of two. | ||
555 | * This makes it very easy to detect cases where the APICs are | ||
556 | * configured for multiple modes; in that case, we cannot use the map and | ||
557 | * hence cannot use kvm_irq_delivery_to_apic_fast either. | ||
558 | */ | ||
559 | #define KVM_APIC_MODE_XAPIC_CLUSTER 4 | ||
560 | #define KVM_APIC_MODE_XAPIC_FLAT 8 | ||
561 | #define KVM_APIC_MODE_X2APIC 16 | ||
562 | |||
553 | struct kvm_apic_map { | 563 | struct kvm_apic_map { |
554 | struct rcu_head rcu; | 564 | struct rcu_head rcu; |
555 | u8 ldr_bits; | 565 | u8 mode; |
556 | /* fields bellow are used to decode ldr values in different modes */ | ||
557 | u32 cid_shift, cid_mask, lid_mask, broadcast; | ||
558 | struct kvm_lapic *phys_map[256]; | 566 | struct kvm_lapic *phys_map[256]; |
559 | /* first index is cluster id second is cpu id in a cluster */ | 567 | /* first index is cluster id second is cpu id in a cluster */ |
560 | struct kvm_lapic *logical_map[16][16]; | 568 | struct kvm_lapic *logical_map[16][16]; |
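
Because the three mode constants are the LDR bit counts and all powers of two, OR-ing together the modes programmed by every vCPU makes a mixed configuration immediately visible: the combined value is no longer one of the three legal constants. A small sketch of that check, assuming the map recalculation folds all vCPU modes into a single value:

    #include <stdbool.h>

    static bool logical_map_usable(unsigned int combined_mode)
    {
            /* usable only if every vcpu agreed on one of 4, 8 or 16 */
            return combined_mode == KVM_APIC_MODE_XAPIC_CLUSTER ||
                   combined_mode == KVM_APIC_MODE_XAPIC_FLAT ||
                   combined_mode == KVM_APIC_MODE_X2APIC;
    }

For example 8 | 4 == 12: a flat/cluster mixture, so the logical map cannot be used and delivery falls back to the slow path.
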
@@ -859,6 +867,8 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, | |||
859 | void kvm_mmu_reset_context(struct kvm_vcpu *vcpu); | 867 | void kvm_mmu_reset_context(struct kvm_vcpu *vcpu); |
860 | void kvm_mmu_slot_remove_write_access(struct kvm *kvm, | 868 | void kvm_mmu_slot_remove_write_access(struct kvm *kvm, |
861 | struct kvm_memory_slot *memslot); | 869 | struct kvm_memory_slot *memslot); |
870 | void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, | ||
871 | struct kvm_memory_slot *memslot); | ||
862 | void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, | 872 | void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, |
863 | struct kvm_memory_slot *memslot); | 873 | struct kvm_memory_slot *memslot); |
864 | void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm, | 874 | void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm, |
@@ -933,6 +943,7 @@ struct x86_emulate_ctxt; | |||
933 | int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port); | 943 | int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port); |
934 | void kvm_emulate_cpuid(struct kvm_vcpu *vcpu); | 944 | void kvm_emulate_cpuid(struct kvm_vcpu *vcpu); |
935 | int kvm_emulate_halt(struct kvm_vcpu *vcpu); | 945 | int kvm_emulate_halt(struct kvm_vcpu *vcpu); |
946 | int kvm_vcpu_halt(struct kvm_vcpu *vcpu); | ||
936 | int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu); | 947 | int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu); |
937 | 948 | ||
938 | void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); | 949 | void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); |
@@ -1128,7 +1139,6 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) | |||
1128 | int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); | 1139 | int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); |
1129 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); | 1140 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); |
1130 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); | 1141 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); |
1131 | int cpuid_maxphyaddr(struct kvm_vcpu *vcpu); | ||
1132 | int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v); | 1142 | int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v); |
1133 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); | 1143 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); |
1134 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); | 1144 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); |
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h index e62cf897f781..c1adf33fdd0d 100644 --- a/arch/x86/include/asm/kvm_para.h +++ b/arch/x86/include/asm/kvm_para.h | |||
@@ -115,7 +115,7 @@ static inline void kvm_spinlock_init(void) | |||
115 | 115 | ||
116 | static inline bool kvm_para_available(void) | 116 | static inline bool kvm_para_available(void) |
117 | { | 117 | { |
118 | return 0; | 118 | return false; |
119 | } | 119 | } |
120 | 120 | ||
121 | static inline unsigned int kvm_arch_para_features(void) | 121 | static inline unsigned int kvm_arch_para_features(void) |
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h index d6b078e9fa28..25b1cc07d496 100644 --- a/arch/x86/include/asm/pvclock.h +++ b/arch/x86/include/asm/pvclock.h | |||
@@ -95,6 +95,7 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src, | |||
95 | 95 | ||
96 | struct pvclock_vsyscall_time_info { | 96 | struct pvclock_vsyscall_time_info { |
97 | struct pvclock_vcpu_time_info pvti; | 97 | struct pvclock_vcpu_time_info pvti; |
98 | u32 migrate_count; | ||
98 | } __attribute__((__aligned__(SMP_CACHE_BYTES))); | 99 | } __attribute__((__aligned__(SMP_CACHE_BYTES))); |
99 | 100 | ||
100 | #define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info) | 101 | #define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info) |
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h index c5f1a1deb91a..1fe92181ee9e 100644 --- a/arch/x86/include/uapi/asm/vmx.h +++ b/arch/x86/include/uapi/asm/vmx.h | |||
@@ -67,6 +67,7 @@ | |||
67 | #define EXIT_REASON_EPT_VIOLATION 48 | 67 | #define EXIT_REASON_EPT_VIOLATION 48 |
68 | #define EXIT_REASON_EPT_MISCONFIG 49 | 68 | #define EXIT_REASON_EPT_MISCONFIG 49 |
69 | #define EXIT_REASON_INVEPT 50 | 69 | #define EXIT_REASON_INVEPT 50 |
70 | #define EXIT_REASON_RDTSCP 51 | ||
70 | #define EXIT_REASON_PREEMPTION_TIMER 52 | 71 | #define EXIT_REASON_PREEMPTION_TIMER 52 |
71 | #define EXIT_REASON_INVVPID 53 | 72 | #define EXIT_REASON_INVVPID 53 |
72 | #define EXIT_REASON_WBINVD 54 | 73 | #define EXIT_REASON_WBINVD 54 |
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index 2f355d229a58..e5ecd20e72dd 100644 --- a/arch/x86/kernel/pvclock.c +++ b/arch/x86/kernel/pvclock.c | |||
@@ -141,7 +141,46 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock, | |||
141 | set_normalized_timespec(ts, now.tv_sec, now.tv_nsec); | 141 | set_normalized_timespec(ts, now.tv_sec, now.tv_nsec); |
142 | } | 142 | } |
143 | 143 | ||
144 | static struct pvclock_vsyscall_time_info *pvclock_vdso_info; | ||
145 | |||
146 | static struct pvclock_vsyscall_time_info * | ||
147 | pvclock_get_vsyscall_user_time_info(int cpu) | ||
148 | { | ||
149 | if (!pvclock_vdso_info) { | ||
150 | BUG(); | ||
151 | return NULL; | ||
152 | } | ||
153 | |||
154 | return &pvclock_vdso_info[cpu]; | ||
155 | } | ||
156 | |||
157 | struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu) | ||
158 | { | ||
159 | return &pvclock_get_vsyscall_user_time_info(cpu)->pvti; | ||
160 | } | ||
161 | |||
144 | #ifdef CONFIG_X86_64 | 162 | #ifdef CONFIG_X86_64 |
163 | static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l, | ||
164 | void *v) | ||
165 | { | ||
166 | struct task_migration_notifier *mn = v; | ||
167 | struct pvclock_vsyscall_time_info *pvti; | ||
168 | |||
169 | pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu); | ||
170 | |||
171 | /* this is NULL when pvclock vsyscall is not initialized */ | ||
172 | if (unlikely(pvti == NULL)) | ||
173 | return NOTIFY_DONE; | ||
174 | |||
175 | pvti->migrate_count++; | ||
176 | |||
177 | return NOTIFY_DONE; | ||
178 | } | ||
179 | |||
180 | static struct notifier_block pvclock_migrate = { | ||
181 | .notifier_call = pvclock_task_migrate, | ||
182 | }; | ||
183 | |||
145 | /* | 184 | /* |
146 | * Initialize the generic pvclock vsyscall state. This will allocate | 185 | * Initialize the generic pvclock vsyscall state. This will allocate |
147 | * a/some page(s) for the per-vcpu pvclock information, set up a | 186 | * a/some page(s) for the per-vcpu pvclock information, set up a |
@@ -155,12 +194,17 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i, | |||
155 | 194 | ||
156 | WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE); | 195 | WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE); |
157 | 196 | ||
197 | pvclock_vdso_info = i; | ||
198 | |||
158 | for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) { | 199 | for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) { |
159 | __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx, | 200 | __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx, |
160 | __pa(i) + (idx*PAGE_SIZE), | 201 | __pa(i) + (idx*PAGE_SIZE), |
161 | PAGE_KERNEL_VVAR); | 202 | PAGE_KERNEL_VVAR); |
162 | } | 203 | } |
163 | 204 | ||
205 | |||
206 | register_task_migration_notifier(&pvclock_migrate); | ||
207 | |||
164 | return 0; | 208 | return 0; |
165 | } | 209 | } |
166 | #endif | 210 | #endif |
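Note: the pvclock change above threads a task-migration notifier into the vsyscall time area. Whenever a task leaves a CPU, that CPU's migrate_count is bumped, which lets the userspace vDSO reader detect that it sampled time info for the wrong CPU and retry. A minimal sketch of the retry loop this enables follows; apart from the migrate_count/version fields and pvclock_scale_delta(), the helper names (read_pvti, read_tsc) are placeholders, not APIs quoted from the tree.

/* Illustrative pvclock read loop using migrate_count (not the real vDSO
 * code): retry when the per-vcpu info was being updated (odd or changed
 * version) or when the task migrated away mid-sample. */
static u64 sample_pvclock(void)
{
	const struct pvclock_vsyscall_time_info *pvti;
	u32 version, migrate_count;
	u64 tsc, delta, ns;

	do {
		pvti = read_pvti();                 /* placeholder: per-cpu slot */
		migrate_count = pvti->migrate_count;

		version = pvti->pvti.version;
		smp_rmb();
		tsc   = read_tsc();                 /* placeholder: ordered rdtsc */
		delta = tsc - pvti->pvti.tsc_timestamp;
		ns    = pvti->pvti.system_time +
			pvclock_scale_delta(delta, pvti->pvti.tsc_to_system_mul,
					    pvti->pvti.tsc_shift);
		smp_rmb();
	} while ((version & 1) ||
		 version != pvti->pvti.version ||
		 migrate_count != pvti->migrate_count);

	return ns;
}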
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 08f790dfadc9..16e8f962eaad 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile | |||
@@ -1,5 +1,5 @@ | |||
1 | 1 | ||
2 | ccflags-y += -Ivirt/kvm -Iarch/x86/kvm | 2 | ccflags-y += -Iarch/x86/kvm |
3 | 3 | ||
4 | CFLAGS_x86.o := -I. | 4 | CFLAGS_x86.o := -I. |
5 | CFLAGS_svm.o := -I. | 5 | CFLAGS_svm.o := -I. |
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 8a80737ee6e6..59b69f6a2844 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c | |||
@@ -104,6 +104,9 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu) | |||
104 | ((best->eax & 0xff00) >> 8) != 0) | 104 | ((best->eax & 0xff00) >> 8) != 0) |
105 | return -EINVAL; | 105 | return -EINVAL; |
106 | 106 | ||
107 | /* Update physical-address width */ | ||
108 | vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); | ||
109 | |||
107 | kvm_pmu_cpuid_update(vcpu); | 110 | kvm_pmu_cpuid_update(vcpu); |
108 | return 0; | 111 | return 0; |
109 | } | 112 | } |
@@ -135,6 +138,21 @@ static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu) | |||
135 | } | 138 | } |
136 | } | 139 | } |
137 | 140 | ||
141 | int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu) | ||
142 | { | ||
143 | struct kvm_cpuid_entry2 *best; | ||
144 | |||
145 | best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0); | ||
146 | if (!best || best->eax < 0x80000008) | ||
147 | goto not_found; | ||
148 | best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0); | ||
149 | if (best) | ||
150 | return best->eax & 0xff; | ||
151 | not_found: | ||
152 | return 36; | ||
153 | } | ||
154 | EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr); | ||
155 | |||
138 | /* when an old userspace process fills a new kernel module */ | 156 | /* when an old userspace process fills a new kernel module */ |
139 | int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, | 157 | int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, |
140 | struct kvm_cpuid *cpuid, | 158 | struct kvm_cpuid *cpuid, |
@@ -757,21 +775,6 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, | |||
757 | } | 775 | } |
758 | EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry); | 776 | EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry); |
759 | 777 | ||
760 | int cpuid_maxphyaddr(struct kvm_vcpu *vcpu) | ||
761 | { | ||
762 | struct kvm_cpuid_entry2 *best; | ||
763 | |||
764 | best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0); | ||
765 | if (!best || best->eax < 0x80000008) | ||
766 | goto not_found; | ||
767 | best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0); | ||
768 | if (best) | ||
769 | return best->eax & 0xff; | ||
770 | not_found: | ||
771 | return 36; | ||
772 | } | ||
773 | EXPORT_SYMBOL_GPL(cpuid_maxphyaddr); | ||
774 | |||
775 | /* | 778 | /* |
776 | * If no match is found, check whether we exceed the vCPU's limit | 779 | * If no match is found, check whether we exceed the vCPU's limit |
777 | * and return the content of the highest valid _standard_ leaf instead. | 780 | * and return the content of the highest valid _standard_ leaf instead. |
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index 4452eedfaedd..c3b1ad9fca81 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h | |||
@@ -20,13 +20,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, | |||
20 | struct kvm_cpuid_entry2 __user *entries); | 20 | struct kvm_cpuid_entry2 __user *entries); |
21 | void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx); | 21 | void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx); |
22 | 22 | ||
23 | int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu); | ||
24 | |||
25 | static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu) | ||
26 | { | ||
27 | return vcpu->arch.maxphyaddr; | ||
28 | } | ||
23 | 29 | ||
24 | static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu) | 30 | static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu) |
25 | { | 31 | { |
26 | struct kvm_cpuid_entry2 *best; | 32 | struct kvm_cpuid_entry2 *best; |
27 | 33 | ||
28 | if (!static_cpu_has(X86_FEATURE_XSAVE)) | 34 | if (!static_cpu_has(X86_FEATURE_XSAVE)) |
29 | return 0; | 35 | return false; |
30 | 36 | ||
31 | best = kvm_find_cpuid_entry(vcpu, 1, 0); | 37 | best = kvm_find_cpuid_entry(vcpu, 1, 0); |
32 | return best && (best->ecx & bit(X86_FEATURE_XSAVE)); | 38 | return best && (best->ecx & bit(X86_FEATURE_XSAVE)); |
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 106c01557f2b..630bcb0d7a04 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c | |||
@@ -248,27 +248,7 @@ struct mode_dual { | |||
248 | struct opcode mode64; | 248 | struct opcode mode64; |
249 | }; | 249 | }; |
250 | 250 | ||
251 | /* EFLAGS bit definitions. */ | ||
252 | #define EFLG_ID (1<<21) | ||
253 | #define EFLG_VIP (1<<20) | ||
254 | #define EFLG_VIF (1<<19) | ||
255 | #define EFLG_AC (1<<18) | ||
256 | #define EFLG_VM (1<<17) | ||
257 | #define EFLG_RF (1<<16) | ||
258 | #define EFLG_IOPL (3<<12) | ||
259 | #define EFLG_NT (1<<14) | ||
260 | #define EFLG_OF (1<<11) | ||
261 | #define EFLG_DF (1<<10) | ||
262 | #define EFLG_IF (1<<9) | ||
263 | #define EFLG_TF (1<<8) | ||
264 | #define EFLG_SF (1<<7) | ||
265 | #define EFLG_ZF (1<<6) | ||
266 | #define EFLG_AF (1<<4) | ||
267 | #define EFLG_PF (1<<2) | ||
268 | #define EFLG_CF (1<<0) | ||
269 | |||
270 | #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a | 251 | #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a |
271 | #define EFLG_RESERVED_ONE_MASK 2 | ||
272 | 252 | ||
273 | enum x86_transfer_type { | 253 | enum x86_transfer_type { |
274 | X86_TRANSFER_NONE, | 254 | X86_TRANSFER_NONE, |
@@ -317,7 +297,8 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt) | |||
317 | * These EFLAGS bits are restored from saved value during emulation, and | 297 | * These EFLAGS bits are restored from saved value during emulation, and |
318 | * any changes are written back to the saved value after emulation. | 298 | * any changes are written back to the saved value after emulation. |
319 | */ | 299 | */ |
320 | #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF) | 300 | #define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\ |
301 | X86_EFLAGS_PF|X86_EFLAGS_CF) | ||
321 | 302 | ||
322 | #ifdef CONFIG_X86_64 | 303 | #ifdef CONFIG_X86_64 |
323 | #define ON64(x) x | 304 | #define ON64(x) x |
@@ -478,6 +459,25 @@ static void assign_masked(ulong *dest, ulong src, ulong mask) | |||
478 | *dest = (*dest & ~mask) | (src & mask); | 459 | *dest = (*dest & ~mask) | (src & mask); |
479 | } | 460 | } |
480 | 461 | ||
462 | static void assign_register(unsigned long *reg, u64 val, int bytes) | ||
463 | { | ||
464 | /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */ | ||
465 | switch (bytes) { | ||
466 | case 1: | ||
467 | *(u8 *)reg = (u8)val; | ||
468 | break; | ||
469 | case 2: | ||
470 | *(u16 *)reg = (u16)val; | ||
471 | break; | ||
472 | case 4: | ||
473 | *reg = (u32)val; | ||
474 | break; /* 64b: zero-extend */ | ||
475 | case 8: | ||
476 | *reg = val; | ||
477 | break; | ||
478 | } | ||
479 | } | ||
480 | |||
481 | static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt) | 481 | static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt) |
482 | { | 482 | { |
483 | return (1UL << (ctxt->ad_bytes << 3)) - 1; | 483 | return (1UL << (ctxt->ad_bytes << 3)) - 1; |
@@ -943,6 +943,22 @@ FASTOP2(xadd); | |||
943 | 943 | ||
944 | FASTOP2R(cmp, cmp_r); | 944 | FASTOP2R(cmp, cmp_r); |
945 | 945 | ||
946 | static int em_bsf_c(struct x86_emulate_ctxt *ctxt) | ||
947 | { | ||
948 | /* If src is zero, do not writeback, but update flags */ | ||
949 | if (ctxt->src.val == 0) | ||
950 | ctxt->dst.type = OP_NONE; | ||
951 | return fastop(ctxt, em_bsf); | ||
952 | } | ||
953 | |||
954 | static int em_bsr_c(struct x86_emulate_ctxt *ctxt) | ||
955 | { | ||
956 | /* If src is zero, do not writeback, but update flags */ | ||
957 | if (ctxt->src.val == 0) | ||
958 | ctxt->dst.type = OP_NONE; | ||
959 | return fastop(ctxt, em_bsr); | ||
960 | } | ||
961 | |||
946 | static u8 test_cc(unsigned int condition, unsigned long flags) | 962 | static u8 test_cc(unsigned int condition, unsigned long flags) |
947 | { | 963 | { |
948 | u8 rc; | 964 | u8 rc; |
@@ -1399,7 +1415,7 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, | |||
1399 | unsigned int in_page, n; | 1415 | unsigned int in_page, n; |
1400 | unsigned int count = ctxt->rep_prefix ? | 1416 | unsigned int count = ctxt->rep_prefix ? |
1401 | address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1; | 1417 | address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1; |
1402 | in_page = (ctxt->eflags & EFLG_DF) ? | 1418 | in_page = (ctxt->eflags & X86_EFLAGS_DF) ? |
1403 | offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) : | 1419 | offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) : |
1404 | PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)); | 1420 | PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)); |
1405 | n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count); | 1421 | n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count); |
@@ -1412,7 +1428,7 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, | |||
1412 | } | 1428 | } |
1413 | 1429 | ||
1414 | if (ctxt->rep_prefix && (ctxt->d & String) && | 1430 | if (ctxt->rep_prefix && (ctxt->d & String) && |
1415 | !(ctxt->eflags & EFLG_DF)) { | 1431 | !(ctxt->eflags & X86_EFLAGS_DF)) { |
1416 | ctxt->dst.data = rc->data + rc->pos; | 1432 | ctxt->dst.data = rc->data + rc->pos; |
1417 | ctxt->dst.type = OP_MEM_STR; | 1433 | ctxt->dst.type = OP_MEM_STR; |
1418 | ctxt->dst.count = (rc->end - rc->pos) / size; | 1434 | ctxt->dst.count = (rc->end - rc->pos) / size; |
@@ -1691,21 +1707,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, | |||
1691 | 1707 | ||
1692 | static void write_register_operand(struct operand *op) | 1708 | static void write_register_operand(struct operand *op) |
1693 | { | 1709 | { |
1694 | /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */ | 1710 | return assign_register(op->addr.reg, op->val, op->bytes); |
1695 | switch (op->bytes) { | ||
1696 | case 1: | ||
1697 | *(u8 *)op->addr.reg = (u8)op->val; | ||
1698 | break; | ||
1699 | case 2: | ||
1700 | *(u16 *)op->addr.reg = (u16)op->val; | ||
1701 | break; | ||
1702 | case 4: | ||
1703 | *op->addr.reg = (u32)op->val; | ||
1704 | break; /* 64b: zero-extend */ | ||
1705 | case 8: | ||
1706 | *op->addr.reg = op->val; | ||
1707 | break; | ||
1708 | } | ||
1709 | } | 1711 | } |
1710 | 1712 | ||
1711 | static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op) | 1713 | static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op) |
@@ -1792,32 +1794,34 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt, | |||
1792 | { | 1794 | { |
1793 | int rc; | 1795 | int rc; |
1794 | unsigned long val, change_mask; | 1796 | unsigned long val, change_mask; |
1795 | int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; | 1797 | int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT; |
1796 | int cpl = ctxt->ops->cpl(ctxt); | 1798 | int cpl = ctxt->ops->cpl(ctxt); |
1797 | 1799 | ||
1798 | rc = emulate_pop(ctxt, &val, len); | 1800 | rc = emulate_pop(ctxt, &val, len); |
1799 | if (rc != X86EMUL_CONTINUE) | 1801 | if (rc != X86EMUL_CONTINUE) |
1800 | return rc; | 1802 | return rc; |
1801 | 1803 | ||
1802 | change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF | 1804 | change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | |
1803 | | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID; | 1805 | X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF | |
1806 | X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT | | ||
1807 | X86_EFLAGS_AC | X86_EFLAGS_ID; | ||
1804 | 1808 | ||
1805 | switch(ctxt->mode) { | 1809 | switch(ctxt->mode) { |
1806 | case X86EMUL_MODE_PROT64: | 1810 | case X86EMUL_MODE_PROT64: |
1807 | case X86EMUL_MODE_PROT32: | 1811 | case X86EMUL_MODE_PROT32: |
1808 | case X86EMUL_MODE_PROT16: | 1812 | case X86EMUL_MODE_PROT16: |
1809 | if (cpl == 0) | 1813 | if (cpl == 0) |
1810 | change_mask |= EFLG_IOPL; | 1814 | change_mask |= X86_EFLAGS_IOPL; |
1811 | if (cpl <= iopl) | 1815 | if (cpl <= iopl) |
1812 | change_mask |= EFLG_IF; | 1816 | change_mask |= X86_EFLAGS_IF; |
1813 | break; | 1817 | break; |
1814 | case X86EMUL_MODE_VM86: | 1818 | case X86EMUL_MODE_VM86: |
1815 | if (iopl < 3) | 1819 | if (iopl < 3) |
1816 | return emulate_gp(ctxt, 0); | 1820 | return emulate_gp(ctxt, 0); |
1817 | change_mask |= EFLG_IF; | 1821 | change_mask |= X86_EFLAGS_IF; |
1818 | break; | 1822 | break; |
1819 | default: /* real mode */ | 1823 | default: /* real mode */ |
1820 | change_mask |= (EFLG_IOPL | EFLG_IF); | 1824 | change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF); |
1821 | break; | 1825 | break; |
1822 | } | 1826 | } |
1823 | 1827 | ||
@@ -1918,7 +1922,7 @@ static int em_pusha(struct x86_emulate_ctxt *ctxt) | |||
1918 | 1922 | ||
1919 | static int em_pushf(struct x86_emulate_ctxt *ctxt) | 1923 | static int em_pushf(struct x86_emulate_ctxt *ctxt) |
1920 | { | 1924 | { |
1921 | ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM; | 1925 | ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM; |
1922 | return em_push(ctxt); | 1926 | return em_push(ctxt); |
1923 | } | 1927 | } |
1924 | 1928 | ||
@@ -1926,6 +1930,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt) | |||
1926 | { | 1930 | { |
1927 | int rc = X86EMUL_CONTINUE; | 1931 | int rc = X86EMUL_CONTINUE; |
1928 | int reg = VCPU_REGS_RDI; | 1932 | int reg = VCPU_REGS_RDI; |
1933 | u32 val; | ||
1929 | 1934 | ||
1930 | while (reg >= VCPU_REGS_RAX) { | 1935 | while (reg >= VCPU_REGS_RAX) { |
1931 | if (reg == VCPU_REGS_RSP) { | 1936 | if (reg == VCPU_REGS_RSP) { |
@@ -1933,9 +1938,10 @@ static int em_popa(struct x86_emulate_ctxt *ctxt) | |||
1933 | --reg; | 1938 | --reg; |
1934 | } | 1939 | } |
1935 | 1940 | ||
1936 | rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes); | 1941 | rc = emulate_pop(ctxt, &val, ctxt->op_bytes); |
1937 | if (rc != X86EMUL_CONTINUE) | 1942 | if (rc != X86EMUL_CONTINUE) |
1938 | break; | 1943 | break; |
1944 | assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes); | ||
1939 | --reg; | 1945 | --reg; |
1940 | } | 1946 | } |
1941 | return rc; | 1947 | return rc; |
@@ -1956,7 +1962,7 @@ static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) | |||
1956 | if (rc != X86EMUL_CONTINUE) | 1962 | if (rc != X86EMUL_CONTINUE) |
1957 | return rc; | 1963 | return rc; |
1958 | 1964 | ||
1959 | ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC); | 1965 | ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC); |
1960 | 1966 | ||
1961 | ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS); | 1967 | ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS); |
1962 | rc = em_push(ctxt); | 1968 | rc = em_push(ctxt); |
@@ -2022,10 +2028,14 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt) | |||
2022 | unsigned long temp_eip = 0; | 2028 | unsigned long temp_eip = 0; |
2023 | unsigned long temp_eflags = 0; | 2029 | unsigned long temp_eflags = 0; |
2024 | unsigned long cs = 0; | 2030 | unsigned long cs = 0; |
2025 | unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF | | 2031 | unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | |
2026 | EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF | | 2032 | X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF | |
2027 | EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */ | 2033 | X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF | |
2028 | unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP; | 2034 | X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF | |
2035 | X86_EFLAGS_AC | X86_EFLAGS_ID | | ||
2036 | X86_EFLAGS_FIXED; | ||
2037 | unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF | | ||
2038 | X86_EFLAGS_VIP; | ||
2029 | 2039 | ||
2030 | /* TODO: Add stack limit check */ | 2040 | /* TODO: Add stack limit check */ |
2031 | 2041 | ||
@@ -2054,7 +2064,6 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt) | |||
2054 | 2064 | ||
2055 | ctxt->_eip = temp_eip; | 2065 | ctxt->_eip = temp_eip; |
2056 | 2066 | ||
2057 | |||
2058 | if (ctxt->op_bytes == 4) | 2067 | if (ctxt->op_bytes == 4) |
2059 | ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask)); | 2068 | ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask)); |
2060 | else if (ctxt->op_bytes == 2) { | 2069 | else if (ctxt->op_bytes == 2) { |
@@ -2063,7 +2072,7 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt) | |||
2063 | } | 2072 | } |
2064 | 2073 | ||
2065 | ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */ | 2074 | ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */ |
2066 | ctxt->eflags |= EFLG_RESERVED_ONE_MASK; | 2075 | ctxt->eflags |= X86_EFLAGS_FIXED; |
2067 | ctxt->ops->set_nmi_mask(ctxt, false); | 2076 | ctxt->ops->set_nmi_mask(ctxt, false); |
2068 | 2077 | ||
2069 | return rc; | 2078 | return rc; |
@@ -2145,12 +2154,12 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt) | |||
2145 | ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) { | 2154 | ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) { |
2146 | *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0); | 2155 | *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0); |
2147 | *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32); | 2156 | *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32); |
2148 | ctxt->eflags &= ~EFLG_ZF; | 2157 | ctxt->eflags &= ~X86_EFLAGS_ZF; |
2149 | } else { | 2158 | } else { |
2150 | ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) | | 2159 | ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) | |
2151 | (u32) reg_read(ctxt, VCPU_REGS_RBX); | 2160 | (u32) reg_read(ctxt, VCPU_REGS_RBX); |
2152 | 2161 | ||
2153 | ctxt->eflags |= EFLG_ZF; | 2162 | ctxt->eflags |= X86_EFLAGS_ZF; |
2154 | } | 2163 | } |
2155 | return X86EMUL_CONTINUE; | 2164 | return X86EMUL_CONTINUE; |
2156 | } | 2165 | } |
@@ -2222,7 +2231,7 @@ static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) | |||
2222 | ctxt->src.val = ctxt->dst.orig_val; | 2231 | ctxt->src.val = ctxt->dst.orig_val; |
2223 | fastop(ctxt, em_cmp); | 2232 | fastop(ctxt, em_cmp); |
2224 | 2233 | ||
2225 | if (ctxt->eflags & EFLG_ZF) { | 2234 | if (ctxt->eflags & X86_EFLAGS_ZF) { |
2226 | /* Success: write back to memory; no update of EAX */ | 2235 | /* Success: write back to memory; no update of EAX */ |
2227 | ctxt->src.type = OP_NONE; | 2236 | ctxt->src.type = OP_NONE; |
2228 | ctxt->dst.val = ctxt->src.orig_val; | 2237 | ctxt->dst.val = ctxt->src.orig_val; |
@@ -2381,14 +2390,14 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt) | |||
2381 | 2390 | ||
2382 | ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); | 2391 | ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); |
2383 | ctxt->eflags &= ~msr_data; | 2392 | ctxt->eflags &= ~msr_data; |
2384 | ctxt->eflags |= EFLG_RESERVED_ONE_MASK; | 2393 | ctxt->eflags |= X86_EFLAGS_FIXED; |
2385 | #endif | 2394 | #endif |
2386 | } else { | 2395 | } else { |
2387 | /* legacy mode */ | 2396 | /* legacy mode */ |
2388 | ops->get_msr(ctxt, MSR_STAR, &msr_data); | 2397 | ops->get_msr(ctxt, MSR_STAR, &msr_data); |
2389 | ctxt->_eip = (u32)msr_data; | 2398 | ctxt->_eip = (u32)msr_data; |
2390 | 2399 | ||
2391 | ctxt->eflags &= ~(EFLG_VM | EFLG_IF); | 2400 | ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); |
2392 | } | 2401 | } |
2393 | 2402 | ||
2394 | return X86EMUL_CONTINUE; | 2403 | return X86EMUL_CONTINUE; |
@@ -2425,8 +2434,8 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt) | |||
2425 | if ((msr_data & 0xfffc) == 0x0) | 2434 | if ((msr_data & 0xfffc) == 0x0) |
2426 | return emulate_gp(ctxt, 0); | 2435 | return emulate_gp(ctxt, 0); |
2427 | 2436 | ||
2428 | ctxt->eflags &= ~(EFLG_VM | EFLG_IF); | 2437 | ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); |
2429 | cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK; | 2438 | cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK; |
2430 | ss_sel = cs_sel + 8; | 2439 | ss_sel = cs_sel + 8; |
2431 | if (efer & EFER_LMA) { | 2440 | if (efer & EFER_LMA) { |
2432 | cs.d = 0; | 2441 | cs.d = 0; |
@@ -2493,8 +2502,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt) | |||
2493 | return emulate_gp(ctxt, 0); | 2502 | return emulate_gp(ctxt, 0); |
2494 | break; | 2503 | break; |
2495 | } | 2504 | } |
2496 | cs_sel |= SELECTOR_RPL_MASK; | 2505 | cs_sel |= SEGMENT_RPL_MASK; |
2497 | ss_sel |= SELECTOR_RPL_MASK; | 2506 | ss_sel |= SEGMENT_RPL_MASK; |
2498 | 2507 | ||
2499 | ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); | 2508 | ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); |
2500 | ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); | 2509 | ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); |
@@ -2512,7 +2521,7 @@ static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt) | |||
2512 | return false; | 2521 | return false; |
2513 | if (ctxt->mode == X86EMUL_MODE_VM86) | 2522 | if (ctxt->mode == X86EMUL_MODE_VM86) |
2514 | return true; | 2523 | return true; |
2515 | iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; | 2524 | iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT; |
2516 | return ctxt->ops->cpl(ctxt) > iopl; | 2525 | return ctxt->ops->cpl(ctxt) > iopl; |
2517 | } | 2526 | } |
2518 | 2527 | ||
@@ -2782,10 +2791,8 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, | |||
2782 | return ret; | 2791 | return ret; |
2783 | ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, | 2792 | ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, |
2784 | X86_TRANSFER_TASK_SWITCH, NULL); | 2793 | X86_TRANSFER_TASK_SWITCH, NULL); |
2785 | if (ret != X86EMUL_CONTINUE) | ||
2786 | return ret; | ||
2787 | 2794 | ||
2788 | return X86EMUL_CONTINUE; | 2795 | return ret; |
2789 | } | 2796 | } |
2790 | 2797 | ||
2791 | static int task_switch_32(struct x86_emulate_ctxt *ctxt, | 2798 | static int task_switch_32(struct x86_emulate_ctxt *ctxt, |
@@ -2954,7 +2961,7 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt, | |||
2954 | static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg, | 2961 | static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg, |
2955 | struct operand *op) | 2962 | struct operand *op) |
2956 | { | 2963 | { |
2957 | int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count; | 2964 | int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count; |
2958 | 2965 | ||
2959 | register_address_increment(ctxt, reg, df * op->bytes); | 2966 | register_address_increment(ctxt, reg, df * op->bytes); |
2960 | op->addr.mem.ea = register_address(ctxt, reg); | 2967 | op->addr.mem.ea = register_address(ctxt, reg); |
@@ -3323,7 +3330,7 @@ static int em_clts(struct x86_emulate_ctxt *ctxt) | |||
3323 | return X86EMUL_CONTINUE; | 3330 | return X86EMUL_CONTINUE; |
3324 | } | 3331 | } |
3325 | 3332 | ||
3326 | static int em_vmcall(struct x86_emulate_ctxt *ctxt) | 3333 | static int em_hypercall(struct x86_emulate_ctxt *ctxt) |
3327 | { | 3334 | { |
3328 | int rc = ctxt->ops->fix_hypercall(ctxt); | 3335 | int rc = ctxt->ops->fix_hypercall(ctxt); |
3329 | 3336 | ||
@@ -3395,17 +3402,6 @@ static int em_lgdt(struct x86_emulate_ctxt *ctxt) | |||
3395 | return em_lgdt_lidt(ctxt, true); | 3402 | return em_lgdt_lidt(ctxt, true); |
3396 | } | 3403 | } |
3397 | 3404 | ||
3398 | static int em_vmmcall(struct x86_emulate_ctxt *ctxt) | ||
3399 | { | ||
3400 | int rc; | ||
3401 | |||
3402 | rc = ctxt->ops->fix_hypercall(ctxt); | ||
3403 | |||
3404 | /* Disable writeback. */ | ||
3405 | ctxt->dst.type = OP_NONE; | ||
3406 | return rc; | ||
3407 | } | ||
3408 | |||
3409 | static int em_lidt(struct x86_emulate_ctxt *ctxt) | 3405 | static int em_lidt(struct x86_emulate_ctxt *ctxt) |
3410 | { | 3406 | { |
3411 | return em_lgdt_lidt(ctxt, false); | 3407 | return em_lgdt_lidt(ctxt, false); |
@@ -3504,7 +3500,8 @@ static int em_sahf(struct x86_emulate_ctxt *ctxt) | |||
3504 | { | 3500 | { |
3505 | u32 flags; | 3501 | u32 flags; |
3506 | 3502 | ||
3507 | flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF; | 3503 | flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | |
3504 | X86_EFLAGS_SF; | ||
3508 | flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8; | 3505 | flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8; |
3509 | 3506 | ||
3510 | ctxt->eflags &= ~0xffUL; | 3507 | ctxt->eflags &= ~0xffUL; |
@@ -3769,7 +3766,7 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt) | |||
3769 | 3766 | ||
3770 | static const struct opcode group7_rm0[] = { | 3767 | static const struct opcode group7_rm0[] = { |
3771 | N, | 3768 | N, |
3772 | I(SrcNone | Priv | EmulateOnUD, em_vmcall), | 3769 | I(SrcNone | Priv | EmulateOnUD, em_hypercall), |
3773 | N, N, N, N, N, N, | 3770 | N, N, N, N, N, N, |
3774 | }; | 3771 | }; |
3775 | 3772 | ||
@@ -3781,7 +3778,7 @@ static const struct opcode group7_rm1[] = { | |||
3781 | 3778 | ||
3782 | static const struct opcode group7_rm3[] = { | 3779 | static const struct opcode group7_rm3[] = { |
3783 | DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa), | 3780 | DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa), |
3784 | II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall), | 3781 | II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall), |
3785 | DIP(SrcNone | Prot | Priv, vmload, check_svme_pa), | 3782 | DIP(SrcNone | Prot | Priv, vmload, check_svme_pa), |
3786 | DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa), | 3783 | DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa), |
3787 | DIP(SrcNone | Prot | Priv, stgi, check_svme), | 3784 | DIP(SrcNone | Prot | Priv, stgi, check_svme), |
@@ -4192,7 +4189,8 @@ static const struct opcode twobyte_table[256] = { | |||
4192 | N, N, | 4189 | N, N, |
4193 | G(BitOp, group8), | 4190 | G(BitOp, group8), |
4194 | F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc), | 4191 | F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc), |
4195 | F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr), | 4192 | I(DstReg | SrcMem | ModRM, em_bsf_c), |
4193 | I(DstReg | SrcMem | ModRM, em_bsr_c), | ||
4196 | D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), | 4194 | D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), |
4197 | /* 0xC0 - 0xC7 */ | 4195 | /* 0xC0 - 0xC7 */ |
4198 | F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd), | 4196 | F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd), |
@@ -4759,9 +4757,9 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt) | |||
4759 | if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) || | 4757 | if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) || |
4760 | (ctxt->b == 0xae) || (ctxt->b == 0xaf)) | 4758 | (ctxt->b == 0xae) || (ctxt->b == 0xaf)) |
4761 | && (((ctxt->rep_prefix == REPE_PREFIX) && | 4759 | && (((ctxt->rep_prefix == REPE_PREFIX) && |
4762 | ((ctxt->eflags & EFLG_ZF) == 0)) | 4760 | ((ctxt->eflags & X86_EFLAGS_ZF) == 0)) |
4763 | || ((ctxt->rep_prefix == REPNE_PREFIX) && | 4761 | || ((ctxt->rep_prefix == REPNE_PREFIX) && |
4764 | ((ctxt->eflags & EFLG_ZF) == EFLG_ZF)))) | 4762 | ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF)))) |
4765 | return true; | 4763 | return true; |
4766 | 4764 | ||
4767 | return false; | 4765 | return false; |
@@ -4913,7 +4911,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) | |||
4913 | /* All REP prefixes have the same first termination condition */ | 4911 | /* All REP prefixes have the same first termination condition */ |
4914 | if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) { | 4912 | if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) { |
4915 | ctxt->eip = ctxt->_eip; | 4913 | ctxt->eip = ctxt->_eip; |
4916 | ctxt->eflags &= ~EFLG_RF; | 4914 | ctxt->eflags &= ~X86_EFLAGS_RF; |
4917 | goto done; | 4915 | goto done; |
4918 | } | 4916 | } |
4919 | } | 4917 | } |
@@ -4963,9 +4961,9 @@ special_insn: | |||
4963 | } | 4961 | } |
4964 | 4962 | ||
4965 | if (ctxt->rep_prefix && (ctxt->d & String)) | 4963 | if (ctxt->rep_prefix && (ctxt->d & String)) |
4966 | ctxt->eflags |= EFLG_RF; | 4964 | ctxt->eflags |= X86_EFLAGS_RF; |
4967 | else | 4965 | else |
4968 | ctxt->eflags &= ~EFLG_RF; | 4966 | ctxt->eflags &= ~X86_EFLAGS_RF; |
4969 | 4967 | ||
4970 | if (ctxt->execute) { | 4968 | if (ctxt->execute) { |
4971 | if (ctxt->d & Fastop) { | 4969 | if (ctxt->d & Fastop) { |
@@ -5014,7 +5012,7 @@ special_insn: | |||
5014 | rc = emulate_int(ctxt, ctxt->src.val); | 5012 | rc = emulate_int(ctxt, ctxt->src.val); |
5015 | break; | 5013 | break; |
5016 | case 0xce: /* into */ | 5014 | case 0xce: /* into */ |
5017 | if (ctxt->eflags & EFLG_OF) | 5015 | if (ctxt->eflags & X86_EFLAGS_OF) |
5018 | rc = emulate_int(ctxt, 4); | 5016 | rc = emulate_int(ctxt, 4); |
5019 | break; | 5017 | break; |
5020 | case 0xe9: /* jmp rel */ | 5018 | case 0xe9: /* jmp rel */ |
@@ -5027,19 +5025,19 @@ special_insn: | |||
5027 | break; | 5025 | break; |
5028 | case 0xf5: /* cmc */ | 5026 | case 0xf5: /* cmc */ |
5029 | /* complement carry flag from eflags reg */ | 5027 | /* complement carry flag from eflags reg */ |
5030 | ctxt->eflags ^= EFLG_CF; | 5028 | ctxt->eflags ^= X86_EFLAGS_CF; |
5031 | break; | 5029 | break; |
5032 | case 0xf8: /* clc */ | 5030 | case 0xf8: /* clc */ |
5033 | ctxt->eflags &= ~EFLG_CF; | 5031 | ctxt->eflags &= ~X86_EFLAGS_CF; |
5034 | break; | 5032 | break; |
5035 | case 0xf9: /* stc */ | 5033 | case 0xf9: /* stc */ |
5036 | ctxt->eflags |= EFLG_CF; | 5034 | ctxt->eflags |= X86_EFLAGS_CF; |
5037 | break; | 5035 | break; |
5038 | case 0xfc: /* cld */ | 5036 | case 0xfc: /* cld */ |
5039 | ctxt->eflags &= ~EFLG_DF; | 5037 | ctxt->eflags &= ~X86_EFLAGS_DF; |
5040 | break; | 5038 | break; |
5041 | case 0xfd: /* std */ | 5039 | case 0xfd: /* std */ |
5042 | ctxt->eflags |= EFLG_DF; | 5040 | ctxt->eflags |= X86_EFLAGS_DF; |
5043 | break; | 5041 | break; |
5044 | default: | 5042 | default: |
5045 | goto cannot_emulate; | 5043 | goto cannot_emulate; |
@@ -5100,7 +5098,7 @@ writeback: | |||
5100 | } | 5098 | } |
5101 | goto done; /* skip rip writeback */ | 5099 | goto done; /* skip rip writeback */ |
5102 | } | 5100 | } |
5103 | ctxt->eflags &= ~EFLG_RF; | 5101 | ctxt->eflags &= ~X86_EFLAGS_RF; |
5104 | } | 5102 | } |
5105 | 5103 | ||
5106 | ctxt->eip = ctxt->_eip; | 5104 | ctxt->eip = ctxt->_eip; |
@@ -5137,8 +5135,7 @@ twobyte_insn: | |||
5137 | case 0x40 ... 0x4f: /* cmov */ | 5135 | case 0x40 ... 0x4f: /* cmov */ |
5138 | if (test_cc(ctxt->b, ctxt->eflags)) | 5136 | if (test_cc(ctxt->b, ctxt->eflags)) |
5139 | ctxt->dst.val = ctxt->src.val; | 5137 | ctxt->dst.val = ctxt->src.val; |
5140 | else if (ctxt->mode != X86EMUL_MODE_PROT64 || | 5138 | else if (ctxt->op_bytes != 4) |
5141 | ctxt->op_bytes != 4) | ||
5142 | ctxt->dst.type = OP_NONE; /* no writeback */ | 5139 | ctxt->dst.type = OP_NONE; /* no writeback */ |
5143 | break; | 5140 | break; |
5144 | case 0x80 ... 0x8f: /* jnz rel, etc*/ | 5141 | case 0x80 ... 0x8f: /* jnz rel, etc*/ |
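Note: besides moving the emulator onto the shared X86_EFLAGS_* constants, the hunk above factors the sub-register write rules out of write_register_operand() into assign_register() so that POPA can pop into a temporary and write it back with the correct width semantics. The subtle case is the 4-byte write, which in 64-bit mode zero-extends into bits 63:32, while 1- and 2-byte writes merge into the existing value. A standalone illustration of that rule (plain userspace C, little-endian as on x86, not the emulator code itself):

#include <stdint.h>
#include <stdio.h>

/* Same write-size rules as the emulator's assign_register():
 * 1- and 2-byte writes merge into the low bits, a 4-byte write
 * zero-extends to 64 bits, an 8-byte write replaces everything. */
static void assign_register(uint64_t *reg, uint64_t val, int bytes)
{
	switch (bytes) {
	case 1: *(uint8_t *)reg  = (uint8_t)val;  break;  /* low byte only */
	case 2: *(uint16_t *)reg = (uint16_t)val; break;  /* low word only */
	case 4: *reg = (uint32_t)val;             break;  /* zero-extend   */
	case 8: *reg = val;                       break;
	}
}

int main(void)
{
	uint64_t rax = 0x1122334455667788ULL;

	assign_register(&rax, 0xAB, 1);
	printf("%016llx\n", (unsigned long long)rax); /* 11223344556677ab */

	assign_register(&rax, 0xDEADBEEF, 4);
	printf("%016llx\n", (unsigned long long)rax); /* 00000000deadbeef */
	return 0;
}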
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 298781d4cfb4..4dce6f8b6129 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c | |||
@@ -443,7 +443,8 @@ static inline int pit_in_range(gpa_t addr) | |||
443 | (addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH)); | 443 | (addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH)); |
444 | } | 444 | } |
445 | 445 | ||
446 | static int pit_ioport_write(struct kvm_io_device *this, | 446 | static int pit_ioport_write(struct kvm_vcpu *vcpu, |
447 | struct kvm_io_device *this, | ||
447 | gpa_t addr, int len, const void *data) | 448 | gpa_t addr, int len, const void *data) |
448 | { | 449 | { |
449 | struct kvm_pit *pit = dev_to_pit(this); | 450 | struct kvm_pit *pit = dev_to_pit(this); |
@@ -519,7 +520,8 @@ static int pit_ioport_write(struct kvm_io_device *this, | |||
519 | return 0; | 520 | return 0; |
520 | } | 521 | } |
521 | 522 | ||
522 | static int pit_ioport_read(struct kvm_io_device *this, | 523 | static int pit_ioport_read(struct kvm_vcpu *vcpu, |
524 | struct kvm_io_device *this, | ||
523 | gpa_t addr, int len, void *data) | 525 | gpa_t addr, int len, void *data) |
524 | { | 526 | { |
525 | struct kvm_pit *pit = dev_to_pit(this); | 527 | struct kvm_pit *pit = dev_to_pit(this); |
@@ -589,7 +591,8 @@ static int pit_ioport_read(struct kvm_io_device *this, | |||
589 | return 0; | 591 | return 0; |
590 | } | 592 | } |
591 | 593 | ||
592 | static int speaker_ioport_write(struct kvm_io_device *this, | 594 | static int speaker_ioport_write(struct kvm_vcpu *vcpu, |
595 | struct kvm_io_device *this, | ||
593 | gpa_t addr, int len, const void *data) | 596 | gpa_t addr, int len, const void *data) |
594 | { | 597 | { |
595 | struct kvm_pit *pit = speaker_to_pit(this); | 598 | struct kvm_pit *pit = speaker_to_pit(this); |
@@ -606,8 +609,9 @@ static int speaker_ioport_write(struct kvm_io_device *this, | |||
606 | return 0; | 609 | return 0; |
607 | } | 610 | } |
608 | 611 | ||
609 | static int speaker_ioport_read(struct kvm_io_device *this, | 612 | static int speaker_ioport_read(struct kvm_vcpu *vcpu, |
610 | gpa_t addr, int len, void *data) | 613 | struct kvm_io_device *this, |
614 | gpa_t addr, int len, void *data) | ||
611 | { | 615 | { |
612 | struct kvm_pit *pit = speaker_to_pit(this); | 616 | struct kvm_pit *pit = speaker_to_pit(this); |
613 | struct kvm_kpit_state *pit_state = &pit->pit_state; | 617 | struct kvm_kpit_state *pit_state = &pit->pit_state; |
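Note: this hunk, together with the matching i8259, ioapic and lapic hunks below, adds the accessing vcpu as the first parameter of every kvm_io_device read/write callback. A minimal sketch of a device handler pair written against that shape follows; the callback signatures mirror the patch, but the stubbed types and the priv-pointer plumbing are simplifications for illustration only (the real declarations live in <kvm/iodev.h>).

#include <stddef.h>
#include <string.h>

struct kvm_vcpu;                           /* opaque for the sketch       */
struct kvm_io_device { void *priv; };      /* stub; real layout differs   */
typedef unsigned long long gpa_t;

struct demo_regs { unsigned char r[16]; };

static int demo_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
		     gpa_t addr, int len, void *val)
{
	struct demo_regs *d = this->priv;

	if ((addr & 0xf) + len > sizeof(d->r))
		return -1;
	memcpy(val, &d->r[addr & 0xf], len);   /* vcpu is available here */
	return 0;
}

static int demo_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
		      gpa_t addr, int len, const void *val)
{
	struct demo_regs *d = this->priv;

	if ((addr & 0xf) + len > sizeof(d->r))
		return -1;
	memcpy(&d->r[addr & 0xf], val, len);
	return 0;
}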
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h index dd1b16b611b0..c84990b42b5b 100644 --- a/arch/x86/kvm/i8254.h +++ b/arch/x86/kvm/i8254.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | #include <linux/kthread.h> | 4 | #include <linux/kthread.h> |
5 | 5 | ||
6 | #include "iodev.h" | 6 | #include <kvm/iodev.h> |
7 | 7 | ||
8 | struct kvm_kpit_channel_state { | 8 | struct kvm_kpit_channel_state { |
9 | u32 count; /* can be 65536 */ | 9 | u32 count; /* can be 65536 */ |
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 9541ba34126b..fef922ff2635 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c | |||
@@ -529,42 +529,42 @@ static int picdev_read(struct kvm_pic *s, | |||
529 | return 0; | 529 | return 0; |
530 | } | 530 | } |
531 | 531 | ||
532 | static int picdev_master_write(struct kvm_io_device *dev, | 532 | static int picdev_master_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, |
533 | gpa_t addr, int len, const void *val) | 533 | gpa_t addr, int len, const void *val) |
534 | { | 534 | { |
535 | return picdev_write(container_of(dev, struct kvm_pic, dev_master), | 535 | return picdev_write(container_of(dev, struct kvm_pic, dev_master), |
536 | addr, len, val); | 536 | addr, len, val); |
537 | } | 537 | } |
538 | 538 | ||
539 | static int picdev_master_read(struct kvm_io_device *dev, | 539 | static int picdev_master_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, |
540 | gpa_t addr, int len, void *val) | 540 | gpa_t addr, int len, void *val) |
541 | { | 541 | { |
542 | return picdev_read(container_of(dev, struct kvm_pic, dev_master), | 542 | return picdev_read(container_of(dev, struct kvm_pic, dev_master), |
543 | addr, len, val); | 543 | addr, len, val); |
544 | } | 544 | } |
545 | 545 | ||
546 | static int picdev_slave_write(struct kvm_io_device *dev, | 546 | static int picdev_slave_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, |
547 | gpa_t addr, int len, const void *val) | 547 | gpa_t addr, int len, const void *val) |
548 | { | 548 | { |
549 | return picdev_write(container_of(dev, struct kvm_pic, dev_slave), | 549 | return picdev_write(container_of(dev, struct kvm_pic, dev_slave), |
550 | addr, len, val); | 550 | addr, len, val); |
551 | } | 551 | } |
552 | 552 | ||
553 | static int picdev_slave_read(struct kvm_io_device *dev, | 553 | static int picdev_slave_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, |
554 | gpa_t addr, int len, void *val) | 554 | gpa_t addr, int len, void *val) |
555 | { | 555 | { |
556 | return picdev_read(container_of(dev, struct kvm_pic, dev_slave), | 556 | return picdev_read(container_of(dev, struct kvm_pic, dev_slave), |
557 | addr, len, val); | 557 | addr, len, val); |
558 | } | 558 | } |
559 | 559 | ||
560 | static int picdev_eclr_write(struct kvm_io_device *dev, | 560 | static int picdev_eclr_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, |
561 | gpa_t addr, int len, const void *val) | 561 | gpa_t addr, int len, const void *val) |
562 | { | 562 | { |
563 | return picdev_write(container_of(dev, struct kvm_pic, dev_eclr), | 563 | return picdev_write(container_of(dev, struct kvm_pic, dev_eclr), |
564 | addr, len, val); | 564 | addr, len, val); |
565 | } | 565 | } |
566 | 566 | ||
567 | static int picdev_eclr_read(struct kvm_io_device *dev, | 567 | static int picdev_eclr_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, |
568 | gpa_t addr, int len, void *val) | 568 | gpa_t addr, int len, void *val) |
569 | { | 569 | { |
570 | return picdev_read(container_of(dev, struct kvm_pic, dev_eclr), | 570 | return picdev_read(container_of(dev, struct kvm_pic, dev_eclr), |
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index 46d4449772bc..28146f03c514 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c | |||
@@ -206,6 +206,8 @@ static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq, | |||
206 | 206 | ||
207 | old_irr = ioapic->irr; | 207 | old_irr = ioapic->irr; |
208 | ioapic->irr |= mask; | 208 | ioapic->irr |= mask; |
209 | if (edge) | ||
210 | ioapic->irr_delivered &= ~mask; | ||
209 | if ((edge && old_irr == ioapic->irr) || | 211 | if ((edge && old_irr == ioapic->irr) || |
210 | (!edge && entry.fields.remote_irr)) { | 212 | (!edge && entry.fields.remote_irr)) { |
211 | ret = 0; | 213 | ret = 0; |
@@ -349,7 +351,7 @@ static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status) | |||
349 | irqe.shorthand = 0; | 351 | irqe.shorthand = 0; |
350 | 352 | ||
351 | if (irqe.trig_mode == IOAPIC_EDGE_TRIG) | 353 | if (irqe.trig_mode == IOAPIC_EDGE_TRIG) |
352 | ioapic->irr &= ~(1 << irq); | 354 | ioapic->irr_delivered |= 1 << irq; |
353 | 355 | ||
354 | if (irq == RTC_GSI && line_status) { | 356 | if (irq == RTC_GSI && line_status) { |
355 | /* | 357 | /* |
@@ -473,13 +475,6 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, | |||
473 | } | 475 | } |
474 | } | 476 | } |
475 | 477 | ||
476 | bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector) | ||
477 | { | ||
478 | struct kvm_ioapic *ioapic = kvm->arch.vioapic; | ||
479 | smp_rmb(); | ||
480 | return test_bit(vector, ioapic->handled_vectors); | ||
481 | } | ||
482 | |||
483 | void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode) | 478 | void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode) |
484 | { | 479 | { |
485 | struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; | 480 | struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; |
@@ -500,8 +495,8 @@ static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr) | |||
500 | (addr < ioapic->base_address + IOAPIC_MEM_LENGTH))); | 495 | (addr < ioapic->base_address + IOAPIC_MEM_LENGTH))); |
501 | } | 496 | } |
502 | 497 | ||
503 | static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len, | 498 | static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this, |
504 | void *val) | 499 | gpa_t addr, int len, void *val) |
505 | { | 500 | { |
506 | struct kvm_ioapic *ioapic = to_ioapic(this); | 501 | struct kvm_ioapic *ioapic = to_ioapic(this); |
507 | u32 result; | 502 | u32 result; |
@@ -543,8 +538,8 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len, | |||
543 | return 0; | 538 | return 0; |
544 | } | 539 | } |
545 | 540 | ||
546 | static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len, | 541 | static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, |
547 | const void *val) | 542 | gpa_t addr, int len, const void *val) |
548 | { | 543 | { |
549 | struct kvm_ioapic *ioapic = to_ioapic(this); | 544 | struct kvm_ioapic *ioapic = to_ioapic(this); |
550 | u32 data; | 545 | u32 data; |
@@ -599,6 +594,7 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic) | |||
599 | ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS; | 594 | ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS; |
600 | ioapic->ioregsel = 0; | 595 | ioapic->ioregsel = 0; |
601 | ioapic->irr = 0; | 596 | ioapic->irr = 0; |
597 | ioapic->irr_delivered = 0; | ||
602 | ioapic->id = 0; | 598 | ioapic->id = 0; |
603 | memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS); | 599 | memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS); |
604 | rtc_irq_eoi_tracking_reset(ioapic); | 600 | rtc_irq_eoi_tracking_reset(ioapic); |
@@ -656,6 +652,7 @@ int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) | |||
656 | 652 | ||
657 | spin_lock(&ioapic->lock); | 653 | spin_lock(&ioapic->lock); |
658 | memcpy(state, ioapic, sizeof(struct kvm_ioapic_state)); | 654 | memcpy(state, ioapic, sizeof(struct kvm_ioapic_state)); |
655 | state->irr &= ~ioapic->irr_delivered; | ||
659 | spin_unlock(&ioapic->lock); | 656 | spin_unlock(&ioapic->lock); |
660 | return 0; | 657 | return 0; |
661 | } | 658 | } |
@@ -669,6 +666,7 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) | |||
669 | spin_lock(&ioapic->lock); | 666 | spin_lock(&ioapic->lock); |
670 | memcpy(ioapic, state, sizeof(struct kvm_ioapic_state)); | 667 | memcpy(ioapic, state, sizeof(struct kvm_ioapic_state)); |
671 | ioapic->irr = 0; | 668 | ioapic->irr = 0; |
669 | ioapic->irr_delivered = 0; | ||
672 | update_handled_vectors(ioapic); | 670 | update_handled_vectors(ioapic); |
673 | kvm_vcpu_request_scan_ioapic(kvm); | 671 | kvm_vcpu_request_scan_ioapic(kvm); |
674 | kvm_ioapic_inject_all(ioapic, state->irr); | 672 | kvm_ioapic_inject_all(ioapic, state->irr); |
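Note: instead of clearing an edge-triggered IRR bit when the interrupt is serviced, the ioapic hunk above records delivery in the new irr_delivered shadow and subtracts it when userspace reads the state (kvm_get_ioapic), so a save/restore cycle does not replay already-delivered edges, while a fresh edge re-arms the pin by clearing its shadow bit. A compressed sketch of that bookkeeping, reduced to the two masks involved:

#include <stdint.h>

/* Sketch of the irr / irr_delivered interplay introduced above. */
struct ioapic_state {
	uint32_t irr;            /* pending requests, as the guest set them */
	uint32_t irr_delivered;  /* edge IRQs already injected              */
};

/* A new edge clears the "already delivered" marker so it can fire again. */
static void set_edge_irq(struct ioapic_state *s, int irq)
{
	uint32_t mask = 1u << irq;

	s->irr |= mask;
	s->irr_delivered &= ~mask;
}

/* Delivery no longer clears irr; it only records the fact. */
static void service_edge_irq(struct ioapic_state *s, int irq)
{
	s->irr_delivered |= 1u << irq;
}

/* What a KVM_GET_IOAPIC-style state read should report: pending bits
 * minus the ones that were already injected into a local APIC. */
static uint32_t visible_irr(const struct ioapic_state *s)
{
	return s->irr & ~s->irr_delivered;
}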
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h index c2e36d934af4..ca0b0b4e6256 100644 --- a/arch/x86/kvm/ioapic.h +++ b/arch/x86/kvm/ioapic.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | #include <linux/kvm_host.h> | 4 | #include <linux/kvm_host.h> |
5 | 5 | ||
6 | #include "iodev.h" | 6 | #include <kvm/iodev.h> |
7 | 7 | ||
8 | struct kvm; | 8 | struct kvm; |
9 | struct kvm_vcpu; | 9 | struct kvm_vcpu; |
@@ -77,6 +77,7 @@ struct kvm_ioapic { | |||
77 | struct rtc_status rtc_status; | 77 | struct rtc_status rtc_status; |
78 | struct delayed_work eoi_inject; | 78 | struct delayed_work eoi_inject; |
79 | u32 irq_eoi[IOAPIC_NUM_PINS]; | 79 | u32 irq_eoi[IOAPIC_NUM_PINS]; |
80 | u32 irr_delivered; | ||
80 | }; | 81 | }; |
81 | 82 | ||
82 | #ifdef DEBUG | 83 | #ifdef DEBUG |
@@ -97,13 +98,19 @@ static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm) | |||
97 | return kvm->arch.vioapic; | 98 | return kvm->arch.vioapic; |
98 | } | 99 | } |
99 | 100 | ||
101 | static inline bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector) | ||
102 | { | ||
103 | struct kvm_ioapic *ioapic = kvm->arch.vioapic; | ||
104 | smp_rmb(); | ||
105 | return test_bit(vector, ioapic->handled_vectors); | ||
106 | } | ||
107 | |||
100 | void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu); | 108 | void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu); |
101 | bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, | 109 | bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, |
102 | int short_hand, unsigned int dest, int dest_mode); | 110 | int short_hand, unsigned int dest, int dest_mode); |
103 | int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2); | 111 | int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2); |
104 | void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, | 112 | void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, |
105 | int trigger_mode); | 113 | int trigger_mode); |
106 | bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector); | ||
107 | int kvm_ioapic_init(struct kvm *kvm); | 114 | int kvm_ioapic_init(struct kvm *kvm); |
108 | void kvm_ioapic_destroy(struct kvm *kvm); | 115 | void kvm_ioapic_destroy(struct kvm *kvm); |
109 | int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id, | 116 | int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id, |
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index 2d03568e9498..ad68c73008c5 100644 --- a/arch/x86/kvm/irq.h +++ b/arch/x86/kvm/irq.h | |||
@@ -27,7 +27,7 @@ | |||
27 | #include <linux/kvm_host.h> | 27 | #include <linux/kvm_host.h> |
28 | #include <linux/spinlock.h> | 28 | #include <linux/spinlock.h> |
29 | 29 | ||
30 | #include "iodev.h" | 30 | #include <kvm/iodev.h> |
31 | #include "ioapic.h" | 31 | #include "ioapic.h" |
32 | #include "lapic.h" | 32 | #include "lapic.h" |
33 | 33 | ||
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 4ee827d7bf36..d67206a7b99a 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -133,6 +133,28 @@ static inline int kvm_apic_id(struct kvm_lapic *apic) | |||
133 | return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff; | 133 | return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff; |
134 | } | 134 | } |
135 | 135 | ||
136 | /* The logical map is definitely wrong if we have multiple | ||
137 | * modes at the same time. (Physical map is always right.) | ||
138 | */ | ||
139 | static inline bool kvm_apic_logical_map_valid(struct kvm_apic_map *map) | ||
140 | { | ||
141 | return !(map->mode & (map->mode - 1)); | ||
142 | } | ||
143 | |||
144 | static inline void | ||
145 | apic_logical_id(struct kvm_apic_map *map, u32 dest_id, u16 *cid, u16 *lid) | ||
146 | { | ||
147 | unsigned lid_bits; | ||
148 | |||
149 | BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_CLUSTER != 4); | ||
150 | BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_FLAT != 8); | ||
151 | BUILD_BUG_ON(KVM_APIC_MODE_X2APIC != 16); | ||
152 | lid_bits = map->mode; | ||
153 | |||
154 | *cid = dest_id >> lid_bits; | ||
155 | *lid = dest_id & ((1 << lid_bits) - 1); | ||
156 | } | ||
157 | |||
136 | static void recalculate_apic_map(struct kvm *kvm) | 158 | static void recalculate_apic_map(struct kvm *kvm) |
137 | { | 159 | { |
138 | struct kvm_apic_map *new, *old = NULL; | 160 | struct kvm_apic_map *new, *old = NULL; |
@@ -146,48 +168,6 @@ static void recalculate_apic_map(struct kvm *kvm) | |||
146 | if (!new) | 168 | if (!new) |
147 | goto out; | 169 | goto out; |
148 | 170 | ||
149 | new->ldr_bits = 8; | ||
150 | /* flat mode is default */ | ||
151 | new->cid_shift = 8; | ||
152 | new->cid_mask = 0; | ||
153 | new->lid_mask = 0xff; | ||
154 | new->broadcast = APIC_BROADCAST; | ||
155 | |||
156 | kvm_for_each_vcpu(i, vcpu, kvm) { | ||
157 | struct kvm_lapic *apic = vcpu->arch.apic; | ||
158 | |||
159 | if (!kvm_apic_present(vcpu)) | ||
160 | continue; | ||
161 | |||
162 | if (apic_x2apic_mode(apic)) { | ||
163 | new->ldr_bits = 32; | ||
164 | new->cid_shift = 16; | ||
165 | new->cid_mask = new->lid_mask = 0xffff; | ||
166 | new->broadcast = X2APIC_BROADCAST; | ||
167 | } else if (kvm_apic_get_reg(apic, APIC_LDR)) { | ||
168 | if (kvm_apic_get_reg(apic, APIC_DFR) == | ||
169 | APIC_DFR_CLUSTER) { | ||
170 | new->cid_shift = 4; | ||
171 | new->cid_mask = 0xf; | ||
172 | new->lid_mask = 0xf; | ||
173 | } else { | ||
174 | new->cid_shift = 8; | ||
175 | new->cid_mask = 0; | ||
176 | new->lid_mask = 0xff; | ||
177 | } | ||
178 | } | ||
179 | |||
180 | /* | ||
181 | * All APICs have to be configured in the same mode by an OS. | ||
182 | * We take advatage of this while building logical id loockup | ||
183 | * table. After reset APICs are in software disabled mode, so if | ||
184 | * we find apic with different setting we assume this is the mode | ||
185 | * OS wants all apics to be in; build lookup table accordingly. | ||
186 | */ | ||
187 | if (kvm_apic_sw_enabled(apic)) | ||
188 | break; | ||
189 | } | ||
190 | |||
191 | kvm_for_each_vcpu(i, vcpu, kvm) { | 171 | kvm_for_each_vcpu(i, vcpu, kvm) { |
192 | struct kvm_lapic *apic = vcpu->arch.apic; | 172 | struct kvm_lapic *apic = vcpu->arch.apic; |
193 | u16 cid, lid; | 173 | u16 cid, lid; |
@@ -198,11 +178,25 @@ static void recalculate_apic_map(struct kvm *kvm) | |||
198 | 178 | ||
199 | aid = kvm_apic_id(apic); | 179 | aid = kvm_apic_id(apic); |
200 | ldr = kvm_apic_get_reg(apic, APIC_LDR); | 180 | ldr = kvm_apic_get_reg(apic, APIC_LDR); |
201 | cid = apic_cluster_id(new, ldr); | ||
202 | lid = apic_logical_id(new, ldr); | ||
203 | 181 | ||
204 | if (aid < ARRAY_SIZE(new->phys_map)) | 182 | if (aid < ARRAY_SIZE(new->phys_map)) |
205 | new->phys_map[aid] = apic; | 183 | new->phys_map[aid] = apic; |
184 | |||
185 | if (apic_x2apic_mode(apic)) { | ||
186 | new->mode |= KVM_APIC_MODE_X2APIC; | ||
187 | } else if (ldr) { | ||
188 | ldr = GET_APIC_LOGICAL_ID(ldr); | ||
189 | if (kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT) | ||
190 | new->mode |= KVM_APIC_MODE_XAPIC_FLAT; | ||
191 | else | ||
192 | new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER; | ||
193 | } | ||
194 | |||
195 | if (!kvm_apic_logical_map_valid(new)) | ||
196 | continue; | ||
197 | |||
198 | apic_logical_id(new, ldr, &cid, &lid); | ||
199 | |||
206 | if (lid && cid < ARRAY_SIZE(new->logical_map)) | 200 | if (lid && cid < ARRAY_SIZE(new->logical_map)) |
207 | new->logical_map[cid][ffs(lid) - 1] = apic; | 201 | new->logical_map[cid][ffs(lid) - 1] = apic; |
208 | } | 202 | } |
@@ -588,15 +582,23 @@ static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr) | |||
588 | apic_update_ppr(apic); | 582 | apic_update_ppr(apic); |
589 | } | 583 | } |
590 | 584 | ||
591 | static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 dest) | 585 | static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda) |
592 | { | 586 | { |
593 | return dest == (apic_x2apic_mode(apic) ? | 587 | if (apic_x2apic_mode(apic)) |
594 | X2APIC_BROADCAST : APIC_BROADCAST); | 588 | return mda == X2APIC_BROADCAST; |
589 | |||
590 | return GET_APIC_DEST_FIELD(mda) == APIC_BROADCAST; | ||
595 | } | 591 | } |
596 | 592 | ||
597 | static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 dest) | 593 | static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda) |
598 | { | 594 | { |
599 | return kvm_apic_id(apic) == dest || kvm_apic_broadcast(apic, dest); | 595 | if (kvm_apic_broadcast(apic, mda)) |
596 | return true; | ||
597 | |||
598 | if (apic_x2apic_mode(apic)) | ||
599 | return mda == kvm_apic_id(apic); | ||
600 | |||
601 | return mda == SET_APIC_DEST_FIELD(kvm_apic_id(apic)); | ||
600 | } | 602 | } |
601 | 603 | ||
602 | static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda) | 604 | static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda) |
@@ -613,6 +615,7 @@ static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda) | |||
613 | && (logical_id & mda & 0xffff) != 0; | 615 | && (logical_id & mda & 0xffff) != 0; |
614 | 616 | ||
615 | logical_id = GET_APIC_LOGICAL_ID(logical_id); | 617 | logical_id = GET_APIC_LOGICAL_ID(logical_id); |
618 | mda = GET_APIC_DEST_FIELD(mda); | ||
616 | 619 | ||
617 | switch (kvm_apic_get_reg(apic, APIC_DFR)) { | 620 | switch (kvm_apic_get_reg(apic, APIC_DFR)) { |
618 | case APIC_DFR_FLAT: | 621 | case APIC_DFR_FLAT: |
@@ -627,10 +630,27 @@ static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda) | |||
627 | } | 630 | } |
628 | } | 631 | } |
629 | 632 | ||
633 | /* KVM APIC implementation has two quirks | ||
634 | * - dest always begins at 0 while xAPIC MDA has offset 24, | ||
635 | * - IOxAPIC messages have to be delivered (directly) to x2APIC. | ||
636 | */ | ||
637 | static u32 kvm_apic_mda(unsigned int dest_id, struct kvm_lapic *source, | ||
638 | struct kvm_lapic *target) | ||
639 | { | ||
640 | bool ipi = source != NULL; | ||
641 | bool x2apic_mda = apic_x2apic_mode(ipi ? source : target); | ||
642 | |||
643 | if (!ipi && dest_id == APIC_BROADCAST && x2apic_mda) | ||
644 | return X2APIC_BROADCAST; | ||
645 | |||
646 | return x2apic_mda ? dest_id : SET_APIC_DEST_FIELD(dest_id); | ||
647 | } | ||
648 | |||
630 | bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, | 649 | bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, |
631 | int short_hand, unsigned int dest, int dest_mode) | 650 | int short_hand, unsigned int dest, int dest_mode) |
632 | { | 651 | { |
633 | struct kvm_lapic *target = vcpu->arch.apic; | 652 | struct kvm_lapic *target = vcpu->arch.apic; |
653 | u32 mda = kvm_apic_mda(dest, source, target); | ||
634 | 654 | ||
635 | apic_debug("target %p, source %p, dest 0x%x, " | 655 | apic_debug("target %p, source %p, dest 0x%x, " |
636 | "dest_mode 0x%x, short_hand 0x%x\n", | 656 | "dest_mode 0x%x, short_hand 0x%x\n", |
@@ -640,9 +660,9 @@ bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, | |||
640 | switch (short_hand) { | 660 | switch (short_hand) { |
641 | case APIC_DEST_NOSHORT: | 661 | case APIC_DEST_NOSHORT: |
642 | if (dest_mode == APIC_DEST_PHYSICAL) | 662 | if (dest_mode == APIC_DEST_PHYSICAL) |
643 | return kvm_apic_match_physical_addr(target, dest); | 663 | return kvm_apic_match_physical_addr(target, mda); |
644 | else | 664 | else |
645 | return kvm_apic_match_logical_addr(target, dest); | 665 | return kvm_apic_match_logical_addr(target, mda); |
646 | case APIC_DEST_SELF: | 666 | case APIC_DEST_SELF: |
647 | return target == source; | 667 | return target == source; |
648 | case APIC_DEST_ALLINC: | 668 | case APIC_DEST_ALLINC: |
@@ -664,6 +684,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src, | |||
664 | struct kvm_lapic **dst; | 684 | struct kvm_lapic **dst; |
665 | int i; | 685 | int i; |
666 | bool ret = false; | 686 | bool ret = false; |
687 | bool x2apic_ipi = src && apic_x2apic_mode(src); | ||
667 | 688 | ||
668 | *r = -1; | 689 | *r = -1; |
669 | 690 | ||
@@ -675,15 +696,15 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src, | |||
675 | if (irq->shorthand) | 696 | if (irq->shorthand) |
676 | return false; | 697 | return false; |
677 | 698 | ||
699 | if (irq->dest_id == (x2apic_ipi ? X2APIC_BROADCAST : APIC_BROADCAST)) | ||
700 | return false; | ||
701 | |||
678 | rcu_read_lock(); | 702 | rcu_read_lock(); |
679 | map = rcu_dereference(kvm->arch.apic_map); | 703 | map = rcu_dereference(kvm->arch.apic_map); |
680 | 704 | ||
681 | if (!map) | 705 | if (!map) |
682 | goto out; | 706 | goto out; |
683 | 707 | ||
684 | if (irq->dest_id == map->broadcast) | ||
685 | goto out; | ||
686 | |||
687 | ret = true; | 708 | ret = true; |
688 | 709 | ||
689 | if (irq->dest_mode == APIC_DEST_PHYSICAL) { | 710 | if (irq->dest_mode == APIC_DEST_PHYSICAL) { |
@@ -692,16 +713,20 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src, | |||
692 | 713 | ||
693 | dst = &map->phys_map[irq->dest_id]; | 714 | dst = &map->phys_map[irq->dest_id]; |
694 | } else { | 715 | } else { |
695 | u32 mda = irq->dest_id << (32 - map->ldr_bits); | 716 | u16 cid; |
696 | u16 cid = apic_cluster_id(map, mda); | 717 | |
718 | if (!kvm_apic_logical_map_valid(map)) { | ||
719 | ret = false; | ||
720 | goto out; | ||
721 | } | ||
722 | |||
723 | apic_logical_id(map, irq->dest_id, &cid, (u16 *)&bitmap); | ||
697 | 724 | ||
698 | if (cid >= ARRAY_SIZE(map->logical_map)) | 725 | if (cid >= ARRAY_SIZE(map->logical_map)) |
699 | goto out; | 726 | goto out; |
700 | 727 | ||
701 | dst = map->logical_map[cid]; | 728 | dst = map->logical_map[cid]; |
702 | 729 | ||
703 | bitmap = apic_logical_id(map, mda); | ||
704 | |||
705 | if (irq->delivery_mode == APIC_DM_LOWEST) { | 730 | if (irq->delivery_mode == APIC_DM_LOWEST) { |
706 | int l = -1; | 731 | int l = -1; |
707 | for_each_set_bit(i, &bitmap, 16) { | 732 | for_each_set_bit(i, &bitmap, 16) { |
@@ -1037,7 +1062,7 @@ static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr) | |||
1037 | addr < apic->base_address + LAPIC_MMIO_LENGTH; | 1062 | addr < apic->base_address + LAPIC_MMIO_LENGTH; |
1038 | } | 1063 | } |
1039 | 1064 | ||
1040 | static int apic_mmio_read(struct kvm_io_device *this, | 1065 | static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this, |
1041 | gpa_t address, int len, void *data) | 1066 | gpa_t address, int len, void *data) |
1042 | { | 1067 | { |
1043 | struct kvm_lapic *apic = to_lapic(this); | 1068 | struct kvm_lapic *apic = to_lapic(this); |
@@ -1357,7 +1382,7 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) | |||
1357 | return ret; | 1382 | return ret; |
1358 | } | 1383 | } |
1359 | 1384 | ||
1360 | static int apic_mmio_write(struct kvm_io_device *this, | 1385 | static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, |
1361 | gpa_t address, int len, const void *data) | 1386 | gpa_t address, int len, const void *data) |
1362 | { | 1387 | { |
1363 | struct kvm_lapic *apic = to_lapic(this); | 1388 | struct kvm_lapic *apic = to_lapic(this); |
@@ -1497,8 +1522,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) | |||
1497 | return; | 1522 | return; |
1498 | } | 1523 | } |
1499 | 1524 | ||
1500 | if (!kvm_vcpu_is_bsp(apic->vcpu)) | ||
1501 | value &= ~MSR_IA32_APICBASE_BSP; | ||
1502 | vcpu->arch.apic_base = value; | 1525 | vcpu->arch.apic_base = value; |
1503 | 1526 | ||
1504 | /* update jump label if enable bit changes */ | 1527 | /* update jump label if enable bit changes */ |
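Note on the lapic.c hunks above: the broadcast test now runs before the apic_map lookup and is keyed on the sender's mode, because an x2APIC IPI encodes "all CPUs" as a 32-bit all-ones destination while xAPIC uses 0xff. The following is a minimal free-standing sketch of that predicate, not kernel code; the two constants mirror the kernel's APIC_BROADCAST/X2APIC_BROADCAST definitions, everything else is illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define APIC_BROADCAST    0xffu        /* xAPIC: 8-bit destination, all ones  */
#define X2APIC_BROADCAST  0xffffffffu  /* x2APIC: 32-bit destination, all ones */

/* Mirrors the new check in kvm_irq_delivery_to_apic_fast(): a broadcast
 * destination never takes the fast path, and which value means "broadcast"
 * depends on whether the *sender* is in x2APIC mode. */
static bool dest_is_broadcast(uint32_t dest_id, bool sender_x2apic)
{
	return dest_id == (sender_x2apic ? X2APIC_BROADCAST : APIC_BROADCAST);
}

int main(void)
{
	printf("%d\n", dest_is_broadcast(0xffu, false));       /* 1: xAPIC broadcast     */
	printf("%d\n", dest_is_broadcast(0xffu, true));        /* 0: ordinary x2APIC id  */
	printf("%d\n", dest_is_broadcast(0xffffffffu, true));  /* 1: x2APIC broadcast    */
	return 0;
}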
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 0bc6c656625b..9d28383fc1e7 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h | |||
@@ -1,7 +1,7 @@ | |||
1 | #ifndef __KVM_X86_LAPIC_H | 1 | #ifndef __KVM_X86_LAPIC_H |
2 | #define __KVM_X86_LAPIC_H | 2 | #define __KVM_X86_LAPIC_H |
3 | 3 | ||
4 | #include "iodev.h" | 4 | #include <kvm/iodev.h> |
5 | 5 | ||
6 | #include <linux/kvm_host.h> | 6 | #include <linux/kvm_host.h> |
7 | 7 | ||
@@ -148,21 +148,6 @@ static inline bool kvm_apic_vid_enabled(struct kvm *kvm) | |||
148 | return kvm_x86_ops->vm_has_apicv(kvm); | 148 | return kvm_x86_ops->vm_has_apicv(kvm); |
149 | } | 149 | } |
150 | 150 | ||
151 | static inline u16 apic_cluster_id(struct kvm_apic_map *map, u32 ldr) | ||
152 | { | ||
153 | u16 cid; | ||
154 | ldr >>= 32 - map->ldr_bits; | ||
155 | cid = (ldr >> map->cid_shift) & map->cid_mask; | ||
156 | |||
157 | return cid; | ||
158 | } | ||
159 | |||
160 | static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr) | ||
161 | { | ||
162 | ldr >>= (32 - map->ldr_bits); | ||
163 | return ldr & map->lid_mask; | ||
164 | } | ||
165 | |||
166 | static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu) | 151 | static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu) |
167 | { | 152 | { |
168 | return vcpu->arch.apic->pending_events; | 153 | return vcpu->arch.apic->pending_events; |
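The two inline helpers removed from lapic.h above decoded a logical destination (LDR) into a cluster id and an in-cluster bitmap using the per-map ldr_bits/cid_shift/cid_mask/lid_mask fields. A free-standing copy of that decode, useful for seeing what the replacement apic_logical_id() has to reproduce; the struct and the xAPIC cluster-mode field values in main() (ldr_bits = 8, cluster id in LDR bits 7:4, logical id in bits 3:0) are illustrative assumptions, not the kernel's kvm_apic_map.

#include <stdint.h>
#include <stdio.h>

struct apic_map_params {
	uint8_t  ldr_bits;
	uint32_t cid_shift;
	uint32_t cid_mask;
	uint32_t lid_mask;
};

/* Same arithmetic as the removed apic_cluster_id()/apic_logical_id(). */
static uint16_t cluster_id(const struct apic_map_params *map, uint32_t ldr)
{
	ldr >>= 32 - map->ldr_bits;
	return (ldr >> map->cid_shift) & map->cid_mask;
}

static uint16_t logical_id(const struct apic_map_params *map, uint32_t ldr)
{
	ldr >>= 32 - map->ldr_bits;
	return ldr & map->lid_mask;
}

int main(void)
{
	struct apic_map_params cluster = {
		.ldr_bits = 8, .cid_shift = 4, .cid_mask = 0xf, .lid_mask = 0xf,
	};
	uint32_t ldr = 0x22000000;  /* LDR: cluster 2, logical id bit 1 */

	printf("cluster=%u logical=%#x\n",
	       cluster_id(&cluster, ldr), logical_id(&cluster, ldr));
	return 0;
}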
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index cee759299a35..146f295ee322 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -4465,6 +4465,79 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, | |||
4465 | kvm_flush_remote_tlbs(kvm); | 4465 | kvm_flush_remote_tlbs(kvm); |
4466 | } | 4466 | } |
4467 | 4467 | ||
4468 | static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm, | ||
4469 | unsigned long *rmapp) | ||
4470 | { | ||
4471 | u64 *sptep; | ||
4472 | struct rmap_iterator iter; | ||
4473 | int need_tlb_flush = 0; | ||
4474 | pfn_t pfn; | ||
4475 | struct kvm_mmu_page *sp; | ||
4476 | |||
4477 | for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { | ||
4478 | BUG_ON(!(*sptep & PT_PRESENT_MASK)); | ||
4479 | |||
4480 | sp = page_header(__pa(sptep)); | ||
4481 | pfn = spte_to_pfn(*sptep); | ||
4482 | |||
4483 | /* | ||
4484 | * Only EPT supported for now; otherwise, one would need to | ||
4485 | * find out efficiently whether the guest page tables are | ||
4486 | * also using huge pages. | ||
4487 | */ | ||
4488 | if (sp->role.direct && | ||
4489 | !kvm_is_reserved_pfn(pfn) && | ||
4490 | PageTransCompound(pfn_to_page(pfn))) { | ||
4491 | drop_spte(kvm, sptep); | ||
4492 | sptep = rmap_get_first(*rmapp, &iter); | ||
4493 | need_tlb_flush = 1; | ||
4494 | } else | ||
4495 | sptep = rmap_get_next(&iter); | ||
4496 | } | ||
4497 | |||
4498 | return need_tlb_flush; | ||
4499 | } | ||
4500 | |||
4501 | void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, | ||
4502 | struct kvm_memory_slot *memslot) | ||
4503 | { | ||
4504 | bool flush = false; | ||
4505 | unsigned long *rmapp; | ||
4506 | unsigned long last_index, index; | ||
4507 | gfn_t gfn_start, gfn_end; | ||
4508 | |||
4509 | spin_lock(&kvm->mmu_lock); | ||
4510 | |||
4511 | gfn_start = memslot->base_gfn; | ||
4512 | gfn_end = memslot->base_gfn + memslot->npages - 1; | ||
4513 | |||
4514 | if (gfn_start >= gfn_end) | ||
4515 | goto out; | ||
4516 | |||
4517 | rmapp = memslot->arch.rmap[0]; | ||
4518 | last_index = gfn_to_index(gfn_end, memslot->base_gfn, | ||
4519 | PT_PAGE_TABLE_LEVEL); | ||
4520 | |||
4521 | for (index = 0; index <= last_index; ++index, ++rmapp) { | ||
4522 | if (*rmapp) | ||
4523 | flush |= kvm_mmu_zap_collapsible_spte(kvm, rmapp); | ||
4524 | |||
4525 | if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { | ||
4526 | if (flush) { | ||
4527 | kvm_flush_remote_tlbs(kvm); | ||
4528 | flush = false; | ||
4529 | } | ||
4530 | cond_resched_lock(&kvm->mmu_lock); | ||
4531 | } | ||
4532 | } | ||
4533 | |||
4534 | if (flush) | ||
4535 | kvm_flush_remote_tlbs(kvm); | ||
4536 | |||
4537 | out: | ||
4538 | spin_unlock(&kvm->mmu_lock); | ||
4539 | } | ||
4540 | |||
4468 | void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, | 4541 | void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, |
4469 | struct kvm_memory_slot *memslot) | 4542 | struct kvm_memory_slot *memslot) |
4470 | { | 4543 | { |
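The new kvm_mmu_zap_collapsible_sptes() above walks the small-page rmaps of a slot and drops sptes that could be remapped as huge pages, batching the remote TLB flush but always flushing before the mmu_lock is yielded. A toy user-space sketch of that "flush before dropping the lock" pattern; the stub functions stand in for kvm_flush_remote_tlbs() and cond_resched_lock() and are purely illustrative.

#include <stdbool.h>
#include <stdio.h>

static void flush_remote_tlbs(void) { puts("flush"); }
static bool need_to_yield(int i)    { return (i % 4) == 3; }
static void yield_lock(void)        { puts("yield"); }

/* Accumulate a pending-flush flag while scanning, but never yield the lock
 * with a flush outstanding, so other CPUs cannot run on stale translations. */
int main(void)
{
	bool flush = false;
	int zapped[] = { 1, 0, 1, 1, 0, 0, 1, 0 };   /* per-bucket "dropped an spte" */

	for (int i = 0; i < 8; i++) {
		if (zapped[i])
			flush = true;

		if (need_to_yield(i)) {
			if (flush) {
				flush_remote_tlbs();
				flush = false;
			}
			yield_lock();
		}
	}
	if (flush)
		flush_remote_tlbs();
	return 0;
}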
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 8e6b7d869d2f..29fbf9dfdc54 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c | |||
@@ -38,7 +38,7 @@ static struct kvm_arch_event_perf_mapping { | |||
38 | }; | 38 | }; |
39 | 39 | ||
40 | /* mapping between fixed pmc index and arch_events array */ | 40 | /* mapping between fixed pmc index and arch_events array */ |
41 | int fixed_pmc_events[] = {1, 0, 7}; | 41 | static int fixed_pmc_events[] = {1, 0, 7}; |
42 | 42 | ||
43 | static bool pmc_is_gp(struct kvm_pmc *pmc) | 43 | static bool pmc_is_gp(struct kvm_pmc *pmc) |
44 | { | 44 | { |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index cc618c882f90..ce741b8650f6 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -1261,7 +1261,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) | |||
1261 | 1261 | ||
1262 | svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | | 1262 | svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | |
1263 | MSR_IA32_APICBASE_ENABLE; | 1263 | MSR_IA32_APICBASE_ENABLE; |
1264 | if (kvm_vcpu_is_bsp(&svm->vcpu)) | 1264 | if (kvm_vcpu_is_reset_bsp(&svm->vcpu)) |
1265 | svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP; | 1265 | svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP; |
1266 | 1266 | ||
1267 | svm_init_osvw(&svm->vcpu); | 1267 | svm_init_osvw(&svm->vcpu); |
@@ -1929,14 +1929,12 @@ static int nop_on_interception(struct vcpu_svm *svm) | |||
1929 | static int halt_interception(struct vcpu_svm *svm) | 1929 | static int halt_interception(struct vcpu_svm *svm) |
1930 | { | 1930 | { |
1931 | svm->next_rip = kvm_rip_read(&svm->vcpu) + 1; | 1931 | svm->next_rip = kvm_rip_read(&svm->vcpu) + 1; |
1932 | skip_emulated_instruction(&svm->vcpu); | ||
1933 | return kvm_emulate_halt(&svm->vcpu); | 1932 | return kvm_emulate_halt(&svm->vcpu); |
1934 | } | 1933 | } |
1935 | 1934 | ||
1936 | static int vmmcall_interception(struct vcpu_svm *svm) | 1935 | static int vmmcall_interception(struct vcpu_svm *svm) |
1937 | { | 1936 | { |
1938 | svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; | 1937 | svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; |
1939 | skip_emulated_instruction(&svm->vcpu); | ||
1940 | kvm_emulate_hypercall(&svm->vcpu); | 1938 | kvm_emulate_hypercall(&svm->vcpu); |
1941 | return 1; | 1939 | return 1; |
1942 | } | 1940 | } |
@@ -2757,11 +2755,11 @@ static int invlpga_interception(struct vcpu_svm *svm) | |||
2757 | { | 2755 | { |
2758 | struct kvm_vcpu *vcpu = &svm->vcpu; | 2756 | struct kvm_vcpu *vcpu = &svm->vcpu; |
2759 | 2757 | ||
2760 | trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX], | 2758 | trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX), |
2761 | vcpu->arch.regs[VCPU_REGS_RAX]); | 2759 | kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); |
2762 | 2760 | ||
2763 | /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */ | 2761 | /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */ |
2764 | kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]); | 2762 | kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); |
2765 | 2763 | ||
2766 | svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; | 2764 | svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; |
2767 | skip_emulated_instruction(&svm->vcpu); | 2765 | skip_emulated_instruction(&svm->vcpu); |
@@ -2770,12 +2768,18 @@ static int invlpga_interception(struct vcpu_svm *svm) | |||
2770 | 2768 | ||
2771 | static int skinit_interception(struct vcpu_svm *svm) | 2769 | static int skinit_interception(struct vcpu_svm *svm) |
2772 | { | 2770 | { |
2773 | trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]); | 2771 | trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); |
2774 | 2772 | ||
2775 | kvm_queue_exception(&svm->vcpu, UD_VECTOR); | 2773 | kvm_queue_exception(&svm->vcpu, UD_VECTOR); |
2776 | return 1; | 2774 | return 1; |
2777 | } | 2775 | } |
2778 | 2776 | ||
2777 | static int wbinvd_interception(struct vcpu_svm *svm) | ||
2778 | { | ||
2779 | kvm_emulate_wbinvd(&svm->vcpu); | ||
2780 | return 1; | ||
2781 | } | ||
2782 | |||
2779 | static int xsetbv_interception(struct vcpu_svm *svm) | 2783 | static int xsetbv_interception(struct vcpu_svm *svm) |
2780 | { | 2784 | { |
2781 | u64 new_bv = kvm_read_edx_eax(&svm->vcpu); | 2785 | u64 new_bv = kvm_read_edx_eax(&svm->vcpu); |
@@ -2902,7 +2906,8 @@ static int rdpmc_interception(struct vcpu_svm *svm) | |||
2902 | return 1; | 2906 | return 1; |
2903 | } | 2907 | } |
2904 | 2908 | ||
2905 | bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val) | 2909 | static bool check_selective_cr0_intercepted(struct vcpu_svm *svm, |
2910 | unsigned long val) | ||
2906 | { | 2911 | { |
2907 | unsigned long cr0 = svm->vcpu.arch.cr0; | 2912 | unsigned long cr0 = svm->vcpu.arch.cr0; |
2908 | bool ret = false; | 2913 | bool ret = false; |
@@ -2940,7 +2945,10 @@ static int cr_interception(struct vcpu_svm *svm) | |||
2940 | return emulate_on_interception(svm); | 2945 | return emulate_on_interception(svm); |
2941 | 2946 | ||
2942 | reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; | 2947 | reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; |
2943 | cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; | 2948 | if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE) |
2949 | cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0; | ||
2950 | else | ||
2951 | cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; | ||
2944 | 2952 | ||
2945 | err = 0; | 2953 | err = 0; |
2946 | if (cr >= 16) { /* mov to cr */ | 2954 | if (cr >= 16) { /* mov to cr */ |
@@ -3133,7 +3141,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data) | |||
3133 | 3141 | ||
3134 | static int rdmsr_interception(struct vcpu_svm *svm) | 3142 | static int rdmsr_interception(struct vcpu_svm *svm) |
3135 | { | 3143 | { |
3136 | u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; | 3144 | u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX); |
3137 | u64 data; | 3145 | u64 data; |
3138 | 3146 | ||
3139 | if (svm_get_msr(&svm->vcpu, ecx, &data)) { | 3147 | if (svm_get_msr(&svm->vcpu, ecx, &data)) { |
@@ -3142,8 +3150,8 @@ static int rdmsr_interception(struct vcpu_svm *svm) | |||
3142 | } else { | 3150 | } else { |
3143 | trace_kvm_msr_read(ecx, data); | 3151 | trace_kvm_msr_read(ecx, data); |
3144 | 3152 | ||
3145 | svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff; | 3153 | kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, data & 0xffffffff); |
3146 | svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32; | 3154 | kvm_register_write(&svm->vcpu, VCPU_REGS_RDX, data >> 32); |
3147 | svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; | 3155 | svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; |
3148 | skip_emulated_instruction(&svm->vcpu); | 3156 | skip_emulated_instruction(&svm->vcpu); |
3149 | } | 3157 | } |
@@ -3246,9 +3254,8 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) | |||
3246 | static int wrmsr_interception(struct vcpu_svm *svm) | 3254 | static int wrmsr_interception(struct vcpu_svm *svm) |
3247 | { | 3255 | { |
3248 | struct msr_data msr; | 3256 | struct msr_data msr; |
3249 | u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; | 3257 | u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX); |
3250 | u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u) | 3258 | u64 data = kvm_read_edx_eax(&svm->vcpu); |
3251 | | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32); | ||
3252 | 3259 | ||
3253 | msr.data = data; | 3260 | msr.data = data; |
3254 | msr.index = ecx; | 3261 | msr.index = ecx; |
@@ -3325,7 +3332,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = { | |||
3325 | [SVM_EXIT_READ_CR3] = cr_interception, | 3332 | [SVM_EXIT_READ_CR3] = cr_interception, |
3326 | [SVM_EXIT_READ_CR4] = cr_interception, | 3333 | [SVM_EXIT_READ_CR4] = cr_interception, |
3327 | [SVM_EXIT_READ_CR8] = cr_interception, | 3334 | [SVM_EXIT_READ_CR8] = cr_interception, |
3328 | [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, | 3335 | [SVM_EXIT_CR0_SEL_WRITE] = cr_interception, |
3329 | [SVM_EXIT_WRITE_CR0] = cr_interception, | 3336 | [SVM_EXIT_WRITE_CR0] = cr_interception, |
3330 | [SVM_EXIT_WRITE_CR3] = cr_interception, | 3337 | [SVM_EXIT_WRITE_CR3] = cr_interception, |
3331 | [SVM_EXIT_WRITE_CR4] = cr_interception, | 3338 | [SVM_EXIT_WRITE_CR4] = cr_interception, |
@@ -3376,7 +3383,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = { | |||
3376 | [SVM_EXIT_STGI] = stgi_interception, | 3383 | [SVM_EXIT_STGI] = stgi_interception, |
3377 | [SVM_EXIT_CLGI] = clgi_interception, | 3384 | [SVM_EXIT_CLGI] = clgi_interception, |
3378 | [SVM_EXIT_SKINIT] = skinit_interception, | 3385 | [SVM_EXIT_SKINIT] = skinit_interception, |
3379 | [SVM_EXIT_WBINVD] = emulate_on_interception, | 3386 | [SVM_EXIT_WBINVD] = wbinvd_interception, |
3380 | [SVM_EXIT_MONITOR] = monitor_interception, | 3387 | [SVM_EXIT_MONITOR] = monitor_interception, |
3381 | [SVM_EXIT_MWAIT] = mwait_interception, | 3388 | [SVM_EXIT_MWAIT] = mwait_interception, |
3382 | [SVM_EXIT_XSETBV] = xsetbv_interception, | 3389 | [SVM_EXIT_XSETBV] = xsetbv_interception, |
@@ -3555,7 +3562,7 @@ static int handle_exit(struct kvm_vcpu *vcpu) | |||
3555 | 3562 | ||
3556 | if (exit_code >= ARRAY_SIZE(svm_exit_handlers) | 3563 | if (exit_code >= ARRAY_SIZE(svm_exit_handlers) |
3557 | || !svm_exit_handlers[exit_code]) { | 3564 | || !svm_exit_handlers[exit_code]) { |
3558 | WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_code); | 3565 | WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code); |
3559 | kvm_queue_exception(vcpu, UD_VECTOR); | 3566 | kvm_queue_exception(vcpu, UD_VECTOR); |
3560 | return 1; | 3567 | return 1; |
3561 | } | 3568 | } |
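Two small points on the svm.c hunks: cr_interception() derives a cr index by subtracting SVM_EXIT_READ_CR0 from the exit code, with indices 16 and up meaning "mov to cr"; SVM_EXIT_CR0_SEL_WRITE does not fit that numbering, so it is now explicitly remapped onto the plain write-CR0 index. And wrmsr_interception() replaces the open-coded RAX/RDX combination with kvm_read_edx_eax(). A free-standing sketch of what that combination amounts to (the helper below is illustrative, only its arithmetic matches the removed lines):

#include <stdint.h>
#include <stdio.h>

/* A 64-bit MSR value is passed in EDX:EAX: low 32 bits in EAX, high 32 bits
 * in EDX.  This mirrors the removed open-coded expression in
 * wrmsr_interception(). */
static uint64_t read_edx_eax(uint64_t rax, uint64_t rdx)
{
	return (rax & 0xffffffffu) | ((rdx & 0xffffffffu) << 32);
}

int main(void)
{
	/* e.g. a guest writing 0x123456789abcdef0 to some MSR */
	printf("%#llx\n",
	       (unsigned long long)read_edx_eax(0x9abcdef0u, 0x12345678u));
	return 0;
}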
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index ae4f6d35d19c..f5e8dce8046c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -2470,6 +2470,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) | |||
2470 | vmx->nested.nested_vmx_secondary_ctls_low = 0; | 2470 | vmx->nested.nested_vmx_secondary_ctls_low = 0; |
2471 | vmx->nested.nested_vmx_secondary_ctls_high &= | 2471 | vmx->nested.nested_vmx_secondary_ctls_high &= |
2472 | SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | | 2472 | SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | |
2473 | SECONDARY_EXEC_RDTSCP | | ||
2473 | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | | 2474 | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | |
2474 | SECONDARY_EXEC_APIC_REGISTER_VIRT | | 2475 | SECONDARY_EXEC_APIC_REGISTER_VIRT | |
2475 | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | | 2476 | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | |
@@ -3268,8 +3269,8 @@ static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg, | |||
3268 | * default value. | 3269 | * default value. |
3269 | */ | 3270 | */ |
3270 | if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS) | 3271 | if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS) |
3271 | save->selector &= ~SELECTOR_RPL_MASK; | 3272 | save->selector &= ~SEGMENT_RPL_MASK; |
3272 | save->dpl = save->selector & SELECTOR_RPL_MASK; | 3273 | save->dpl = save->selector & SEGMENT_RPL_MASK; |
3273 | save->s = 1; | 3274 | save->s = 1; |
3274 | } | 3275 | } |
3275 | vmx_set_segment(vcpu, save, seg); | 3276 | vmx_set_segment(vcpu, save, seg); |
@@ -3842,7 +3843,7 @@ static bool code_segment_valid(struct kvm_vcpu *vcpu) | |||
3842 | unsigned int cs_rpl; | 3843 | unsigned int cs_rpl; |
3843 | 3844 | ||
3844 | vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); | 3845 | vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); |
3845 | cs_rpl = cs.selector & SELECTOR_RPL_MASK; | 3846 | cs_rpl = cs.selector & SEGMENT_RPL_MASK; |
3846 | 3847 | ||
3847 | if (cs.unusable) | 3848 | if (cs.unusable) |
3848 | return false; | 3849 | return false; |
@@ -3870,7 +3871,7 @@ static bool stack_segment_valid(struct kvm_vcpu *vcpu) | |||
3870 | unsigned int ss_rpl; | 3871 | unsigned int ss_rpl; |
3871 | 3872 | ||
3872 | vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); | 3873 | vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); |
3873 | ss_rpl = ss.selector & SELECTOR_RPL_MASK; | 3874 | ss_rpl = ss.selector & SEGMENT_RPL_MASK; |
3874 | 3875 | ||
3875 | if (ss.unusable) | 3876 | if (ss.unusable) |
3876 | return true; | 3877 | return true; |
@@ -3892,7 +3893,7 @@ static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) | |||
3892 | unsigned int rpl; | 3893 | unsigned int rpl; |
3893 | 3894 | ||
3894 | vmx_get_segment(vcpu, &var, seg); | 3895 | vmx_get_segment(vcpu, &var, seg); |
3895 | rpl = var.selector & SELECTOR_RPL_MASK; | 3896 | rpl = var.selector & SEGMENT_RPL_MASK; |
3896 | 3897 | ||
3897 | if (var.unusable) | 3898 | if (var.unusable) |
3898 | return true; | 3899 | return true; |
@@ -3919,7 +3920,7 @@ static bool tr_valid(struct kvm_vcpu *vcpu) | |||
3919 | 3920 | ||
3920 | if (tr.unusable) | 3921 | if (tr.unusable) |
3921 | return false; | 3922 | return false; |
3922 | if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */ | 3923 | if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */ |
3923 | return false; | 3924 | return false; |
3924 | if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */ | 3925 | if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */ |
3925 | return false; | 3926 | return false; |
@@ -3937,7 +3938,7 @@ static bool ldtr_valid(struct kvm_vcpu *vcpu) | |||
3937 | 3938 | ||
3938 | if (ldtr.unusable) | 3939 | if (ldtr.unusable) |
3939 | return true; | 3940 | return true; |
3940 | if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */ | 3941 | if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */ |
3941 | return false; | 3942 | return false; |
3942 | if (ldtr.type != 2) | 3943 | if (ldtr.type != 2) |
3943 | return false; | 3944 | return false; |
@@ -3954,8 +3955,8 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) | |||
3954 | vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); | 3955 | vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); |
3955 | vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); | 3956 | vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); |
3956 | 3957 | ||
3957 | return ((cs.selector & SELECTOR_RPL_MASK) == | 3958 | return ((cs.selector & SEGMENT_RPL_MASK) == |
3958 | (ss.selector & SELECTOR_RPL_MASK)); | 3959 | (ss.selector & SEGMENT_RPL_MASK)); |
3959 | } | 3960 | } |
3960 | 3961 | ||
3961 | /* | 3962 | /* |
@@ -4711,7 +4712,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu) | |||
4711 | vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); | 4712 | vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); |
4712 | kvm_set_cr8(&vmx->vcpu, 0); | 4713 | kvm_set_cr8(&vmx->vcpu, 0); |
4713 | apic_base_msr.data = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE; | 4714 | apic_base_msr.data = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE; |
4714 | if (kvm_vcpu_is_bsp(&vmx->vcpu)) | 4715 | if (kvm_vcpu_is_reset_bsp(&vmx->vcpu)) |
4715 | apic_base_msr.data |= MSR_IA32_APICBASE_BSP; | 4716 | apic_base_msr.data |= MSR_IA32_APICBASE_BSP; |
4716 | apic_base_msr.host_initiated = true; | 4717 | apic_base_msr.host_initiated = true; |
4717 | kvm_set_apic_base(&vmx->vcpu, &apic_base_msr); | 4718 | kvm_set_apic_base(&vmx->vcpu, &apic_base_msr); |
@@ -5006,7 +5007,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu, | |||
5006 | if (emulate_instruction(vcpu, 0) == EMULATE_DONE) { | 5007 | if (emulate_instruction(vcpu, 0) == EMULATE_DONE) { |
5007 | if (vcpu->arch.halt_request) { | 5008 | if (vcpu->arch.halt_request) { |
5008 | vcpu->arch.halt_request = 0; | 5009 | vcpu->arch.halt_request = 0; |
5009 | return kvm_emulate_halt(vcpu); | 5010 | return kvm_vcpu_halt(vcpu); |
5010 | } | 5011 | } |
5011 | return 1; | 5012 | return 1; |
5012 | } | 5013 | } |
@@ -5071,6 +5072,10 @@ static int handle_exception(struct kvm_vcpu *vcpu) | |||
5071 | } | 5072 | } |
5072 | 5073 | ||
5073 | if (is_invalid_opcode(intr_info)) { | 5074 | if (is_invalid_opcode(intr_info)) { |
5075 | if (is_guest_mode(vcpu)) { | ||
5076 | kvm_queue_exception(vcpu, UD_VECTOR); | ||
5077 | return 1; | ||
5078 | } | ||
5074 | er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); | 5079 | er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); |
5075 | if (er != EMULATE_DONE) | 5080 | if (er != EMULATE_DONE) |
5076 | kvm_queue_exception(vcpu, UD_VECTOR); | 5081 | kvm_queue_exception(vcpu, UD_VECTOR); |
@@ -5090,9 +5095,10 @@ static int handle_exception(struct kvm_vcpu *vcpu) | |||
5090 | !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) { | 5095 | !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) { |
5091 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 5096 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
5092 | vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; | 5097 | vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; |
5093 | vcpu->run->internal.ndata = 2; | 5098 | vcpu->run->internal.ndata = 3; |
5094 | vcpu->run->internal.data[0] = vect_info; | 5099 | vcpu->run->internal.data[0] = vect_info; |
5095 | vcpu->run->internal.data[1] = intr_info; | 5100 | vcpu->run->internal.data[1] = intr_info; |
5101 | vcpu->run->internal.data[2] = error_code; | ||
5096 | return 0; | 5102 | return 0; |
5097 | } | 5103 | } |
5098 | 5104 | ||
@@ -5533,13 +5539,11 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu) | |||
5533 | 5539 | ||
5534 | static int handle_halt(struct kvm_vcpu *vcpu) | 5540 | static int handle_halt(struct kvm_vcpu *vcpu) |
5535 | { | 5541 | { |
5536 | skip_emulated_instruction(vcpu); | ||
5537 | return kvm_emulate_halt(vcpu); | 5542 | return kvm_emulate_halt(vcpu); |
5538 | } | 5543 | } |
5539 | 5544 | ||
5540 | static int handle_vmcall(struct kvm_vcpu *vcpu) | 5545 | static int handle_vmcall(struct kvm_vcpu *vcpu) |
5541 | { | 5546 | { |
5542 | skip_emulated_instruction(vcpu); | ||
5543 | kvm_emulate_hypercall(vcpu); | 5547 | kvm_emulate_hypercall(vcpu); |
5544 | return 1; | 5548 | return 1; |
5545 | } | 5549 | } |
@@ -5570,7 +5574,6 @@ static int handle_rdpmc(struct kvm_vcpu *vcpu) | |||
5570 | 5574 | ||
5571 | static int handle_wbinvd(struct kvm_vcpu *vcpu) | 5575 | static int handle_wbinvd(struct kvm_vcpu *vcpu) |
5572 | { | 5576 | { |
5573 | skip_emulated_instruction(vcpu); | ||
5574 | kvm_emulate_wbinvd(vcpu); | 5577 | kvm_emulate_wbinvd(vcpu); |
5575 | return 1; | 5578 | return 1; |
5576 | } | 5579 | } |
@@ -5828,7 +5831,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu) | |||
5828 | gpa_t gpa; | 5831 | gpa_t gpa; |
5829 | 5832 | ||
5830 | gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); | 5833 | gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); |
5831 | if (!kvm_io_bus_write(vcpu->kvm, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { | 5834 | if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { |
5832 | skip_emulated_instruction(vcpu); | 5835 | skip_emulated_instruction(vcpu); |
5833 | return 1; | 5836 | return 1; |
5834 | } | 5837 | } |
@@ -5909,7 +5912,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) | |||
5909 | 5912 | ||
5910 | if (vcpu->arch.halt_request) { | 5913 | if (vcpu->arch.halt_request) { |
5911 | vcpu->arch.halt_request = 0; | 5914 | vcpu->arch.halt_request = 0; |
5912 | ret = kvm_emulate_halt(vcpu); | 5915 | ret = kvm_vcpu_halt(vcpu); |
5913 | goto out; | 5916 | goto out; |
5914 | } | 5917 | } |
5915 | 5918 | ||
@@ -7318,21 +7321,21 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, | |||
7318 | else if (port < 0x10000) | 7321 | else if (port < 0x10000) |
7319 | bitmap = vmcs12->io_bitmap_b; | 7322 | bitmap = vmcs12->io_bitmap_b; |
7320 | else | 7323 | else |
7321 | return 1; | 7324 | return true; |
7322 | bitmap += (port & 0x7fff) / 8; | 7325 | bitmap += (port & 0x7fff) / 8; |
7323 | 7326 | ||
7324 | if (last_bitmap != bitmap) | 7327 | if (last_bitmap != bitmap) |
7325 | if (kvm_read_guest(vcpu->kvm, bitmap, &b, 1)) | 7328 | if (kvm_read_guest(vcpu->kvm, bitmap, &b, 1)) |
7326 | return 1; | 7329 | return true; |
7327 | if (b & (1 << (port & 7))) | 7330 | if (b & (1 << (port & 7))) |
7328 | return 1; | 7331 | return true; |
7329 | 7332 | ||
7330 | port++; | 7333 | port++; |
7331 | size--; | 7334 | size--; |
7332 | last_bitmap = bitmap; | 7335 | last_bitmap = bitmap; |
7333 | } | 7336 | } |
7334 | 7337 | ||
7335 | return 0; | 7338 | return false; |
7336 | } | 7339 | } |
7337 | 7340 | ||
7338 | /* | 7341 | /* |
@@ -7348,7 +7351,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, | |||
7348 | gpa_t bitmap; | 7351 | gpa_t bitmap; |
7349 | 7352 | ||
7350 | if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) | 7353 | if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) |
7351 | return 1; | 7354 | return true; |
7352 | 7355 | ||
7353 | /* | 7356 | /* |
7354 | * The MSR_BITMAP page is divided into four 1024-byte bitmaps, | 7357 | * The MSR_BITMAP page is divided into four 1024-byte bitmaps, |
@@ -7367,10 +7370,10 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, | |||
7367 | if (msr_index < 1024*8) { | 7370 | if (msr_index < 1024*8) { |
7368 | unsigned char b; | 7371 | unsigned char b; |
7369 | if (kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1)) | 7372 | if (kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1)) |
7370 | return 1; | 7373 | return true; |
7371 | return 1 & (b >> (msr_index & 7)); | 7374 | return 1 & (b >> (msr_index & 7)); |
7372 | } else | 7375 | } else |
7373 | return 1; /* let L1 handle the wrong parameter */ | 7376 | return true; /* let L1 handle the wrong parameter */ |
7374 | } | 7377 | } |
7375 | 7378 | ||
7376 | /* | 7379 | /* |
@@ -7392,7 +7395,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, | |||
7392 | case 0: | 7395 | case 0: |
7393 | if (vmcs12->cr0_guest_host_mask & | 7396 | if (vmcs12->cr0_guest_host_mask & |
7394 | (val ^ vmcs12->cr0_read_shadow)) | 7397 | (val ^ vmcs12->cr0_read_shadow)) |
7395 | return 1; | 7398 | return true; |
7396 | break; | 7399 | break; |
7397 | case 3: | 7400 | case 3: |
7398 | if ((vmcs12->cr3_target_count >= 1 && | 7401 | if ((vmcs12->cr3_target_count >= 1 && |
@@ -7403,37 +7406,37 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, | |||
7403 | vmcs12->cr3_target_value2 == val) || | 7406 | vmcs12->cr3_target_value2 == val) || |
7404 | (vmcs12->cr3_target_count >= 4 && | 7407 | (vmcs12->cr3_target_count >= 4 && |
7405 | vmcs12->cr3_target_value3 == val)) | 7408 | vmcs12->cr3_target_value3 == val)) |
7406 | return 0; | 7409 | return false; |
7407 | if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) | 7410 | if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) |
7408 | return 1; | 7411 | return true; |
7409 | break; | 7412 | break; |
7410 | case 4: | 7413 | case 4: |
7411 | if (vmcs12->cr4_guest_host_mask & | 7414 | if (vmcs12->cr4_guest_host_mask & |
7412 | (vmcs12->cr4_read_shadow ^ val)) | 7415 | (vmcs12->cr4_read_shadow ^ val)) |
7413 | return 1; | 7416 | return true; |
7414 | break; | 7417 | break; |
7415 | case 8: | 7418 | case 8: |
7416 | if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) | 7419 | if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) |
7417 | return 1; | 7420 | return true; |
7418 | break; | 7421 | break; |
7419 | } | 7422 | } |
7420 | break; | 7423 | break; |
7421 | case 2: /* clts */ | 7424 | case 2: /* clts */ |
7422 | if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && | 7425 | if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && |
7423 | (vmcs12->cr0_read_shadow & X86_CR0_TS)) | 7426 | (vmcs12->cr0_read_shadow & X86_CR0_TS)) |
7424 | return 1; | 7427 | return true; |
7425 | break; | 7428 | break; |
7426 | case 1: /* mov from cr */ | 7429 | case 1: /* mov from cr */ |
7427 | switch (cr) { | 7430 | switch (cr) { |
7428 | case 3: | 7431 | case 3: |
7429 | if (vmcs12->cpu_based_vm_exec_control & | 7432 | if (vmcs12->cpu_based_vm_exec_control & |
7430 | CPU_BASED_CR3_STORE_EXITING) | 7433 | CPU_BASED_CR3_STORE_EXITING) |
7431 | return 1; | 7434 | return true; |
7432 | break; | 7435 | break; |
7433 | case 8: | 7436 | case 8: |
7434 | if (vmcs12->cpu_based_vm_exec_control & | 7437 | if (vmcs12->cpu_based_vm_exec_control & |
7435 | CPU_BASED_CR8_STORE_EXITING) | 7438 | CPU_BASED_CR8_STORE_EXITING) |
7436 | return 1; | 7439 | return true; |
7437 | break; | 7440 | break; |
7438 | } | 7441 | } |
7439 | break; | 7442 | break; |
@@ -7444,14 +7447,14 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, | |||
7444 | */ | 7447 | */ |
7445 | if (vmcs12->cr0_guest_host_mask & 0xe & | 7448 | if (vmcs12->cr0_guest_host_mask & 0xe & |
7446 | (val ^ vmcs12->cr0_read_shadow)) | 7449 | (val ^ vmcs12->cr0_read_shadow)) |
7447 | return 1; | 7450 | return true; |
7448 | if ((vmcs12->cr0_guest_host_mask & 0x1) && | 7451 | if ((vmcs12->cr0_guest_host_mask & 0x1) && |
7449 | !(vmcs12->cr0_read_shadow & 0x1) && | 7452 | !(vmcs12->cr0_read_shadow & 0x1) && |
7450 | (val & 0x1)) | 7453 | (val & 0x1)) |
7451 | return 1; | 7454 | return true; |
7452 | break; | 7455 | break; |
7453 | } | 7456 | } |
7454 | return 0; | 7457 | return false; |
7455 | } | 7458 | } |
7456 | 7459 | ||
7457 | /* | 7460 | /* |
@@ -7474,48 +7477,48 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) | |||
7474 | KVM_ISA_VMX); | 7477 | KVM_ISA_VMX); |
7475 | 7478 | ||
7476 | if (vmx->nested.nested_run_pending) | 7479 | if (vmx->nested.nested_run_pending) |
7477 | return 0; | 7480 | return false; |
7478 | 7481 | ||
7479 | if (unlikely(vmx->fail)) { | 7482 | if (unlikely(vmx->fail)) { |
7480 | pr_info_ratelimited("%s failed vm entry %x\n", __func__, | 7483 | pr_info_ratelimited("%s failed vm entry %x\n", __func__, |
7481 | vmcs_read32(VM_INSTRUCTION_ERROR)); | 7484 | vmcs_read32(VM_INSTRUCTION_ERROR)); |
7482 | return 1; | 7485 | return true; |
7483 | } | 7486 | } |
7484 | 7487 | ||
7485 | switch (exit_reason) { | 7488 | switch (exit_reason) { |
7486 | case EXIT_REASON_EXCEPTION_NMI: | 7489 | case EXIT_REASON_EXCEPTION_NMI: |
7487 | if (!is_exception(intr_info)) | 7490 | if (!is_exception(intr_info)) |
7488 | return 0; | 7491 | return false; |
7489 | else if (is_page_fault(intr_info)) | 7492 | else if (is_page_fault(intr_info)) |
7490 | return enable_ept; | 7493 | return enable_ept; |
7491 | else if (is_no_device(intr_info) && | 7494 | else if (is_no_device(intr_info) && |
7492 | !(vmcs12->guest_cr0 & X86_CR0_TS)) | 7495 | !(vmcs12->guest_cr0 & X86_CR0_TS)) |
7493 | return 0; | 7496 | return false; |
7494 | return vmcs12->exception_bitmap & | 7497 | return vmcs12->exception_bitmap & |
7495 | (1u << (intr_info & INTR_INFO_VECTOR_MASK)); | 7498 | (1u << (intr_info & INTR_INFO_VECTOR_MASK)); |
7496 | case EXIT_REASON_EXTERNAL_INTERRUPT: | 7499 | case EXIT_REASON_EXTERNAL_INTERRUPT: |
7497 | return 0; | 7500 | return false; |
7498 | case EXIT_REASON_TRIPLE_FAULT: | 7501 | case EXIT_REASON_TRIPLE_FAULT: |
7499 | return 1; | 7502 | return true; |
7500 | case EXIT_REASON_PENDING_INTERRUPT: | 7503 | case EXIT_REASON_PENDING_INTERRUPT: |
7501 | return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING); | 7504 | return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING); |
7502 | case EXIT_REASON_NMI_WINDOW: | 7505 | case EXIT_REASON_NMI_WINDOW: |
7503 | return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING); | 7506 | return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING); |
7504 | case EXIT_REASON_TASK_SWITCH: | 7507 | case EXIT_REASON_TASK_SWITCH: |
7505 | return 1; | 7508 | return true; |
7506 | case EXIT_REASON_CPUID: | 7509 | case EXIT_REASON_CPUID: |
7507 | if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa) | 7510 | if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa) |
7508 | return 0; | 7511 | return false; |
7509 | return 1; | 7512 | return true; |
7510 | case EXIT_REASON_HLT: | 7513 | case EXIT_REASON_HLT: |
7511 | return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); | 7514 | return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); |
7512 | case EXIT_REASON_INVD: | 7515 | case EXIT_REASON_INVD: |
7513 | return 1; | 7516 | return true; |
7514 | case EXIT_REASON_INVLPG: | 7517 | case EXIT_REASON_INVLPG: |
7515 | return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); | 7518 | return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); |
7516 | case EXIT_REASON_RDPMC: | 7519 | case EXIT_REASON_RDPMC: |
7517 | return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); | 7520 | return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); |
7518 | case EXIT_REASON_RDTSC: | 7521 | case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP: |
7519 | return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); | 7522 | return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); |
7520 | case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: | 7523 | case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: |
7521 | case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: | 7524 | case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: |
@@ -7527,7 +7530,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) | |||
7527 | * VMX instructions trap unconditionally. This allows L1 to | 7530 | * VMX instructions trap unconditionally. This allows L1 to |
7528 | * emulate them for its L2 guest, i.e., allows 3-level nesting! | 7531 | * emulate them for its L2 guest, i.e., allows 3-level nesting! |
7529 | */ | 7532 | */ |
7530 | return 1; | 7533 | return true; |
7531 | case EXIT_REASON_CR_ACCESS: | 7534 | case EXIT_REASON_CR_ACCESS: |
7532 | return nested_vmx_exit_handled_cr(vcpu, vmcs12); | 7535 | return nested_vmx_exit_handled_cr(vcpu, vmcs12); |
7533 | case EXIT_REASON_DR_ACCESS: | 7536 | case EXIT_REASON_DR_ACCESS: |
@@ -7538,7 +7541,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) | |||
7538 | case EXIT_REASON_MSR_WRITE: | 7541 | case EXIT_REASON_MSR_WRITE: |
7539 | return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); | 7542 | return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); |
7540 | case EXIT_REASON_INVALID_STATE: | 7543 | case EXIT_REASON_INVALID_STATE: |
7541 | return 1; | 7544 | return true; |
7542 | case EXIT_REASON_MWAIT_INSTRUCTION: | 7545 | case EXIT_REASON_MWAIT_INSTRUCTION: |
7543 | return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); | 7546 | return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); |
7544 | case EXIT_REASON_MONITOR_INSTRUCTION: | 7547 | case EXIT_REASON_MONITOR_INSTRUCTION: |
@@ -7548,7 +7551,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) | |||
7548 | nested_cpu_has2(vmcs12, | 7551 | nested_cpu_has2(vmcs12, |
7549 | SECONDARY_EXEC_PAUSE_LOOP_EXITING); | 7552 | SECONDARY_EXEC_PAUSE_LOOP_EXITING); |
7550 | case EXIT_REASON_MCE_DURING_VMENTRY: | 7553 | case EXIT_REASON_MCE_DURING_VMENTRY: |
7551 | return 0; | 7554 | return false; |
7552 | case EXIT_REASON_TPR_BELOW_THRESHOLD: | 7555 | case EXIT_REASON_TPR_BELOW_THRESHOLD: |
7553 | return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW); | 7556 | return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW); |
7554 | case EXIT_REASON_APIC_ACCESS: | 7557 | case EXIT_REASON_APIC_ACCESS: |
@@ -7557,7 +7560,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) | |||
7557 | case EXIT_REASON_APIC_WRITE: | 7560 | case EXIT_REASON_APIC_WRITE: |
7558 | case EXIT_REASON_EOI_INDUCED: | 7561 | case EXIT_REASON_EOI_INDUCED: |
7559 | /* apic_write and eoi_induced should exit unconditionally. */ | 7562 | /* apic_write and eoi_induced should exit unconditionally. */ |
7560 | return 1; | 7563 | return true; |
7561 | case EXIT_REASON_EPT_VIOLATION: | 7564 | case EXIT_REASON_EPT_VIOLATION: |
7562 | /* | 7565 | /* |
7563 | * L0 always deals with the EPT violation. If nested EPT is | 7566 | * L0 always deals with the EPT violation. If nested EPT is |
@@ -7565,7 +7568,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) | |||
7565 | * missing in the guest EPT table (EPT12), the EPT violation | 7568 | * missing in the guest EPT table (EPT12), the EPT violation |
7566 | * will be injected with nested_ept_inject_page_fault() | 7569 | * will be injected with nested_ept_inject_page_fault() |
7567 | */ | 7570 | */ |
7568 | return 0; | 7571 | return false; |
7569 | case EXIT_REASON_EPT_MISCONFIG: | 7572 | case EXIT_REASON_EPT_MISCONFIG: |
7570 | /* | 7573 | /* |
7571 | * L2 never uses directly L1's EPT, but rather L0's own EPT | 7574 | * L2 never uses directly L1's EPT, but rather L0's own EPT |
@@ -7573,11 +7576,11 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) | |||
7573 | * (EPT on EPT). So any problems with the structure of the | 7576 | * (EPT on EPT). So any problems with the structure of the |
7574 | * table is L0's fault. | 7577 | * table is L0's fault. |
7575 | */ | 7578 | */ |
7576 | return 0; | 7579 | return false; |
7577 | case EXIT_REASON_WBINVD: | 7580 | case EXIT_REASON_WBINVD: |
7578 | return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING); | 7581 | return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING); |
7579 | case EXIT_REASON_XSETBV: | 7582 | case EXIT_REASON_XSETBV: |
7580 | return 1; | 7583 | return true; |
7581 | case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS: | 7584 | case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS: |
7582 | /* | 7585 | /* |
7583 | * This should never happen, since it is not possible to | 7586 | * This should never happen, since it is not possible to |
@@ -7587,7 +7590,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) | |||
7587 | */ | 7590 | */ |
7588 | return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); | 7591 | return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); |
7589 | default: | 7592 | default: |
7590 | return 1; | 7593 | return true; |
7591 | } | 7594 | } |
7592 | } | 7595 | } |
7593 | 7596 | ||
@@ -8522,6 +8525,9 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu) | |||
8522 | exec_control); | 8525 | exec_control); |
8523 | } | 8526 | } |
8524 | } | 8527 | } |
8528 | if (nested && !vmx->rdtscp_enabled) | ||
8529 | vmx->nested.nested_vmx_secondary_ctls_high &= | ||
8530 | ~SECONDARY_EXEC_RDTSCP; | ||
8525 | } | 8531 | } |
8526 | 8532 | ||
8527 | /* Exposing INVPCID only when PCID is exposed */ | 8533 | /* Exposing INVPCID only when PCID is exposed */ |
@@ -8622,10 +8628,11 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, | |||
8622 | struct vmcs12 *vmcs12) | 8628 | struct vmcs12 *vmcs12) |
8623 | { | 8629 | { |
8624 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 8630 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
8631 | int maxphyaddr = cpuid_maxphyaddr(vcpu); | ||
8625 | 8632 | ||
8626 | if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { | 8633 | if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { |
8627 | /* TODO: Also verify bits beyond physical address width are 0 */ | 8634 | if (!PAGE_ALIGNED(vmcs12->apic_access_addr) || |
8628 | if (!PAGE_ALIGNED(vmcs12->apic_access_addr)) | 8635 | vmcs12->apic_access_addr >> maxphyaddr) |
8629 | return false; | 8636 | return false; |
8630 | 8637 | ||
8631 | /* | 8638 | /* |
@@ -8641,8 +8648,8 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, | |||
8641 | } | 8648 | } |
8642 | 8649 | ||
8643 | if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { | 8650 | if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { |
8644 | /* TODO: Also verify bits beyond physical address width are 0 */ | 8651 | if (!PAGE_ALIGNED(vmcs12->virtual_apic_page_addr) || |
8645 | if (!PAGE_ALIGNED(vmcs12->virtual_apic_page_addr)) | 8652 | vmcs12->virtual_apic_page_addr >> maxphyaddr) |
8646 | return false; | 8653 | return false; |
8647 | 8654 | ||
8648 | if (vmx->nested.virtual_apic_page) /* shouldn't happen */ | 8655 | if (vmx->nested.virtual_apic_page) /* shouldn't happen */ |
@@ -8665,7 +8672,8 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, | |||
8665 | } | 8672 | } |
8666 | 8673 | ||
8667 | if (nested_cpu_has_posted_intr(vmcs12)) { | 8674 | if (nested_cpu_has_posted_intr(vmcs12)) { |
8668 | if (!IS_ALIGNED(vmcs12->posted_intr_desc_addr, 64)) | 8675 | if (!IS_ALIGNED(vmcs12->posted_intr_desc_addr, 64) || |
8676 | vmcs12->posted_intr_desc_addr >> maxphyaddr) | ||
8669 | return false; | 8677 | return false; |
8670 | 8678 | ||
8671 | if (vmx->nested.pi_desc_page) { /* shouldn't happen */ | 8679 | if (vmx->nested.pi_desc_page) { /* shouldn't happen */ |
@@ -8864,9 +8872,9 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, | |||
8864 | 8872 | ||
8865 | static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu, | 8873 | static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu, |
8866 | unsigned long count_field, | 8874 | unsigned long count_field, |
8867 | unsigned long addr_field, | 8875 | unsigned long addr_field) |
8868 | int maxphyaddr) | ||
8869 | { | 8876 | { |
8877 | int maxphyaddr; | ||
8870 | u64 count, addr; | 8878 | u64 count, addr; |
8871 | 8879 | ||
8872 | if (vmcs12_read_any(vcpu, count_field, &count) || | 8880 | if (vmcs12_read_any(vcpu, count_field, &count) || |
@@ -8876,6 +8884,7 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu, | |||
8876 | } | 8884 | } |
8877 | if (count == 0) | 8885 | if (count == 0) |
8878 | return 0; | 8886 | return 0; |
8887 | maxphyaddr = cpuid_maxphyaddr(vcpu); | ||
8879 | if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr || | 8888 | if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr || |
8880 | (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) { | 8889 | (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) { |
8881 | pr_warn_ratelimited( | 8890 | pr_warn_ratelimited( |
@@ -8889,19 +8898,16 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu, | |||
8889 | static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu, | 8898 | static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu, |
8890 | struct vmcs12 *vmcs12) | 8899 | struct vmcs12 *vmcs12) |
8891 | { | 8900 | { |
8892 | int maxphyaddr; | ||
8893 | |||
8894 | if (vmcs12->vm_exit_msr_load_count == 0 && | 8901 | if (vmcs12->vm_exit_msr_load_count == 0 && |
8895 | vmcs12->vm_exit_msr_store_count == 0 && | 8902 | vmcs12->vm_exit_msr_store_count == 0 && |
8896 | vmcs12->vm_entry_msr_load_count == 0) | 8903 | vmcs12->vm_entry_msr_load_count == 0) |
8897 | return 0; /* Fast path */ | 8904 | return 0; /* Fast path */ |
8898 | maxphyaddr = cpuid_maxphyaddr(vcpu); | ||
8899 | if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT, | 8905 | if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT, |
8900 | VM_EXIT_MSR_LOAD_ADDR, maxphyaddr) || | 8906 | VM_EXIT_MSR_LOAD_ADDR) || |
8901 | nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT, | 8907 | nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT, |
8902 | VM_EXIT_MSR_STORE_ADDR, maxphyaddr) || | 8908 | VM_EXIT_MSR_STORE_ADDR) || |
8903 | nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT, | 8909 | nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT, |
8904 | VM_ENTRY_MSR_LOAD_ADDR, maxphyaddr)) | 8910 | VM_ENTRY_MSR_LOAD_ADDR)) |
8905 | return -EINVAL; | 8911 | return -EINVAL; |
8906 | return 0; | 8912 | return 0; |
8907 | } | 8913 | } |
@@ -9151,8 +9157,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) | |||
9151 | exec_control &= ~SECONDARY_EXEC_RDTSCP; | 9157 | exec_control &= ~SECONDARY_EXEC_RDTSCP; |
9152 | /* Take the following fields only from vmcs12 */ | 9158 | /* Take the following fields only from vmcs12 */ |
9153 | exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | | 9159 | exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | |
9160 | SECONDARY_EXEC_RDTSCP | | ||
9154 | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | | 9161 | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | |
9155 | SECONDARY_EXEC_APIC_REGISTER_VIRT); | 9162 | SECONDARY_EXEC_APIC_REGISTER_VIRT); |
9156 | if (nested_cpu_has(vmcs12, | 9163 | if (nested_cpu_has(vmcs12, |
9157 | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) | 9164 | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) |
9158 | exec_control |= vmcs12->secondary_vm_exec_control; | 9165 | exec_control |= vmcs12->secondary_vm_exec_control; |
@@ -9385,7 +9392,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) | |||
9385 | } | 9392 | } |
9386 | 9393 | ||
9387 | if (!nested_get_vmcs12_pages(vcpu, vmcs12)) { | 9394 | if (!nested_get_vmcs12_pages(vcpu, vmcs12)) { |
9388 | /*TODO: Also verify bits beyond physical address width are 0*/ | ||
9389 | nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); | 9395 | nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); |
9390 | return 1; | 9396 | return 1; |
9391 | } | 9397 | } |
@@ -9524,7 +9530,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) | |||
9524 | vmcs12->launch_state = 1; | 9530 | vmcs12->launch_state = 1; |
9525 | 9531 | ||
9526 | if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) | 9532 | if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) |
9527 | return kvm_emulate_halt(vcpu); | 9533 | return kvm_vcpu_halt(vcpu); |
9528 | 9534 | ||
9529 | vmx->nested.nested_run_pending = 1; | 9535 | vmx->nested.nested_run_pending = 1; |
9530 | 9536 | ||
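Common thread in the vmx.c validation hunks above: guest-supplied physical addresses (apic_access_addr, virtual_apic_page_addr, posted_intr_desc_addr, and the MSR switch areas) must be suitably aligned and must not have bits set above the CPU's physical address width, now taken from cpuid_maxphyaddr() at the point of use. A free-standing sketch of that check; the function name and parameters below are illustrative, not a kernel API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Validate an L1-supplied guest-physical region: aligned, and both the first
 * and last byte fall below 2^maxphyaddr. */
static bool gpa_valid(uint64_t addr, uint64_t len, uint64_t align,
		      unsigned int maxphyaddr)
{
	if (addr & (align - 1))
		return false;                          /* misaligned            */
	if (addr >> maxphyaddr)
		return false;                          /* start beyond phys width */
	if (len && ((addr + len - 1) >> maxphyaddr))
		return false;                          /* end beyond phys width   */
	return true;
}

int main(void)
{
	printf("%d\n", gpa_valid(0x1000, 4096, 4096, 36));  /* 1                      */
	printf("%d\n", gpa_valid(0x1010, 4096, 4096, 36));  /* 0: not page aligned    */
	printf("%d\n", gpa_valid(1ULL << 40, 16, 16, 36));  /* 0: above maxphyaddr    */
	return 0;
}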
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 32bf19ef3115..2b2dd030ea3b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -801,6 +801,17 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) | |||
801 | } | 801 | } |
802 | EXPORT_SYMBOL_GPL(kvm_get_cr8); | 802 | EXPORT_SYMBOL_GPL(kvm_get_cr8); |
803 | 803 | ||
804 | static void kvm_update_dr0123(struct kvm_vcpu *vcpu) | ||
805 | { | ||
806 | int i; | ||
807 | |||
808 | if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { | ||
809 | for (i = 0; i < KVM_NR_DB_REGS; i++) | ||
810 | vcpu->arch.eff_db[i] = vcpu->arch.db[i]; | ||
811 | vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD; | ||
812 | } | ||
813 | } | ||
814 | |||
804 | static void kvm_update_dr6(struct kvm_vcpu *vcpu) | 815 | static void kvm_update_dr6(struct kvm_vcpu *vcpu) |
805 | { | 816 | { |
806 | if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) | 817 | if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) |
@@ -3149,6 +3160,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, | |||
3149 | return -EINVAL; | 3160 | return -EINVAL; |
3150 | 3161 | ||
3151 | memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); | 3162 | memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); |
3163 | kvm_update_dr0123(vcpu); | ||
3152 | vcpu->arch.dr6 = dbgregs->dr6; | 3164 | vcpu->arch.dr6 = dbgregs->dr6; |
3153 | kvm_update_dr6(vcpu); | 3165 | kvm_update_dr6(vcpu); |
3154 | vcpu->arch.dr7 = dbgregs->dr7; | 3166 | vcpu->arch.dr7 = dbgregs->dr7; |
@@ -4114,8 +4126,8 @@ static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, | |||
4114 | do { | 4126 | do { |
4115 | n = min(len, 8); | 4127 | n = min(len, 8); |
4116 | if (!(vcpu->arch.apic && | 4128 | if (!(vcpu->arch.apic && |
4117 | !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v)) | 4129 | !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) |
4118 | && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v)) | 4130 | && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v)) |
4119 | break; | 4131 | break; |
4120 | handled += n; | 4132 | handled += n; |
4121 | addr += n; | 4133 | addr += n; |
@@ -4134,8 +4146,9 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) | |||
4134 | do { | 4146 | do { |
4135 | n = min(len, 8); | 4147 | n = min(len, 8); |
4136 | if (!(vcpu->arch.apic && | 4148 | if (!(vcpu->arch.apic && |
4137 | !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v)) | 4149 | !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, |
4138 | && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v)) | 4150 | addr, n, v)) |
4151 | && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) | ||
4139 | break; | 4152 | break; |
4140 | trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v); | 4153 | trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v); |
4141 | handled += n; | 4154 | handled += n; |
@@ -4475,7 +4488,8 @@ mmio: | |||
4475 | return X86EMUL_CONTINUE; | 4488 | return X86EMUL_CONTINUE; |
4476 | } | 4489 | } |
4477 | 4490 | ||
4478 | int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr, | 4491 | static int emulator_read_write(struct x86_emulate_ctxt *ctxt, |
4492 | unsigned long addr, | ||
4479 | void *val, unsigned int bytes, | 4493 | void *val, unsigned int bytes, |
4480 | struct x86_exception *exception, | 4494 | struct x86_exception *exception, |
4481 | const struct read_write_emulator_ops *ops) | 4495 | const struct read_write_emulator_ops *ops) |
@@ -4538,7 +4552,7 @@ static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, | |||
4538 | exception, &read_emultor); | 4552 | exception, &read_emultor); |
4539 | } | 4553 | } |
4540 | 4554 | ||
4541 | int emulator_write_emulated(struct x86_emulate_ctxt *ctxt, | 4555 | static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt, |
4542 | unsigned long addr, | 4556 | unsigned long addr, |
4543 | const void *val, | 4557 | const void *val, |
4544 | unsigned int bytes, | 4558 | unsigned int bytes, |
@@ -4629,10 +4643,10 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) | |||
4629 | int r; | 4643 | int r; |
4630 | 4644 | ||
4631 | if (vcpu->arch.pio.in) | 4645 | if (vcpu->arch.pio.in) |
4632 | r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port, | 4646 | r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, |
4633 | vcpu->arch.pio.size, pd); | 4647 | vcpu->arch.pio.size, pd); |
4634 | else | 4648 | else |
4635 | r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS, | 4649 | r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, |
4636 | vcpu->arch.pio.port, vcpu->arch.pio.size, | 4650 | vcpu->arch.pio.port, vcpu->arch.pio.size, |
4637 | pd); | 4651 | pd); |
4638 | return r; | 4652 | return r; |
@@ -4705,7 +4719,7 @@ static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) | |||
4705 | kvm_mmu_invlpg(emul_to_vcpu(ctxt), address); | 4719 | kvm_mmu_invlpg(emul_to_vcpu(ctxt), address); |
4706 | } | 4720 | } |
4707 | 4721 | ||
4708 | int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) | 4722 | int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu) |
4709 | { | 4723 | { |
4710 | if (!need_emulate_wbinvd(vcpu)) | 4724 | if (!need_emulate_wbinvd(vcpu)) |
4711 | return X86EMUL_CONTINUE; | 4725 | return X86EMUL_CONTINUE; |
@@ -4722,19 +4736,29 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) | |||
4722 | wbinvd(); | 4736 | wbinvd(); |
4723 | return X86EMUL_CONTINUE; | 4737 | return X86EMUL_CONTINUE; |
4724 | } | 4738 | } |
4739 | |||
4740 | int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) | ||
4741 | { | ||
4742 | kvm_x86_ops->skip_emulated_instruction(vcpu); | ||
4743 | return kvm_emulate_wbinvd_noskip(vcpu); | ||
4744 | } | ||
4725 | EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); | 4745 | EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); |
4726 | 4746 | ||
4747 | |||
4748 | |||
4727 | static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) | 4749 | static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) |
4728 | { | 4750 | { |
4729 | kvm_emulate_wbinvd(emul_to_vcpu(ctxt)); | 4751 | kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt)); |
4730 | } | 4752 | } |
4731 | 4753 | ||
4732 | int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest) | 4754 | static int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, |
4755 | unsigned long *dest) | ||
4733 | { | 4756 | { |
4734 | return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); | 4757 | return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); |
4735 | } | 4758 | } |
4736 | 4759 | ||
4737 | int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value) | 4760 | static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, |
4761 | unsigned long value) | ||
4738 | { | 4762 | { |
4739 | 4763 | ||
4740 | return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value); | 4764 | return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value); |
@@ -5816,7 +5840,7 @@ void kvm_arch_exit(void) | |||
5816 | free_percpu(shared_msrs); | 5840 | free_percpu(shared_msrs); |
5817 | } | 5841 | } |
5818 | 5842 | ||
5819 | int kvm_emulate_halt(struct kvm_vcpu *vcpu) | 5843 | int kvm_vcpu_halt(struct kvm_vcpu *vcpu) |
5820 | { | 5844 | { |
5821 | ++vcpu->stat.halt_exits; | 5845 | ++vcpu->stat.halt_exits; |
5822 | if (irqchip_in_kernel(vcpu->kvm)) { | 5846 | if (irqchip_in_kernel(vcpu->kvm)) { |
@@ -5827,6 +5851,13 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu) | |||
5827 | return 0; | 5851 | return 0; |
5828 | } | 5852 | } |
5829 | } | 5853 | } |
5854 | EXPORT_SYMBOL_GPL(kvm_vcpu_halt); | ||
5855 | |||
5856 | int kvm_emulate_halt(struct kvm_vcpu *vcpu) | ||
5857 | { | ||
5858 | kvm_x86_ops->skip_emulated_instruction(vcpu); | ||
5859 | return kvm_vcpu_halt(vcpu); | ||
5860 | } | ||
5830 | EXPORT_SYMBOL_GPL(kvm_emulate_halt); | 5861 | EXPORT_SYMBOL_GPL(kvm_emulate_halt); |
5831 | 5862 | ||
5832 | int kvm_hv_hypercall(struct kvm_vcpu *vcpu) | 5863 | int kvm_hv_hypercall(struct kvm_vcpu *vcpu) |
@@ -5903,7 +5934,7 @@ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid) | |||
5903 | lapic_irq.dest_id = apicid; | 5934 | lapic_irq.dest_id = apicid; |
5904 | 5935 | ||
5905 | lapic_irq.delivery_mode = APIC_DM_REMRD; | 5936 | lapic_irq.delivery_mode = APIC_DM_REMRD; |
5906 | kvm_irq_delivery_to_apic(kvm, 0, &lapic_irq, NULL); | 5937 | kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL); |
5907 | } | 5938 | } |
5908 | 5939 | ||
5909 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) | 5940 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) |
@@ -5911,6 +5942,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) | |||
5911 | unsigned long nr, a0, a1, a2, a3, ret; | 5942 | unsigned long nr, a0, a1, a2, a3, ret; |
5912 | int op_64_bit, r = 1; | 5943 | int op_64_bit, r = 1; |
5913 | 5944 | ||
5945 | kvm_x86_ops->skip_emulated_instruction(vcpu); | ||
5946 | |||
5914 | if (kvm_hv_hypercall_enabled(vcpu->kvm)) | 5947 | if (kvm_hv_hypercall_enabled(vcpu->kvm)) |
5915 | return kvm_hv_hypercall(vcpu); | 5948 | return kvm_hv_hypercall(vcpu); |
5916 | 5949 | ||
@@ -6164,7 +6197,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, | |||
6164 | } | 6197 | } |
6165 | 6198 | ||
6166 | /* | 6199 | /* |
6167 | * Returns 1 to let __vcpu_run() continue the guest execution loop without | 6200 | * Returns 1 to let vcpu_run() continue the guest execution loop without |
6168 | * exiting to the userspace. Otherwise, the value will be returned to the | 6201 | * exiting to the userspace. Otherwise, the value will be returned to the |
6169 | * userspace. | 6202 | * userspace. |
6170 | */ | 6203 | */ |
@@ -6301,6 +6334,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) | |||
6301 | set_debugreg(vcpu->arch.eff_db[2], 2); | 6334 | set_debugreg(vcpu->arch.eff_db[2], 2); |
6302 | set_debugreg(vcpu->arch.eff_db[3], 3); | 6335 | set_debugreg(vcpu->arch.eff_db[3], 3); |
6303 | set_debugreg(vcpu->arch.dr6, 6); | 6336 | set_debugreg(vcpu->arch.dr6, 6); |
6337 | vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; | ||
6304 | } | 6338 | } |
6305 | 6339 | ||
6306 | trace_kvm_entry(vcpu->vcpu_id); | 6340 | trace_kvm_entry(vcpu->vcpu_id); |
@@ -6382,42 +6416,47 @@ out: | |||
6382 | return r; | 6416 | return r; |
6383 | } | 6417 | } |
6384 | 6418 | ||
6419 | static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) | ||
6420 | { | ||
6421 | if (!kvm_arch_vcpu_runnable(vcpu)) { | ||
6422 | srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); | ||
6423 | kvm_vcpu_block(vcpu); | ||
6424 | vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); | ||
6425 | if (!kvm_check_request(KVM_REQ_UNHALT, vcpu)) | ||
6426 | return 1; | ||
6427 | } | ||
6428 | |||
6429 | kvm_apic_accept_events(vcpu); | ||
6430 | switch(vcpu->arch.mp_state) { | ||
6431 | case KVM_MP_STATE_HALTED: | ||
6432 | vcpu->arch.pv.pv_unhalted = false; | ||
6433 | vcpu->arch.mp_state = | ||
6434 | KVM_MP_STATE_RUNNABLE; | ||
6435 | case KVM_MP_STATE_RUNNABLE: | ||
6436 | vcpu->arch.apf.halted = false; | ||
6437 | break; | ||
6438 | case KVM_MP_STATE_INIT_RECEIVED: | ||
6439 | break; | ||
6440 | default: | ||
6441 | return -EINTR; | ||
6442 | break; | ||
6443 | } | ||
6444 | return 1; | ||
6445 | } | ||
6385 | 6446 | ||
6386 | static int __vcpu_run(struct kvm_vcpu *vcpu) | 6447 | static int vcpu_run(struct kvm_vcpu *vcpu) |
6387 | { | 6448 | { |
6388 | int r; | 6449 | int r; |
6389 | struct kvm *kvm = vcpu->kvm; | 6450 | struct kvm *kvm = vcpu->kvm; |
6390 | 6451 | ||
6391 | vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); | 6452 | vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); |
6392 | 6453 | ||
6393 | r = 1; | 6454 | for (;;) { |
6394 | while (r > 0) { | ||
6395 | if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && | 6455 | if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && |
6396 | !vcpu->arch.apf.halted) | 6456 | !vcpu->arch.apf.halted) |
6397 | r = vcpu_enter_guest(vcpu); | 6457 | r = vcpu_enter_guest(vcpu); |
6398 | else { | 6458 | else |
6399 | srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); | 6459 | r = vcpu_block(kvm, vcpu); |
6400 | kvm_vcpu_block(vcpu); | ||
6401 | vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); | ||
6402 | if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { | ||
6403 | kvm_apic_accept_events(vcpu); | ||
6404 | switch(vcpu->arch.mp_state) { | ||
6405 | case KVM_MP_STATE_HALTED: | ||
6406 | vcpu->arch.pv.pv_unhalted = false; | ||
6407 | vcpu->arch.mp_state = | ||
6408 | KVM_MP_STATE_RUNNABLE; | ||
6409 | case KVM_MP_STATE_RUNNABLE: | ||
6410 | vcpu->arch.apf.halted = false; | ||
6411 | break; | ||
6412 | case KVM_MP_STATE_INIT_RECEIVED: | ||
6413 | break; | ||
6414 | default: | ||
6415 | r = -EINTR; | ||
6416 | break; | ||
6417 | } | ||
6418 | } | ||
6419 | } | ||
6420 | |||
6421 | if (r <= 0) | 6460 | if (r <= 0) |
6422 | break; | 6461 | break; |
6423 | 6462 | ||
@@ -6429,6 +6468,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) | |||
6429 | r = -EINTR; | 6468 | r = -EINTR; |
6430 | vcpu->run->exit_reason = KVM_EXIT_INTR; | 6469 | vcpu->run->exit_reason = KVM_EXIT_INTR; |
6431 | ++vcpu->stat.request_irq_exits; | 6470 | ++vcpu->stat.request_irq_exits; |
6471 | break; | ||
6432 | } | 6472 | } |
6433 | 6473 | ||
6434 | kvm_check_async_pf_completion(vcpu); | 6474 | kvm_check_async_pf_completion(vcpu); |
@@ -6437,6 +6477,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) | |||
6437 | r = -EINTR; | 6477 | r = -EINTR; |
6438 | vcpu->run->exit_reason = KVM_EXIT_INTR; | 6478 | vcpu->run->exit_reason = KVM_EXIT_INTR; |
6439 | ++vcpu->stat.signal_exits; | 6479 | ++vcpu->stat.signal_exits; |
6480 | break; | ||
6440 | } | 6481 | } |
6441 | if (need_resched()) { | 6482 | if (need_resched()) { |
6442 | srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); | 6483 | srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); |
@@ -6568,7 +6609,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
6568 | } else | 6609 | } else |
6569 | WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); | 6610 | WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); |
6570 | 6611 | ||
6571 | r = __vcpu_run(vcpu); | 6612 | r = vcpu_run(vcpu); |
6572 | 6613 | ||
6573 | out: | 6614 | out: |
6574 | post_kvm_run_save(vcpu); | 6615 | post_kvm_run_save(vcpu); |
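[Editor's note] Distilled shape of the restructuring above (not a verbatim copy): the blocked-vcpu handling moves out of the main loop into vcpu_block(), and the loop becomes a plain for(;;) that either enters the guest or blocks, breaking out as soon as a step returns a value <= 0.

	/* Simplified shape of the new loop; error/value details elided. */
	static int vcpu_run_sketch(struct kvm_vcpu *vcpu)
	{
		struct kvm *kvm = vcpu->kvm;
		int r;

		for (;;) {
			if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
			    !vcpu->arch.apf.halted)
				r = vcpu_enter_guest(vcpu);
			else
				r = vcpu_block(kvm, vcpu);	/* may sleep */
			if (r <= 0)
				break;
			/* The pending-event and signal checks now break out
			 * explicitly instead of relying on "while (r > 0)". */
		}
		return r;
	}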
@@ -7075,11 +7116,14 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu) | |||
7075 | kvm_clear_exception_queue(vcpu); | 7116 | kvm_clear_exception_queue(vcpu); |
7076 | 7117 | ||
7077 | memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); | 7118 | memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); |
7119 | kvm_update_dr0123(vcpu); | ||
7078 | vcpu->arch.dr6 = DR6_INIT; | 7120 | vcpu->arch.dr6 = DR6_INIT; |
7079 | kvm_update_dr6(vcpu); | 7121 | kvm_update_dr6(vcpu); |
7080 | vcpu->arch.dr7 = DR7_FIXED_1; | 7122 | vcpu->arch.dr7 = DR7_FIXED_1; |
7081 | kvm_update_dr7(vcpu); | 7123 | kvm_update_dr7(vcpu); |
7082 | 7124 | ||
7125 | vcpu->arch.cr2 = 0; | ||
7126 | |||
7083 | kvm_make_request(KVM_REQ_EVENT, vcpu); | 7127 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
7084 | vcpu->arch.apf.msr_val = 0; | 7128 | vcpu->arch.apf.msr_val = 0; |
7085 | vcpu->arch.st.msr_val = 0; | 7129 | vcpu->arch.st.msr_val = 0; |
@@ -7240,7 +7284,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
7240 | 7284 | ||
7241 | vcpu->arch.pv.pv_unhalted = false; | 7285 | vcpu->arch.pv.pv_unhalted = false; |
7242 | vcpu->arch.emulate_ctxt.ops = &emulate_ops; | 7286 | vcpu->arch.emulate_ctxt.ops = &emulate_ops; |
7243 | if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu)) | 7287 | if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_reset_bsp(vcpu)) |
7244 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; | 7288 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; |
7245 | else | 7289 | else |
7246 | vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; | 7290 | vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; |
@@ -7288,6 +7332,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
7288 | vcpu->arch.guest_supported_xcr0 = 0; | 7332 | vcpu->arch.guest_supported_xcr0 = 0; |
7289 | vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; | 7333 | vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; |
7290 | 7334 | ||
7335 | vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); | ||
7336 | |||
7291 | kvm_async_pf_hash_reset(vcpu); | 7337 | kvm_async_pf_hash_reset(vcpu); |
7292 | kvm_pmu_init(vcpu); | 7338 | kvm_pmu_init(vcpu); |
7293 | 7339 | ||
@@ -7428,7 +7474,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, | |||
7428 | 7474 | ||
7429 | for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { | 7475 | for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { |
7430 | if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) { | 7476 | if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) { |
7431 | kvm_kvfree(free->arch.rmap[i]); | 7477 | kvfree(free->arch.rmap[i]); |
7432 | free->arch.rmap[i] = NULL; | 7478 | free->arch.rmap[i] = NULL; |
7433 | } | 7479 | } |
7434 | if (i == 0) | 7480 | if (i == 0) |
@@ -7436,7 +7482,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, | |||
7436 | 7482 | ||
7437 | if (!dont || free->arch.lpage_info[i - 1] != | 7483 | if (!dont || free->arch.lpage_info[i - 1] != |
7438 | dont->arch.lpage_info[i - 1]) { | 7484 | dont->arch.lpage_info[i - 1]) { |
7439 | kvm_kvfree(free->arch.lpage_info[i - 1]); | 7485 | kvfree(free->arch.lpage_info[i - 1]); |
7440 | free->arch.lpage_info[i - 1] = NULL; | 7486 | free->arch.lpage_info[i - 1] = NULL; |
7441 | } | 7487 | } |
7442 | } | 7488 | } |
@@ -7490,12 +7536,12 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, | |||
7490 | 7536 | ||
7491 | out_free: | 7537 | out_free: |
7492 | for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { | 7538 | for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { |
7493 | kvm_kvfree(slot->arch.rmap[i]); | 7539 | kvfree(slot->arch.rmap[i]); |
7494 | slot->arch.rmap[i] = NULL; | 7540 | slot->arch.rmap[i] = NULL; |
7495 | if (i == 0) | 7541 | if (i == 0) |
7496 | continue; | 7542 | continue; |
7497 | 7543 | ||
7498 | kvm_kvfree(slot->arch.lpage_info[i - 1]); | 7544 | kvfree(slot->arch.lpage_info[i - 1]); |
7499 | slot->arch.lpage_info[i - 1] = NULL; | 7545 | slot->arch.lpage_info[i - 1] = NULL; |
7500 | } | 7546 | } |
7501 | return -ENOMEM; | 7547 | return -ENOMEM; |
@@ -7618,6 +7664,23 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, | |||
7618 | new = id_to_memslot(kvm->memslots, mem->slot); | 7664 | new = id_to_memslot(kvm->memslots, mem->slot); |
7619 | 7665 | ||
7620 | /* | 7666 | /* |
7667 | * Dirty logging tracks sptes in 4k granularity, meaning that large | ||
7668 | * sptes have to be split. If live migration is successful, the guest | ||
7669 | * in the source machine will be destroyed and large sptes will be | ||
7670 | * created in the destination. However, if the guest continues to run | ||
7671 | * in the source machine (for example if live migration fails), small | ||
7672 | * sptes will remain around and cause bad performance. | ||
7673 | * | ||
7674 | * Scan sptes if dirty logging has been stopped, dropping those | ||
7675 | * which can be collapsed into a single large-page spte. Later | ||
7676 | * page faults will create the large-page sptes. | ||
7677 | */ | ||
7678 | if ((change != KVM_MR_DELETE) && | ||
7679 | (old->flags & KVM_MEM_LOG_DIRTY_PAGES) && | ||
7680 | !(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) | ||
7681 | kvm_mmu_zap_collapsible_sptes(kvm, new); | ||
7682 | |||
7683 | /* | ||
7621 | * Set up write protection and/or dirty logging for the new slot. | 7684 | * Set up write protection and/or dirty logging for the new slot. |
7622 | * | 7685 | * |
7623 | * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of old slot have | 7686 | * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of old slot have |
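[Editor's note] The new comment describes the transition that triggers the scan: a slot that had KVM_MEM_LOG_DIRTY_PAGES set loses that flag without being deleted, which is what happens when live migration is aborted. A hedged userspace illustration (descriptors and addresses are placeholders) of turning dirty logging off on an existing slot, the exact case that now reaches kvm_mmu_zap_collapsible_sptes():

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Assumes vm_fd, slot, guest_phys, size and host_va describe a slot
	 * previously registered with KVM_MEM_LOG_DIRTY_PAGES set. */
	static int stop_dirty_logging(int vm_fd, __u32 slot, __u64 guest_phys,
				      __u64 size, void *host_va)
	{
		struct kvm_userspace_memory_region region;

		memset(&region, 0, sizeof(region));
		region.slot            = slot;
		region.flags           = 0;	/* dirty logging cleared */
		region.guest_phys_addr = guest_phys;
		region.memory_size     = size;
		region.userspace_addr  = (__u64)(unsigned long)host_va;

		return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
	}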
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index 9793322751e0..40d2473836c9 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c | |||
@@ -82,18 +82,15 @@ static notrace cycle_t vread_pvclock(int *mode) | |||
82 | cycle_t ret; | 82 | cycle_t ret; |
83 | u64 last; | 83 | u64 last; |
84 | u32 version; | 84 | u32 version; |
85 | u32 migrate_count; | ||
85 | u8 flags; | 86 | u8 flags; |
86 | unsigned cpu, cpu1; | 87 | unsigned cpu, cpu1; |
87 | 88 | ||
88 | 89 | ||
89 | /* | 90 | /* |
90 | * Note: hypervisor must guarantee that: | 91 | * When looping to get a consistent (time-info, tsc) pair, we |
91 | * 1. cpu ID number maps 1:1 to per-CPU pvclock time info. | 92 | * also need to deal with the possibility we can switch vcpus, |
92 | * 2. that per-CPU pvclock time info is updated if the | 93 | * so make sure we always re-fetch time-info for the current vcpu. |
93 | * underlying CPU changes. | ||
94 | * 3. that version is increased whenever underlying CPU | ||
95 | * changes. | ||
96 | * | ||
97 | */ | 94 | */ |
98 | do { | 95 | do { |
99 | cpu = __getcpu() & VGETCPU_CPU_MASK; | 96 | cpu = __getcpu() & VGETCPU_CPU_MASK; |
@@ -102,20 +99,27 @@ static notrace cycle_t vread_pvclock(int *mode) | |||
102 | * __getcpu() calls (Gleb). | 99 | * __getcpu() calls (Gleb). |
103 | */ | 100 | */ |
104 | 101 | ||
105 | pvti = get_pvti(cpu); | 102 | /* Make sure migrate_count will change if we leave the VCPU. */ |
103 | do { | ||
104 | pvti = get_pvti(cpu); | ||
105 | migrate_count = pvti->migrate_count; | ||
106 | |||
107 | cpu1 = cpu; | ||
108 | cpu = __getcpu() & VGETCPU_CPU_MASK; | ||
109 | } while (unlikely(cpu != cpu1)); | ||
106 | 110 | ||
107 | version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags); | 111 | version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags); |
108 | 112 | ||
109 | /* | 113 | /* |
110 | * Test we're still on the cpu as well as the version. | 114 | * Test we're still on the cpu as well as the version. |
111 | * We could have been migrated just after the first | 115 | * - We must read TSC of pvti's VCPU. |
112 | * vgetcpu but before fetching the version, so we | 116 | * - KVM doesn't follow the versioning protocol, so data could |
113 | * wouldn't notice a version change. | 117 | * change before version if we left the VCPU. |
114 | */ | 118 | */ |
115 | cpu1 = __getcpu() & VGETCPU_CPU_MASK; | 119 | smp_rmb(); |
116 | } while (unlikely(cpu != cpu1 || | 120 | } while (unlikely((pvti->pvti.version & 1) || |
117 | (pvti->pvti.version & 1) || | 121 | pvti->pvti.version != version || |
118 | pvti->pvti.version != version)); | 122 | pvti->migrate_count != migrate_count)); |
119 | 123 | ||
120 | if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT))) | 124 | if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT))) |
121 | *mode = VCLOCK_NONE; | 125 | *mode = VCLOCK_NONE; |
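[Editor's note] Condensed, the new vread_pvclock() read sequence is: (1) an inner loop pins down which vCPU's time info we are reading by re-checking the cpu number until it is stable and latching migrate_count; (2) the cycle read; (3) an smp_rmb() followed by an outer retry if an update was in flight (odd version), the version changed, or migrate_count moved, i.e. we left that vCPU. A condensed skeleton of the retry logic, with field details elided:

	/* Condensed retry skeleton after the change (not verbatim). */
	do {
		cpu = __getcpu() & VGETCPU_CPU_MASK;
		do {				/* pin pvti to this vcpu */
			pvti = get_pvti(cpu);
			migrate_count = pvti->migrate_count;
			cpu1 = cpu;
			cpu = __getcpu() & VGETCPU_CPU_MASK;
		} while (cpu != cpu1);

		version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);

		smp_rmb();			/* order the re-checks below */
	} while ((pvti->pvti.version & 1) ||		/* update in progress */
		 pvti->pvti.version != version ||	/* time info changed */
		 pvti->migrate_count != migrate_count);	/* we left the vcpu */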
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index b3f45a578344..e5966758c093 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h | |||
@@ -24,17 +24,14 @@ | |||
24 | #include <linux/workqueue.h> | 24 | #include <linux/workqueue.h> |
25 | 25 | ||
26 | struct arch_timer_kvm { | 26 | struct arch_timer_kvm { |
27 | #ifdef CONFIG_KVM_ARM_TIMER | ||
28 | /* Is the timer enabled */ | 27 | /* Is the timer enabled */ |
29 | bool enabled; | 28 | bool enabled; |
30 | 29 | ||
31 | /* Virtual offset */ | 30 | /* Virtual offset */ |
32 | cycle_t cntvoff; | 31 | cycle_t cntvoff; |
33 | #endif | ||
34 | }; | 32 | }; |
35 | 33 | ||
36 | struct arch_timer_cpu { | 34 | struct arch_timer_cpu { |
37 | #ifdef CONFIG_KVM_ARM_TIMER | ||
38 | /* Registers: control register, timer value */ | 35 | /* Registers: control register, timer value */ |
39 | u32 cntv_ctl; /* Saved/restored */ | 36 | u32 cntv_ctl; /* Saved/restored */ |
40 | cycle_t cntv_cval; /* Saved/restored */ | 37 | cycle_t cntv_cval; /* Saved/restored */ |
@@ -55,10 +52,8 @@ struct arch_timer_cpu { | |||
55 | 52 | ||
56 | /* Timer IRQ */ | 53 | /* Timer IRQ */ |
57 | const struct kvm_irq_level *irq; | 54 | const struct kvm_irq_level *irq; |
58 | #endif | ||
59 | }; | 55 | }; |
60 | 56 | ||
61 | #ifdef CONFIG_KVM_ARM_TIMER | ||
62 | int kvm_timer_hyp_init(void); | 57 | int kvm_timer_hyp_init(void); |
63 | void kvm_timer_enable(struct kvm *kvm); | 58 | void kvm_timer_enable(struct kvm *kvm); |
64 | void kvm_timer_init(struct kvm *kvm); | 59 | void kvm_timer_init(struct kvm *kvm); |
@@ -72,30 +67,6 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu); | |||
72 | u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid); | 67 | u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid); |
73 | int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value); | 68 | int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value); |
74 | 69 | ||
75 | #else | 70 | bool kvm_timer_should_fire(struct kvm_vcpu *vcpu); |
76 | static inline int kvm_timer_hyp_init(void) | ||
77 | { | ||
78 | return 0; | ||
79 | }; | ||
80 | |||
81 | static inline void kvm_timer_enable(struct kvm *kvm) {} | ||
82 | static inline void kvm_timer_init(struct kvm *kvm) {} | ||
83 | static inline void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, | ||
84 | const struct kvm_irq_level *irq) {} | ||
85 | static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {} | ||
86 | static inline void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) {} | ||
87 | static inline void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) {} | ||
88 | static inline void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) {} | ||
89 | |||
90 | static inline int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) | ||
91 | { | ||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | static inline u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid) | ||
96 | { | ||
97 | return 0; | ||
98 | } | ||
99 | #endif | ||
100 | 71 | ||
101 | #endif | 72 | #endif |
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 66203b268984..133ea00aa83b 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/irqreturn.h> | 24 | #include <linux/irqreturn.h> |
25 | #include <linux/spinlock.h> | 25 | #include <linux/spinlock.h> |
26 | #include <linux/types.h> | 26 | #include <linux/types.h> |
27 | #include <kvm/iodev.h> | ||
27 | 28 | ||
28 | #define VGIC_NR_IRQS_LEGACY 256 | 29 | #define VGIC_NR_IRQS_LEGACY 256 |
29 | #define VGIC_NR_SGIS 16 | 30 | #define VGIC_NR_SGIS 16 |
@@ -140,16 +141,21 @@ struct vgic_params { | |||
140 | }; | 141 | }; |
141 | 142 | ||
142 | struct vgic_vm_ops { | 143 | struct vgic_vm_ops { |
143 | bool (*handle_mmio)(struct kvm_vcpu *, struct kvm_run *, | ||
144 | struct kvm_exit_mmio *); | ||
145 | bool (*queue_sgi)(struct kvm_vcpu *, int irq); | 144 | bool (*queue_sgi)(struct kvm_vcpu *, int irq); |
146 | void (*add_sgi_source)(struct kvm_vcpu *, int irq, int source); | 145 | void (*add_sgi_source)(struct kvm_vcpu *, int irq, int source); |
147 | int (*init_model)(struct kvm *); | 146 | int (*init_model)(struct kvm *); |
148 | int (*map_resources)(struct kvm *, const struct vgic_params *); | 147 | int (*map_resources)(struct kvm *, const struct vgic_params *); |
149 | }; | 148 | }; |
150 | 149 | ||
150 | struct vgic_io_device { | ||
151 | gpa_t addr; | ||
152 | int len; | ||
153 | const struct vgic_io_range *reg_ranges; | ||
154 | struct kvm_vcpu *redist_vcpu; | ||
155 | struct kvm_io_device dev; | ||
156 | }; | ||
157 | |||
151 | struct vgic_dist { | 158 | struct vgic_dist { |
152 | #ifdef CONFIG_KVM_ARM_VGIC | ||
153 | spinlock_t lock; | 159 | spinlock_t lock; |
154 | bool in_kernel; | 160 | bool in_kernel; |
155 | bool ready; | 161 | bool ready; |
@@ -197,6 +203,9 @@ struct vgic_dist { | |||
197 | /* Level-triggered interrupt queued on VCPU interface */ | 203 | /* Level-triggered interrupt queued on VCPU interface */ |
198 | struct vgic_bitmap irq_queued; | 204 | struct vgic_bitmap irq_queued; |
199 | 205 | ||
206 | /* Interrupt was active when unqueued from the VCPU interface */ | ||
207 | struct vgic_bitmap irq_active; | ||
208 | |||
200 | /* Interrupt priority. Not used yet. */ | 209 | /* Interrupt priority. Not used yet. */ |
201 | struct vgic_bytemap irq_priority; | 210 | struct vgic_bytemap irq_priority; |
202 | 211 | ||
@@ -237,8 +246,12 @@ struct vgic_dist { | |||
237 | /* Bitmap indicating which CPU has something pending */ | 246 | /* Bitmap indicating which CPU has something pending */ |
238 | unsigned long *irq_pending_on_cpu; | 247 | unsigned long *irq_pending_on_cpu; |
239 | 248 | ||
249 | /* Bitmap indicating which CPU has active IRQs */ | ||
250 | unsigned long *irq_active_on_cpu; | ||
251 | |||
240 | struct vgic_vm_ops vm_ops; | 252 | struct vgic_vm_ops vm_ops; |
241 | #endif | 253 | struct vgic_io_device dist_iodev; |
254 | struct vgic_io_device *redist_iodevs; | ||
242 | }; | 255 | }; |
243 | 256 | ||
244 | struct vgic_v2_cpu_if { | 257 | struct vgic_v2_cpu_if { |
@@ -266,13 +279,18 @@ struct vgic_v3_cpu_if { | |||
266 | }; | 279 | }; |
267 | 280 | ||
268 | struct vgic_cpu { | 281 | struct vgic_cpu { |
269 | #ifdef CONFIG_KVM_ARM_VGIC | ||
270 | /* per IRQ to LR mapping */ | 282 | /* per IRQ to LR mapping */ |
271 | u8 *vgic_irq_lr_map; | 283 | u8 *vgic_irq_lr_map; |
272 | 284 | ||
273 | /* Pending interrupts on this VCPU */ | 285 | /* Pending/active/both interrupts on this VCPU */ |
274 | DECLARE_BITMAP( pending_percpu, VGIC_NR_PRIVATE_IRQS); | 286 | DECLARE_BITMAP( pending_percpu, VGIC_NR_PRIVATE_IRQS); |
287 | DECLARE_BITMAP( active_percpu, VGIC_NR_PRIVATE_IRQS); | ||
288 | DECLARE_BITMAP( pend_act_percpu, VGIC_NR_PRIVATE_IRQS); | ||
289 | |||
290 | /* Pending/active/both shared interrupts, dynamically sized */ | ||
275 | unsigned long *pending_shared; | 291 | unsigned long *pending_shared; |
292 | unsigned long *active_shared; | ||
293 | unsigned long *pend_act_shared; | ||
276 | 294 | ||
277 | /* Bitmap of used/free list registers */ | 295 | /* Bitmap of used/free list registers */ |
278 | DECLARE_BITMAP( lr_used, VGIC_V2_MAX_LRS); | 296 | DECLARE_BITMAP( lr_used, VGIC_V2_MAX_LRS); |
@@ -285,7 +303,6 @@ struct vgic_cpu { | |||
285 | struct vgic_v2_cpu_if vgic_v2; | 303 | struct vgic_v2_cpu_if vgic_v2; |
286 | struct vgic_v3_cpu_if vgic_v3; | 304 | struct vgic_v3_cpu_if vgic_v3; |
287 | }; | 305 | }; |
288 | #endif | ||
289 | }; | 306 | }; |
290 | 307 | ||
291 | #define LR_EMPTY 0xff | 308 | #define LR_EMPTY 0xff |
@@ -295,10 +312,7 @@ struct vgic_cpu { | |||
295 | 312 | ||
296 | struct kvm; | 313 | struct kvm; |
297 | struct kvm_vcpu; | 314 | struct kvm_vcpu; |
298 | struct kvm_run; | ||
299 | struct kvm_exit_mmio; | ||
300 | 315 | ||
301 | #ifdef CONFIG_KVM_ARM_VGIC | ||
302 | int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write); | 316 | int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write); |
303 | int kvm_vgic_hyp_init(void); | 317 | int kvm_vgic_hyp_init(void); |
304 | int kvm_vgic_map_resources(struct kvm *kvm); | 318 | int kvm_vgic_map_resources(struct kvm *kvm); |
@@ -312,8 +326,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num, | |||
312 | bool level); | 326 | bool level); |
313 | void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg); | 327 | void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg); |
314 | int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); | 328 | int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); |
315 | bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, | 329 | int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu); |
316 | struct kvm_exit_mmio *mmio); | ||
317 | 330 | ||
318 | #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) | 331 | #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) |
319 | #define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus)) | 332 | #define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus)) |
@@ -335,84 +348,4 @@ static inline int vgic_v3_probe(struct device_node *vgic_node, | |||
335 | } | 348 | } |
336 | #endif | 349 | #endif |
337 | 350 | ||
338 | #else | ||
339 | static inline int kvm_vgic_hyp_init(void) | ||
340 | { | ||
341 | return 0; | ||
342 | } | ||
343 | |||
344 | static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr) | ||
345 | { | ||
346 | return 0; | ||
347 | } | ||
348 | |||
349 | static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write) | ||
350 | { | ||
351 | return -ENXIO; | ||
352 | } | ||
353 | |||
354 | static inline int kvm_vgic_map_resources(struct kvm *kvm) | ||
355 | { | ||
356 | return 0; | ||
357 | } | ||
358 | |||
359 | static inline int kvm_vgic_create(struct kvm *kvm, u32 type) | ||
360 | { | ||
361 | return 0; | ||
362 | } | ||
363 | |||
364 | static inline void kvm_vgic_destroy(struct kvm *kvm) | ||
365 | { | ||
366 | } | ||
367 | |||
368 | static inline void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) | ||
369 | { | ||
370 | } | ||
371 | |||
372 | static inline int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) | ||
373 | { | ||
374 | return 0; | ||
375 | } | ||
376 | |||
377 | static inline void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) {} | ||
378 | static inline void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) {} | ||
379 | |||
380 | static inline int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, | ||
381 | unsigned int irq_num, bool level) | ||
382 | { | ||
383 | return 0; | ||
384 | } | ||
385 | |||
386 | static inline int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) | ||
387 | { | ||
388 | return 0; | ||
389 | } | ||
390 | |||
391 | static inline bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
392 | struct kvm_exit_mmio *mmio) | ||
393 | { | ||
394 | return false; | ||
395 | } | ||
396 | |||
397 | static inline int irqchip_in_kernel(struct kvm *kvm) | ||
398 | { | ||
399 | return 0; | ||
400 | } | ||
401 | |||
402 | static inline bool vgic_initialized(struct kvm *kvm) | ||
403 | { | ||
404 | return true; | ||
405 | } | ||
406 | |||
407 | static inline bool vgic_ready(struct kvm *kvm) | ||
408 | { | ||
409 | return true; | ||
410 | } | ||
411 | |||
412 | static inline int kvm_vgic_get_max_vcpus(void) | ||
413 | { | ||
414 | return KVM_MAX_VCPUS; | ||
415 | } | ||
416 | #endif | ||
417 | |||
418 | #endif | 351 | #endif |
diff --git a/virt/kvm/iodev.h b/include/kvm/iodev.h index 12fd3caffd2b..a6d208b916f5 100644 --- a/virt/kvm/iodev.h +++ b/include/kvm/iodev.h | |||
@@ -9,17 +9,17 @@ | |||
9 | * GNU General Public License for more details. | 9 | * GNU General Public License for more details. |
10 | * | 10 | * |
11 | * You should have received a copy of the GNU General Public License | 11 | * You should have received a copy of the GNU General Public License |
12 | * along with this program; if not, write to the Free Software | 12 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | */ | 13 | */ |
15 | 14 | ||
16 | #ifndef __KVM_IODEV_H__ | 15 | #ifndef __KVM_IODEV_H__ |
17 | #define __KVM_IODEV_H__ | 16 | #define __KVM_IODEV_H__ |
18 | 17 | ||
19 | #include <linux/kvm_types.h> | 18 | #include <linux/kvm_types.h> |
20 | #include <asm/errno.h> | 19 | #include <linux/errno.h> |
21 | 20 | ||
22 | struct kvm_io_device; | 21 | struct kvm_io_device; |
22 | struct kvm_vcpu; | ||
23 | 23 | ||
24 | /** | 24 | /** |
25 | * kvm_io_device_ops are called under kvm slots_lock. | 25 | * kvm_io_device_ops are called under kvm slots_lock. |
@@ -27,11 +27,13 @@ struct kvm_io_device; | |||
27 | * or non-zero to have it passed to the next device. | 27 | * or non-zero to have it passed to the next device. |
28 | **/ | 28 | **/ |
29 | struct kvm_io_device_ops { | 29 | struct kvm_io_device_ops { |
30 | int (*read)(struct kvm_io_device *this, | 30 | int (*read)(struct kvm_vcpu *vcpu, |
31 | struct kvm_io_device *this, | ||
31 | gpa_t addr, | 32 | gpa_t addr, |
32 | int len, | 33 | int len, |
33 | void *val); | 34 | void *val); |
34 | int (*write)(struct kvm_io_device *this, | 35 | int (*write)(struct kvm_vcpu *vcpu, |
36 | struct kvm_io_device *this, | ||
35 | gpa_t addr, | 37 | gpa_t addr, |
36 | int len, | 38 | int len, |
37 | const void *val); | 39 | const void *val); |
@@ -49,16 +51,20 @@ static inline void kvm_iodevice_init(struct kvm_io_device *dev, | |||
49 | dev->ops = ops; | 51 | dev->ops = ops; |
50 | } | 52 | } |
51 | 53 | ||
52 | static inline int kvm_iodevice_read(struct kvm_io_device *dev, | 54 | static inline int kvm_iodevice_read(struct kvm_vcpu *vcpu, |
53 | gpa_t addr, int l, void *v) | 55 | struct kvm_io_device *dev, gpa_t addr, |
56 | int l, void *v) | ||
54 | { | 57 | { |
55 | return dev->ops->read ? dev->ops->read(dev, addr, l, v) : -EOPNOTSUPP; | 58 | return dev->ops->read ? dev->ops->read(vcpu, dev, addr, l, v) |
59 | : -EOPNOTSUPP; | ||
56 | } | 60 | } |
57 | 61 | ||
58 | static inline int kvm_iodevice_write(struct kvm_io_device *dev, | 62 | static inline int kvm_iodevice_write(struct kvm_vcpu *vcpu, |
59 | gpa_t addr, int l, const void *v) | 63 | struct kvm_io_device *dev, gpa_t addr, |
64 | int l, const void *v) | ||
60 | { | 65 | { |
61 | return dev->ops->write ? dev->ops->write(dev, addr, l, v) : -EOPNOTSUPP; | 66 | return dev->ops->write ? dev->ops->write(vcpu, dev, addr, l, v) |
67 | : -EOPNOTSUPP; | ||
62 | } | 68 | } |
63 | 69 | ||
64 | static inline void kvm_iodevice_destructor(struct kvm_io_device *dev) | 70 | static inline void kvm_iodevice_destructor(struct kvm_io_device *dev) |
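[Editor's note] With the vcpu argument now threaded through kvm_io_device_ops, an in-kernel MMIO device handler can see which vcpu performed the access. A minimal sketch of a device using the new signatures (the device and its behaviour are made up for illustration):

	/* Hypothetical read-as-zero, write-ignored device. */
	static int zero_dev_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
				 gpa_t addr, int len, void *val)
	{
		memset(val, 0, len);
		return 0;
	}

	static int zero_dev_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
				  gpa_t addr, int len, const void *val)
	{
		return 0;		/* silently ignore writes */
	}

	static const struct kvm_io_device_ops zero_dev_ops = {
		.read  = zero_dev_read,
		.write = zero_dev_write,
	};

The device would be set up with kvm_iodevice_init(&dev, &zero_dev_ops) and registered on a bus with kvm_io_bus_register_dev() as before; only the handler signatures change.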
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index d12b2104d19b..82af5d0b996e 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -165,12 +165,12 @@ enum kvm_bus { | |||
165 | KVM_NR_BUSES | 165 | KVM_NR_BUSES |
166 | }; | 166 | }; |
167 | 167 | ||
168 | int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, | 168 | int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, |
169 | int len, const void *val); | 169 | int len, const void *val); |
170 | int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, | 170 | int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, |
171 | int len, const void *val, long cookie); | 171 | gpa_t addr, int len, const void *val, long cookie); |
172 | int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, | 172 | int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, |
173 | void *val); | 173 | int len, void *val); |
174 | int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, | 174 | int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, |
175 | int len, struct kvm_io_device *dev); | 175 | int len, struct kvm_io_device *dev); |
176 | int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, | 176 | int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, |
@@ -658,7 +658,6 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); | |||
658 | int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); | 658 | int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); |
659 | 659 | ||
660 | void *kvm_kvzalloc(unsigned long size); | 660 | void *kvm_kvzalloc(unsigned long size); |
661 | void kvm_kvfree(const void *addr); | ||
662 | 661 | ||
663 | #ifndef __KVM_HAVE_ARCH_VM_ALLOC | 662 | #ifndef __KVM_HAVE_ARCH_VM_ALLOC |
664 | static inline struct kvm *kvm_arch_alloc_vm(void) | 663 | static inline struct kvm *kvm_arch_alloc_vm(void) |
@@ -700,6 +699,20 @@ static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) | |||
700 | #endif | 699 | #endif |
701 | } | 700 | } |
702 | 701 | ||
702 | #ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED | ||
703 | /* | ||
704 | * returns true if the virtual interrupt controller is initialized and | ||
705 | * ready to accept virtual IRQ. On some architectures the virtual interrupt | ||
706 | * controller is dynamically instantiated and this is not always true. | ||
707 | */ | ||
708 | bool kvm_arch_intc_initialized(struct kvm *kvm); | ||
709 | #else | ||
710 | static inline bool kvm_arch_intc_initialized(struct kvm *kvm) | ||
711 | { | ||
712 | return true; | ||
713 | } | ||
714 | #endif | ||
715 | |||
703 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type); | 716 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type); |
704 | void kvm_arch_destroy_vm(struct kvm *kvm); | 717 | void kvm_arch_destroy_vm(struct kvm *kvm); |
705 | void kvm_arch_sync_events(struct kvm *kvm); | 718 | void kvm_arch_sync_events(struct kvm *kvm); |
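[Editor's note] Architectures that define __KVM_HAVE_ARCH_INTC_INITIALIZED supply the real check; everyone else keeps the inline stub returning true. On arm/arm64, where the interrupt controller is created dynamically, the natural implementation (sketched here, not part of this hunk) simply reports whether the VGIC is up:

	/* Sketch of an arch implementation, assuming the arm/arm64 case. */
	bool kvm_arch_intc_initialized(struct kvm *kvm)
	{
		return vgic_initialized(kvm);
	}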
@@ -969,11 +982,16 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) | |||
969 | #endif /* CONFIG_HAVE_KVM_EVENTFD */ | 982 | #endif /* CONFIG_HAVE_KVM_EVENTFD */ |
970 | 983 | ||
971 | #ifdef CONFIG_KVM_APIC_ARCHITECTURE | 984 | #ifdef CONFIG_KVM_APIC_ARCHITECTURE |
972 | static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) | 985 | static inline bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu) |
973 | { | 986 | { |
974 | return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id; | 987 | return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id; |
975 | } | 988 | } |
976 | 989 | ||
990 | static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) | ||
991 | { | ||
992 | return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; | ||
993 | } | ||
994 | |||
977 | bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu); | 995 | bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu); |
978 | 996 | ||
979 | #else | 997 | #else |
diff --git a/include/linux/sched.h b/include/linux/sched.h index a419b65770d6..51348f77e431 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -176,6 +176,14 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); | |||
176 | extern void calc_global_load(unsigned long ticks); | 176 | extern void calc_global_load(unsigned long ticks); |
177 | extern void update_cpu_load_nohz(void); | 177 | extern void update_cpu_load_nohz(void); |
178 | 178 | ||
179 | /* Notifier for when a task gets migrated to a new CPU */ | ||
180 | struct task_migration_notifier { | ||
181 | struct task_struct *task; | ||
182 | int from_cpu; | ||
183 | int to_cpu; | ||
184 | }; | ||
185 | extern void register_task_migration_notifier(struct notifier_block *n); | ||
186 | |||
179 | extern unsigned long get_parent_ip(unsigned long addr); | 187 | extern unsigned long get_parent_ip(unsigned long addr); |
180 | 188 | ||
181 | extern void dump_cpu_task(int cpu); | 189 | extern void dump_cpu_task(int cpu); |
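[Editor's note] Consumers of the new hook register a notifier_block and receive a struct task_migration_notifier when a task changes CPU. A sketch of such a consumer (all names here are hypothetical; the real user in this series lives in the x86 pvclock code):

	/* Assumes <linux/notifier.h> and <linux/sched.h>. */
	static int demo_migration_cb(struct notifier_block *nb,
				     unsigned long unused, void *data)
	{
		struct task_migration_notifier *tmn = data;

		pr_debug("task %d moved cpu %d -> %d\n",
			 task_pid_nr(tmn->task), tmn->from_cpu, tmn->to_cpu);
		return NOTIFY_OK;
	}

	static struct notifier_block demo_migration_nb = {
		.notifier_call = demo_migration_cb,
	};

	/* in some init path: */
	register_task_migration_notifier(&demo_migration_nb);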
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 805570650062..f574d7be7631 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h | |||
@@ -147,6 +147,16 @@ struct kvm_pit_config { | |||
147 | 147 | ||
148 | #define KVM_PIT_SPEAKER_DUMMY 1 | 148 | #define KVM_PIT_SPEAKER_DUMMY 1 |
149 | 149 | ||
150 | struct kvm_s390_skeys { | ||
151 | __u64 start_gfn; | ||
152 | __u64 count; | ||
153 | __u64 skeydata_addr; | ||
154 | __u32 flags; | ||
155 | __u32 reserved[9]; | ||
156 | }; | ||
157 | #define KVM_S390_GET_SKEYS_NONE 1 | ||
158 | #define KVM_S390_SKEYS_MAX 1048576 | ||
159 | |||
150 | #define KVM_EXIT_UNKNOWN 0 | 160 | #define KVM_EXIT_UNKNOWN 0 |
151 | #define KVM_EXIT_EXCEPTION 1 | 161 | #define KVM_EXIT_EXCEPTION 1 |
152 | #define KVM_EXIT_IO 2 | 162 | #define KVM_EXIT_IO 2 |
@@ -172,6 +182,7 @@ struct kvm_pit_config { | |||
172 | #define KVM_EXIT_S390_TSCH 22 | 182 | #define KVM_EXIT_S390_TSCH 22 |
173 | #define KVM_EXIT_EPR 23 | 183 | #define KVM_EXIT_EPR 23 |
174 | #define KVM_EXIT_SYSTEM_EVENT 24 | 184 | #define KVM_EXIT_SYSTEM_EVENT 24 |
185 | #define KVM_EXIT_S390_STSI 25 | ||
175 | 186 | ||
176 | /* For KVM_EXIT_INTERNAL_ERROR */ | 187 | /* For KVM_EXIT_INTERNAL_ERROR */ |
177 | /* Emulate instruction failed. */ | 188 | /* Emulate instruction failed. */ |
@@ -309,6 +320,15 @@ struct kvm_run { | |||
309 | __u32 type; | 320 | __u32 type; |
310 | __u64 flags; | 321 | __u64 flags; |
311 | } system_event; | 322 | } system_event; |
323 | /* KVM_EXIT_S390_STSI */ | ||
324 | struct { | ||
325 | __u64 addr; | ||
326 | __u8 ar; | ||
327 | __u8 reserved; | ||
328 | __u8 fc; | ||
329 | __u8 sel1; | ||
330 | __u16 sel2; | ||
331 | } s390_stsi; | ||
312 | /* Fix the size of the union. */ | 332 | /* Fix the size of the union. */ |
313 | char padding[256]; | 333 | char padding[256]; |
314 | }; | 334 | }; |
@@ -324,7 +344,7 @@ struct kvm_run { | |||
324 | __u64 kvm_dirty_regs; | 344 | __u64 kvm_dirty_regs; |
325 | union { | 345 | union { |
326 | struct kvm_sync_regs regs; | 346 | struct kvm_sync_regs regs; |
327 | char padding[1024]; | 347 | char padding[2048]; |
328 | } s; | 348 | } s; |
329 | }; | 349 | }; |
330 | 350 | ||
@@ -365,6 +385,24 @@ struct kvm_translation { | |||
365 | __u8 pad[5]; | 385 | __u8 pad[5]; |
366 | }; | 386 | }; |
367 | 387 | ||
388 | /* for KVM_S390_MEM_OP */ | ||
389 | struct kvm_s390_mem_op { | ||
390 | /* in */ | ||
391 | __u64 gaddr; /* the guest address */ | ||
392 | __u64 flags; /* flags */ | ||
393 | __u32 size; /* amount of bytes */ | ||
394 | __u32 op; /* type of operation */ | ||
395 | __u64 buf; /* buffer in userspace */ | ||
396 | __u8 ar; /* the access register number */ | ||
397 | __u8 reserved[31]; /* should be set to 0 */ | ||
398 | }; | ||
399 | /* types for kvm_s390_mem_op->op */ | ||
400 | #define KVM_S390_MEMOP_LOGICAL_READ 0 | ||
401 | #define KVM_S390_MEMOP_LOGICAL_WRITE 1 | ||
402 | /* flags for kvm_s390_mem_op->flags */ | ||
403 | #define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0) | ||
404 | #define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1) | ||
405 | |||
368 | /* for KVM_INTERRUPT */ | 406 | /* for KVM_INTERRUPT */ |
369 | struct kvm_interrupt { | 407 | struct kvm_interrupt { |
370 | /* in */ | 408 | /* in */ |
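[Editor's note] A hedged userspace illustration of the new KVM_S390_MEM_OP interface (addresses and sizes are placeholders): reading guest memory at a logical address through the vcpu file descriptor, as advertised by KVM_CAP_S390_MEM_OP.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Error handling elided; vcpu_fd comes from KVM_CREATE_VCPU. */
	static int read_guest_4k(int vcpu_fd, void *buf)
	{
		struct kvm_s390_mem_op op;

		memset(&op, 0, sizeof(op));
		op.gaddr = 0x1000;			/* guest logical address */
		op.size  = 4096;
		op.op    = KVM_S390_MEMOP_LOGICAL_READ;
		op.buf   = (__u64)(unsigned long)buf;	/* userspace buffer */
		op.ar    = 0;				/* access register 0 */

		return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
	}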
@@ -520,6 +558,13 @@ struct kvm_s390_irq { | |||
520 | } u; | 558 | } u; |
521 | }; | 559 | }; |
522 | 560 | ||
561 | struct kvm_s390_irq_state { | ||
562 | __u64 buf; | ||
563 | __u32 flags; | ||
564 | __u32 len; | ||
565 | __u32 reserved[4]; | ||
566 | }; | ||
567 | |||
523 | /* for KVM_SET_GUEST_DEBUG */ | 568 | /* for KVM_SET_GUEST_DEBUG */ |
524 | 569 | ||
525 | #define KVM_GUESTDBG_ENABLE 0x00000001 | 570 | #define KVM_GUESTDBG_ENABLE 0x00000001 |
@@ -760,6 +805,14 @@ struct kvm_ppc_smmu_info { | |||
760 | #define KVM_CAP_PPC_ENABLE_HCALL 104 | 805 | #define KVM_CAP_PPC_ENABLE_HCALL 104 |
761 | #define KVM_CAP_CHECK_EXTENSION_VM 105 | 806 | #define KVM_CAP_CHECK_EXTENSION_VM 105 |
762 | #define KVM_CAP_S390_USER_SIGP 106 | 807 | #define KVM_CAP_S390_USER_SIGP 106 |
808 | #define KVM_CAP_S390_VECTOR_REGISTERS 107 | ||
809 | #define KVM_CAP_S390_MEM_OP 108 | ||
810 | #define KVM_CAP_S390_USER_STSI 109 | ||
811 | #define KVM_CAP_S390_SKEYS 110 | ||
812 | #define KVM_CAP_MIPS_FPU 111 | ||
813 | #define KVM_CAP_MIPS_MSA 112 | ||
814 | #define KVM_CAP_S390_INJECT_IRQ 113 | ||
815 | #define KVM_CAP_S390_IRQ_STATE 114 | ||
763 | 816 | ||
764 | #ifdef KVM_CAP_IRQ_ROUTING | 817 | #ifdef KVM_CAP_IRQ_ROUTING |
765 | 818 | ||
@@ -1135,6 +1188,16 @@ struct kvm_s390_ucas_mapping { | |||
1135 | #define KVM_ARM_VCPU_INIT _IOW(KVMIO, 0xae, struct kvm_vcpu_init) | 1188 | #define KVM_ARM_VCPU_INIT _IOW(KVMIO, 0xae, struct kvm_vcpu_init) |
1136 | #define KVM_ARM_PREFERRED_TARGET _IOR(KVMIO, 0xaf, struct kvm_vcpu_init) | 1189 | #define KVM_ARM_PREFERRED_TARGET _IOR(KVMIO, 0xaf, struct kvm_vcpu_init) |
1137 | #define KVM_GET_REG_LIST _IOWR(KVMIO, 0xb0, struct kvm_reg_list) | 1190 | #define KVM_GET_REG_LIST _IOWR(KVMIO, 0xb0, struct kvm_reg_list) |
1191 | /* Available with KVM_CAP_S390_MEM_OP */ | ||
1192 | #define KVM_S390_MEM_OP _IOW(KVMIO, 0xb1, struct kvm_s390_mem_op) | ||
1193 | /* Available with KVM_CAP_S390_SKEYS */ | ||
1194 | #define KVM_S390_GET_SKEYS _IOW(KVMIO, 0xb2, struct kvm_s390_skeys) | ||
1195 | #define KVM_S390_SET_SKEYS _IOW(KVMIO, 0xb3, struct kvm_s390_skeys) | ||
1196 | /* Available with KVM_CAP_S390_INJECT_IRQ */ | ||
1197 | #define KVM_S390_IRQ _IOW(KVMIO, 0xb4, struct kvm_s390_irq) | ||
1198 | /* Available with KVM_CAP_S390_IRQ_STATE */ | ||
1199 | #define KVM_S390_SET_IRQ_STATE _IOW(KVMIO, 0xb5, struct kvm_s390_irq_state) | ||
1200 | #define KVM_S390_GET_IRQ_STATE _IOW(KVMIO, 0xb6, struct kvm_s390_irq_state) | ||
1138 | 1201 | ||
1139 | #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) | 1202 | #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) |
1140 | #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) | 1203 | #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 62671f53202a..3d5f6f6d14c2 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -996,6 +996,13 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) | |||
996 | rq_clock_skip_update(rq, true); | 996 | rq_clock_skip_update(rq, true); |
997 | } | 997 | } |
998 | 998 | ||
999 | static ATOMIC_NOTIFIER_HEAD(task_migration_notifier); | ||
1000 | |||
1001 | void register_task_migration_notifier(struct notifier_block *n) | ||
1002 | { | ||
1003 | atomic_notifier_chain_register(&task_migration_notifier, n); | ||
1004 | } | ||
1005 | |||
999 | #ifdef CONFIG_SMP | 1006 | #ifdef CONFIG_SMP |
1000 | void set_task_cpu(struct task_struct *p, unsigned int new_cpu) | 1007 | void set_task_cpu(struct task_struct *p, unsigned int new_cpu) |
1001 | { | 1008 | { |
@@ -1026,10 +1033,18 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) | |||
1026 | trace_sched_migrate_task(p, new_cpu); | 1033 | trace_sched_migrate_task(p, new_cpu); |
1027 | 1034 | ||
1028 | if (task_cpu(p) != new_cpu) { | 1035 | if (task_cpu(p) != new_cpu) { |
1036 | struct task_migration_notifier tmn; | ||
1037 | |||
1029 | if (p->sched_class->migrate_task_rq) | 1038 | if (p->sched_class->migrate_task_rq) |
1030 | p->sched_class->migrate_task_rq(p, new_cpu); | 1039 | p->sched_class->migrate_task_rq(p, new_cpu); |
1031 | p->se.nr_migrations++; | 1040 | p->se.nr_migrations++; |
1032 | perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0); | 1041 | perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0); |
1042 | |||
1043 | tmn.task = p; | ||
1044 | tmn.from_cpu = task_cpu(p); | ||
1045 | tmn.to_cpu = new_cpu; | ||
1046 | |||
1047 | atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn); | ||
1033 | } | 1048 | } |
1034 | 1049 | ||
1035 | __set_task_cpu(p, new_cpu); | 1050 | __set_task_cpu(p, new_cpu); |
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 6e54f3542126..98c95f2fcba4 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c | |||
@@ -85,13 +85,22 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id) | |||
85 | return IRQ_HANDLED; | 85 | return IRQ_HANDLED; |
86 | } | 86 | } |
87 | 87 | ||
88 | /* | ||
89 | * Work function for handling the backup timer that we schedule when a vcpu is | ||
90 | * no longer running, but had a timer programmed to fire in the future. | ||
91 | */ | ||
88 | static void kvm_timer_inject_irq_work(struct work_struct *work) | 92 | static void kvm_timer_inject_irq_work(struct work_struct *work) |
89 | { | 93 | { |
90 | struct kvm_vcpu *vcpu; | 94 | struct kvm_vcpu *vcpu; |
91 | 95 | ||
92 | vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired); | 96 | vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired); |
93 | vcpu->arch.timer_cpu.armed = false; | 97 | vcpu->arch.timer_cpu.armed = false; |
94 | kvm_timer_inject_irq(vcpu); | 98 | |
99 | /* | ||
100 | * If the vcpu is blocked we want to wake it up so that it will see | ||
101 | * the timer has expired when entering the guest. | ||
102 | */ | ||
103 | kvm_vcpu_kick(vcpu); | ||
95 | } | 104 | } |
96 | 105 | ||
97 | static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt) | 106 | static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt) |
@@ -102,6 +111,21 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt) | |||
102 | return HRTIMER_NORESTART; | 111 | return HRTIMER_NORESTART; |
103 | } | 112 | } |
104 | 113 | ||
114 | bool kvm_timer_should_fire(struct kvm_vcpu *vcpu) | ||
115 | { | ||
116 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | ||
117 | cycle_t cval, now; | ||
118 | |||
119 | if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) || | ||
120 | !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE)) | ||
121 | return false; | ||
122 | |||
123 | cval = timer->cntv_cval; | ||
124 | now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff; | ||
125 | |||
126 | return cval <= now; | ||
127 | } | ||
128 | |||
105 | /** | 129 | /** |
106 | * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu | 130 | * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu |
107 | * @vcpu: The vcpu pointer | 131 | * @vcpu: The vcpu pointer |
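[Editor's note] kvm_timer_should_fire() factors the "has the virtual timer already expired?" test out of kvm_timer_sync_hwstate() so that other paths can ask the same question, for example when deciding whether a blocked vcpu has a pending timer and should be woken. A sketch of such a caller (the actual call site is outside this hunk and assumed here):

	/* Sketch: treat an expired-but-not-yet-injected timer as a pending
	 * interrupt when deciding whether to wake a blocked vcpu. */
	int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
	{
		return kvm_timer_should_fire(vcpu);
	}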
@@ -119,6 +143,13 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) | |||
119 | * populate the CPU timer again. | 143 | * populate the CPU timer again. |
120 | */ | 144 | */ |
121 | timer_disarm(timer); | 145 | timer_disarm(timer); |
146 | |||
147 | /* | ||
148 | * If the timer expired while we were not scheduled, now is the time | ||
149 | * to inject it. | ||
150 | */ | ||
151 | if (kvm_timer_should_fire(vcpu)) | ||
152 | kvm_timer_inject_irq(vcpu); | ||
122 | } | 153 | } |
123 | 154 | ||
124 | /** | 155 | /** |
@@ -134,16 +165,9 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) | |||
134 | cycle_t cval, now; | 165 | cycle_t cval, now; |
135 | u64 ns; | 166 | u64 ns; |
136 | 167 | ||
137 | if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) || | ||
138 | !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE)) | ||
139 | return; | ||
140 | |||
141 | cval = timer->cntv_cval; | ||
142 | now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff; | ||
143 | |||
144 | BUG_ON(timer_is_armed(timer)); | 168 | BUG_ON(timer_is_armed(timer)); |
145 | 169 | ||
146 | if (cval <= now) { | 170 | if (kvm_timer_should_fire(vcpu)) { |
147 | /* | 171 | /* |
148 | * Timer has already expired while we were not | 172 | * Timer has already expired while we were not |
149 | * looking. Inject the interrupt and carry on. | 173 | * looking. Inject the interrupt and carry on. |
@@ -152,6 +176,9 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) | |||
152 | return; | 176 | return; |
153 | } | 177 | } |
154 | 178 | ||
179 | cval = timer->cntv_cval; | ||
180 | now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff; | ||
181 | |||
155 | ns = cyclecounter_cyc2ns(timecounter->cc, cval - now, timecounter->mask, | 182 | ns = cyclecounter_cyc2ns(timecounter->cc, cval - now, timecounter->mask, |
156 | &timecounter->frac); | 183 | &timecounter->frac); |
157 | timer_arm(timer, ns); | 184 | timer_arm(timer, ns); |
diff --git a/virt/kvm/arm/vgic-v2-emul.c b/virt/kvm/arm/vgic-v2-emul.c index 19c6210f02cf..13907970d11c 100644 --- a/virt/kvm/arm/vgic-v2-emul.c +++ b/virt/kvm/arm/vgic-v2-emul.c | |||
@@ -107,6 +107,22 @@ static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu, | |||
107 | vcpu->vcpu_id); | 107 | vcpu->vcpu_id); |
108 | } | 108 | } |
109 | 109 | ||
110 | static bool handle_mmio_set_active_reg(struct kvm_vcpu *vcpu, | ||
111 | struct kvm_exit_mmio *mmio, | ||
112 | phys_addr_t offset) | ||
113 | { | ||
114 | return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset, | ||
115 | vcpu->vcpu_id); | ||
116 | } | ||
117 | |||
118 | static bool handle_mmio_clear_active_reg(struct kvm_vcpu *vcpu, | ||
119 | struct kvm_exit_mmio *mmio, | ||
120 | phys_addr_t offset) | ||
121 | { | ||
122 | return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset, | ||
123 | vcpu->vcpu_id); | ||
124 | } | ||
125 | |||
110 | static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu, | 126 | static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu, |
111 | struct kvm_exit_mmio *mmio, | 127 | struct kvm_exit_mmio *mmio, |
112 | phys_addr_t offset) | 128 | phys_addr_t offset) |
@@ -303,7 +319,7 @@ static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu, | |||
303 | return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false); | 319 | return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false); |
304 | } | 320 | } |
305 | 321 | ||
306 | static const struct kvm_mmio_range vgic_dist_ranges[] = { | 322 | static const struct vgic_io_range vgic_dist_ranges[] = { |
307 | { | 323 | { |
308 | .base = GIC_DIST_CTRL, | 324 | .base = GIC_DIST_CTRL, |
309 | .len = 12, | 325 | .len = 12, |
@@ -344,13 +360,13 @@ static const struct kvm_mmio_range vgic_dist_ranges[] = { | |||
344 | .base = GIC_DIST_ACTIVE_SET, | 360 | .base = GIC_DIST_ACTIVE_SET, |
345 | .len = VGIC_MAX_IRQS / 8, | 361 | .len = VGIC_MAX_IRQS / 8, |
346 | .bits_per_irq = 1, | 362 | .bits_per_irq = 1, |
347 | .handle_mmio = handle_mmio_raz_wi, | 363 | .handle_mmio = handle_mmio_set_active_reg, |
348 | }, | 364 | }, |
349 | { | 365 | { |
350 | .base = GIC_DIST_ACTIVE_CLEAR, | 366 | .base = GIC_DIST_ACTIVE_CLEAR, |
351 | .len = VGIC_MAX_IRQS / 8, | 367 | .len = VGIC_MAX_IRQS / 8, |
352 | .bits_per_irq = 1, | 368 | .bits_per_irq = 1, |
353 | .handle_mmio = handle_mmio_raz_wi, | 369 | .handle_mmio = handle_mmio_clear_active_reg, |
354 | }, | 370 | }, |
355 | { | 371 | { |
356 | .base = GIC_DIST_PRI, | 372 | .base = GIC_DIST_PRI, |
@@ -388,24 +404,6 @@ static const struct kvm_mmio_range vgic_dist_ranges[] = { | |||
388 | {} | 404 | {} |
389 | }; | 405 | }; |
390 | 406 | ||
391 | static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
392 | struct kvm_exit_mmio *mmio) | ||
393 | { | ||
394 | unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base; | ||
395 | |||
396 | if (!is_in_range(mmio->phys_addr, mmio->len, base, | ||
397 | KVM_VGIC_V2_DIST_SIZE)) | ||
398 | return false; | ||
399 | |||
400 | /* GICv2 does not support accesses wider than 32 bits */ | ||
401 | if (mmio->len > 4) { | ||
402 | kvm_inject_dabt(vcpu, mmio->phys_addr); | ||
403 | return true; | ||
404 | } | ||
405 | |||
406 | return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base); | ||
407 | } | ||
408 | |||
409 | static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg) | 407 | static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg) |
410 | { | 408 | { |
411 | struct kvm *kvm = vcpu->kvm; | 409 | struct kvm *kvm = vcpu->kvm; |
@@ -490,6 +488,7 @@ static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq) | |||
490 | static int vgic_v2_map_resources(struct kvm *kvm, | 488 | static int vgic_v2_map_resources(struct kvm *kvm, |
491 | const struct vgic_params *params) | 489 | const struct vgic_params *params) |
492 | { | 490 | { |
491 | struct vgic_dist *dist = &kvm->arch.vgic; | ||
493 | int ret = 0; | 492 | int ret = 0; |
494 | 493 | ||
495 | if (!irqchip_in_kernel(kvm)) | 494 | if (!irqchip_in_kernel(kvm)) |
@@ -500,13 +499,17 @@ static int vgic_v2_map_resources(struct kvm *kvm, | |||
500 | if (vgic_ready(kvm)) | 499 | if (vgic_ready(kvm)) |
501 | goto out; | 500 | goto out; |
502 | 501 | ||
503 | if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) || | 502 | if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) || |
504 | IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) { | 503 | IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) { |
505 | kvm_err("Need to set vgic cpu and dist addresses first\n"); | 504 | kvm_err("Need to set vgic cpu and dist addresses first\n"); |
506 | ret = -ENXIO; | 505 | ret = -ENXIO; |
507 | goto out; | 506 | goto out; |
508 | } | 507 | } |
509 | 508 | ||
509 | vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base, | ||
510 | KVM_VGIC_V2_DIST_SIZE, | ||
511 | vgic_dist_ranges, -1, &dist->dist_iodev); | ||
512 | |||
510 | /* | 513 | /* |
511 | * Initialize the vgic if this hasn't already been done on demand by | 514 | * Initialize the vgic if this hasn't already been done on demand by |
512 | * accessing the vgic state from userspace. | 515 | * accessing the vgic state from userspace. |
@@ -514,18 +517,23 @@ static int vgic_v2_map_resources(struct kvm *kvm, | |||
514 | ret = vgic_init(kvm); | 517 | ret = vgic_init(kvm); |
515 | if (ret) { | 518 | if (ret) { |
516 | kvm_err("Unable to allocate maps\n"); | 519 | kvm_err("Unable to allocate maps\n"); |
517 | goto out; | 520 | goto out_unregister; |
518 | } | 521 | } |
519 | 522 | ||
520 | ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base, | 523 | ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base, |
521 | params->vcpu_base, KVM_VGIC_V2_CPU_SIZE, | 524 | params->vcpu_base, KVM_VGIC_V2_CPU_SIZE, |
522 | true); | 525 | true); |
523 | if (ret) { | 526 | if (ret) { |
524 | kvm_err("Unable to remap VGIC CPU to VCPU\n"); | 527 | kvm_err("Unable to remap VGIC CPU to VCPU\n"); |
525 | goto out; | 528 | goto out_unregister; |
526 | } | 529 | } |
527 | 530 | ||
528 | kvm->arch.vgic.ready = true; | 531 | dist->ready = true; |
532 | goto out; | ||
533 | |||
534 | out_unregister: | ||
535 | kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev); | ||
536 | |||
529 | out: | 537 | out: |
530 | if (ret) | 538 | if (ret) |
531 | kvm_vgic_destroy(kvm); | 539 | kvm_vgic_destroy(kvm); |
@@ -554,7 +562,6 @@ void vgic_v2_init_emulation(struct kvm *kvm) | |||
554 | { | 562 | { |
555 | struct vgic_dist *dist = &kvm->arch.vgic; | 563 | struct vgic_dist *dist = &kvm->arch.vgic; |
556 | 564 | ||
557 | dist->vm_ops.handle_mmio = vgic_v2_handle_mmio; | ||
558 | dist->vm_ops.queue_sgi = vgic_v2_queue_sgi; | 565 | dist->vm_ops.queue_sgi = vgic_v2_queue_sgi; |
559 | dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source; | 566 | dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source; |
560 | dist->vm_ops.init_model = vgic_v2_init_model; | 567 | dist->vm_ops.init_model = vgic_v2_init_model; |
@@ -631,7 +638,7 @@ static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu, | |||
631 | * CPU Interface Register accesses - these are not accessed by the VM, but by | 638 | * CPU Interface Register accesses - these are not accessed by the VM, but by |
632 | * user space for saving and restoring VGIC state. | 639 | * user space for saving and restoring VGIC state. |
633 | */ | 640 | */ |
634 | static const struct kvm_mmio_range vgic_cpu_ranges[] = { | 641 | static const struct vgic_io_range vgic_cpu_ranges[] = { |
635 | { | 642 | { |
636 | .base = GIC_CPU_CTRL, | 643 | .base = GIC_CPU_CTRL, |
637 | .len = 12, | 644 | .len = 12, |
@@ -658,12 +665,13 @@ static int vgic_attr_regs_access(struct kvm_device *dev, | |||
658 | struct kvm_device_attr *attr, | 665 | struct kvm_device_attr *attr, |
659 | u32 *reg, bool is_write) | 666 | u32 *reg, bool is_write) |
660 | { | 667 | { |
661 | const struct kvm_mmio_range *r = NULL, *ranges; | 668 | const struct vgic_io_range *r = NULL, *ranges; |
662 | phys_addr_t offset; | 669 | phys_addr_t offset; |
663 | int ret, cpuid, c; | 670 | int ret, cpuid, c; |
664 | struct kvm_vcpu *vcpu, *tmp_vcpu; | 671 | struct kvm_vcpu *vcpu, *tmp_vcpu; |
665 | struct vgic_dist *vgic; | 672 | struct vgic_dist *vgic; |
666 | struct kvm_exit_mmio mmio; | 673 | struct kvm_exit_mmio mmio; |
674 | u32 data; | ||
667 | 675 | ||
668 | offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; | 676 | offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; |
669 | cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >> | 677 | cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >> |
@@ -685,6 +693,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev, | |||
685 | 693 | ||
686 | mmio.len = 4; | 694 | mmio.len = 4; |
687 | mmio.is_write = is_write; | 695 | mmio.is_write = is_write; |
696 | mmio.data = &data; | ||
688 | if (is_write) | 697 | if (is_write) |
689 | mmio_data_write(&mmio, ~0, *reg); | 698 | mmio_data_write(&mmio, ~0, *reg); |
690 | switch (attr->group) { | 699 | switch (attr->group) { |
@@ -699,7 +708,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev, | |||
699 | default: | 708 | default: |
700 | BUG(); | 709 | BUG(); |
701 | } | 710 | } |
702 | r = vgic_find_range(ranges, &mmio, offset); | 711 | r = vgic_find_range(ranges, 4, offset); |
703 | 712 | ||
704 | if (unlikely(!r || !r->handle_mmio)) { | 713 | if (unlikely(!r || !r->handle_mmio)) { |
705 | ret = -ENXIO; | 714 | ret = -ENXIO; |
diff --git a/virt/kvm/arm/vgic-v3-emul.c b/virt/kvm/arm/vgic-v3-emul.c index b3f154631515..e9c3a7a83833 100644 --- a/virt/kvm/arm/vgic-v3-emul.c +++ b/virt/kvm/arm/vgic-v3-emul.c | |||
@@ -340,7 +340,7 @@ static bool handle_mmio_idregs(struct kvm_vcpu *vcpu, | |||
340 | return false; | 340 | return false; |
341 | } | 341 | } |
342 | 342 | ||
343 | static const struct kvm_mmio_range vgic_v3_dist_ranges[] = { | 343 | static const struct vgic_io_range vgic_v3_dist_ranges[] = { |
344 | { | 344 | { |
345 | .base = GICD_CTLR, | 345 | .base = GICD_CTLR, |
346 | .len = 0x04, | 346 | .len = 0x04, |
@@ -502,6 +502,43 @@ static const struct kvm_mmio_range vgic_v3_dist_ranges[] = { | |||
502 | {}, | 502 | {}, |
503 | }; | 503 | }; |
504 | 504 | ||
505 | static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu, | ||
506 | struct kvm_exit_mmio *mmio, | ||
507 | phys_addr_t offset) | ||
508 | { | ||
509 | /* since we don't support LPIs, this register is zero for now */ | ||
510 | vgic_reg_access(mmio, NULL, offset, | ||
511 | ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED); | ||
512 | return false; | ||
513 | } | ||
514 | |||
515 | static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu, | ||
516 | struct kvm_exit_mmio *mmio, | ||
517 | phys_addr_t offset) | ||
518 | { | ||
519 | u32 reg; | ||
520 | u64 mpidr; | ||
521 | struct kvm_vcpu *redist_vcpu = mmio->private; | ||
522 | int target_vcpu_id = redist_vcpu->vcpu_id; | ||
523 | |||
524 | /* the upper 32 bits contain the affinity value */ | ||
525 | if ((offset & ~3) == 4) { | ||
526 | mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu); | ||
527 | reg = compress_mpidr(mpidr); | ||
528 | |||
529 | vgic_reg_access(mmio, ®, offset, | ||
530 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); | ||
531 | return false; | ||
532 | } | ||
533 | |||
534 | reg = redist_vcpu->vcpu_id << 8; | ||
535 | if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1) | ||
536 | reg |= GICR_TYPER_LAST; | ||
537 | vgic_reg_access(mmio, ®, offset, | ||
538 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); | ||
539 | return false; | ||
540 | } | ||
541 | |||
505 | static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu, | 542 | static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu, |
506 | struct kvm_exit_mmio *mmio, | 543 | struct kvm_exit_mmio *mmio, |
507 | phys_addr_t offset) | 544 | phys_addr_t offset) |
@@ -570,186 +607,107 @@ static bool handle_mmio_cfg_reg_redist(struct kvm_vcpu *vcpu, | |||
570 | return vgic_handle_cfg_reg(reg, mmio, offset); | 607 | return vgic_handle_cfg_reg(reg, mmio, offset); |
571 | } | 608 | } |
572 | 609 | ||
573 | static const struct kvm_mmio_range vgic_redist_sgi_ranges[] = { | 610 | #define SGI_base(x) ((x) + SZ_64K) |
611 | |||
612 | static const struct vgic_io_range vgic_redist_ranges[] = { | ||
613 | { | ||
614 | .base = GICR_CTLR, | ||
615 | .len = 0x04, | ||
616 | .bits_per_irq = 0, | ||
617 | .handle_mmio = handle_mmio_ctlr_redist, | ||
618 | }, | ||
619 | { | ||
620 | .base = GICR_TYPER, | ||
621 | .len = 0x08, | ||
622 | .bits_per_irq = 0, | ||
623 | .handle_mmio = handle_mmio_typer_redist, | ||
624 | }, | ||
625 | { | ||
626 | .base = GICR_IIDR, | ||
627 | .len = 0x04, | ||
628 | .bits_per_irq = 0, | ||
629 | .handle_mmio = handle_mmio_iidr, | ||
630 | }, | ||
631 | { | ||
632 | .base = GICR_WAKER, | ||
633 | .len = 0x04, | ||
634 | .bits_per_irq = 0, | ||
635 | .handle_mmio = handle_mmio_raz_wi, | ||
636 | }, | ||
574 | { | 637 | { |
575 | .base = GICR_IGROUPR0, | 638 | .base = GICR_IDREGS, |
639 | .len = 0x30, | ||
640 | .bits_per_irq = 0, | ||
641 | .handle_mmio = handle_mmio_idregs, | ||
642 | }, | ||
643 | { | ||
644 | .base = SGI_base(GICR_IGROUPR0), | ||
576 | .len = 0x04, | 645 | .len = 0x04, |
577 | .bits_per_irq = 1, | 646 | .bits_per_irq = 1, |
578 | .handle_mmio = handle_mmio_rao_wi, | 647 | .handle_mmio = handle_mmio_rao_wi, |
579 | }, | 648 | }, |
580 | { | 649 | { |
581 | .base = GICR_ISENABLER0, | 650 | .base = SGI_base(GICR_ISENABLER0), |
582 | .len = 0x04, | 651 | .len = 0x04, |
583 | .bits_per_irq = 1, | 652 | .bits_per_irq = 1, |
584 | .handle_mmio = handle_mmio_set_enable_reg_redist, | 653 | .handle_mmio = handle_mmio_set_enable_reg_redist, |
585 | }, | 654 | }, |
586 | { | 655 | { |
587 | .base = GICR_ICENABLER0, | 656 | .base = SGI_base(GICR_ICENABLER0), |
588 | .len = 0x04, | 657 | .len = 0x04, |
589 | .bits_per_irq = 1, | 658 | .bits_per_irq = 1, |
590 | .handle_mmio = handle_mmio_clear_enable_reg_redist, | 659 | .handle_mmio = handle_mmio_clear_enable_reg_redist, |
591 | }, | 660 | }, |
592 | { | 661 | { |
593 | .base = GICR_ISPENDR0, | 662 | .base = SGI_base(GICR_ISPENDR0), |
594 | .len = 0x04, | 663 | .len = 0x04, |
595 | .bits_per_irq = 1, | 664 | .bits_per_irq = 1, |
596 | .handle_mmio = handle_mmio_set_pending_reg_redist, | 665 | .handle_mmio = handle_mmio_set_pending_reg_redist, |
597 | }, | 666 | }, |
598 | { | 667 | { |
599 | .base = GICR_ICPENDR0, | 668 | .base = SGI_base(GICR_ICPENDR0), |
600 | .len = 0x04, | 669 | .len = 0x04, |
601 | .bits_per_irq = 1, | 670 | .bits_per_irq = 1, |
602 | .handle_mmio = handle_mmio_clear_pending_reg_redist, | 671 | .handle_mmio = handle_mmio_clear_pending_reg_redist, |
603 | }, | 672 | }, |
604 | { | 673 | { |
605 | .base = GICR_ISACTIVER0, | 674 | .base = SGI_base(GICR_ISACTIVER0), |
606 | .len = 0x04, | 675 | .len = 0x04, |
607 | .bits_per_irq = 1, | 676 | .bits_per_irq = 1, |
608 | .handle_mmio = handle_mmio_raz_wi, | 677 | .handle_mmio = handle_mmio_raz_wi, |
609 | }, | 678 | }, |
610 | { | 679 | { |
611 | .base = GICR_ICACTIVER0, | 680 | .base = SGI_base(GICR_ICACTIVER0), |
612 | .len = 0x04, | 681 | .len = 0x04, |
613 | .bits_per_irq = 1, | 682 | .bits_per_irq = 1, |
614 | .handle_mmio = handle_mmio_raz_wi, | 683 | .handle_mmio = handle_mmio_raz_wi, |
615 | }, | 684 | }, |
616 | { | 685 | { |
617 | .base = GICR_IPRIORITYR0, | 686 | .base = SGI_base(GICR_IPRIORITYR0), |
618 | .len = 0x20, | 687 | .len = 0x20, |
619 | .bits_per_irq = 8, | 688 | .bits_per_irq = 8, |
620 | .handle_mmio = handle_mmio_priority_reg_redist, | 689 | .handle_mmio = handle_mmio_priority_reg_redist, |
621 | }, | 690 | }, |
622 | { | 691 | { |
623 | .base = GICR_ICFGR0, | 692 | .base = SGI_base(GICR_ICFGR0), |
624 | .len = 0x08, | 693 | .len = 0x08, |
625 | .bits_per_irq = 2, | 694 | .bits_per_irq = 2, |
626 | .handle_mmio = handle_mmio_cfg_reg_redist, | 695 | .handle_mmio = handle_mmio_cfg_reg_redist, |
627 | }, | 696 | }, |
628 | { | 697 | { |
629 | .base = GICR_IGRPMODR0, | 698 | .base = SGI_base(GICR_IGRPMODR0), |
630 | .len = 0x04, | 699 | .len = 0x04, |
631 | .bits_per_irq = 1, | 700 | .bits_per_irq = 1, |
632 | .handle_mmio = handle_mmio_raz_wi, | 701 | .handle_mmio = handle_mmio_raz_wi, |
633 | }, | 702 | }, |
634 | { | 703 | { |
635 | .base = GICR_NSACR, | 704 | .base = SGI_base(GICR_NSACR), |
636 | .len = 0x04, | 705 | .len = 0x04, |
637 | .handle_mmio = handle_mmio_raz_wi, | 706 | .handle_mmio = handle_mmio_raz_wi, |
638 | }, | 707 | }, |
639 | {}, | 708 | {}, |
640 | }; | 709 | }; |
641 | 710 | ||
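The single vgic_redist_ranges[] table above folds the former SGI page into the same array by wrapping the per-IRQ registers in SGI_base(), which simply places them in the second 64K page of each redistributor frame. A minimal sketch of that address arithmetic follows; the EX_* offsets and the frame base are invented for the example and are not the real GICR_* values.

#include <stdio.h>

#define SZ_64K        0x10000UL
#define SGI_base(x)   ((x) + SZ_64K)     /* same offsetting trick as the table above */

/* Hypothetical register offsets, for illustration only. */
#define EX_GICR_CTLR        0x0000UL
#define EX_GICR_ISENABLER0  0x0100UL

int main(void)
{
	unsigned long rd_frame = 0x08080000UL;   /* assumed redistributor frame base */

	printf("CTLR       at 0x%lx\n", rd_frame + EX_GICR_CTLR);
	printf("ISENABLER0 at 0x%lx\n", rd_frame + SGI_base(EX_GICR_ISENABLER0));
	return 0;
}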
642 | static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu, | ||
643 | struct kvm_exit_mmio *mmio, | ||
644 | phys_addr_t offset) | ||
645 | { | ||
646 | /* since we don't support LPIs, this register is zero for now */ | ||
647 | vgic_reg_access(mmio, NULL, offset, | ||
648 | ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED); | ||
649 | return false; | ||
650 | } | ||
651 | |||
652 | static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu, | ||
653 | struct kvm_exit_mmio *mmio, | ||
654 | phys_addr_t offset) | ||
655 | { | ||
656 | u32 reg; | ||
657 | u64 mpidr; | ||
658 | struct kvm_vcpu *redist_vcpu = mmio->private; | ||
659 | int target_vcpu_id = redist_vcpu->vcpu_id; | ||
660 | |||
661 | /* the upper 32 bits contain the affinity value */ | ||
662 | if ((offset & ~3) == 4) { | ||
663 | mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu); | ||
664 | reg = compress_mpidr(mpidr); | ||
665 | |||
666 | vgic_reg_access(mmio, ®, offset, | ||
667 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); | ||
668 | return false; | ||
669 | } | ||
670 | |||
671 | reg = redist_vcpu->vcpu_id << 8; | ||
672 | if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1) | ||
673 | reg |= GICR_TYPER_LAST; | ||
674 | vgic_reg_access(mmio, ®, offset, | ||
675 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); | ||
676 | return false; | ||
677 | } | ||
678 | |||
679 | static const struct kvm_mmio_range vgic_redist_ranges[] = { | ||
680 | { | ||
681 | .base = GICR_CTLR, | ||
682 | .len = 0x04, | ||
683 | .bits_per_irq = 0, | ||
684 | .handle_mmio = handle_mmio_ctlr_redist, | ||
685 | }, | ||
686 | { | ||
687 | .base = GICR_TYPER, | ||
688 | .len = 0x08, | ||
689 | .bits_per_irq = 0, | ||
690 | .handle_mmio = handle_mmio_typer_redist, | ||
691 | }, | ||
692 | { | ||
693 | .base = GICR_IIDR, | ||
694 | .len = 0x04, | ||
695 | .bits_per_irq = 0, | ||
696 | .handle_mmio = handle_mmio_iidr, | ||
697 | }, | ||
698 | { | ||
699 | .base = GICR_WAKER, | ||
700 | .len = 0x04, | ||
701 | .bits_per_irq = 0, | ||
702 | .handle_mmio = handle_mmio_raz_wi, | ||
703 | }, | ||
704 | { | ||
705 | .base = GICR_IDREGS, | ||
706 | .len = 0x30, | ||
707 | .bits_per_irq = 0, | ||
708 | .handle_mmio = handle_mmio_idregs, | ||
709 | }, | ||
710 | {}, | ||
711 | }; | ||
712 | |||
713 | /* | ||
714 | * This function splits accesses between the distributor and the two | ||
715 | * redistributor parts (private/SPI). As each redistributor is accessible | ||
716 | * from any CPU, we have to determine the affected VCPU by taking the faulting | ||
717 | * address into account. We then pass this VCPU to the handler function via | ||
718 | * the private parameter. | ||
719 | */ | ||
720 | #define SGI_BASE_OFFSET SZ_64K | ||
721 | static bool vgic_v3_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
722 | struct kvm_exit_mmio *mmio) | ||
723 | { | ||
724 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
725 | unsigned long dbase = dist->vgic_dist_base; | ||
726 | unsigned long rdbase = dist->vgic_redist_base; | ||
727 | int nrcpus = atomic_read(&vcpu->kvm->online_vcpus); | ||
728 | int vcpu_id; | ||
729 | const struct kvm_mmio_range *mmio_range; | ||
730 | |||
731 | if (is_in_range(mmio->phys_addr, mmio->len, dbase, GIC_V3_DIST_SIZE)) { | ||
732 | return vgic_handle_mmio_range(vcpu, run, mmio, | ||
733 | vgic_v3_dist_ranges, dbase); | ||
734 | } | ||
735 | |||
736 | if (!is_in_range(mmio->phys_addr, mmio->len, rdbase, | ||
737 | GIC_V3_REDIST_SIZE * nrcpus)) | ||
738 | return false; | ||
739 | |||
740 | vcpu_id = (mmio->phys_addr - rdbase) / GIC_V3_REDIST_SIZE; | ||
741 | rdbase += (vcpu_id * GIC_V3_REDIST_SIZE); | ||
742 | mmio->private = kvm_get_vcpu(vcpu->kvm, vcpu_id); | ||
743 | |||
744 | if (mmio->phys_addr >= rdbase + SGI_BASE_OFFSET) { | ||
745 | rdbase += SGI_BASE_OFFSET; | ||
746 | mmio_range = vgic_redist_sgi_ranges; | ||
747 | } else { | ||
748 | mmio_range = vgic_redist_ranges; | ||
749 | } | ||
750 | return vgic_handle_mmio_range(vcpu, run, mmio, mmio_range, rdbase); | ||
751 | } | ||
752 | |||
753 | static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq) | 711 | static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq) |
754 | { | 712 | { |
755 | if (vgic_queue_irq(vcpu, 0, irq)) { | 713 | if (vgic_queue_irq(vcpu, 0, irq)) { |
@@ -766,6 +724,9 @@ static int vgic_v3_map_resources(struct kvm *kvm, | |||
766 | { | 724 | { |
767 | int ret = 0; | 725 | int ret = 0; |
768 | struct vgic_dist *dist = &kvm->arch.vgic; | 726 | struct vgic_dist *dist = &kvm->arch.vgic; |
727 | gpa_t rdbase = dist->vgic_redist_base; | ||
728 | struct vgic_io_device *iodevs = NULL; | ||
729 | int i; | ||
769 | 730 | ||
770 | if (!irqchip_in_kernel(kvm)) | 731 | if (!irqchip_in_kernel(kvm)) |
771 | return 0; | 732 | return 0; |
@@ -791,7 +752,41 @@ static int vgic_v3_map_resources(struct kvm *kvm, | |||
791 | goto out; | 752 | goto out; |
792 | } | 753 | } |
793 | 754 | ||
794 | kvm->arch.vgic.ready = true; | 755 | ret = vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base, |
756 | GIC_V3_DIST_SIZE, vgic_v3_dist_ranges, | ||
757 | -1, &dist->dist_iodev); | ||
758 | if (ret) | ||
759 | goto out; | ||
760 | |||
761 | iodevs = kcalloc(dist->nr_cpus, sizeof(iodevs[0]), GFP_KERNEL); | ||
762 | if (!iodevs) { | ||
763 | ret = -ENOMEM; | ||
764 | goto out_unregister; | ||
765 | } | ||
766 | |||
767 | for (i = 0; i < dist->nr_cpus; i++) { | ||
768 | ret = vgic_register_kvm_io_dev(kvm, rdbase, | ||
769 | SZ_128K, vgic_redist_ranges, | ||
770 | i, &iodevs[i]); | ||
771 | if (ret) | ||
772 | goto out_unregister; | ||
773 | rdbase += GIC_V3_REDIST_SIZE; | ||
774 | } | ||
775 | |||
776 | dist->redist_iodevs = iodevs; | ||
777 | dist->ready = true; | ||
778 | goto out; | ||
779 | |||
780 | out_unregister: | ||
781 | kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev); | ||
782 | if (iodevs) { | ||
783 | for (i = 0; i < dist->nr_cpus; i++) { | ||
784 | if (iodevs[i].dev.ops) | ||
785 | kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, | ||
786 | &iodevs[i].dev); | ||
787 | } | ||
788 | } | ||
789 | |||
795 | out: | 790 | out: |
796 | if (ret) | 791 | if (ret) |
797 | kvm_vgic_destroy(kvm); | 792 | kvm_vgic_destroy(kvm); |
@@ -832,7 +827,6 @@ void vgic_v3_init_emulation(struct kvm *kvm) | |||
832 | { | 827 | { |
833 | struct vgic_dist *dist = &kvm->arch.vgic; | 828 | struct vgic_dist *dist = &kvm->arch.vgic; |
834 | 829 | ||
835 | dist->vm_ops.handle_mmio = vgic_v3_handle_mmio; | ||
836 | dist->vm_ops.queue_sgi = vgic_v3_queue_sgi; | 830 | dist->vm_ops.queue_sgi = vgic_v3_queue_sgi; |
837 | dist->vm_ops.add_sgi_source = vgic_v3_add_sgi_source; | 831 | dist->vm_ops.add_sgi_source = vgic_v3_add_sgi_source; |
838 | dist->vm_ops.init_model = vgic_v3_init_model; | 832 | dist->vm_ops.init_model = vgic_v3_init_model; |
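The vgic_v3_map_resources() rework earlier in this file registers one I/O device for the distributor and one per redistributor, stepping the base address by GIC_V3_REDIST_SIZE for each VCPU and unregistering everything again if any step fails. The sketch below models just that register-then-unwind pattern as a userspace program; fake_register() and struct fake_iodev are placeholders, not the kernel's vgic helpers.

#include <stdio.h>
#include <stdlib.h>

struct fake_iodev { int registered; };

/* Stand-in for vgic_register_kvm_io_dev(); fails when 'fail_at' is reached. */
static int fake_register(struct fake_iodev *dev, int id, int fail_at)
{
	if (id == fail_at)
		return -1;
	dev->registered = 1;
	return 0;
}

static int map_redistributors(int nr_cpus, int fail_at)
{
	struct fake_iodev *devs = calloc(nr_cpus, sizeof(*devs));
	int i, ret = 0;

	if (!devs)
		return -1;

	for (i = 0; i < nr_cpus; i++) {
		ret = fake_register(&devs[i], i, fail_at);
		if (ret)
			goto unwind;
	}
	printf("all %d redistributors registered\n", nr_cpus);
	free(devs);             /* the kernel instead keeps the array in dist->redist_iodevs */
	return 0;

unwind:
	for (i = 0; i < nr_cpus; i++)
		if (devs[i].registered)
			printf("unregistering redistributor %d\n", i);
	free(devs);
	return ret;
}

int main(void)
{
	map_redistributors(4, -1);      /* success path */
	map_redistributors(4, 2);       /* failure halfway through, triggers the unwind */
	return 0;
}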
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index c9f60f524588..8d550ff14700 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c | |||
@@ -31,6 +31,9 @@ | |||
31 | #include <asm/kvm_emulate.h> | 31 | #include <asm/kvm_emulate.h> |
32 | #include <asm/kvm_arm.h> | 32 | #include <asm/kvm_arm.h> |
33 | #include <asm/kvm_mmu.h> | 33 | #include <asm/kvm_mmu.h> |
34 | #include <trace/events/kvm.h> | ||
35 | #include <asm/kvm.h> | ||
36 | #include <kvm/iodev.h> | ||
34 | 37 | ||
35 | /* | 38 | /* |
36 | * How the whole thing works (courtesy of Christoffer Dall): | 39 | * How the whole thing works (courtesy of Christoffer Dall): |
@@ -263,6 +266,13 @@ static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq) | |||
263 | return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq); | 266 | return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq); |
264 | } | 267 | } |
265 | 268 | ||
269 | static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq) | ||
270 | { | ||
271 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
272 | |||
273 | return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq); | ||
274 | } | ||
275 | |||
266 | static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq) | 276 | static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq) |
267 | { | 277 | { |
268 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 278 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
@@ -277,6 +287,20 @@ static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq) | |||
277 | vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0); | 287 | vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0); |
278 | } | 288 | } |
279 | 289 | ||
290 | static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq) | ||
291 | { | ||
292 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
293 | |||
294 | vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1); | ||
295 | } | ||
296 | |||
297 | static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq) | ||
298 | { | ||
299 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
300 | |||
301 | vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0); | ||
302 | } | ||
303 | |||
280 | static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq) | 304 | static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq) |
281 | { | 305 | { |
282 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 306 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
@@ -520,6 +544,44 @@ bool vgic_handle_clear_pending_reg(struct kvm *kvm, | |||
520 | return false; | 544 | return false; |
521 | } | 545 | } |
522 | 546 | ||
547 | bool vgic_handle_set_active_reg(struct kvm *kvm, | ||
548 | struct kvm_exit_mmio *mmio, | ||
549 | phys_addr_t offset, int vcpu_id) | ||
550 | { | ||
551 | u32 *reg; | ||
552 | struct vgic_dist *dist = &kvm->arch.vgic; | ||
553 | |||
554 | reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset); | ||
555 | vgic_reg_access(mmio, reg, offset, | ||
556 | ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT); | ||
557 | |||
558 | if (mmio->is_write) { | ||
559 | vgic_update_state(kvm); | ||
560 | return true; | ||
561 | } | ||
562 | |||
563 | return false; | ||
564 | } | ||
565 | |||
566 | bool vgic_handle_clear_active_reg(struct kvm *kvm, | ||
567 | struct kvm_exit_mmio *mmio, | ||
568 | phys_addr_t offset, int vcpu_id) | ||
569 | { | ||
570 | u32 *reg; | ||
571 | struct vgic_dist *dist = &kvm->arch.vgic; | ||
572 | |||
573 | reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset); | ||
574 | vgic_reg_access(mmio, reg, offset, | ||
575 | ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT); | ||
576 | |||
577 | if (mmio->is_write) { | ||
578 | vgic_update_state(kvm); | ||
579 | return true; | ||
580 | } | ||
581 | |||
582 | return false; | ||
583 | } | ||
584 | |||
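vgic_handle_set_active_reg() and vgic_handle_clear_active_reg() mirror the existing pending handlers: reads return the backing bitmap word, writes OR bits in (ISACTIVER style) or clear them (ICACTIVER style), and any write forces vgic_update_state(). Below is a hedged userspace model of that SETBIT/CLEARBIT behaviour, with a single 32-bit word standing in for the per-VCPU bitmap.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t active_word;             /* stands in for one irq_active bitmap word */

/* Returns true when distributor state must be recomputed (i.e. on writes). */
static bool write_active(uint32_t val, bool set)
{
	if (set)
		active_word |= val;      /* ISACTIVER-style write */
	else
		active_word &= ~val;     /* ICACTIVER-style write */
	return true;
}

int main(void)
{
	write_active(0x00000011, true);
	printf("after set:   0x%08x\n", active_word);
	write_active(0x00000001, false);
	printf("after clear: 0x%08x\n", active_word);
	return 0;
}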
523 | static u32 vgic_cfg_expand(u16 val) | 585 | static u32 vgic_cfg_expand(u16 val) |
524 | { | 586 | { |
525 | u32 res = 0; | 587 | u32 res = 0; |
@@ -588,16 +650,12 @@ bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio, | |||
588 | } | 650 | } |
589 | 651 | ||
590 | /** | 652 | /** |
591 | * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor | 653 | * vgic_unqueue_irqs - move pending/active IRQs from LRs to the distributor |
592 | * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs | 654 | * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs |
593 | * | 655 | * |
594 | * Move any pending IRQs that have already been assigned to LRs back to the | 656 | * Move any IRQs that have already been assigned to LRs back to the |
595 | * emulated distributor state so that the complete emulated state can be read | 657 | * emulated distributor state so that the complete emulated state can be read |
596 | * from the main emulation structures without investigating the LRs. | 658 | * from the main emulation structures without investigating the LRs. |
597 | * | ||
598 | * Note that IRQs in the active state in the LRs get their pending state moved | ||
599 | * to the distributor but the active state stays in the LRs, because we don't | ||
600 | * track the active state on the distributor side. | ||
601 | */ | 659 | */ |
602 | void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) | 660 | void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) |
603 | { | 661 | { |
@@ -613,12 +671,22 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) | |||
613 | * 01: pending | 671 | * 01: pending |
614 | * 10: active | 672 | * 10: active |
615 | * 11: pending and active | 673 | * 11: pending and active |
616 | * | ||
617 | * If the LR holds only an active interrupt (not pending) then | ||
618 | * just leave it alone. | ||
619 | */ | 674 | */ |
620 | if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE) | 675 | BUG_ON(!(lr.state & LR_STATE_MASK)); |
621 | continue; | 676 | |
677 | /* Reestablish SGI source for pending and active IRQs */ | ||
678 | if (lr.irq < VGIC_NR_SGIS) | ||
679 | add_sgi_source(vcpu, lr.irq, lr.source); | ||
680 | |||
681 | /* | ||
682 | * If the LR holds an active (10) or a pending and active (11) | ||
683 | * interrupt then move the active state to the | ||
684 | * distributor tracking bit. | ||
685 | */ | ||
686 | if (lr.state & LR_STATE_ACTIVE) { | ||
687 | vgic_irq_set_active(vcpu, lr.irq); | ||
688 | lr.state &= ~LR_STATE_ACTIVE; | ||
689 | } | ||
622 | 690 | ||
623 | /* | 691 | /* |
624 | * Reestablish the pending state on the distributor and the | 692 | * Reestablish the pending state on the distributor and the |
@@ -626,21 +694,19 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) | |||
626 | * is fine, then we are only setting a few bits that were | 694 | * is fine, then we are only setting a few bits that were |
627 | * already set. | 695 | * already set. |
628 | */ | 696 | */ |
629 | vgic_dist_irq_set_pending(vcpu, lr.irq); | 697 | if (lr.state & LR_STATE_PENDING) { |
630 | if (lr.irq < VGIC_NR_SGIS) | 698 | vgic_dist_irq_set_pending(vcpu, lr.irq); |
631 | add_sgi_source(vcpu, lr.irq, lr.source); | 699 | lr.state &= ~LR_STATE_PENDING; |
632 | lr.state &= ~LR_STATE_PENDING; | 700 | } |
701 | |||
633 | vgic_set_lr(vcpu, i, lr); | 702 | vgic_set_lr(vcpu, i, lr); |
634 | 703 | ||
635 | /* | 704 | /* |
636 | * If there's no state left on the LR (it could still be | 705 | * Mark the LR as free for other use. |
637 | * active), then the LR does not hold any useful info and can | ||
638 | * be marked as free for other use. | ||
639 | */ | 706 | */ |
640 | if (!(lr.state & LR_STATE_MASK)) { | 707 | BUG_ON(lr.state & LR_STATE_MASK); |
641 | vgic_retire_lr(i, lr.irq, vcpu); | 708 | vgic_retire_lr(i, lr.irq, vcpu); |
642 | vgic_irq_clear_queued(vcpu, lr.irq); | 709 | vgic_irq_clear_queued(vcpu, lr.irq); |
643 | } | ||
644 | 710 | ||
645 | /* Finally update the VGIC state. */ | 711 | /* Finally update the VGIC state. */ |
646 | vgic_update_state(vcpu->kvm); | 712 | vgic_update_state(vcpu->kvm); |
@@ -648,24 +714,21 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) | |||
648 | } | 714 | } |
649 | 715 | ||
650 | const | 716 | const |
651 | struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges, | 717 | struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges, |
652 | struct kvm_exit_mmio *mmio, | 718 | int len, gpa_t offset) |
653 | phys_addr_t offset) | 719 | { |
654 | { | 720 | while (ranges->len) { |
655 | const struct kvm_mmio_range *r = ranges; | 721 | if (offset >= ranges->base && |
656 | 722 | (offset + len) <= (ranges->base + ranges->len)) | |
657 | while (r->len) { | 723 | return ranges; |
658 | if (offset >= r->base && | 724 | ranges++; |
659 | (offset + mmio->len) <= (r->base + r->len)) | ||
660 | return r; | ||
661 | r++; | ||
662 | } | 725 | } |
663 | 726 | ||
664 | return NULL; | 727 | return NULL; |
665 | } | 728 | } |
666 | 729 | ||
667 | static bool vgic_validate_access(const struct vgic_dist *dist, | 730 | static bool vgic_validate_access(const struct vgic_dist *dist, |
668 | const struct kvm_mmio_range *range, | 731 | const struct vgic_io_range *range, |
669 | unsigned long offset) | 732 | unsigned long offset) |
670 | { | 733 | { |
671 | int irq; | 734 | int irq; |
@@ -693,9 +756,8 @@ static bool vgic_validate_access(const struct vgic_dist *dist, | |||
693 | static bool call_range_handler(struct kvm_vcpu *vcpu, | 756 | static bool call_range_handler(struct kvm_vcpu *vcpu, |
694 | struct kvm_exit_mmio *mmio, | 757 | struct kvm_exit_mmio *mmio, |
695 | unsigned long offset, | 758 | unsigned long offset, |
696 | const struct kvm_mmio_range *range) | 759 | const struct vgic_io_range *range) |
697 | { | 760 | { |
698 | u32 *data32 = (void *)mmio->data; | ||
699 | struct kvm_exit_mmio mmio32; | 761 | struct kvm_exit_mmio mmio32; |
700 | bool ret; | 762 | bool ret; |
701 | 763 | ||
@@ -712,91 +774,142 @@ static bool call_range_handler(struct kvm_vcpu *vcpu, | |||
712 | mmio32.private = mmio->private; | 774 | mmio32.private = mmio->private; |
713 | 775 | ||
714 | mmio32.phys_addr = mmio->phys_addr + 4; | 776 | mmio32.phys_addr = mmio->phys_addr + 4; |
715 | if (mmio->is_write) | 777 | mmio32.data = &((u32 *)mmio->data)[1]; |
716 | *(u32 *)mmio32.data = data32[1]; | ||
717 | ret = range->handle_mmio(vcpu, &mmio32, offset + 4); | 778 | ret = range->handle_mmio(vcpu, &mmio32, offset + 4); |
718 | if (!mmio->is_write) | ||
719 | data32[1] = *(u32 *)mmio32.data; | ||
720 | 779 | ||
721 | mmio32.phys_addr = mmio->phys_addr; | 780 | mmio32.phys_addr = mmio->phys_addr; |
722 | if (mmio->is_write) | 781 | mmio32.data = &((u32 *)mmio->data)[0]; |
723 | *(u32 *)mmio32.data = data32[0]; | ||
724 | ret |= range->handle_mmio(vcpu, &mmio32, offset); | 782 | ret |= range->handle_mmio(vcpu, &mmio32, offset); |
725 | if (!mmio->is_write) | ||
726 | data32[0] = *(u32 *)mmio32.data; | ||
727 | 783 | ||
728 | return ret; | 784 | return ret; |
729 | } | 785 | } |
730 | 786 | ||
731 | /** | 787 | /** |
732 | * vgic_handle_mmio_range - handle an in-kernel MMIO access | 788 | * vgic_handle_mmio_access - handle an in-kernel MMIO access |
789 | * This is called by the read/write KVM IO device wrappers below. | ||
733 | * @vcpu: pointer to the vcpu performing the access | 790 | * @vcpu: pointer to the vcpu performing the access |
734 | * @run: pointer to the kvm_run structure | 791 | * @this: pointer to the KVM IO device in charge |
735 | * @mmio: pointer to the data describing the access | 792 | * @addr: guest physical address of the access |
736 | * @ranges: array of MMIO ranges in a given region | 793 | * @len: size of the access |
737 | * @mmio_base: base address of that region | 794 | * @val: pointer to the data region |
795 | * @is_write: read or write access | ||
738 | * | 796 | * |
739 | * returns true if the MMIO access could be performed | 797 | * returns 0 on success or a negative error value if the access was unhandled |
740 | */ | 798 | */ |
741 | bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run, | 799 | static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu, |
742 | struct kvm_exit_mmio *mmio, | 800 | struct kvm_io_device *this, gpa_t addr, |
743 | const struct kvm_mmio_range *ranges, | 801 | int len, void *val, bool is_write) |
744 | unsigned long mmio_base) | ||
745 | { | 802 | { |
746 | const struct kvm_mmio_range *range; | ||
747 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 803 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
804 | struct vgic_io_device *iodev = container_of(this, | ||
805 | struct vgic_io_device, dev); | ||
806 | struct kvm_run *run = vcpu->run; | ||
807 | const struct vgic_io_range *range; | ||
808 | struct kvm_exit_mmio mmio; | ||
748 | bool updated_state; | 809 | bool updated_state; |
749 | unsigned long offset; | 810 | gpa_t offset; |
750 | 811 | ||
751 | offset = mmio->phys_addr - mmio_base; | 812 | offset = addr - iodev->addr; |
752 | range = vgic_find_range(ranges, mmio, offset); | 813 | range = vgic_find_range(iodev->reg_ranges, len, offset); |
753 | if (unlikely(!range || !range->handle_mmio)) { | 814 | if (unlikely(!range || !range->handle_mmio)) { |
754 | pr_warn("Unhandled access %d %08llx %d\n", | 815 | pr_warn("Unhandled access %d %08llx %d\n", is_write, addr, len); |
755 | mmio->is_write, mmio->phys_addr, mmio->len); | 816 | return -ENXIO; |
756 | return false; | ||
757 | } | 817 | } |
758 | 818 | ||
759 | spin_lock(&vcpu->kvm->arch.vgic.lock); | 819 | mmio.phys_addr = addr; |
820 | mmio.len = len; | ||
821 | mmio.is_write = is_write; | ||
822 | mmio.data = val; | ||
823 | mmio.private = iodev->redist_vcpu; | ||
824 | |||
825 | spin_lock(&dist->lock); | ||
760 | offset -= range->base; | 826 | offset -= range->base; |
761 | if (vgic_validate_access(dist, range, offset)) { | 827 | if (vgic_validate_access(dist, range, offset)) { |
762 | updated_state = call_range_handler(vcpu, mmio, offset, range); | 828 | updated_state = call_range_handler(vcpu, &mmio, offset, range); |
763 | } else { | 829 | } else { |
764 | if (!mmio->is_write) | 830 | if (!is_write) |
765 | memset(mmio->data, 0, mmio->len); | 831 | memset(val, 0, len); |
766 | updated_state = false; | 832 | updated_state = false; |
767 | } | 833 | } |
768 | spin_unlock(&vcpu->kvm->arch.vgic.lock); | 834 | spin_unlock(&dist->lock); |
769 | kvm_prepare_mmio(run, mmio); | 835 | run->mmio.is_write = is_write; |
836 | run->mmio.len = len; | ||
837 | run->mmio.phys_addr = addr; | ||
838 | memcpy(run->mmio.data, val, len); | ||
839 | |||
770 | kvm_handle_mmio_return(vcpu, run); | 840 | kvm_handle_mmio_return(vcpu, run); |
771 | 841 | ||
772 | if (updated_state) | 842 | if (updated_state) |
773 | vgic_kick_vcpus(vcpu->kvm); | 843 | vgic_kick_vcpus(vcpu->kvm); |
774 | 844 | ||
775 | return true; | 845 | return 0; |
846 | } | ||
847 | |||
848 | static int vgic_handle_mmio_read(struct kvm_vcpu *vcpu, | ||
849 | struct kvm_io_device *this, | ||
850 | gpa_t addr, int len, void *val) | ||
851 | { | ||
852 | return vgic_handle_mmio_access(vcpu, this, addr, len, val, false); | ||
776 | } | 853 | } |
777 | 854 | ||
855 | static int vgic_handle_mmio_write(struct kvm_vcpu *vcpu, | ||
856 | struct kvm_io_device *this, | ||
857 | gpa_t addr, int len, const void *val) | ||
858 | { | ||
859 | return vgic_handle_mmio_access(vcpu, this, addr, len, (void *)val, | ||
860 | true); | ||
861 | } | ||
862 | |||
863 | struct kvm_io_device_ops vgic_io_ops = { | ||
864 | .read = vgic_handle_mmio_read, | ||
865 | .write = vgic_handle_mmio_write, | ||
866 | }; | ||
867 | |||
778 | /** | 868 | /** |
779 | * vgic_handle_mmio - handle an in-kernel MMIO access for the GIC emulation | 869 | * vgic_register_kvm_io_dev - register VGIC register frame on the KVM I/O bus |
780 | * @vcpu: pointer to the vcpu performing the access | 870 | * @kvm: The VM structure pointer |
781 | * @run: pointer to the kvm_run structure | 871 | * @base: The (guest) base address for the register frame |
782 | * @mmio: pointer to the data describing the access | 872 | * @len: Length of the register frame window |
873 | * @ranges: Describing the handler functions for each register | ||
874 | * @redist_vcpu_id: The VCPU ID to pass on to the handlers on call | ||
875 | * @iodev: Points to memory to be passed on to the handler | ||
783 | * | 876 | * |
784 | * returns true if the MMIO access has been performed in kernel space, | 877 | * @iodev stores the parameters of this function so that the handler and the |
785 | * and false if it needs to be emulated in user space. | 878 | * dispatcher function can use them (the KVM I/O bus framework lacks an |
786 | * Calls the actual handling routine for the selected VGIC model. | 879 | * opaque parameter). Initialization is done in this function, but the |
880 | * reference should be valid and unique for the whole VGIC lifetime. | ||
881 | * If the register frame is not mapped for a specific VCPU, pass -1 to | ||
882 | * @redist_vcpu_id. | ||
787 | */ | 883 | */ |
788 | bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, | 884 | int vgic_register_kvm_io_dev(struct kvm *kvm, gpa_t base, int len, |
789 | struct kvm_exit_mmio *mmio) | 885 | const struct vgic_io_range *ranges, |
886 | int redist_vcpu_id, | ||
887 | struct vgic_io_device *iodev) | ||
790 | { | 888 | { |
791 | if (!irqchip_in_kernel(vcpu->kvm)) | 889 | struct kvm_vcpu *vcpu = NULL; |
792 | return false; | 890 | int ret; |
793 | 891 | ||
794 | /* | 892 | if (redist_vcpu_id >= 0) |
795 | * This will currently call either vgic_v2_handle_mmio() or | 893 | vcpu = kvm_get_vcpu(kvm, redist_vcpu_id); |
796 | * vgic_v3_handle_mmio(), which in turn will call | 894 | |
797 | * vgic_handle_mmio_range() defined above. | 895 | iodev->addr = base; |
798 | */ | 896 | iodev->len = len; |
799 | return vcpu->kvm->arch.vgic.vm_ops.handle_mmio(vcpu, run, mmio); | 897 | iodev->reg_ranges = ranges; |
898 | iodev->redist_vcpu = vcpu; | ||
899 | |||
900 | kvm_iodevice_init(&iodev->dev, &vgic_io_ops); | ||
901 | |||
902 | mutex_lock(&kvm->slots_lock); | ||
903 | |||
904 | ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, base, len, | ||
905 | &iodev->dev); | ||
906 | mutex_unlock(&kvm->slots_lock); | ||
907 | |||
908 | /* Mark the iodev as invalid if registration fails. */ | ||
909 | if (ret) | ||
910 | iodev->dev.ops = NULL; | ||
911 | |||
912 | return ret; | ||
800 | } | 913 | } |
801 | 914 | ||
802 | static int vgic_nr_shared_irqs(struct vgic_dist *dist) | 915 | static int vgic_nr_shared_irqs(struct vgic_dist *dist) |
@@ -804,6 +917,36 @@ static int vgic_nr_shared_irqs(struct vgic_dist *dist) | |||
804 | return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS; | 917 | return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS; |
805 | } | 918 | } |
806 | 919 | ||
920 | static int compute_active_for_cpu(struct kvm_vcpu *vcpu) | ||
921 | { | ||
922 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
923 | unsigned long *active, *enabled, *act_percpu, *act_shared; | ||
924 | unsigned long active_private, active_shared; | ||
925 | int nr_shared = vgic_nr_shared_irqs(dist); | ||
926 | int vcpu_id; | ||
927 | |||
928 | vcpu_id = vcpu->vcpu_id; | ||
929 | act_percpu = vcpu->arch.vgic_cpu.active_percpu; | ||
930 | act_shared = vcpu->arch.vgic_cpu.active_shared; | ||
931 | |||
932 | active = vgic_bitmap_get_cpu_map(&dist->irq_active, vcpu_id); | ||
933 | enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id); | ||
934 | bitmap_and(act_percpu, active, enabled, VGIC_NR_PRIVATE_IRQS); | ||
935 | |||
936 | active = vgic_bitmap_get_shared_map(&dist->irq_active); | ||
937 | enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled); | ||
938 | bitmap_and(act_shared, active, enabled, nr_shared); | ||
939 | bitmap_and(act_shared, act_shared, | ||
940 | vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]), | ||
941 | nr_shared); | ||
942 | |||
943 | active_private = find_first_bit(act_percpu, VGIC_NR_PRIVATE_IRQS); | ||
944 | active_shared = find_first_bit(act_shared, nr_shared); | ||
945 | |||
946 | return (active_private < VGIC_NR_PRIVATE_IRQS || | ||
947 | active_shared < nr_shared); | ||
948 | } | ||
949 | |||
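compute_active_for_cpu() above filters the active bitmap through the enabled bitmap and, for shared interrupts, through the per-VCPU target bitmap, then reports whether any bit survives. The following self-contained sketch reduces that to single machine words; the real code operates on arbitrarily sized bitmaps via bitmap_and() and find_first_bit().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Word-sized model of the active & enabled & targeted filter. */
static bool cpu_has_active_work(uint64_t active, uint64_t enabled, uint64_t targets)
{
	return (active & enabled & targets) != 0;
}

int main(void)
{
	uint64_t active  = 1ULL << 34;   /* one shared IRQ is active */
	uint64_t enabled = 1ULL << 34;   /* ...and enabled */
	uint64_t to_cpu0 = 1ULL << 34;   /* ...and routed to VCPU 0 */

	printf("vcpu0 has active work: %d\n",
	       cpu_has_active_work(active, enabled, to_cpu0));
	printf("vcpu1 has active work: %d\n",
	       cpu_has_active_work(active, enabled, 0));
	return 0;
}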
807 | static int compute_pending_for_cpu(struct kvm_vcpu *vcpu) | 950 | static int compute_pending_for_cpu(struct kvm_vcpu *vcpu) |
808 | { | 951 | { |
809 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 952 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
@@ -835,7 +978,7 @@ static int compute_pending_for_cpu(struct kvm_vcpu *vcpu) | |||
835 | 978 | ||
836 | /* | 979 | /* |
837 | * Update the interrupt state and determine which CPUs have pending | 980 | * Update the interrupt state and determine which CPUs have pending |
838 | * interrupts. Must be called with distributor lock held. | 981 | * or active interrupts. Must be called with distributor lock held. |
839 | */ | 982 | */ |
840 | void vgic_update_state(struct kvm *kvm) | 983 | void vgic_update_state(struct kvm *kvm) |
841 | { | 984 | { |
@@ -849,10 +992,13 @@ void vgic_update_state(struct kvm *kvm) | |||
849 | } | 992 | } |
850 | 993 | ||
851 | kvm_for_each_vcpu(c, vcpu, kvm) { | 994 | kvm_for_each_vcpu(c, vcpu, kvm) { |
852 | if (compute_pending_for_cpu(vcpu)) { | 995 | if (compute_pending_for_cpu(vcpu)) |
853 | pr_debug("CPU%d has pending interrupts\n", c); | ||
854 | set_bit(c, dist->irq_pending_on_cpu); | 996 | set_bit(c, dist->irq_pending_on_cpu); |
855 | } | 997 | |
998 | if (compute_active_for_cpu(vcpu)) | ||
999 | set_bit(c, dist->irq_active_on_cpu); | ||
1000 | else | ||
1001 | clear_bit(c, dist->irq_active_on_cpu); | ||
856 | } | 1002 | } |
857 | } | 1003 | } |
858 | 1004 | ||
@@ -955,6 +1101,26 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu) | |||
955 | } | 1101 | } |
956 | } | 1102 | } |
957 | 1103 | ||
1104 | static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq, | ||
1105 | int lr_nr, struct vgic_lr vlr) | ||
1106 | { | ||
1107 | if (vgic_irq_is_active(vcpu, irq)) { | ||
1108 | vlr.state |= LR_STATE_ACTIVE; | ||
1109 | kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state); | ||
1110 | vgic_irq_clear_active(vcpu, irq); | ||
1111 | vgic_update_state(vcpu->kvm); | ||
1112 | } else if (vgic_dist_irq_is_pending(vcpu, irq)) { | ||
1113 | vlr.state |= LR_STATE_PENDING; | ||
1114 | kvm_debug("Set pending: 0x%x\n", vlr.state); | ||
1115 | } | ||
1116 | |||
1117 | if (!vgic_irq_is_edge(vcpu, irq)) | ||
1118 | vlr.state |= LR_EOI_INT; | ||
1119 | |||
1120 | vgic_set_lr(vcpu, lr_nr, vlr); | ||
1121 | vgic_sync_lr_elrsr(vcpu, lr_nr, vlr); | ||
1122 | } | ||
1123 | |||
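vgic_queue_irq_to_lr() above composes the list-register state from the distributor-side tracking bits: an active bit becomes LR_STATE_ACTIVE (and is consumed from the distributor), otherwise a pending bit becomes LR_STATE_PENDING, and level-triggered interrupts additionally request an EOI maintenance interrupt. The sketch below reproduces only that bit composition; the EX_LR_* flag values are illustrative, not the hardware LR encoding.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_LR_STATE_PENDING  (1U << 0)
#define EX_LR_STATE_ACTIVE   (1U << 1)
#define EX_LR_EOI_INT        (1U << 2)

static uint32_t build_lr_state(bool dist_active, bool dist_pending, bool is_edge)
{
	uint32_t state = 0;

	if (dist_active)
		state |= EX_LR_STATE_ACTIVE;   /* active bit moves back into the LR */
	else if (dist_pending)
		state |= EX_LR_STATE_PENDING;
	if (!is_edge)
		state |= EX_LR_EOI_INT;        /* level IRQs want the EOI maintenance */
	return state;
}

int main(void)
{
	printf("level-triggered, pending: 0x%x\n", build_lr_state(false, true, false));
	printf("edge-triggered, active:   0x%x\n", build_lr_state(true, false, true));
	return 0;
}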
958 | /* | 1124 | /* |
959 | * Queue an interrupt to a CPU virtual interface. Return true on success, | 1125 | * Queue an interrupt to a CPU virtual interface. Return true on success, |
960 | * or false if it wasn't possible to queue it. | 1126 | * or false if it wasn't possible to queue it. |
@@ -982,9 +1148,7 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) | |||
982 | if (vlr.source == sgi_source_id) { | 1148 | if (vlr.source == sgi_source_id) { |
983 | kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq); | 1149 | kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq); |
984 | BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); | 1150 | BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); |
985 | vlr.state |= LR_STATE_PENDING; | 1151 | vgic_queue_irq_to_lr(vcpu, irq, lr, vlr); |
986 | vgic_set_lr(vcpu, lr, vlr); | ||
987 | vgic_sync_lr_elrsr(vcpu, lr, vlr); | ||
988 | return true; | 1152 | return true; |
989 | } | 1153 | } |
990 | } | 1154 | } |
@@ -1001,12 +1165,8 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) | |||
1001 | 1165 | ||
1002 | vlr.irq = irq; | 1166 | vlr.irq = irq; |
1003 | vlr.source = sgi_source_id; | 1167 | vlr.source = sgi_source_id; |
1004 | vlr.state = LR_STATE_PENDING; | 1168 | vlr.state = 0; |
1005 | if (!vgic_irq_is_edge(vcpu, irq)) | 1169 | vgic_queue_irq_to_lr(vcpu, irq, lr, vlr); |
1006 | vlr.state |= LR_EOI_INT; | ||
1007 | |||
1008 | vgic_set_lr(vcpu, lr, vlr); | ||
1009 | vgic_sync_lr_elrsr(vcpu, lr, vlr); | ||
1010 | 1170 | ||
1011 | return true; | 1171 | return true; |
1012 | } | 1172 | } |
@@ -1038,39 +1198,49 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) | |||
1038 | { | 1198 | { |
1039 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | 1199 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; |
1040 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 1200 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
1201 | unsigned long *pa_percpu, *pa_shared; | ||
1041 | int i, vcpu_id; | 1202 | int i, vcpu_id; |
1042 | int overflow = 0; | 1203 | int overflow = 0; |
1204 | int nr_shared = vgic_nr_shared_irqs(dist); | ||
1043 | 1205 | ||
1044 | vcpu_id = vcpu->vcpu_id; | 1206 | vcpu_id = vcpu->vcpu_id; |
1045 | 1207 | ||
1208 | pa_percpu = vcpu->arch.vgic_cpu.pend_act_percpu; | ||
1209 | pa_shared = vcpu->arch.vgic_cpu.pend_act_shared; | ||
1210 | |||
1211 | bitmap_or(pa_percpu, vgic_cpu->pending_percpu, vgic_cpu->active_percpu, | ||
1212 | VGIC_NR_PRIVATE_IRQS); | ||
1213 | bitmap_or(pa_shared, vgic_cpu->pending_shared, vgic_cpu->active_shared, | ||
1214 | nr_shared); | ||
1046 | /* | 1215 | /* |
1047 | * We may not have any pending interrupt, or the interrupts | 1216 | * We may not have any pending interrupt, or the interrupts |
1048 | * may have been serviced from another vcpu. In all cases, | 1217 | * may have been serviced from another vcpu. In all cases, |
1049 | * move along. | 1218 | * move along. |
1050 | */ | 1219 | */ |
1051 | if (!kvm_vgic_vcpu_pending_irq(vcpu)) { | 1220 | if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu)) |
1052 | pr_debug("CPU%d has no pending interrupt\n", vcpu_id); | ||
1053 | goto epilog; | 1221 | goto epilog; |
1054 | } | ||
1055 | 1222 | ||
1056 | /* SGIs */ | 1223 | /* SGIs */ |
1057 | for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) { | 1224 | for_each_set_bit(i, pa_percpu, VGIC_NR_SGIS) { |
1058 | if (!queue_sgi(vcpu, i)) | 1225 | if (!queue_sgi(vcpu, i)) |
1059 | overflow = 1; | 1226 | overflow = 1; |
1060 | } | 1227 | } |
1061 | 1228 | ||
1062 | /* PPIs */ | 1229 | /* PPIs */ |
1063 | for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) { | 1230 | for_each_set_bit_from(i, pa_percpu, VGIC_NR_PRIVATE_IRQS) { |
1064 | if (!vgic_queue_hwirq(vcpu, i)) | 1231 | if (!vgic_queue_hwirq(vcpu, i)) |
1065 | overflow = 1; | 1232 | overflow = 1; |
1066 | } | 1233 | } |
1067 | 1234 | ||
1068 | /* SPIs */ | 1235 | /* SPIs */ |
1069 | for_each_set_bit(i, vgic_cpu->pending_shared, vgic_nr_shared_irqs(dist)) { | 1236 | for_each_set_bit(i, pa_shared, nr_shared) { |
1070 | if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS)) | 1237 | if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS)) |
1071 | overflow = 1; | 1238 | overflow = 1; |
1072 | } | 1239 | } |
1073 | 1240 | ||
1241 | |||
1242 | |||
1243 | |||
1074 | epilog: | 1244 | epilog: |
1075 | if (overflow) { | 1245 | if (overflow) { |
1076 | vgic_enable_underflow(vcpu); | 1246 | vgic_enable_underflow(vcpu); |
@@ -1089,7 +1259,9 @@ epilog: | |||
1089 | static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) | 1259 | static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) |
1090 | { | 1260 | { |
1091 | u32 status = vgic_get_interrupt_status(vcpu); | 1261 | u32 status = vgic_get_interrupt_status(vcpu); |
1262 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
1092 | bool level_pending = false; | 1263 | bool level_pending = false; |
1264 | struct kvm *kvm = vcpu->kvm; | ||
1093 | 1265 | ||
1094 | kvm_debug("STATUS = %08x\n", status); | 1266 | kvm_debug("STATUS = %08x\n", status); |
1095 | 1267 | ||
@@ -1106,6 +1278,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) | |||
1106 | struct vgic_lr vlr = vgic_get_lr(vcpu, lr); | 1278 | struct vgic_lr vlr = vgic_get_lr(vcpu, lr); |
1107 | WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq)); | 1279 | WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq)); |
1108 | 1280 | ||
1281 | spin_lock(&dist->lock); | ||
1109 | vgic_irq_clear_queued(vcpu, vlr.irq); | 1282 | vgic_irq_clear_queued(vcpu, vlr.irq); |
1110 | WARN_ON(vlr.state & LR_STATE_MASK); | 1283 | WARN_ON(vlr.state & LR_STATE_MASK); |
1111 | vlr.state = 0; | 1284 | vlr.state = 0; |
@@ -1124,6 +1297,17 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) | |||
1124 | */ | 1297 | */ |
1125 | vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq); | 1298 | vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq); |
1126 | 1299 | ||
1300 | /* | ||
1301 | * kvm_notify_acked_irq calls kvm_set_irq() | ||
1302 | * to reset the IRQ level. Need to release the | ||
1303 | * lock for kvm_set_irq to grab it. | ||
1304 | */ | ||
1305 | spin_unlock(&dist->lock); | ||
1306 | |||
1307 | kvm_notify_acked_irq(kvm, 0, | ||
1308 | vlr.irq - VGIC_NR_PRIVATE_IRQS); | ||
1309 | spin_lock(&dist->lock); | ||
1310 | |||
1127 | /* Any additional pending interrupt? */ | 1311 | /* Any additional pending interrupt? */ |
1128 | if (vgic_dist_irq_get_level(vcpu, vlr.irq)) { | 1312 | if (vgic_dist_irq_get_level(vcpu, vlr.irq)) { |
1129 | vgic_cpu_irq_set(vcpu, vlr.irq); | 1313 | vgic_cpu_irq_set(vcpu, vlr.irq); |
@@ -1133,6 +1317,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) | |||
1133 | vgic_cpu_irq_clear(vcpu, vlr.irq); | 1317 | vgic_cpu_irq_clear(vcpu, vlr.irq); |
1134 | } | 1318 | } |
1135 | 1319 | ||
1320 | spin_unlock(&dist->lock); | ||
1321 | |||
1136 | /* | 1322 | /* |
1137 | * Despite being EOIed, the LR may not have | 1323 | * Despite being EOIed, the LR may not have |
1138 | * been marked as empty. | 1324 | * been marked as empty. |
@@ -1155,10 +1341,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) | |||
1155 | return level_pending; | 1341 | return level_pending; |
1156 | } | 1342 | } |
1157 | 1343 | ||
1158 | /* | 1344 | /* Sync back the VGIC state after a guest run */ |
1159 | * Sync back the VGIC state after a guest run. The distributor lock is | ||
1160 | * needed so we don't get preempted in the middle of the state processing. | ||
1161 | */ | ||
1162 | static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) | 1345 | static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) |
1163 | { | 1346 | { |
1164 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | 1347 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; |
@@ -1205,14 +1388,10 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) | |||
1205 | 1388 | ||
1206 | void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) | 1389 | void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) |
1207 | { | 1390 | { |
1208 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
1209 | |||
1210 | if (!irqchip_in_kernel(vcpu->kvm)) | 1391 | if (!irqchip_in_kernel(vcpu->kvm)) |
1211 | return; | 1392 | return; |
1212 | 1393 | ||
1213 | spin_lock(&dist->lock); | ||
1214 | __kvm_vgic_sync_hwstate(vcpu); | 1394 | __kvm_vgic_sync_hwstate(vcpu); |
1215 | spin_unlock(&dist->lock); | ||
1216 | } | 1395 | } |
1217 | 1396 | ||
1218 | int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) | 1397 | int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) |
@@ -1225,6 +1404,17 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) | |||
1225 | return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); | 1404 | return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); |
1226 | } | 1405 | } |
1227 | 1406 | ||
1407 | int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu) | ||
1408 | { | ||
1409 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
1410 | |||
1411 | if (!irqchip_in_kernel(vcpu->kvm)) | ||
1412 | return 0; | ||
1413 | |||
1414 | return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu); | ||
1415 | } | ||
1416 | |||
1417 | |||
1228 | void vgic_kick_vcpus(struct kvm *kvm) | 1418 | void vgic_kick_vcpus(struct kvm *kvm) |
1229 | { | 1419 | { |
1230 | struct kvm_vcpu *vcpu; | 1420 | struct kvm_vcpu *vcpu; |
@@ -1397,8 +1587,12 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) | |||
1397 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | 1587 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; |
1398 | 1588 | ||
1399 | kfree(vgic_cpu->pending_shared); | 1589 | kfree(vgic_cpu->pending_shared); |
1590 | kfree(vgic_cpu->active_shared); | ||
1591 | kfree(vgic_cpu->pend_act_shared); | ||
1400 | kfree(vgic_cpu->vgic_irq_lr_map); | 1592 | kfree(vgic_cpu->vgic_irq_lr_map); |
1401 | vgic_cpu->pending_shared = NULL; | 1593 | vgic_cpu->pending_shared = NULL; |
1594 | vgic_cpu->active_shared = NULL; | ||
1595 | vgic_cpu->pend_act_shared = NULL; | ||
1402 | vgic_cpu->vgic_irq_lr_map = NULL; | 1596 | vgic_cpu->vgic_irq_lr_map = NULL; |
1403 | } | 1597 | } |
1404 | 1598 | ||
@@ -1408,9 +1602,14 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs) | |||
1408 | 1602 | ||
1409 | int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8; | 1603 | int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8; |
1410 | vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL); | 1604 | vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL); |
1605 | vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL); | ||
1606 | vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL); | ||
1411 | vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL); | 1607 | vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL); |
1412 | 1608 | ||
1413 | if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) { | 1609 | if (!vgic_cpu->pending_shared |
1610 | || !vgic_cpu->active_shared | ||
1611 | || !vgic_cpu->pend_act_shared | ||
1612 | || !vgic_cpu->vgic_irq_lr_map) { | ||
1414 | kvm_vgic_vcpu_destroy(vcpu); | 1613 | kvm_vgic_vcpu_destroy(vcpu); |
1415 | return -ENOMEM; | 1614 | return -ENOMEM; |
1416 | } | 1615 | } |
@@ -1463,10 +1662,12 @@ void kvm_vgic_destroy(struct kvm *kvm) | |||
1463 | kfree(dist->irq_spi_mpidr); | 1662 | kfree(dist->irq_spi_mpidr); |
1464 | kfree(dist->irq_spi_target); | 1663 | kfree(dist->irq_spi_target); |
1465 | kfree(dist->irq_pending_on_cpu); | 1664 | kfree(dist->irq_pending_on_cpu); |
1665 | kfree(dist->irq_active_on_cpu); | ||
1466 | dist->irq_sgi_sources = NULL; | 1666 | dist->irq_sgi_sources = NULL; |
1467 | dist->irq_spi_cpu = NULL; | 1667 | dist->irq_spi_cpu = NULL; |
1468 | dist->irq_spi_target = NULL; | 1668 | dist->irq_spi_target = NULL; |
1469 | dist->irq_pending_on_cpu = NULL; | 1669 | dist->irq_pending_on_cpu = NULL; |
1670 | dist->irq_active_on_cpu = NULL; | ||
1470 | dist->nr_cpus = 0; | 1671 | dist->nr_cpus = 0; |
1471 | } | 1672 | } |
1472 | 1673 | ||
@@ -1502,6 +1703,7 @@ int vgic_init(struct kvm *kvm) | |||
1502 | ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs); | 1703 | ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs); |
1503 | ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs); | 1704 | ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs); |
1504 | ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs); | 1705 | ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs); |
1706 | ret |= vgic_init_bitmap(&dist->irq_active, nr_cpus, nr_irqs); | ||
1505 | ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs); | 1707 | ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs); |
1506 | ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs); | 1708 | ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs); |
1507 | 1709 | ||
@@ -1514,10 +1716,13 @@ int vgic_init(struct kvm *kvm) | |||
1514 | GFP_KERNEL); | 1716 | GFP_KERNEL); |
1515 | dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long), | 1717 | dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long), |
1516 | GFP_KERNEL); | 1718 | GFP_KERNEL); |
1719 | dist->irq_active_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long), | ||
1720 | GFP_KERNEL); | ||
1517 | if (!dist->irq_sgi_sources || | 1721 | if (!dist->irq_sgi_sources || |
1518 | !dist->irq_spi_cpu || | 1722 | !dist->irq_spi_cpu || |
1519 | !dist->irq_spi_target || | 1723 | !dist->irq_spi_target || |
1520 | !dist->irq_pending_on_cpu) { | 1724 | !dist->irq_pending_on_cpu || |
1725 | !dist->irq_active_on_cpu) { | ||
1521 | ret = -ENOMEM; | 1726 | ret = -ENOMEM; |
1522 | goto out; | 1727 | goto out; |
1523 | } | 1728 | } |
@@ -1845,12 +2050,9 @@ int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | |||
1845 | return r; | 2050 | return r; |
1846 | } | 2051 | } |
1847 | 2052 | ||
1848 | int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset) | 2053 | int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset) |
1849 | { | 2054 | { |
1850 | struct kvm_exit_mmio dev_attr_mmio; | 2055 | if (vgic_find_range(ranges, 4, offset)) |
1851 | |||
1852 | dev_attr_mmio.len = 4; | ||
1853 | if (vgic_find_range(ranges, &dev_attr_mmio, offset)) | ||
1854 | return 0; | 2056 | return 0; |
1855 | else | 2057 | else |
1856 | return -ENXIO; | 2058 | return -ENXIO; |
@@ -1883,8 +2085,10 @@ static struct notifier_block vgic_cpu_nb = { | |||
1883 | }; | 2085 | }; |
1884 | 2086 | ||
1885 | static const struct of_device_id vgic_ids[] = { | 2087 | static const struct of_device_id vgic_ids[] = { |
1886 | { .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, }, | 2088 | { .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, }, |
1887 | { .compatible = "arm,gic-v3", .data = vgic_v3_probe, }, | 2089 | { .compatible = "arm,cortex-a7-gic", .data = vgic_v2_probe, }, |
2090 | { .compatible = "arm,gic-400", .data = vgic_v2_probe, }, | ||
2091 | { .compatible = "arm,gic-v3", .data = vgic_v3_probe, }, | ||
1888 | {}, | 2092 | {}, |
1889 | }; | 2093 | }; |
1890 | 2094 | ||
@@ -1932,3 +2136,38 @@ out_free_irq: | |||
1932 | free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus()); | 2136 | free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus()); |
1933 | return ret; | 2137 | return ret; |
1934 | } | 2138 | } |
2139 | |||
2140 | int kvm_irq_map_gsi(struct kvm *kvm, | ||
2141 | struct kvm_kernel_irq_routing_entry *entries, | ||
2142 | int gsi) | ||
2143 | { | ||
2144 | return gsi; | ||
2145 | } | ||
2146 | |||
2147 | int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin) | ||
2148 | { | ||
2149 | return pin; | ||
2150 | } | ||
2151 | |||
2152 | int kvm_set_irq(struct kvm *kvm, int irq_source_id, | ||
2153 | u32 irq, int level, bool line_status) | ||
2154 | { | ||
2155 | unsigned int spi = irq + VGIC_NR_PRIVATE_IRQS; | ||
2156 | |||
2157 | trace_kvm_set_irq(irq, level, irq_source_id); | ||
2158 | |||
2159 | BUG_ON(!vgic_initialized(kvm)); | ||
2160 | |||
2161 | if (spi > kvm->arch.vgic.nr_irqs) | ||
2162 | return -EINVAL; | ||
2163 | return kvm_vgic_inject_irq(kvm, 0, spi, level); | ||
2164 | |||
2165 | } | ||
2166 | |||
2167 | /* MSI not implemented yet */ | ||
2168 | int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, | ||
2169 | struct kvm *kvm, int irq_source_id, | ||
2170 | int level, bool line_status) | ||
2171 | { | ||
2172 | return 0; | ||
2173 | } | ||
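The kvm_irq_map_gsi()/kvm_set_irq() stubs added at the end of vgic.c connect the generic irqfd path to the VGIC by treating the userspace GSI number as an offset into the shared interrupts, so GSI 0 becomes intid 32 on a GIC with 32 private interrupts. A small sketch of that translation, assuming VGIC_NR_PRIVATE_IRQS is 32 as in the VGIC code:

#include <stdio.h>

#define VGIC_NR_PRIVATE_IRQS 32          /* 16 SGIs + 16 PPIs */

/* Mirrors the GSI -> SPI translation performed by kvm_set_irq() above. */
static int gsi_to_spi(unsigned int gsi, unsigned int nr_irqs)
{
	unsigned int spi = gsi + VGIC_NR_PRIVATE_IRQS;

	if (spi > nr_irqs)
		return -1;               /* the kernel returns -EINVAL here */
	return (int)spi;
}

int main(void)
{
	printf("GSI 0  -> intid %d\n", gsi_to_spi(0, 256));
	printf("GSI 27 -> intid %d\n", gsi_to_spi(27, 256));
	return 0;
}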
diff --git a/virt/kvm/arm/vgic.h b/virt/kvm/arm/vgic.h index 1e83bdf5f499..0df74cbb6200 100644 --- a/virt/kvm/arm/vgic.h +++ b/virt/kvm/arm/vgic.h | |||
@@ -20,6 +20,8 @@ | |||
20 | #ifndef __KVM_VGIC_H__ | 20 | #ifndef __KVM_VGIC_H__ |
21 | #define __KVM_VGIC_H__ | 21 | #define __KVM_VGIC_H__ |
22 | 22 | ||
23 | #include <kvm/iodev.h> | ||
24 | |||
23 | #define VGIC_ADDR_UNDEF (-1) | 25 | #define VGIC_ADDR_UNDEF (-1) |
24 | #define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF) | 26 | #define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF) |
25 | 27 | ||
@@ -57,6 +59,14 @@ void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); | |||
57 | bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq); | 59 | bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq); |
58 | void vgic_unqueue_irqs(struct kvm_vcpu *vcpu); | 60 | void vgic_unqueue_irqs(struct kvm_vcpu *vcpu); |
59 | 61 | ||
62 | struct kvm_exit_mmio { | ||
63 | phys_addr_t phys_addr; | ||
64 | void *data; | ||
65 | u32 len; | ||
66 | bool is_write; | ||
67 | void *private; | ||
68 | }; | ||
69 | |||
60 | void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg, | 70 | void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg, |
61 | phys_addr_t offset, int mode); | 71 | phys_addr_t offset, int mode); |
62 | bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, | 72 | bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, |
@@ -74,7 +84,7 @@ void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value) | |||
74 | *((u32 *)mmio->data) = cpu_to_le32(value) & mask; | 84 | *((u32 *)mmio->data) = cpu_to_le32(value) & mask; |
75 | } | 85 | } |
76 | 86 | ||
77 | struct kvm_mmio_range { | 87 | struct vgic_io_range { |
78 | phys_addr_t base; | 88 | phys_addr_t base; |
79 | unsigned long len; | 89 | unsigned long len; |
80 | int bits_per_irq; | 90 | int bits_per_irq; |
@@ -82,6 +92,11 @@ struct kvm_mmio_range { | |||
82 | phys_addr_t offset); | 92 | phys_addr_t offset); |
83 | }; | 93 | }; |
84 | 94 | ||
95 | int vgic_register_kvm_io_dev(struct kvm *kvm, gpa_t base, int len, | ||
96 | const struct vgic_io_range *ranges, | ||
97 | int redist_id, | ||
98 | struct vgic_io_device *iodev); | ||
99 | |||
85 | static inline bool is_in_range(phys_addr_t addr, unsigned long len, | 100 | static inline bool is_in_range(phys_addr_t addr, unsigned long len, |
86 | phys_addr_t baseaddr, unsigned long size) | 101 | phys_addr_t baseaddr, unsigned long size) |
87 | { | 102 | { |
@@ -89,14 +104,8 @@ static inline bool is_in_range(phys_addr_t addr, unsigned long len, | |||
89 | } | 104 | } |
90 | 105 | ||
91 | const | 106 | const |
92 | struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges, | 107 | struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges, |
93 | struct kvm_exit_mmio *mmio, | 108 | int len, gpa_t offset); |
94 | phys_addr_t offset); | ||
95 | |||
96 | bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
97 | struct kvm_exit_mmio *mmio, | ||
98 | const struct kvm_mmio_range *ranges, | ||
99 | unsigned long mmio_base); | ||
100 | 109 | ||
101 | bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, | 110 | bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, |
102 | phys_addr_t offset, int vcpu_id, int access); | 111 | phys_addr_t offset, int vcpu_id, int access); |
@@ -107,12 +116,20 @@ bool vgic_handle_set_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, | |||
107 | bool vgic_handle_clear_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, | 116 | bool vgic_handle_clear_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, |
108 | phys_addr_t offset, int vcpu_id); | 117 | phys_addr_t offset, int vcpu_id); |
109 | 118 | ||
119 | bool vgic_handle_set_active_reg(struct kvm *kvm, | ||
120 | struct kvm_exit_mmio *mmio, | ||
121 | phys_addr_t offset, int vcpu_id); | ||
122 | |||
123 | bool vgic_handle_clear_active_reg(struct kvm *kvm, | ||
124 | struct kvm_exit_mmio *mmio, | ||
125 | phys_addr_t offset, int vcpu_id); | ||
126 | |||
110 | bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio, | 127 | bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio, |
111 | phys_addr_t offset); | 128 | phys_addr_t offset); |
112 | 129 | ||
113 | void vgic_kick_vcpus(struct kvm *kvm); | 130 | void vgic_kick_vcpus(struct kvm *kvm); |
114 | 131 | ||
115 | int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset); | 132 | int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset); |
116 | int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr); | 133 | int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr); |
117 | int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr); | 134 | int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr); |
118 | 135 | ||
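With struct kvm_exit_mmio and struct vgic_io_range now living in vgic.h, a register frame is described by an array of {base, len, handler} entries terminated by a zero-length sentinel, and vgic_find_range() walks it until the access fits entirely inside one entry. The standalone model below reproduces that lookup; the table contents and the name field (standing in for the handler pointer) are invented for the example.

#include <stdint.h>
#include <stdio.h>

struct ex_io_range {
	uint64_t base;
	unsigned long len;
	const char *name;                /* stand-in for the handle_mmio callback */
};

/* Same termination and containment test as vgic_find_range(). */
static const struct ex_io_range *find_range(const struct ex_io_range *r,
					    int len, uint64_t offset)
{
	while (r->len) {
		if (offset >= r->base && offset + len <= r->base + r->len)
			return r;
		r++;
	}
	return NULL;
}

int main(void)
{
	static const struct ex_io_range table[] = {
		{ 0x000, 0x04, "CTLR"  },
		{ 0x008, 0x08, "TYPER" },
		{ 0 },                   /* zero-length sentinel ends the table */
	};
	const struct ex_io_range *hit = find_range(table, 4, 0x008);

	printf("offset 0x8 -> %s\n", hit ? hit->name : "unhandled");
	return 0;
}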
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c index 00d86427af0f..571c1ce37d15 100644 --- a/virt/kvm/coalesced_mmio.c +++ b/virt/kvm/coalesced_mmio.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * | 8 | * |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include "iodev.h" | 11 | #include <kvm/iodev.h> |
12 | 12 | ||
13 | #include <linux/kvm_host.h> | 13 | #include <linux/kvm_host.h> |
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
@@ -60,8 +60,9 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev) | |||
60 | return 1; | 60 | return 1; |
61 | } | 61 | } |
62 | 62 | ||
63 | static int coalesced_mmio_write(struct kvm_io_device *this, | 63 | static int coalesced_mmio_write(struct kvm_vcpu *vcpu, |
64 | gpa_t addr, int len, const void *val) | 64 | struct kvm_io_device *this, gpa_t addr, |
65 | int len, const void *val) | ||
65 | { | 66 | { |
66 | struct kvm_coalesced_mmio_dev *dev = to_mmio(this); | 67 | struct kvm_coalesced_mmio_dev *dev = to_mmio(this); |
67 | struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring; | 68 | struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring; |
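The coalesced_mmio_write() change above reflects a tree-wide adjustment: kvm_io_device read/write callbacks now receive the accessing VCPU as their first argument, which is what lets the VGIC dispatcher recover per-VCPU context without a global lookup. Below is a hedged sketch of an ops table using that callback shape; the ex_* types are invented stand-ins, not the kernel's structures.

#include <stdio.h>

struct ex_vcpu { int id; };
struct ex_iodev;

/* New-style callback: the accessing VCPU is passed as the first argument. */
struct ex_iodev_ops {
	int (*write)(struct ex_vcpu *vcpu, struct ex_iodev *dev,
		     unsigned long addr, int len, const void *val);
};

struct ex_iodev { const struct ex_iodev_ops *ops; };

static int demo_write(struct ex_vcpu *vcpu, struct ex_iodev *dev,
		      unsigned long addr, int len, const void *val)
{
	(void)dev;
	(void)val;
	printf("vcpu%d wrote %d bytes at 0x%lx\n", vcpu->id, len, addr);
	return 0;
}

static const struct ex_iodev_ops demo_ops = { .write = demo_write };

int main(void)
{
	struct ex_vcpu vcpu = { .id = 2 };
	struct ex_iodev dev = { .ops = &demo_ops };
	unsigned int data = 0xdeadbeefu;

	return dev.ops->write(&vcpu, &dev, 0x1000, (int)sizeof(data), &data);
}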
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index 148b2392c762..9ff4193dfa49 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c | |||
@@ -36,7 +36,7 @@ | |||
36 | #include <linux/seqlock.h> | 36 | #include <linux/seqlock.h> |
37 | #include <trace/events/kvm.h> | 37 | #include <trace/events/kvm.h> |
38 | 38 | ||
39 | #include "iodev.h" | 39 | #include <kvm/iodev.h> |
40 | 40 | ||
41 | #ifdef CONFIG_HAVE_KVM_IRQFD | 41 | #ifdef CONFIG_HAVE_KVM_IRQFD |
42 | /* | 42 | /* |
@@ -311,6 +311,9 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) | |||
311 | unsigned int events; | 311 | unsigned int events; |
312 | int idx; | 312 | int idx; |
313 | 313 | ||
314 | if (!kvm_arch_intc_initialized(kvm)) | ||
315 | return -EAGAIN; | ||
316 | |||
314 | irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL); | 317 | irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL); |
315 | if (!irqfd) | 318 | if (!irqfd) |
316 | return -ENOMEM; | 319 | return -ENOMEM; |
@@ -712,8 +715,8 @@ ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val) | |||
712 | 715 | ||
713 | /* MMIO/PIO writes trigger an event if the addr/val match */ | 716 | /* MMIO/PIO writes trigger an event if the addr/val match */ |
714 | static int | 717 | static int |
715 | ioeventfd_write(struct kvm_io_device *this, gpa_t addr, int len, | 718 | ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr, |
716 | const void *val) | 719 | int len, const void *val) |
717 | { | 720 | { |
718 | struct _ioeventfd *p = to_ioeventfd(this); | 721 | struct _ioeventfd *p = to_ioeventfd(this); |
719 | 722 | ||
diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c index 7f256f31df10..1d56a901e791 100644 --- a/virt/kvm/irqchip.c +++ b/virt/kvm/irqchip.c | |||
@@ -105,7 +105,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, | |||
105 | i = kvm_irq_map_gsi(kvm, irq_set, irq); | 105 | i = kvm_irq_map_gsi(kvm, irq_set, irq); |
106 | srcu_read_unlock(&kvm->irq_srcu, idx); | 106 | srcu_read_unlock(&kvm->irq_srcu, idx); |
107 | 107 | ||
108 | while(i--) { | 108 | while (i--) { |
109 | int r; | 109 | int r; |
110 | r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level, | 110 | r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level, |
111 | line_status); | 111 | line_status); |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index cc6a25d95fbf..d3fc9399062a 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -16,7 +16,7 @@ | |||
16 | * | 16 | * |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include "iodev.h" | 19 | #include <kvm/iodev.h> |
20 | 20 | ||
21 | #include <linux/kvm_host.h> | 21 | #include <linux/kvm_host.h> |
22 | #include <linux/kvm.h> | 22 | #include <linux/kvm.h> |
@@ -66,13 +66,13 @@ | |||
66 | MODULE_AUTHOR("Qumranet"); | 66 | MODULE_AUTHOR("Qumranet"); |
67 | MODULE_LICENSE("GPL"); | 67 | MODULE_LICENSE("GPL"); |
68 | 68 | ||
69 | unsigned int halt_poll_ns = 0; | 69 | static unsigned int halt_poll_ns; |
70 | module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR); | 70 | module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR); |
71 | 71 | ||
72 | /* | 72 | /* |
73 | * Ordering of locks: | 73 | * Ordering of locks: |
74 | * | 74 | * |
75 | * kvm->lock --> kvm->slots_lock --> kvm->irq_lock | 75 | * kvm->lock --> kvm->slots_lock --> kvm->irq_lock |
76 | */ | 76 | */ |
77 | 77 | ||
78 | DEFINE_SPINLOCK(kvm_lock); | 78 | DEFINE_SPINLOCK(kvm_lock); |
@@ -80,7 +80,7 @@ static DEFINE_RAW_SPINLOCK(kvm_count_lock); | |||
80 | LIST_HEAD(vm_list); | 80 | LIST_HEAD(vm_list); |
81 | 81 | ||
82 | static cpumask_var_t cpus_hardware_enabled; | 82 | static cpumask_var_t cpus_hardware_enabled; |
83 | static int kvm_usage_count = 0; | 83 | static int kvm_usage_count; |
84 | static atomic_t hardware_enable_failed; | 84 | static atomic_t hardware_enable_failed; |
85 | 85 | ||
86 | struct kmem_cache *kvm_vcpu_cache; | 86 | struct kmem_cache *kvm_vcpu_cache; |
@@ -539,20 +539,12 @@ void *kvm_kvzalloc(unsigned long size) | |||
539 | return kzalloc(size, GFP_KERNEL); | 539 | return kzalloc(size, GFP_KERNEL); |
540 | } | 540 | } |
541 | 541 | ||
542 | void kvm_kvfree(const void *addr) | ||
543 | { | ||
544 | if (is_vmalloc_addr(addr)) | ||
545 | vfree(addr); | ||
546 | else | ||
547 | kfree(addr); | ||
548 | } | ||
549 | |||
550 | static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) | 542 | static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) |
551 | { | 543 | { |
552 | if (!memslot->dirty_bitmap) | 544 | if (!memslot->dirty_bitmap) |
553 | return; | 545 | return; |
554 | 546 | ||
555 | kvm_kvfree(memslot->dirty_bitmap); | 547 | kvfree(memslot->dirty_bitmap); |
556 | memslot->dirty_bitmap = NULL; | 548 | memslot->dirty_bitmap = NULL; |
557 | } | 549 | } |
558 | 550 | ||
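The private kvm_kvfree() helper is dropped in favour of the generic kvfree(), which performs the same vmalloc-or-kmalloc dispatch for pointers obtained from kvm_kvzalloc(). The dispatch idea, as a self-contained toy where a static buffer stands in for the vmalloc area:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static char big_area[4096];          /* stands in for the vmalloc range */

    static bool is_in_big_area(const void *addr)
    {
            const char *p = addr;

            return p >= big_area && p < big_area + sizeof(big_area);
    }

    /* one free routine, dispatching on how the memory was allocated */
    static void toy_kvfree(void *addr)
    {
            if (!addr)
                    return;
            if (is_in_big_area(addr))
                    printf("vfree(%p)\n", addr);   /* nothing to release in the toy */
            else
                    free(addr);                    /* kfree() in the kernel */
    }

    int main(void)
    {
            toy_kvfree(malloc(32));
            toy_kvfree(big_area);
            return 0;
    }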
@@ -888,8 +880,8 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
888 | * or moved, memslot will be created. | 880 | * or moved, memslot will be created. |
889 | * | 881 | * |
890 | * validation of sp->gfn happens in: | 882 | * validation of sp->gfn happens in: |
891 | * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) | 883 | * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) |
892 | * - kvm_is_visible_gfn (mmu_check_roots) | 884 | * - kvm_is_visible_gfn (mmu_check_roots) |
893 | */ | 885 | */ |
894 | kvm_arch_flush_shadow_memslot(kvm, slot); | 886 | kvm_arch_flush_shadow_memslot(kvm, slot); |
895 | 887 | ||
@@ -1061,9 +1053,11 @@ int kvm_get_dirty_log_protect(struct kvm *kvm, | |||
1061 | mask = xchg(&dirty_bitmap[i], 0); | 1053 | mask = xchg(&dirty_bitmap[i], 0); |
1062 | dirty_bitmap_buffer[i] = mask; | 1054 | dirty_bitmap_buffer[i] = mask; |
1063 | 1055 | ||
1064 | offset = i * BITS_PER_LONG; | 1056 | if (mask) { |
1065 | kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, | 1057 | offset = i * BITS_PER_LONG; |
1066 | mask); | 1058 | kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, |
1059 | offset, mask); | ||
1060 | } | ||
1067 | } | 1061 | } |
1068 | 1062 | ||
1069 | spin_unlock(&kvm->mmu_lock); | 1063 | spin_unlock(&kvm->mmu_lock); |
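kvm_get_dirty_log_protect() now invokes the arch hook only for bitmap words that actually contain dirty bits; an all-zero word is copied to the snapshot and otherwise skipped. A self-contained sketch of that per-word loop, with a stand-in for kvm_arch_mmu_enable_log_dirty_pt_masked():

    #include <stddef.h>
    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    static void protect_masked(unsigned long offset, unsigned long mask)
    {
            printf("re-protect from page %lu, mask 0x%lx\n", offset, mask);
    }

    static void harvest_dirty(unsigned long *bitmap, unsigned long *snapshot,
                              size_t words)
    {
            for (size_t i = 0; i < words; i++) {
                    /* grab-and-clear one word of dirty bits */
                    unsigned long mask = __atomic_exchange_n(&bitmap[i], 0,
                                                             __ATOMIC_SEQ_CST);

                    snapshot[i] = mask;
                    if (mask)       /* nothing dirty in this word: skip the hook */
                            protect_masked(i * BITS_PER_LONG, mask);
            }
    }

    int main(void)
    {
            unsigned long bitmap[4] = { 0, 0x5, 0, 0x80 };
            unsigned long snapshot[4];

            harvest_dirty(bitmap, snapshot, 4);
            return 0;
    }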
@@ -1193,16 +1187,6 @@ unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) | |||
1193 | return gfn_to_hva_memslot_prot(slot, gfn, writable); | 1187 | return gfn_to_hva_memslot_prot(slot, gfn, writable); |
1194 | } | 1188 | } |
1195 | 1189 | ||
1196 | static int kvm_read_hva(void *data, void __user *hva, int len) | ||
1197 | { | ||
1198 | return __copy_from_user(data, hva, len); | ||
1199 | } | ||
1200 | |||
1201 | static int kvm_read_hva_atomic(void *data, void __user *hva, int len) | ||
1202 | { | ||
1203 | return __copy_from_user_inatomic(data, hva, len); | ||
1204 | } | ||
1205 | |||
1206 | static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm, | 1190 | static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm, |
1207 | unsigned long start, int write, struct page **page) | 1191 | unsigned long start, int write, struct page **page) |
1208 | { | 1192 | { |
@@ -1481,7 +1465,6 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) | |||
1481 | 1465 | ||
1482 | return kvm_pfn_to_page(pfn); | 1466 | return kvm_pfn_to_page(pfn); |
1483 | } | 1467 | } |
1484 | |||
1485 | EXPORT_SYMBOL_GPL(gfn_to_page); | 1468 | EXPORT_SYMBOL_GPL(gfn_to_page); |
1486 | 1469 | ||
1487 | void kvm_release_page_clean(struct page *page) | 1470 | void kvm_release_page_clean(struct page *page) |
@@ -1517,6 +1500,7 @@ void kvm_set_pfn_dirty(pfn_t pfn) | |||
1517 | { | 1500 | { |
1518 | if (!kvm_is_reserved_pfn(pfn)) { | 1501 | if (!kvm_is_reserved_pfn(pfn)) { |
1519 | struct page *page = pfn_to_page(pfn); | 1502 | struct page *page = pfn_to_page(pfn); |
1503 | |||
1520 | if (!PageReserved(page)) | 1504 | if (!PageReserved(page)) |
1521 | SetPageDirty(page); | 1505 | SetPageDirty(page); |
1522 | } | 1506 | } |
@@ -1554,7 +1538,7 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, | |||
1554 | addr = gfn_to_hva_prot(kvm, gfn, NULL); | 1538 | addr = gfn_to_hva_prot(kvm, gfn, NULL); |
1555 | if (kvm_is_error_hva(addr)) | 1539 | if (kvm_is_error_hva(addr)) |
1556 | return -EFAULT; | 1540 | return -EFAULT; |
1557 | r = kvm_read_hva(data, (void __user *)addr + offset, len); | 1541 | r = __copy_from_user(data, (void __user *)addr + offset, len); |
1558 | if (r) | 1542 | if (r) |
1559 | return -EFAULT; | 1543 | return -EFAULT; |
1560 | return 0; | 1544 | return 0; |
@@ -1593,7 +1577,7 @@ int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, | |||
1593 | if (kvm_is_error_hva(addr)) | 1577 | if (kvm_is_error_hva(addr)) |
1594 | return -EFAULT; | 1578 | return -EFAULT; |
1595 | pagefault_disable(); | 1579 | pagefault_disable(); |
1596 | r = kvm_read_hva_atomic(data, (void __user *)addr + offset, len); | 1580 | r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); |
1597 | pagefault_enable(); | 1581 | pagefault_enable(); |
1598 | if (r) | 1582 | if (r) |
1599 | return -EFAULT; | 1583 | return -EFAULT; |
@@ -1653,8 +1637,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, | |||
1653 | ghc->generation = slots->generation; | 1637 | ghc->generation = slots->generation; |
1654 | ghc->len = len; | 1638 | ghc->len = len; |
1655 | ghc->memslot = gfn_to_memslot(kvm, start_gfn); | 1639 | ghc->memslot = gfn_to_memslot(kvm, start_gfn); |
1656 | ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail); | 1640 | ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL); |
1657 | if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) { | 1641 | if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) { |
1658 | ghc->hva += offset; | 1642 | ghc->hva += offset; |
1659 | } else { | 1643 | } else { |
1660 | /* | 1644 | /* |
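kvm_gfn_to_hva_cache_init() now takes the fast path only when the cached range fits entirely within its first page (nr_pages_needed <= 1); anything that crosses a page boundary is left to per-page translation at access time. Assuming the usual arithmetic for a range of len bytes starting offset bytes into a page, the page count works out as in this sketch (PAGE_SIZE and the helper name are illustrative, not the kernel code):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static unsigned long pages_needed(unsigned long offset, unsigned long len)
    {
            return (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
    }

    int main(void)
    {
            /* fits in one page: the cached hva can be used directly */
            printf("%lu\n", pages_needed(100, 200));   /* prints 1 */
            /* crosses a page boundary: fall back to per-page lookups */
            printf("%lu\n", pages_needed(4000, 200));  /* prints 2 */
            return 0;
    }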
@@ -1742,7 +1726,7 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) | |||
1742 | int offset = offset_in_page(gpa); | 1726 | int offset = offset_in_page(gpa); |
1743 | int ret; | 1727 | int ret; |
1744 | 1728 | ||
1745 | while ((seg = next_segment(len, offset)) != 0) { | 1729 | while ((seg = next_segment(len, offset)) != 0) { |
1746 | ret = kvm_clear_guest_page(kvm, gfn, offset, seg); | 1730 | ret = kvm_clear_guest_page(kvm, gfn, offset, seg); |
1747 | if (ret < 0) | 1731 | if (ret < 0) |
1748 | return ret; | 1732 | return ret; |
@@ -1800,6 +1784,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) | |||
1800 | start = cur = ktime_get(); | 1784 | start = cur = ktime_get(); |
1801 | if (halt_poll_ns) { | 1785 | if (halt_poll_ns) { |
1802 | ktime_t stop = ktime_add_ns(ktime_get(), halt_poll_ns); | 1786 | ktime_t stop = ktime_add_ns(ktime_get(), halt_poll_ns); |
1787 | |||
1803 | do { | 1788 | do { |
1804 | /* | 1789 | /* |
1805 | * This sets KVM_REQ_UNHALT if an interrupt | 1790 | * This sets KVM_REQ_UNHALT if an interrupt |
@@ -2118,7 +2103,7 @@ static long kvm_vcpu_ioctl(struct file *filp, | |||
2118 | * Special cases: vcpu ioctls that are asynchronous to vcpu execution, | 2103 | * Special cases: vcpu ioctls that are asynchronous to vcpu execution, |
2119 | * so vcpu_load() would break it. | 2104 | * so vcpu_load() would break it. |
2120 | */ | 2105 | */ |
2121 | if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT) | 2106 | if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_S390_IRQ || ioctl == KVM_INTERRUPT) |
2122 | return kvm_arch_vcpu_ioctl(filp, ioctl, arg); | 2107 | return kvm_arch_vcpu_ioctl(filp, ioctl, arg); |
2123 | #endif | 2108 | #endif |
2124 | 2109 | ||
@@ -2135,6 +2120,7 @@ static long kvm_vcpu_ioctl(struct file *filp, | |||
2135 | /* The thread running this VCPU changed. */ | 2120 | /* The thread running this VCPU changed. */ |
2136 | struct pid *oldpid = vcpu->pid; | 2121 | struct pid *oldpid = vcpu->pid; |
2137 | struct pid *newpid = get_task_pid(current, PIDTYPE_PID); | 2122 | struct pid *newpid = get_task_pid(current, PIDTYPE_PID); |
2123 | |||
2138 | rcu_assign_pointer(vcpu->pid, newpid); | 2124 | rcu_assign_pointer(vcpu->pid, newpid); |
2139 | if (oldpid) | 2125 | if (oldpid) |
2140 | synchronize_rcu(); | 2126 | synchronize_rcu(); |
@@ -2205,7 +2191,7 @@ out_free1: | |||
2205 | if (r) | 2191 | if (r) |
2206 | goto out; | 2192 | goto out; |
2207 | r = -EFAULT; | 2193 | r = -EFAULT; |
2208 | if (copy_to_user(argp, &mp_state, sizeof mp_state)) | 2194 | if (copy_to_user(argp, &mp_state, sizeof(mp_state))) |
2209 | goto out; | 2195 | goto out; |
2210 | r = 0; | 2196 | r = 0; |
2211 | break; | 2197 | break; |
@@ -2214,7 +2200,7 @@ out_free1: | |||
2214 | struct kvm_mp_state mp_state; | 2200 | struct kvm_mp_state mp_state; |
2215 | 2201 | ||
2216 | r = -EFAULT; | 2202 | r = -EFAULT; |
2217 | if (copy_from_user(&mp_state, argp, sizeof mp_state)) | 2203 | if (copy_from_user(&mp_state, argp, sizeof(mp_state))) |
2218 | goto out; | 2204 | goto out; |
2219 | r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); | 2205 | r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); |
2220 | break; | 2206 | break; |
@@ -2223,13 +2209,13 @@ out_free1: | |||
2223 | struct kvm_translation tr; | 2209 | struct kvm_translation tr; |
2224 | 2210 | ||
2225 | r = -EFAULT; | 2211 | r = -EFAULT; |
2226 | if (copy_from_user(&tr, argp, sizeof tr)) | 2212 | if (copy_from_user(&tr, argp, sizeof(tr))) |
2227 | goto out; | 2213 | goto out; |
2228 | r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); | 2214 | r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); |
2229 | if (r) | 2215 | if (r) |
2230 | goto out; | 2216 | goto out; |
2231 | r = -EFAULT; | 2217 | r = -EFAULT; |
2232 | if (copy_to_user(argp, &tr, sizeof tr)) | 2218 | if (copy_to_user(argp, &tr, sizeof(tr))) |
2233 | goto out; | 2219 | goto out; |
2234 | r = 0; | 2220 | r = 0; |
2235 | break; | 2221 | break; |
@@ -2238,7 +2224,7 @@ out_free1: | |||
2238 | struct kvm_guest_debug dbg; | 2224 | struct kvm_guest_debug dbg; |
2239 | 2225 | ||
2240 | r = -EFAULT; | 2226 | r = -EFAULT; |
2241 | if (copy_from_user(&dbg, argp, sizeof dbg)) | 2227 | if (copy_from_user(&dbg, argp, sizeof(dbg))) |
2242 | goto out; | 2228 | goto out; |
2243 | r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); | 2229 | r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); |
2244 | break; | 2230 | break; |
@@ -2252,14 +2238,14 @@ out_free1: | |||
2252 | if (argp) { | 2238 | if (argp) { |
2253 | r = -EFAULT; | 2239 | r = -EFAULT; |
2254 | if (copy_from_user(&kvm_sigmask, argp, | 2240 | if (copy_from_user(&kvm_sigmask, argp, |
2255 | sizeof kvm_sigmask)) | 2241 | sizeof(kvm_sigmask))) |
2256 | goto out; | 2242 | goto out; |
2257 | r = -EINVAL; | 2243 | r = -EINVAL; |
2258 | if (kvm_sigmask.len != sizeof sigset) | 2244 | if (kvm_sigmask.len != sizeof(sigset)) |
2259 | goto out; | 2245 | goto out; |
2260 | r = -EFAULT; | 2246 | r = -EFAULT; |
2261 | if (copy_from_user(&sigset, sigmask_arg->sigset, | 2247 | if (copy_from_user(&sigset, sigmask_arg->sigset, |
2262 | sizeof sigset)) | 2248 | sizeof(sigset))) |
2263 | goto out; | 2249 | goto out; |
2264 | p = &sigset; | 2250 | p = &sigset; |
2265 | } | 2251 | } |
@@ -2321,14 +2307,14 @@ static long kvm_vcpu_compat_ioctl(struct file *filp, | |||
2321 | if (argp) { | 2307 | if (argp) { |
2322 | r = -EFAULT; | 2308 | r = -EFAULT; |
2323 | if (copy_from_user(&kvm_sigmask, argp, | 2309 | if (copy_from_user(&kvm_sigmask, argp, |
2324 | sizeof kvm_sigmask)) | 2310 | sizeof(kvm_sigmask))) |
2325 | goto out; | 2311 | goto out; |
2326 | r = -EINVAL; | 2312 | r = -EINVAL; |
2327 | if (kvm_sigmask.len != sizeof csigset) | 2313 | if (kvm_sigmask.len != sizeof(csigset)) |
2328 | goto out; | 2314 | goto out; |
2329 | r = -EFAULT; | 2315 | r = -EFAULT; |
2330 | if (copy_from_user(&csigset, sigmask_arg->sigset, | 2316 | if (copy_from_user(&csigset, sigmask_arg->sigset, |
2331 | sizeof csigset)) | 2317 | sizeof(csigset))) |
2332 | goto out; | 2318 | goto out; |
2333 | sigset_from_compat(&sigset, &csigset); | 2319 | sigset_from_compat(&sigset, &csigset); |
2334 | r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); | 2320 | r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); |
@@ -2525,7 +2511,7 @@ static long kvm_vm_ioctl(struct file *filp, | |||
2525 | 2511 | ||
2526 | r = -EFAULT; | 2512 | r = -EFAULT; |
2527 | if (copy_from_user(&kvm_userspace_mem, argp, | 2513 | if (copy_from_user(&kvm_userspace_mem, argp, |
2528 | sizeof kvm_userspace_mem)) | 2514 | sizeof(kvm_userspace_mem))) |
2529 | goto out; | 2515 | goto out; |
2530 | 2516 | ||
2531 | r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); | 2517 | r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); |
@@ -2535,7 +2521,7 @@ static long kvm_vm_ioctl(struct file *filp, | |||
2535 | struct kvm_dirty_log log; | 2521 | struct kvm_dirty_log log; |
2536 | 2522 | ||
2537 | r = -EFAULT; | 2523 | r = -EFAULT; |
2538 | if (copy_from_user(&log, argp, sizeof log)) | 2524 | if (copy_from_user(&log, argp, sizeof(log))) |
2539 | goto out; | 2525 | goto out; |
2540 | r = kvm_vm_ioctl_get_dirty_log(kvm, &log); | 2526 | r = kvm_vm_ioctl_get_dirty_log(kvm, &log); |
2541 | break; | 2527 | break; |
@@ -2543,16 +2529,18 @@ static long kvm_vm_ioctl(struct file *filp, | |||
2543 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | 2529 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET |
2544 | case KVM_REGISTER_COALESCED_MMIO: { | 2530 | case KVM_REGISTER_COALESCED_MMIO: { |
2545 | struct kvm_coalesced_mmio_zone zone; | 2531 | struct kvm_coalesced_mmio_zone zone; |
2532 | |||
2546 | r = -EFAULT; | 2533 | r = -EFAULT; |
2547 | if (copy_from_user(&zone, argp, sizeof zone)) | 2534 | if (copy_from_user(&zone, argp, sizeof(zone))) |
2548 | goto out; | 2535 | goto out; |
2549 | r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); | 2536 | r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); |
2550 | break; | 2537 | break; |
2551 | } | 2538 | } |
2552 | case KVM_UNREGISTER_COALESCED_MMIO: { | 2539 | case KVM_UNREGISTER_COALESCED_MMIO: { |
2553 | struct kvm_coalesced_mmio_zone zone; | 2540 | struct kvm_coalesced_mmio_zone zone; |
2541 | |||
2554 | r = -EFAULT; | 2542 | r = -EFAULT; |
2555 | if (copy_from_user(&zone, argp, sizeof zone)) | 2543 | if (copy_from_user(&zone, argp, sizeof(zone))) |
2556 | goto out; | 2544 | goto out; |
2557 | r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); | 2545 | r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); |
2558 | break; | 2546 | break; |
@@ -2562,7 +2550,7 @@ static long kvm_vm_ioctl(struct file *filp, | |||
2562 | struct kvm_irqfd data; | 2550 | struct kvm_irqfd data; |
2563 | 2551 | ||
2564 | r = -EFAULT; | 2552 | r = -EFAULT; |
2565 | if (copy_from_user(&data, argp, sizeof data)) | 2553 | if (copy_from_user(&data, argp, sizeof(data))) |
2566 | goto out; | 2554 | goto out; |
2567 | r = kvm_irqfd(kvm, &data); | 2555 | r = kvm_irqfd(kvm, &data); |
2568 | break; | 2556 | break; |
@@ -2571,7 +2559,7 @@ static long kvm_vm_ioctl(struct file *filp, | |||
2571 | struct kvm_ioeventfd data; | 2559 | struct kvm_ioeventfd data; |
2572 | 2560 | ||
2573 | r = -EFAULT; | 2561 | r = -EFAULT; |
2574 | if (copy_from_user(&data, argp, sizeof data)) | 2562 | if (copy_from_user(&data, argp, sizeof(data))) |
2575 | goto out; | 2563 | goto out; |
2576 | r = kvm_ioeventfd(kvm, &data); | 2564 | r = kvm_ioeventfd(kvm, &data); |
2577 | break; | 2565 | break; |
@@ -2592,7 +2580,7 @@ static long kvm_vm_ioctl(struct file *filp, | |||
2592 | struct kvm_msi msi; | 2580 | struct kvm_msi msi; |
2593 | 2581 | ||
2594 | r = -EFAULT; | 2582 | r = -EFAULT; |
2595 | if (copy_from_user(&msi, argp, sizeof msi)) | 2583 | if (copy_from_user(&msi, argp, sizeof(msi))) |
2596 | goto out; | 2584 | goto out; |
2597 | r = kvm_send_userspace_msi(kvm, &msi); | 2585 | r = kvm_send_userspace_msi(kvm, &msi); |
2598 | break; | 2586 | break; |
@@ -2604,7 +2592,7 @@ static long kvm_vm_ioctl(struct file *filp, | |||
2604 | struct kvm_irq_level irq_event; | 2592 | struct kvm_irq_level irq_event; |
2605 | 2593 | ||
2606 | r = -EFAULT; | 2594 | r = -EFAULT; |
2607 | if (copy_from_user(&irq_event, argp, sizeof irq_event)) | 2595 | if (copy_from_user(&irq_event, argp, sizeof(irq_event))) |
2608 | goto out; | 2596 | goto out; |
2609 | 2597 | ||
2610 | r = kvm_vm_ioctl_irq_line(kvm, &irq_event, | 2598 | r = kvm_vm_ioctl_irq_line(kvm, &irq_event, |
@@ -2614,7 +2602,7 @@ static long kvm_vm_ioctl(struct file *filp, | |||
2614 | 2602 | ||
2615 | r = -EFAULT; | 2603 | r = -EFAULT; |
2616 | if (ioctl == KVM_IRQ_LINE_STATUS) { | 2604 | if (ioctl == KVM_IRQ_LINE_STATUS) { |
2617 | if (copy_to_user(argp, &irq_event, sizeof irq_event)) | 2605 | if (copy_to_user(argp, &irq_event, sizeof(irq_event))) |
2618 | goto out; | 2606 | goto out; |
2619 | } | 2607 | } |
2620 | 2608 | ||
@@ -2647,7 +2635,7 @@ static long kvm_vm_ioctl(struct file *filp, | |||
2647 | goto out_free_irq_routing; | 2635 | goto out_free_irq_routing; |
2648 | r = kvm_set_irq_routing(kvm, entries, routing.nr, | 2636 | r = kvm_set_irq_routing(kvm, entries, routing.nr, |
2649 | routing.flags); | 2637 | routing.flags); |
2650 | out_free_irq_routing: | 2638 | out_free_irq_routing: |
2651 | vfree(entries); | 2639 | vfree(entries); |
2652 | break; | 2640 | break; |
2653 | } | 2641 | } |
@@ -2822,8 +2810,7 @@ static void hardware_enable_nolock(void *junk) | |||
2822 | if (r) { | 2810 | if (r) { |
2823 | cpumask_clear_cpu(cpu, cpus_hardware_enabled); | 2811 | cpumask_clear_cpu(cpu, cpus_hardware_enabled); |
2824 | atomic_inc(&hardware_enable_failed); | 2812 | atomic_inc(&hardware_enable_failed); |
2825 | printk(KERN_INFO "kvm: enabling virtualization on " | 2813 | pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu); |
2826 | "CPU%d failed\n", cpu); | ||
2827 | } | 2814 | } |
2828 | } | 2815 | } |
2829 | 2816 | ||
@@ -2899,12 +2886,12 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, | |||
2899 | val &= ~CPU_TASKS_FROZEN; | 2886 | val &= ~CPU_TASKS_FROZEN; |
2900 | switch (val) { | 2887 | switch (val) { |
2901 | case CPU_DYING: | 2888 | case CPU_DYING: |
2902 | printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", | 2889 | pr_info("kvm: disabling virtualization on CPU%d\n", |
2903 | cpu); | 2890 | cpu); |
2904 | hardware_disable(); | 2891 | hardware_disable(); |
2905 | break; | 2892 | break; |
2906 | case CPU_STARTING: | 2893 | case CPU_STARTING: |
2907 | printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", | 2894 | pr_info("kvm: enabling virtualization on CPU%d\n", |
2908 | cpu); | 2895 | cpu); |
2909 | hardware_enable(); | 2896 | hardware_enable(); |
2910 | break; | 2897 | break; |
@@ -2921,7 +2908,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val, | |||
2921 | * | 2908 | * |
2922 | * And Intel TXT required VMX off for all cpu when system shutdown. | 2909 | * And Intel TXT required VMX off for all cpu when system shutdown. |
2923 | */ | 2910 | */ |
2924 | printk(KERN_INFO "kvm: exiting hardware virtualization\n"); | 2911 | pr_info("kvm: exiting hardware virtualization\n"); |
2925 | kvm_rebooting = true; | 2912 | kvm_rebooting = true; |
2926 | on_each_cpu(hardware_disable_nolock, NULL, 1); | 2913 | on_each_cpu(hardware_disable_nolock, NULL, 1); |
2927 | return NOTIFY_OK; | 2914 | return NOTIFY_OK; |
@@ -2945,7 +2932,7 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus) | |||
2945 | } | 2932 | } |
2946 | 2933 | ||
2947 | static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, | 2934 | static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, |
2948 | const struct kvm_io_range *r2) | 2935 | const struct kvm_io_range *r2) |
2949 | { | 2936 | { |
2950 | if (r1->addr < r2->addr) | 2937 | if (r1->addr < r2->addr) |
2951 | return -1; | 2938 | return -1; |
@@ -2998,7 +2985,7 @@ static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, | |||
2998 | return off; | 2985 | return off; |
2999 | } | 2986 | } |
3000 | 2987 | ||
3001 | static int __kvm_io_bus_write(struct kvm_io_bus *bus, | 2988 | static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, |
3002 | struct kvm_io_range *range, const void *val) | 2989 | struct kvm_io_range *range, const void *val) |
3003 | { | 2990 | { |
3004 | int idx; | 2991 | int idx; |
@@ -3009,7 +2996,7 @@ static int __kvm_io_bus_write(struct kvm_io_bus *bus, | |||
3009 | 2996 | ||
3010 | while (idx < bus->dev_count && | 2997 | while (idx < bus->dev_count && |
3011 | kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { | 2998 | kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { |
3012 | if (!kvm_iodevice_write(bus->range[idx].dev, range->addr, | 2999 | if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, |
3013 | range->len, val)) | 3000 | range->len, val)) |
3014 | return idx; | 3001 | return idx; |
3015 | idx++; | 3002 | idx++; |
@@ -3019,7 +3006,7 @@ static int __kvm_io_bus_write(struct kvm_io_bus *bus, | |||
3019 | } | 3006 | } |
3020 | 3007 | ||
3021 | /* kvm_io_bus_write - called under kvm->slots_lock */ | 3008 | /* kvm_io_bus_write - called under kvm->slots_lock */ |
3022 | int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, | 3009 | int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, |
3023 | int len, const void *val) | 3010 | int len, const void *val) |
3024 | { | 3011 | { |
3025 | struct kvm_io_bus *bus; | 3012 | struct kvm_io_bus *bus; |
@@ -3031,14 +3018,14 @@ int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, | |||
3031 | .len = len, | 3018 | .len = len, |
3032 | }; | 3019 | }; |
3033 | 3020 | ||
3034 | bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); | 3021 | bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); |
3035 | r = __kvm_io_bus_write(bus, &range, val); | 3022 | r = __kvm_io_bus_write(vcpu, bus, &range, val); |
3036 | return r < 0 ? r : 0; | 3023 | return r < 0 ? r : 0; |
3037 | } | 3024 | } |
3038 | 3025 | ||
3039 | /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ | 3026 | /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ |
3040 | int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, | 3027 | int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, |
3041 | int len, const void *val, long cookie) | 3028 | gpa_t addr, int len, const void *val, long cookie) |
3042 | { | 3029 | { |
3043 | struct kvm_io_bus *bus; | 3030 | struct kvm_io_bus *bus; |
3044 | struct kvm_io_range range; | 3031 | struct kvm_io_range range; |
@@ -3048,12 +3035,12 @@ int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, | |||
3048 | .len = len, | 3035 | .len = len, |
3049 | }; | 3036 | }; |
3050 | 3037 | ||
3051 | bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); | 3038 | bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); |
3052 | 3039 | ||
3053 | /* First try the device referenced by cookie. */ | 3040 | /* First try the device referenced by cookie. */ |
3054 | if ((cookie >= 0) && (cookie < bus->dev_count) && | 3041 | if ((cookie >= 0) && (cookie < bus->dev_count) && |
3055 | (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) | 3042 | (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) |
3056 | if (!kvm_iodevice_write(bus->range[cookie].dev, addr, len, | 3043 | if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, |
3057 | val)) | 3044 | val)) |
3058 | return cookie; | 3045 | return cookie; |
3059 | 3046 | ||
@@ -3061,11 +3048,11 @@ int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, | |||
3061 | * cookie contained garbage; fall back to search and return the | 3048 | * cookie contained garbage; fall back to search and return the |
3062 | * correct cookie value. | 3049 | * correct cookie value. |
3063 | */ | 3050 | */ |
3064 | return __kvm_io_bus_write(bus, &range, val); | 3051 | return __kvm_io_bus_write(vcpu, bus, &range, val); |
3065 | } | 3052 | } |
3066 | 3053 | ||
3067 | static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range, | 3054 | static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, |
3068 | void *val) | 3055 | struct kvm_io_range *range, void *val) |
3069 | { | 3056 | { |
3070 | int idx; | 3057 | int idx; |
3071 | 3058 | ||
@@ -3075,7 +3062,7 @@ static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range, | |||
3075 | 3062 | ||
3076 | while (idx < bus->dev_count && | 3063 | while (idx < bus->dev_count && |
3077 | kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { | 3064 | kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { |
3078 | if (!kvm_iodevice_read(bus->range[idx].dev, range->addr, | 3065 | if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, |
3079 | range->len, val)) | 3066 | range->len, val)) |
3080 | return idx; | 3067 | return idx; |
3081 | idx++; | 3068 | idx++; |
@@ -3086,7 +3073,7 @@ static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range, | |||
3086 | EXPORT_SYMBOL_GPL(kvm_io_bus_write); | 3073 | EXPORT_SYMBOL_GPL(kvm_io_bus_write); |
3087 | 3074 | ||
3088 | /* kvm_io_bus_read - called under kvm->slots_lock */ | 3075 | /* kvm_io_bus_read - called under kvm->slots_lock */ |
3089 | int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, | 3076 | int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, |
3090 | int len, void *val) | 3077 | int len, void *val) |
3091 | { | 3078 | { |
3092 | struct kvm_io_bus *bus; | 3079 | struct kvm_io_bus *bus; |
@@ -3098,8 +3085,8 @@ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, | |||
3098 | .len = len, | 3085 | .len = len, |
3099 | }; | 3086 | }; |
3100 | 3087 | ||
3101 | bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); | 3088 | bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); |
3102 | r = __kvm_io_bus_read(bus, &range, val); | 3089 | r = __kvm_io_bus_read(vcpu, bus, &range, val); |
3103 | return r < 0 ? r : 0; | 3090 | return r < 0 ? r : 0; |
3104 | } | 3091 | } |
3105 | 3092 | ||
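With the vcpu threaded all the way down, kvm_io_bus_write() and kvm_io_bus_read() no longer take a struct kvm at all: the VM is reached through vcpu->kvm and the vcpu is handed to whichever device claims the range. A compact stand-alone sketch of that plumbing, using simplified stand-ins for the kernel types:

    #include <stddef.h>
    #include <stdio.h>

    struct vm;
    struct vcpu { struct vm *vm; int id; };

    struct io_dev {
            unsigned long addr, len;
            int (*write)(struct vcpu *vcpu, struct io_dev *dev,
                         unsigned long addr, int len, const void *val);
    };

    struct vm {
            struct io_dev *devs;
            size_t ndevs;
    };

    /* bus entry point: only the vcpu is passed in, the VM comes from it */
    static int bus_write(struct vcpu *vcpu, unsigned long addr, int len,
                         const void *val)
    {
            struct vm *vm = vcpu->vm;

            for (size_t i = 0; i < vm->ndevs; i++) {
                    struct io_dev *dev = &vm->devs[i];

                    if (addr >= dev->addr && addr + len <= dev->addr + dev->len)
                            return dev->write(vcpu, dev, addr, len, val);
            }
            return -1;      /* no device claimed the access */
    }

    static int uart_write(struct vcpu *vcpu, struct io_dev *dev,
                          unsigned long addr, int len, const void *val)
    {
            printf("vcpu %d -> uart, %d bytes at 0x%lx\n", vcpu->id, len, addr);
            return 0;
    }

    int main(void)
    {
            struct io_dev uart = { .addr = 0x3f8, .len = 8, .write = uart_write };
            struct vm vm = { .devs = &uart, .ndevs = 1 };
            struct vcpu v = { .vm = &vm, .id = 0 };
            char c = 'x';

            return bus_write(&v, 0x3f8, 1, &c);
    }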
@@ -3269,6 +3256,7 @@ struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) | |||
3269 | static void kvm_sched_in(struct preempt_notifier *pn, int cpu) | 3256 | static void kvm_sched_in(struct preempt_notifier *pn, int cpu) |
3270 | { | 3257 | { |
3271 | struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); | 3258 | struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); |
3259 | |||
3272 | if (vcpu->preempted) | 3260 | if (vcpu->preempted) |
3273 | vcpu->preempted = false; | 3261 | vcpu->preempted = false; |
3274 | 3262 | ||
@@ -3350,7 +3338,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, | |||
3350 | 3338 | ||
3351 | r = misc_register(&kvm_dev); | 3339 | r = misc_register(&kvm_dev); |
3352 | if (r) { | 3340 | if (r) { |
3353 | printk(KERN_ERR "kvm: misc device register failed\n"); | 3341 | pr_err("kvm: misc device register failed\n"); |
3354 | goto out_unreg; | 3342 | goto out_unreg; |
3355 | } | 3343 | } |
3356 | 3344 | ||
@@ -3361,7 +3349,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, | |||
3361 | 3349 | ||
3362 | r = kvm_init_debug(); | 3350 | r = kvm_init_debug(); |
3363 | if (r) { | 3351 | if (r) { |
3364 | printk(KERN_ERR "kvm: create debugfs files failed\n"); | 3352 | pr_err("kvm: create debugfs files failed\n"); |
3365 | goto out_undebugfs; | 3353 | goto out_undebugfs; |
3366 | } | 3354 | } |
3367 | 3355 | ||