author		Christian Borntraeger <borntraeger@de.ibm.com>	2016-12-09 06:44:40 -0500
committer	Christian Borntraeger <borntraeger@de.ibm.com>	2017-01-30 05:17:29 -0500
commit		27f67f8727843fbbbcd05a003183af79693759e9 (patch)
tree		637706c80d1d48c4fab9849c207b303f5f04f6f1
parent		d051ae531324fb5130366d47e05bf8eadeb95535 (diff)
KVM: s390: Get rid of ar_t
sparse with __CHECK_ENDIAN__ shows that ar_t was never properly used
across KVM on s390. We can now either:

- fix all places, or
- stop making ar_t special

Since ar_t is only used as a register number (no endianness issues for
a u8), and all other register numbers are plain int variables anyway,
let's just use u8, which also matches the __u8 in the userspace ABI for
the memop ioctl.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Janosch Frank <frankja@linux.vnet.ibm.com>
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
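The sparse complaint the message refers to can be reproduced outside the kernel tree. The sketch below is hypothetical and not part of this patch: the gprs array and the base_for_ar()/base_for_u8() helpers are invented for illustration, and the __CHECKER__/__CHECK_ENDIAN__ gating the kernel uses is collapsed into a single #ifdef; only the ar_t typedef mirrors the one this patch removes. Run through sparse, the __bitwise variant warns while the plain u8 variant does not, which is why dropping the typedef silences the checker without changing generated code.

/* Hypothetical stand-alone example, not from this patch. Builds with gcc
 * as-is; run "sparse example.c" to see the checker warning. */
#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned char u8;
typedef u8 __bitwise ar_t;	/* the typedef this patch removes */

static unsigned long gprs[16];	/* stand-in for the guest GPR array */

/* sparse warns here (something like "restricted ar_t degrades to integer"):
 * a __bitwise value may not be mixed with plain integers, e.g. used as an
 * array index, without a __force cast. */
static unsigned long base_for_ar(ar_t ar)
{
	return gprs[ar];
}

/* With a plain u8 register number, as after this patch, sparse is silent. */
static unsigned long base_for_u8(u8 ar)
{
	return gprs[ar];
}

int main(void)
{
	return (int)(base_for_ar((__force ar_t)3) + base_for_u8(3));
}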
-rw-r--r--	arch/s390/kvm/gaccess.c		14
-rw-r--r--	arch/s390/kvm/gaccess.h		10
-rw-r--r--	arch/s390/kvm/kvm-s390.h	10
-rw-r--r--	arch/s390/kvm/priv.c		30
4 files changed, 31 insertions(+), 33 deletions(-)
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 6e94705efd4e..4492c9363178 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -373,7 +373,7 @@ void ipte_unlock(struct kvm_vcpu *vcpu)
 		ipte_unlock_simple(vcpu);
 }
 
-static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
+static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, u8 ar,
 			  enum gacc_mode mode)
 {
 	union alet alet;
@@ -487,7 +487,7 @@ enum prot_type {
 };
 
 static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
-		     ar_t ar, enum gacc_mode mode, enum prot_type prot)
+		     u8 ar, enum gacc_mode mode, enum prot_type prot)
 {
 	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
 	struct trans_exc_code_bits *tec;
@@ -545,7 +545,7 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
 }
 
 static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
-			 unsigned long ga, ar_t ar, enum gacc_mode mode)
+			 unsigned long ga, u8 ar, enum gacc_mode mode)
 {
 	int rc;
 	struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);
@@ -777,7 +777,7 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
 	return 1;
 }
 
-static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar,
+static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
 			    unsigned long *pages, unsigned long nr_pages,
 			    const union asce asce, enum gacc_mode mode)
 {
@@ -809,7 +809,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar,
 	return 0;
 }
 
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 		 unsigned long len, enum gacc_mode mode)
 {
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -883,7 +883,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
  * Note: The IPTE lock is not taken during this function, so the caller
  * has to take care of this.
  */
-int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 			    unsigned long *gpa, enum gacc_mode mode)
 {
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -916,7 +916,7 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
 /**
  * check_gva_range - test a range of guest virtual addresses for accessibility
  */
-int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 		    unsigned long length, enum gacc_mode mode)
 {
 	unsigned long gpa;
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 8756569ad938..5c9cc18f3b4a 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -162,11 +162,11 @@ enum gacc_mode {
 };
 
 int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
-			    ar_t ar, unsigned long *gpa, enum gacc_mode mode);
-int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+			    u8 ar, unsigned long *gpa, enum gacc_mode mode);
+int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 		    unsigned long length, enum gacc_mode mode);
 
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 		 unsigned long len, enum gacc_mode mode);
 
 int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
@@ -218,7 +218,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
  * if data has been changed in guest space in case of an exception.
  */
 static inline __must_check
-int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 		unsigned long len)
 {
 	return access_guest(vcpu, ga, ar, data, len, GACC_STORE);
@@ -238,7 +238,7 @@ int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
  * data will be copied from guest space to kernel space.
  */
 static inline __must_check
-int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 	       unsigned long len)
 {
 	return access_guest(vcpu, ga, ar, data, len, GACC_FETCH);
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 3a4e97f1a9e6..22a0a7ceffad 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -86,9 +86,7 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
 	kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
 }
 
-typedef u8 __bitwise ar_t;
-
-static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar)
+static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
 {
 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
 	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
@@ -101,7 +99,7 @@ static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar)
 
 static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
 					      u64 *address1, u64 *address2,
-					      ar_t *ar_b1, ar_t *ar_b2)
+					      u8 *ar_b1, u8 *ar_b2)
 {
 	u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
 	u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
@@ -125,7 +123,7 @@ static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2
 	*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
 }
 
-static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar)
+static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, u8 *ar)
 {
 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
 	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
@@ -140,7 +138,7 @@ static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar)
 	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
 }
 
-static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, ar_t *ar)
+static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, u8 *ar)
 {
 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
 	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index e18435355c16..1ecc1cffdf7c 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -54,7 +54,7 @@ int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
 static int handle_set_clock(struct kvm_vcpu *vcpu)
 {
 	int rc;
-	ar_t ar;
+	u8 ar;
 	u64 op2, val;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
@@ -79,7 +79,7 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
 	u64 operand2;
 	u32 address;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_spx++;
 
@@ -117,7 +117,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
 	u64 operand2;
 	u32 address;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stpx++;
 
@@ -147,7 +147,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 	u16 vcpu_id = vcpu->vcpu_id;
 	u64 ga;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stap++;
 
@@ -380,7 +380,7 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
 	u32 tpi_data[3];
 	int rc;
 	u64 addr;
-	ar_t ar;
+	u8 ar;
 
 	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
 	if (addr & 3)
@@ -548,7 +548,7 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
 	psw_compat_t new_psw;
 	u64 addr;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	if (gpsw->mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -575,7 +575,7 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
 	psw_t new_psw;
 	u64 addr;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -597,7 +597,7 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
 	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
 	u64 operand2;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stidp++;
 
@@ -644,7 +644,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
 	ASCEBC(mem->vm[0].cpi, 16);
 }
 
-static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar,
+static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
 				 u8 fc, u8 sel1, u16 sel2)
 {
 	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
@@ -663,7 +663,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 	unsigned long mem = 0;
 	u64 operand2;
 	int rc = 0;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stsi++;
 	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);
@@ -970,7 +970,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u32 ctl_array[16];
 	u64 ga;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_lctl++;
 
@@ -1009,7 +1009,7 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u32 ctl_array[16];
 	u64 ga;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stctl++;
 
@@ -1043,7 +1043,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u64 ctl_array[16];
 	u64 ga;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_lctlg++;
 
@@ -1081,7 +1081,7 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u64 ctl_array[16];
 	u64 ga;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stctg++;
 
@@ -1132,7 +1132,7 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 	unsigned long hva, gpa;
 	int ret = 0, cc = 0;
 	bool writable;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_tprot++;
 