Diffstat (limited to 'arch')
-rw-r--r--  arch/avr32/boards/atstk1000/atstk1002.c  |  19
-rw-r--r--  arch/avr32/boards/atstk1000/atstk1003.c  |   2
-rw-r--r--  arch/avr32/boards/atstk1000/atstk1004.c  |   2
-rw-r--r--  arch/avr32/kernel/time.c                 |   6
-rw-r--r--  arch/avr32/mach-at32ap/at32ap700x.c      |  56
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c                 |   5
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c               |   5
-rw-r--r--  arch/powerpc/kvm/emulate.c               |   2
-rw-r--r--  arch/s390/kvm/gaccess.h                  |  62
-rw-r--r--  arch/s390/kvm/intercept.c                |  14
-rw-r--r--  arch/s390/kvm/interrupt.c                |  21
-rw-r--r--  arch/s390/kvm/kvm-s390.c                 |   9
-rw-r--r--  arch/s390/kvm/sigp.c                     |   5
-rw-r--r--  arch/x86/kvm/mmu.c                       |   7
-rw-r--r--  arch/x86/kvm/svm.c                       |  10
-rw-r--r--  arch/x86/kvm/vmx.c                       |  22
-rw-r--r--  arch/x86/kvm/x86.c                       | 109
17 files changed, 200 insertions(+), 156 deletions(-)
diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c
index 14dc5a143695..8538ba75ef92 100644
--- a/arch/avr32/boards/atstk1000/atstk1002.c
+++ b/arch/avr32/boards/atstk1000/atstk1002.c
@@ -21,6 +21,8 @@
 
 #include <asm/io.h>
 #include <asm/setup.h>
+#include <asm/atmel-mci.h>
+
 #include <asm/arch/at32ap700x.h>
 #include <asm/arch/board.h>
 #include <asm/arch/init.h>
@@ -260,6 +262,21 @@ void __init setup_board(void)
 	at32_setup_serial_console(0);
 }
 
+#ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM
+
+/* MMC card detect requires MACB0 *NOT* be used */
+#ifdef CONFIG_BOARD_ATSTK1002_SW6_CUSTOM
+static struct mci_platform_data __initdata mci0_data = {
+	.detect_pin	= GPIO_PIN_PC(14), /* gpio30/sdcd */
+	.wp_pin		= GPIO_PIN_PC(15), /* gpio31/sdwp */
+};
+#define MCI_PDATA	&mci0_data
+#else
+#define MCI_PDATA	NULL
+#endif	/* SW6 for sd{cd,wp} routing */
+
+#endif	/* SW2 for MMC signal routing */
+
 static int __init atstk1002_init(void)
 {
 	/*
@@ -309,7 +326,7 @@ static int __init atstk1002_init(void)
 	at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info));
 #endif
 #ifndef CONFIG_BOARD_ATSTK1002_SW2_CUSTOM
-	at32_add_device_mci(0, NULL);
+	at32_add_device_mci(0, MCI_PDATA);
 #endif
#ifdef CONFIG_BOARD_ATSTK1002_SW5_CUSTOM
 	set_hw_addr(at32_add_device_eth(1, &eth_data[1]));
diff --git a/arch/avr32/boards/atstk1000/atstk1003.c b/arch/avr32/boards/atstk1000/atstk1003.c
index ea109f435a83..591fc73b554a 100644
--- a/arch/avr32/boards/atstk1000/atstk1003.c
+++ b/arch/avr32/boards/atstk1000/atstk1003.c
@@ -154,7 +154,7 @@ static int __init atstk1003_init(void)
 	at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info));
 #endif
 #ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM
-	at32_add_device_mci(0);
+	at32_add_device_mci(0, NULL);
 #endif
 	at32_add_device_usba(0, NULL);
 #ifndef CONFIG_BOARD_ATSTK100X_SW3_CUSTOM
diff --git a/arch/avr32/boards/atstk1000/atstk1004.c b/arch/avr32/boards/atstk1000/atstk1004.c
index c7236df74d74..d9c5e0a21256 100644
--- a/arch/avr32/boards/atstk1000/atstk1004.c
+++ b/arch/avr32/boards/atstk1000/atstk1004.c
@@ -137,7 +137,7 @@ static int __init atstk1004_init(void)
 	at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info));
 #endif
 #ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM
-	at32_add_device_mci(0);
+	at32_add_device_mci(0, NULL);
 #endif
 	at32_add_device_lcdc(0, &atstk1000_lcdc_data,
 			     fbmem_start, fbmem_size, 0);
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index abd954fb7ba0..7e7f32771ae1 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -43,6 +43,9 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
 	struct clock_event_device *evdev = dev_id;
 
+	if (unlikely(!(intc_get_pending(0) & 1)))
+		return IRQ_NONE;
+
 	/*
 	 * Disable the interrupt until the clockevent subsystem
 	 * reprograms it.
@@ -55,7 +58,8 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
 
 static struct irqaction timer_irqaction = {
 	.handler	= timer_interrupt,
-	.flags		= IRQF_TIMER | IRQF_DISABLED,
+	/* Oprofile uses the same irq as the timer, so allow it to be shared */
+	.flags		= IRQF_TIMER | IRQF_DISABLED | IRQF_SHARED,
 	.name		= "avr32_comparator",
 };
 
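Note on the IRQF_SHARED change above: once the comparator line is shared with oprofile, every handler registered on that line is called for each interrupt, so the timer handler must first check whether the comparator actually fired (the intc_get_pending() test added in the first hunk) and return IRQ_NONE otherwise. A minimal sketch of that shared-handler contract; "my_dev" and its pending flag are hypothetical stand-ins for a real device:

/*
 * Sketch of the shared-IRQ handler pattern assumed by the hunks above.
 */
#include <linux/interrupt.h>

struct my_dev {
	int pending;			/* set from a hardware status read */
};

static irqreturn_t my_handler(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	if (!dev->pending)		/* not ours: let other handlers run */
		return IRQ_NONE;

	/* ... acknowledge and handle the interrupt ... */
	return IRQ_HANDLED;
}

/* Registered with: request_irq(irq, my_handler, IRQF_SHARED, "my_dev", dev); */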
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index 351e1b42f937..1617048c86c5 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/gpio.h>
 #include <linux/spi/spi.h>
 #include <linux/usb/atmel_usba_udc.h>
 
@@ -1285,7 +1286,6 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 {
 	struct mci_platform_data _data;
 	struct platform_device *pdev;
-	struct dw_dma_slave *dws;
 
 	if (id != 0)
 		return NULL;
@@ -1300,7 +1300,9 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 
 	if (!data) {
 		data = &_data;
-		memset(data, 0, sizeof(struct mci_platform_data));
+		memset(data, -1, sizeof(struct mci_platform_data));
+		data->detect_pin = GPIO_PIN_NONE;
+		data->wp_pin = GPIO_PIN_NONE;
 	}
 
 	if (platform_device_add_data(pdev, data,
@@ -1314,12 +1316,10 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 	select_peripheral(PA(14), PERIPH_A, 0);	/* DATA2 */
 	select_peripheral(PA(15), PERIPH_A, 0);	/* DATA3 */
 
-	if (data) {
-		if (data->detect_pin != GPIO_PIN_NONE)
-			at32_select_gpio(data->detect_pin, 0);
-		if (data->wp_pin != GPIO_PIN_NONE)
-			at32_select_gpio(data->wp_pin, 0);
-	}
+	if (gpio_is_valid(data->detect_pin))
+		at32_select_gpio(data->detect_pin, 0);
+	if (gpio_is_valid(data->wp_pin))
+		at32_select_gpio(data->wp_pin, 0);
 
 	atmel_mci0_pclk.dev = &pdev->dev;
 
@@ -1853,11 +1853,11 @@ at32_add_device_cf(unsigned int id, unsigned int extint,
 	if (at32_init_ide_or_cf(pdev, data->cs, extint))
 		goto fail;
 
-	if (data->detect_pin != GPIO_PIN_NONE)
+	if (gpio_is_valid(data->detect_pin))
 		at32_select_gpio(data->detect_pin, AT32_GPIOF_DEGLITCH);
-	if (data->reset_pin != GPIO_PIN_NONE)
+	if (gpio_is_valid(data->reset_pin))
 		at32_select_gpio(data->reset_pin, 0);
-	if (data->vcc_pin != GPIO_PIN_NONE)
+	if (gpio_is_valid(data->vcc_pin))
 		at32_select_gpio(data->vcc_pin, 0);
 	/* READY is used as extint, so we can't select it as gpio */
 
@@ -1937,9 +1937,11 @@ static struct clk atmel_ac97c0_pclk = {
 	.index		= 10,
 };
 
-struct platform_device *__init at32_add_device_ac97c(unsigned int id)
+struct platform_device *__init
+at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data)
 {
 	struct platform_device *pdev;
+	struct ac97c_platform_data _data;
 
 	if (id != 0)
 		return NULL;
@@ -1950,19 +1952,37 @@ struct platform_device *__init at32_add_device_ac97c(unsigned int id)
 
 	if (platform_device_add_resources(pdev, atmel_ac97c0_resource,
 				ARRAY_SIZE(atmel_ac97c0_resource)))
-		goto err_add_resources;
+		goto fail;
+
+	if (!data) {
+		data = &_data;
+		memset(data, 0, sizeof(struct ac97c_platform_data));
+		data->reset_pin = GPIO_PIN_NONE;
+	}
 
-	select_peripheral(PB(20), PERIPH_B, 0);	/* SYNC */
-	select_peripheral(PB(21), PERIPH_B, 0);	/* SDO */
-	select_peripheral(PB(22), PERIPH_B, 0);	/* SDI */
-	select_peripheral(PB(23), PERIPH_B, 0);	/* SCLK */
+	data->dma_rx_periph_id = 3;
+	data->dma_tx_periph_id = 4;
+	data->dma_controller_id = 0;
+
+	if (platform_device_add_data(pdev, data,
+				sizeof(struct ac97c_platform_data)))
+		goto fail;
+
+	select_peripheral(PB(20), PERIPH_B, 0);	/* SDO */
+	select_peripheral(PB(21), PERIPH_B, 0);	/* SYNC */
+	select_peripheral(PB(22), PERIPH_B, 0);	/* SCLK */
+	select_peripheral(PB(23), PERIPH_B, 0);	/* SDI */
+
+	/* TODO: gpio_is_valid(data->reset_pin) with kernel 2.6.26. */
+	if (data->reset_pin != GPIO_PIN_NONE)
+		at32_select_gpio(data->reset_pin, 0);
 
 	atmel_ac97c0_pclk.dev = &pdev->dev;
 
 	platform_device_add(pdev);
 	return pdev;
 
-err_add_resources:
+fail:
 	platform_device_put(pdev);
 	return NULL;
 }
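Note on the memset(data, -1, ...) pattern above: filling the struct with 0xff bytes leaves every int-sized pin field holding -1, which gpio_is_valid() rejects (the helper is essentially a "number >= 0 and below the arch limit" check), so unset pins are skipped without explicit GPIO_PIN_NONE comparisons. A runnable userspace model of that assumption (two's-complement int fields, as the kernel relies on):

#include <assert.h>
#include <string.h>

struct pins { int detect_pin; int wp_pin; };

/* Simplified model of the kernel's gpio_is_valid(). */
static int gpio_is_valid(int number)
{
	return number >= 0;	/* the real helper also checks an upper bound */
}

int main(void)
{
	struct pins p;

	memset(&p, -1, sizeof(p));	/* every byte 0xff -> ints become -1 */
	assert(p.detect_pin == -1 && !gpio_is_valid(p.detect_pin));
	assert(p.wp_pin == -1 && !gpio_is_valid(p.wp_pin));
	return 0;
}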
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 2672f4d278ac..7a37d06376be 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -125,9 +125,9 @@ void kvm_arch_hardware_enable(void *garbage)
 			PAGE_KERNEL));
 	local_irq_save(saved_psr);
 	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
+	local_irq_restore(saved_psr);
 	if (slot < 0)
 		return;
-	local_irq_restore(saved_psr);
 
 	spin_lock(&vp_lock);
 	status = ia64_pal_vp_init_env(kvm_vsa_base ?
@@ -160,9 +160,9 @@ void kvm_arch_hardware_disable(void *garbage)
 
 	local_irq_save(saved_psr);
 	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
+	local_irq_restore(saved_psr);
 	if (slot < 0)
 		return;
-	local_irq_restore(saved_psr);
 
 	status = ia64_pal_vp_exit_env(host_iva);
 	if (status)
@@ -1253,6 +1253,7 @@ static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
 uninit:
 	kvm_vcpu_uninit(vcpu);
 fail:
+	local_irq_restore(psr);
 	return r;
 }
 
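All three ia64 hunks fix the same bug: an early return between local_irq_save() and local_irq_restore() leaked the interrupts-disabled state. Restoring before the error check (or in the fail: label) keeps the pair balanced on every exit path. A small userspace model of the fixed control flow; the save/restore helpers here are stand-ins, not the kernel's:

#include <assert.h>

static int irqs_disabled;

static void model_irq_save(int *flags) { *flags = irqs_disabled; irqs_disabled = 1; }
static void model_irq_restore(int flags) { irqs_disabled = flags; }

static void fixed_path(int slot)
{
	int flags;

	model_irq_save(&flags);
	/* ... critical section: ia64_itr_entry() in the real code ... */
	model_irq_restore(flags);	/* moved before the error check */
	if (slot < 0)
		return;			/* early exit no longer leaks state */
	/* ... rest of setup ... */
}

int main(void)
{
	fixed_path(-1);			/* take the error path */
	assert(!irqs_disabled);		/* interrupts are back on */
	return 0;
}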
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 75dff7cfa814..5a5602da5091 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -177,7 +177,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
 	                     vcpu->arch.msr & MSR_PR);
 }
 
-void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid)
+void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
+                           gva_t eend, u32 asid)
 {
 	unsigned int pid = asid & 0xff;
 	int i;
@@ -191,7 +192,7 @@ void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid)
 		if (!get_tlb_v(stlbe))
 			continue;
 
-		if (eaddr < get_tlb_eaddr(stlbe))
+		if (eend < get_tlb_eaddr(stlbe))
 			continue;
 
 		if (eaddr > get_tlb_end(stlbe))
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 000097461283..8c605d0a5488 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -137,7 +137,7 @@ static int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u32 inst)
 	if (tlbe->word0 & PPC44x_TLB_VALID) {
 		eaddr = get_tlb_eaddr(tlbe);
 		asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
-		kvmppc_mmu_invalidate(vcpu, eaddr, asid);
+		kvmppc_mmu_invalidate(vcpu, eaddr, get_tlb_end(tlbe), asid);
 	}
 
 	switch (ws) {
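With the new (eaddr, eend) interface, kvmppc_mmu_invalidate() drops a shadow TLB entry unless the invalidation range and the entry's range are disjoint: the loop keeps an entry when eend < get_tlb_eaddr() or eaddr > get_tlb_end(). A runnable sketch of that interval-overlap test:

#include <assert.h>
#include <stdint.h>

/* Two inclusive ranges overlap unless one ends before the other begins;
 * this mirrors the pair of "continue" tests in the rewritten loop. */
static int ranges_overlap(uint32_t a0, uint32_t a1, uint32_t b0, uint32_t b1)
{
	return !(a1 < b0 || a0 > b1);
}

int main(void)
{
	/* Shadow TLB entry mapping 0x1000..0x1fff. */
	assert(ranges_overlap(0x1800, 0x2800, 0x1000, 0x1fff));	/* invalidated */
	assert(!ranges_overlap(0x2000, 0x2fff, 0x1000, 0x1fff));	/* kept */
	return 0;
}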
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 4e0633c413f3..ed60f3a74a85 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -18,11 +18,11 @@
 #include <asm/uaccess.h>
 
 static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
-					       u64 guestaddr)
+					       unsigned long guestaddr)
 {
-	u64 prefix = vcpu->arch.sie_block->prefix;
-	u64 origin = vcpu->kvm->arch.guest_origin;
-	u64 memsize = vcpu->kvm->arch.guest_memsize;
+	unsigned long prefix = vcpu->arch.sie_block->prefix;
+	unsigned long origin = vcpu->kvm->arch.guest_origin;
+	unsigned long memsize = vcpu->kvm->arch.guest_memsize;
 
 	if (guestaddr < 2 * PAGE_SIZE)
 		guestaddr += prefix;
@@ -37,7 +37,7 @@ static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
 	return (void __user *) guestaddr;
 }
 
-static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
 				u64 *result)
 {
 	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -47,10 +47,10 @@ static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
 	if (IS_ERR((void __force *) uptr))
 		return PTR_ERR((void __force *) uptr);
 
-	return get_user(*result, (u64 __user *) uptr);
+	return get_user(*result, (unsigned long __user *) uptr);
 }
 
-static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
 				u32 *result)
 {
 	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -63,7 +63,7 @@ static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
 	return get_user(*result, (u32 __user *) uptr);
 }
 
-static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
 				u16 *result)
 {
 	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -76,7 +76,7 @@ static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
 	return get_user(*result, (u16 __user *) uptr);
 }
 
-static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
 			       u8 *result)
 {
 	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -87,7 +87,7 @@ static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
 	return get_user(*result, (u8 __user *) uptr);
 }
 
-static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
 				u64 value)
 {
 	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -100,7 +100,7 @@ static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
 	return put_user(value, (u64 __user *) uptr);
 }
 
-static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
 				u32 value)
 {
 	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -113,7 +113,7 @@ static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
 	return put_user(value, (u32 __user *) uptr);
 }
 
-static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
 				u16 value)
 {
 	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -126,7 +126,7 @@ static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
 	return put_user(value, (u16 __user *) uptr);
 }
 
-static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
+static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
 			       u8 value)
 {
 	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
@@ -138,7 +138,8 @@ static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
 }
 
 
-static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
+static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
+				       unsigned long guestdest,
 				       const void *from, unsigned long n)
 {
 	int rc;
@@ -153,12 +154,12 @@ static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
 	return 0;
 }
 
-static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest,
+static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
 				const void *from, unsigned long n)
 {
-	u64 prefix = vcpu->arch.sie_block->prefix;
-	u64 origin = vcpu->kvm->arch.guest_origin;
-	u64 memsize = vcpu->kvm->arch.guest_memsize;
+	unsigned long prefix = vcpu->arch.sie_block->prefix;
+	unsigned long origin = vcpu->kvm->arch.guest_origin;
+	unsigned long memsize = vcpu->kvm->arch.guest_memsize;
 
 	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
 		goto slowpath;
@@ -189,7 +190,8 @@ slowpath:
 }
 
 static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
-					 u64 guestsrc, unsigned long n)
+					 unsigned long guestsrc,
+					 unsigned long n)
 {
 	int rc;
 	unsigned long i;
@@ -204,11 +206,11 @@ static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
 }
 
 static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
-				  u64 guestsrc, unsigned long n)
+				  unsigned long guestsrc, unsigned long n)
 {
-	u64 prefix = vcpu->arch.sie_block->prefix;
-	u64 origin = vcpu->kvm->arch.guest_origin;
-	u64 memsize = vcpu->kvm->arch.guest_memsize;
+	unsigned long prefix = vcpu->arch.sie_block->prefix;
+	unsigned long origin = vcpu->kvm->arch.guest_origin;
+	unsigned long memsize = vcpu->kvm->arch.guest_memsize;
 
 	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
 		goto slowpath;
@@ -238,11 +240,12 @@ slowpath:
 	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
 }
 
-static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
+static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
+					 unsigned long guestdest,
 					 const void *from, unsigned long n)
 {
-	u64 origin = vcpu->kvm->arch.guest_origin;
-	u64 memsize = vcpu->kvm->arch.guest_memsize;
+	unsigned long origin = vcpu->kvm->arch.guest_origin;
+	unsigned long memsize = vcpu->kvm->arch.guest_memsize;
 
 	if (guestdest + n > memsize)
 		return -EFAULT;
@@ -256,10 +259,11 @@ static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
 }
 
 static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
-					   u64 guestsrc, unsigned long n)
+					   unsigned long guestsrc,
+					   unsigned long n)
 {
-	u64 origin = vcpu->kvm->arch.guest_origin;
-	u64 memsize = vcpu->kvm->arch.guest_memsize;
+	unsigned long origin = vcpu->kvm->arch.guest_origin;
+	unsigned long memsize = vcpu->kvm->arch.guest_memsize;
 
 	if (guestsrc + n > memsize)
 		return -EFAULT;
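For context on __guestaddr_to_user(): the first two pages of guest real storage are subject to s390 prefixing, so addresses below 2 * PAGE_SIZE are redirected into the prefix area (guestaddr += prefix). The symmetric case, mapping addresses inside [prefix, prefix + 2 * PAGE_SIZE) back to low core, is not visible in the hunks above and is assumed here from the s390 architecture. A runnable model of that translation:

#include <assert.h>

#define PAGE_SIZE 4096UL

/* Model of the prefixing step in __guestaddr_to_user(); the un-prefixing
 * branch is an assumption based on s390 prefixing, not shown above. */
static unsigned long apply_prefix(unsigned long guestaddr, unsigned long prefix)
{
	if (guestaddr < 2 * PAGE_SIZE)
		guestaddr += prefix;
	else if (guestaddr >= prefix && guestaddr < prefix + 2 * PAGE_SIZE)
		guestaddr -= prefix;
	return guestaddr;
}

int main(void)
{
	unsigned long prefix = 0x20000;

	assert(apply_prefix(0x100, prefix) == 0x20100);		/* low core -> prefix area */
	assert(apply_prefix(0x20100, prefix) == 0x100);		/* prefix area -> low core */
	assert(apply_prefix(0x50000, prefix) == 0x50000);	/* untouched */
	return 0;
}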
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 47a0b642174c..61236102203e 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -20,7 +20,7 @@
 #include "kvm-s390.h"
 #include "gaccess.h"
 
-static int handle_lctg(struct kvm_vcpu *vcpu)
+static int handle_lctlg(struct kvm_vcpu *vcpu)
 {
 	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
 	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
@@ -30,7 +30,7 @@ static int handle_lctg(struct kvm_vcpu *vcpu)
 	u64 useraddr;
 	int reg, rc;
 
-	vcpu->stat.instruction_lctg++;
+	vcpu->stat.instruction_lctlg++;
 	if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
 		return -ENOTSUPP;
 
@@ -38,9 +38,12 @@ static int handle_lctg(struct kvm_vcpu *vcpu)
 	if (base2)
 		useraddr += vcpu->arch.guest_gprs[base2];
 
+	if (useraddr & 7)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
 	reg = reg1;
 
-	VCPU_EVENT(vcpu, 5, "lctg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
+	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
 		   disp2);
 
 	do {
@@ -74,6 +77,9 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
 	if (base2)
 		useraddr += vcpu->arch.guest_gprs[base2];
 
+	if (useraddr & 3)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
 	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
 		   disp2);
 
@@ -99,7 +105,7 @@ static intercept_handler_t instruction_handlers[256] = {
 	[0xae] = kvm_s390_handle_sigp,
 	[0xb2] = kvm_s390_handle_priv,
 	[0xb7] = handle_lctl,
-	[0xeb] = handle_lctg,
+	[0xeb] = handle_lctlg,
 };
 
 static int handle_noop(struct kvm_vcpu *vcpu)
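The PGM_SPECIFICATION checks added above use the usual power-of-two alignment test: addr & (n - 1) is nonzero exactly when addr is not n-byte aligned. LCTLG operands must be doubleword aligned (mask 7), LCTL operands word aligned (mask 3). A runnable demonstration:

#include <assert.h>

/* addr & (n - 1) tests n-byte alignment when n is a power of two;
 * this is the specification-exception test in handle_lctlg/handle_lctl. */
static int misaligned(unsigned long addr, unsigned long n)
{
	return (addr & (n - 1)) != 0;
}

int main(void)
{
	assert(!misaligned(0x1000, 8));	/* doubleword aligned: LCTLG ok */
	assert(misaligned(0x1004, 8));	/* 4-byte offset: LCTLG faults  */
	assert(!misaligned(0x1004, 4));	/* ...but word aligned: LCTL ok */
	return 0;
}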
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 11230b0db957..2960702b4824 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -13,6 +13,7 @@
 #include <asm/lowcore.h>
 #include <asm/uaccess.h>
 #include <linux/kvm_host.h>
+#include <linux/signal.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
 
@@ -246,15 +247,10 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 	default:
 		BUG();
 	}
-
 	if (exception) {
-		VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering"
-			   " interrupt");
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		if (inti->type == KVM_S390_PROGRAM_INT) {
-			printk(KERN_WARNING "kvm: recursive program check\n");
-			BUG();
-		}
+		printk("kvm: The guest lowcore is not mapped during interrupt "
+		       "delivery, killing userspace\n");
+		do_exit(SIGKILL);
 	}
 }
 
@@ -277,14 +273,11 @@ static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
 			__LC_EXT_NEW_PSW, sizeof(psw_t));
 	if (rc == -EFAULT)
 		exception = 1;
-
 	if (exception) {
-		VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering" \
-			   " ckc interrupt");
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		return 0;
+		printk("kvm: The guest lowcore is not mapped during interrupt "
+		       "delivery, killing userspace\n");
+		do_exit(SIGKILL);
 	}
-
 	return 1;
 }
 
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 1782cbcd2829..8b00eb2ddf57 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -39,7 +39,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "exit_instruction", VCPU_STAT(exit_instruction) },
 	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
 	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
-	{ "instruction_lctg", VCPU_STAT(instruction_lctg) },
+	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
 	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
 	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
 	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
@@ -112,7 +112,12 @@ long kvm_arch_dev_ioctl(struct file *filp,
 
 int kvm_dev_ioctl_check_extension(long ext)
 {
-	return 0;
+	switch (ext) {
+	case KVM_CAP_USER_MEMORY:
+		return 1;
+	default:
+		return 0;
+	}
 }
 
 /* Section: vm related */
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 5a556114eaa5..170392687ce0 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -43,7 +43,8 @@
 #define SIGP_STAT_RECEIVER_CHECK    0x00000001UL
 
 
-static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg)
+static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
+			unsigned long *reg)
 {
 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 	int rc;
@@ -167,7 +168,7 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
 }
 
 static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
-			     u64 *reg)
+			     unsigned long *reg)
 {
 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 	struct kvm_s390_local_interrupt *li;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b0e4ddca6c18..2fa231923cf7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1814,6 +1814,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	return r;
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
@@ -1870,6 +1871,12 @@ void kvm_enable_tdp(void)
 }
 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
 
+void kvm_disable_tdp(void)
+{
+	tdp_enabled = false;
+}
+EXPORT_SYMBOL_GPL(kvm_disable_tdp);
+
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu_page *sp;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b756e876dce3..e2ee264740c7 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -453,7 +453,8 @@ static __init int svm_hardware_setup(void)
 	if (npt_enabled) {
 		printk(KERN_INFO "kvm: Nested Paging enabled\n");
 		kvm_enable_tdp();
-	}
+	} else
+		kvm_disable_tdp();
 
 	return 0;
 
@@ -1007,10 +1008,13 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	struct kvm *kvm = svm->vcpu.kvm;
 	u64 fault_address;
 	u32 error_code;
+	bool event_injection = false;
 
 	if (!irqchip_in_kernel(kvm) &&
-	    is_external_interrupt(exit_int_info))
+	    is_external_interrupt(exit_int_info)) {
+		event_injection = true;
 		push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
+	}
 
 	fault_address = svm->vmcb->control.exit_info_2;
 	error_code = svm->vmcb->control.exit_info_1;
@@ -1024,6 +1028,8 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 		    (u32)fault_address, (u32)(fault_address >> 32),
 		    handler);
 
+	if (event_injection)
+		kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
 	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
 }
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 0cac63701719..2a69773e3b26 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2298,6 +2298,8 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		cr2 = vmcs_readl(EXIT_QUALIFICATION);
 		KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
 			    (u32)((u64)cr2 >> 32), handler);
+		if (vect_info & VECTORING_INFO_VALID_MASK)
+			kvm_mmu_unprotect_page_virt(vcpu, cr2);
 		return kvm_mmu_page_fault(vcpu, cr2, error_code);
 	}
 
@@ -3116,15 +3118,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 		return ERR_PTR(-ENOMEM);
 
 	allocate_vpid(vmx);
-	if (id == 0 && vm_need_ept()) {
-		kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
-			VMX_EPT_WRITABLE_MASK |
-			VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
-		kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK,
-			VMX_EPT_FAKE_DIRTY_MASK, 0ull,
-			VMX_EPT_EXECUTABLE_MASK);
-		kvm_enable_tdp();
-	}
 
 	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
 	if (err)
@@ -3303,8 +3296,17 @@ static int __init vmx_init(void)
 	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP);
 	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP);
 
-	if (cpu_has_vmx_ept())
+	if (vm_need_ept()) {
 		bypass_guest_pf = 0;
+		kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
+			VMX_EPT_WRITABLE_MASK |
+			VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
+		kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK,
+			VMX_EPT_FAKE_DIRTY_MASK, 0ull,
+			VMX_EPT_EXECUTABLE_MASK);
+		kvm_enable_tdp();
+	} else
+		kvm_disable_tdp();
 
 	if (bypass_guest_pf)
 		kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9f1cdb011cff..5916191420c7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3184,6 +3184,10 @@ static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
 	kvm_desct->base |= seg_desc->base2 << 24;
 	kvm_desct->limit = seg_desc->limit0;
 	kvm_desct->limit |= seg_desc->limit << 16;
+	if (seg_desc->g) {
+		kvm_desct->limit <<= 12;
+		kvm_desct->limit |= 0xfff;
+	}
 	kvm_desct->selector = selector;
 	kvm_desct->type = seg_desc->type;
 	kvm_desct->present = seg_desc->p;
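The seg_desc->g handling above implements the x86 granularity bit: when G=1 the 20-bit raw limit counts 4 KiB pages, so the byte-granular limit is (limit << 12) | 0xfff. A runnable check of that expansion:

#include <assert.h>
#include <stdint.h>

/* Effective segment limit in bytes, mirroring the hunk above:
 * with G=1 the 20-bit raw limit is in 4 KiB units. */
static uint32_t effective_limit(uint32_t raw_limit, int g)
{
	if (g)
		return (raw_limit << 12) | 0xfff;
	return raw_limit;
}

int main(void)
{
	assert(effective_limit(0x00000, 1) == 0x00000fff);	/* one 4 KiB page */
	assert(effective_limit(0xfffff, 1) == 0xffffffff);	/* full 4 GiB */
	assert(effective_limit(0x0ffff, 0) == 0x0000ffff);	/* byte granular */
	return 0;
}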
@@ -3223,6 +3227,7 @@ static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu,
 static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 					 struct desc_struct *seg_desc)
 {
+	gpa_t gpa;
 	struct descriptor_table dtable;
 	u16 index = selector >> 3;
 
@@ -3232,13 +3237,16 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 		kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
 		return 1;
 	}
-	return kvm_read_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
+	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
+	gpa += index * 8;
+	return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
 }
 
 /* allowed just for 8 bytes segments */
 static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 					 struct desc_struct *seg_desc)
 {
+	gpa_t gpa;
 	struct descriptor_table dtable;
 	u16 index = selector >> 3;
 
@@ -3246,7 +3254,9 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 
 	if (dtable.limit < index * 8 + 7)
 		return 1;
-	return kvm_write_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
+	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
+	gpa += index * 8;
+	return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
 }
 
 static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
@@ -3258,55 +3268,7 @@ static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
 	base_addr |= (seg_desc->base1 << 16);
 	base_addr |= (seg_desc->base2 << 24);
 
-	return base_addr;
-}
-
-static int load_tss_segment32(struct kvm_vcpu *vcpu,
-			      struct desc_struct *seg_desc,
-			      struct tss_segment_32 *tss)
-{
-	u32 base_addr;
-
-	base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-	return kvm_read_guest(vcpu->kvm, base_addr, tss,
-			      sizeof(struct tss_segment_32));
-}
-
-static int save_tss_segment32(struct kvm_vcpu *vcpu,
-			      struct desc_struct *seg_desc,
-			      struct tss_segment_32 *tss)
-{
-	u32 base_addr;
-
-	base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-	return kvm_write_guest(vcpu->kvm, base_addr, tss,
-			       sizeof(struct tss_segment_32));
-}
-
-static int load_tss_segment16(struct kvm_vcpu *vcpu,
-			      struct desc_struct *seg_desc,
-			      struct tss_segment_16 *tss)
-{
-	u32 base_addr;
-
-	base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-	return kvm_read_guest(vcpu->kvm, base_addr, tss,
-			      sizeof(struct tss_segment_16));
-}
-
-static int save_tss_segment16(struct kvm_vcpu *vcpu,
-			      struct desc_struct *seg_desc,
-			      struct tss_segment_16 *tss)
-{
-	u32 base_addr;
-
-	base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-	return kvm_write_guest(vcpu->kvm, base_addr, tss,
-			       sizeof(struct tss_segment_16));
+	return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
 }
 
 static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
@@ -3466,20 +3428,26 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
 }
 
 static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
-			      struct desc_struct *cseg_desc,
+			      u32 old_tss_base,
 			      struct desc_struct *nseg_desc)
 {
 	struct tss_segment_16 tss_segment_16;
 	int ret = 0;
 
-	if (load_tss_segment16(vcpu, cseg_desc, &tss_segment_16))
+	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
+			   sizeof tss_segment_16))
 		goto out;
 
 	save_state_to_tss16(vcpu, &tss_segment_16);
-	save_tss_segment16(vcpu, cseg_desc, &tss_segment_16);
 
-	if (load_tss_segment16(vcpu, nseg_desc, &tss_segment_16))
+	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
+			    sizeof tss_segment_16))
 		goto out;
+
+	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+			   &tss_segment_16, sizeof tss_segment_16))
+		goto out;
+
 	if (load_state_from_tss16(vcpu, &tss_segment_16))
 		goto out;
 
@@ -3489,20 +3457,26 @@ out:
 }
 
 static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
-			      struct desc_struct *cseg_desc,
+			      u32 old_tss_base,
 			      struct desc_struct *nseg_desc)
 {
 	struct tss_segment_32 tss_segment_32;
 	int ret = 0;
 
-	if (load_tss_segment32(vcpu, cseg_desc, &tss_segment_32))
+	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
+			   sizeof tss_segment_32))
 		goto out;
 
	save_state_to_tss32(vcpu, &tss_segment_32);
-	save_tss_segment32(vcpu, cseg_desc, &tss_segment_32);
 
-	if (load_tss_segment32(vcpu, nseg_desc, &tss_segment_32))
+	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
+			    sizeof tss_segment_32))
+		goto out;
+
+	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+			   &tss_segment_32, sizeof tss_segment_32))
 		goto out;
+
 	if (load_state_from_tss32(vcpu, &tss_segment_32))
 		goto out;
 
@@ -3517,16 +3491,20 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 	struct desc_struct cseg_desc;
 	struct desc_struct nseg_desc;
 	int ret = 0;
+	u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
+	u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
 
-	kvm_get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
+	old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
 
+	/* FIXME: Handle errors. Failure to read either TSS or their
+	 * descriptors should generate a pagefault.
+	 */
 	if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
 		goto out;
 
-	if (load_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc))
+	if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
 		goto out;
 
-
 	if (reason != TASK_SWITCH_IRET) {
 		int cpl;
 
@@ -3544,8 +3522,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 
 	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
 		cseg_desc.type &= ~(1 << 1); //clear the B flag
-		save_guest_segment_descriptor(vcpu, tr_seg.selector,
-					      &cseg_desc);
+		save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
 	}
 
 	if (reason == TASK_SWITCH_IRET) {
@@ -3557,10 +3534,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 	kvm_x86_ops->cache_regs(vcpu);
 
 	if (nseg_desc.type & 8)
-		ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
+		ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
 					 &nseg_desc);
 	else
-		ret = kvm_task_switch_16(vcpu, tss_selector, &cseg_desc,
+		ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base,
 					 &nseg_desc);
 
 	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {