Diffstat (limited to 'arch'): 144 files changed, 3222 insertions, 1751 deletions
diff --git a/arch/Kconfig b/arch/Kconfig index eef3bbb97075..d82875820a15 100644 --- a/arch/Kconfig +++ b/arch/Kconfig | |||
@@ -83,6 +83,13 @@ config KRETPROBES | |||
83 | def_bool y | 83 | def_bool y |
84 | depends on KPROBES && HAVE_KRETPROBES | 84 | depends on KPROBES && HAVE_KRETPROBES |
85 | 85 | ||
86 | config USER_RETURN_NOTIFIER | ||
87 | bool | ||
88 | depends on HAVE_USER_RETURN_NOTIFIER | ||
89 | help | ||
90 | Provide a kernel-internal notification when a cpu is about to | ||
91 | switch to user mode. | ||
92 | |||
86 | config HAVE_IOREMAP_PROT | 93 | config HAVE_IOREMAP_PROT |
87 | bool | 94 | bool |
88 | 95 | ||
@@ -132,5 +139,7 @@ config HAVE_HW_BREAKPOINT | |||
132 | select ANON_INODES | 139 | select ANON_INODES |
133 | select PERF_EVENTS | 140 | select PERF_EVENTS |
134 | 141 | ||
142 | config HAVE_USER_RETURN_NOTIFIER | ||
143 | bool | ||
135 | 144 | ||
136 | source "kernel/gcov/Kconfig" | 145 | source "kernel/gcov/Kconfig" |
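For context: HAVE_USER_RETURN_NOTIFIER is the architecture opt-in (an arch selects it in its Kconfig), while USER_RETURN_NOTIFIER is selected by code that wants the callback. Below is a minimal sketch of a kernel-side consumer, assuming the user-return-notifier API this series adds outside arch/ (a struct user_return_notifier with an on_user_return() callback, registered per CPU via user_return_notifier_register()); the my_* names are illustrative and not part of this diff.

    #include <linux/user-return-notifier.h>

    static void my_on_user_return(struct user_return_notifier *urn)
    {
            /* Runs on this CPU just before it returns to user mode. */
    }

    static struct user_return_notifier my_urn = {
            .on_user_return = my_on_user_return,
    };

    static void my_arm_notifier(void)
    {
            /*
             * Register for the current CPU; drop it again with
             * user_return_notifier_unregister() when no longer needed.
             */
            user_return_notifier_register(&my_urn);
    }
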
diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h index 26773e3246e2..06edfefc3373 100644 --- a/arch/alpha/include/asm/socket.h +++ b/arch/alpha/include/asm/socket.h | |||
@@ -67,6 +67,8 @@ | |||
67 | #define SO_TIMESTAMPING 37 | 67 | #define SO_TIMESTAMPING 37 |
68 | #define SCM_TIMESTAMPING SO_TIMESTAMPING | 68 | #define SCM_TIMESTAMPING SO_TIMESTAMPING |
69 | 69 | ||
70 | #define SO_RXQ_OVFL 40 | ||
71 | |||
70 | /* O_NONBLOCK clashes with the bits used for socket types. Therefore we | 72 | /* O_NONBLOCK clashes with the bits used for socket types. Therefore we |
71 | * have to define SOCK_NONBLOCK to a different value here. | 73 | * have to define SOCK_NONBLOCK to a different value here. |
72 | */ | 74 | */ |
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h index 5b5c17485942..7f23665122df 100644 --- a/arch/alpha/include/asm/unistd.h +++ b/arch/alpha/include/asm/unistd.h | |||
@@ -433,10 +433,11 @@ | |||
433 | #define __NR_signalfd 476 | 433 | #define __NR_signalfd 476 |
434 | #define __NR_timerfd 477 | 434 | #define __NR_timerfd 477 |
435 | #define __NR_eventfd 478 | 435 | #define __NR_eventfd 478 |
436 | #define __NR_recvmmsg 479 | ||
436 | 437 | ||
437 | #ifdef __KERNEL__ | 438 | #ifdef __KERNEL__ |
438 | 439 | ||
439 | #define NR_SYSCALLS 479 | 440 | #define NR_SYSCALLS 480 |
440 | 441 | ||
441 | #define __ARCH_WANT_IPC_PARSE_VERSION | 442 | #define __ARCH_WANT_IPC_PARSE_VERSION |
442 | #define __ARCH_WANT_OLD_READDIR | 443 | #define __ARCH_WANT_OLD_READDIR |
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S index 95c9aef1c106..cda6b8b3d573 100644 --- a/arch/alpha/kernel/systbls.S +++ b/arch/alpha/kernel/systbls.S | |||
@@ -497,6 +497,7 @@ sys_call_table: | |||
497 | .quad sys_signalfd | 497 | .quad sys_signalfd |
498 | .quad sys_ni_syscall | 498 | .quad sys_ni_syscall |
499 | .quad sys_eventfd | 499 | .quad sys_eventfd |
500 | .quad sys_recvmmsg | ||
500 | 501 | ||
501 | .size sys_call_table, . - sys_call_table | 502 | .size sys_call_table, . - sys_call_table |
502 | .type sys_call_table, @object | 503 | .type sys_call_table, @object |
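The sys_recvmmsg entry added to this table (and to the arm, avr32, blackfin and ia64 tables later in this diff) wires up the new batched datagram receive call. A minimal userspace sketch of what it provides — the port number is hypothetical, error handling is omitted, and the recvmmsg() wrapper only exists in later glibc versions, so older libcs would have to go through syscall(2) with the per-arch number shown in these tables:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    #define VLEN   8
    #define BUFLEN 1500

    int main(void)
    {
            struct sockaddr_in addr = {
                    .sin_family = AF_INET,
                    .sin_port   = htons(9999),          /* hypothetical port */
                    .sin_addr   = { .s_addr = htonl(INADDR_ANY) },
            };
            struct mmsghdr msgs[VLEN];
            struct iovec iovs[VLEN];
            static char bufs[VLEN][BUFLEN];
            int fd, i, n;

            fd = socket(AF_INET, SOCK_DGRAM, 0);
            bind(fd, (struct sockaddr *)&addr, sizeof(addr));

            memset(msgs, 0, sizeof(msgs));
            for (i = 0; i < VLEN; i++) {
                    iovs[i].iov_base           = bufs[i];
                    iovs[i].iov_len            = BUFLEN;
                    msgs[i].msg_hdr.msg_iov    = &iovs[i];
                    msgs[i].msg_hdr.msg_iovlen = 1;
            }

            /* Receive up to VLEN datagrams with a single system call. */
            n = recvmmsg(fd, msgs, VLEN, 0, NULL);
            for (i = 0; i < n; i++)
                    printf("datagram %d: %u bytes\n", i, msgs[i].msg_len);
            return 0;
    }
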
diff --git a/arch/arm/include/asm/socket.h b/arch/arm/include/asm/socket.h index 92ac61d294fd..90ffd04b8e74 100644 --- a/arch/arm/include/asm/socket.h +++ b/arch/arm/include/asm/socket.h | |||
@@ -60,4 +60,6 @@ | |||
60 | #define SO_PROTOCOL 38 | 60 | #define SO_PROTOCOL 38 |
61 | #define SO_DOMAIN 39 | 61 | #define SO_DOMAIN 39 |
62 | 62 | ||
63 | #define SO_RXQ_OVFL 40 | ||
64 | |||
63 | #endif /* _ASM_SOCKET_H */ | 65 | #endif /* _ASM_SOCKET_H */ |
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index fafce1b5c69f..f58c1156e779 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S | |||
@@ -374,6 +374,7 @@ | |||
374 | CALL(sys_pwritev) | 374 | CALL(sys_pwritev) |
375 | CALL(sys_rt_tgsigqueueinfo) | 375 | CALL(sys_rt_tgsigqueueinfo) |
376 | CALL(sys_perf_event_open) | 376 | CALL(sys_perf_event_open) |
377 | /* 365 */ CALL(sys_recvmmsg) | ||
377 | #ifndef syscalls_counted | 378 | #ifndef syscalls_counted |
378 | .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls | 379 | .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls |
379 | #define syscalls_counted | 380 | #define syscalls_counted |
diff --git a/arch/arm/kernel/isa.c b/arch/arm/kernel/isa.c index 8ac9b8424007..346485910732 100644 --- a/arch/arm/kernel/isa.c +++ b/arch/arm/kernel/isa.c | |||
@@ -22,47 +22,42 @@ static unsigned int isa_membase, isa_portbase, isa_portshift; | |||
22 | 22 | ||
23 | static ctl_table ctl_isa_vars[4] = { | 23 | static ctl_table ctl_isa_vars[4] = { |
24 | { | 24 | { |
25 | .ctl_name = BUS_ISA_MEM_BASE, | ||
26 | .procname = "membase", | 25 | .procname = "membase", |
27 | .data = &isa_membase, | 26 | .data = &isa_membase, |
28 | .maxlen = sizeof(isa_membase), | 27 | .maxlen = sizeof(isa_membase), |
29 | .mode = 0444, | 28 | .mode = 0444, |
30 | .proc_handler = &proc_dointvec, | 29 | .proc_handler = proc_dointvec, |
31 | }, { | 30 | }, { |
32 | .ctl_name = BUS_ISA_PORT_BASE, | ||
33 | .procname = "portbase", | 31 | .procname = "portbase", |
34 | .data = &isa_portbase, | 32 | .data = &isa_portbase, |
35 | .maxlen = sizeof(isa_portbase), | 33 | .maxlen = sizeof(isa_portbase), |
36 | .mode = 0444, | 34 | .mode = 0444, |
37 | .proc_handler = &proc_dointvec, | 35 | .proc_handler = proc_dointvec, |
38 | }, { | 36 | }, { |
39 | .ctl_name = BUS_ISA_PORT_SHIFT, | ||
40 | .procname = "portshift", | 37 | .procname = "portshift", |
41 | .data = &isa_portshift, | 38 | .data = &isa_portshift, |
42 | .maxlen = sizeof(isa_portshift), | 39 | .maxlen = sizeof(isa_portshift), |
43 | .mode = 0444, | 40 | .mode = 0444, |
44 | .proc_handler = &proc_dointvec, | 41 | .proc_handler = proc_dointvec, |
45 | }, {0} | 42 | }, {} |
46 | }; | 43 | }; |
47 | 44 | ||
48 | static struct ctl_table_header *isa_sysctl_header; | 45 | static struct ctl_table_header *isa_sysctl_header; |
49 | 46 | ||
50 | static ctl_table ctl_isa[2] = { | 47 | static ctl_table ctl_isa[2] = { |
51 | { | 48 | { |
52 | .ctl_name = CTL_BUS_ISA, | ||
53 | .procname = "isa", | 49 | .procname = "isa", |
54 | .mode = 0555, | 50 | .mode = 0555, |
55 | .child = ctl_isa_vars, | 51 | .child = ctl_isa_vars, |
56 | }, {0} | 52 | }, {} |
57 | }; | 53 | }; |
58 | 54 | ||
59 | static ctl_table ctl_bus[2] = { | 55 | static ctl_table ctl_bus[2] = { |
60 | { | 56 | { |
61 | .ctl_name = CTL_BUS, | ||
62 | .procname = "bus", | 57 | .procname = "bus", |
63 | .mode = 0555, | 58 | .mode = 0555, |
64 | .child = ctl_isa, | 59 | .child = ctl_isa, |
65 | }, {0} | 60 | }, {} |
66 | }; | 61 | }; |
67 | 62 | ||
68 | void __init | 63 | void __init |
diff --git a/arch/arm/mach-bcmring/arch.c b/arch/arm/mach-bcmring/arch.c index 0da693b0f7e1..fbe6fa02c882 100644 --- a/arch/arm/mach-bcmring/arch.c +++ b/arch/arm/mach-bcmring/arch.c | |||
@@ -47,10 +47,6 @@ HW_DECLARE_SPINLOCK(gpio) | |||
47 | EXPORT_SYMBOL(bcmring_gpio_reg_lock); | 47 | EXPORT_SYMBOL(bcmring_gpio_reg_lock); |
48 | #endif | 48 | #endif |
49 | 49 | ||
50 | /* FIXME: temporary solution */ | ||
51 | #define BCM_SYSCTL_REBOOT_WARM 1 | ||
52 | #define CTL_BCM_REBOOT 112 | ||
53 | |||
54 | /* sysctl */ | 50 | /* sysctl */ |
55 | int bcmring_arch_warm_reboot; /* do a warm reboot on hard reset */ | 51 | int bcmring_arch_warm_reboot; /* do a warm reboot on hard reset */ |
56 | 52 | ||
@@ -58,18 +54,16 @@ static struct ctl_table_header *bcmring_sysctl_header; | |||
58 | 54 | ||
59 | static struct ctl_table bcmring_sysctl_warm_reboot[] = { | 55 | static struct ctl_table bcmring_sysctl_warm_reboot[] = { |
60 | { | 56 | { |
61 | .ctl_name = BCM_SYSCTL_REBOOT_WARM, | ||
62 | .procname = "warm", | 57 | .procname = "warm", |
63 | .data = &bcmring_arch_warm_reboot, | 58 | .data = &bcmring_arch_warm_reboot, |
64 | .maxlen = sizeof(int), | 59 | .maxlen = sizeof(int), |
65 | .mode = 0644, | 60 | .mode = 0644, |
66 | .proc_handler = &proc_dointvec}, | 61 | .proc_handler = proc_dointvec}, |
67 | {} | 62 | {} |
68 | }; | 63 | }; |
69 | 64 | ||
70 | static struct ctl_table bcmring_sysctl_reboot[] = { | 65 | static struct ctl_table bcmring_sysctl_reboot[] = { |
71 | { | 66 | { |
72 | .ctl_name = CTL_BCM_REBOOT, | ||
73 | .procname = "reboot", | 67 | .procname = "reboot", |
74 | .mode = 0555, | 68 | .mode = 0555, |
75 | .child = bcmring_sysctl_warm_reboot}, | 69 | .child = bcmring_sysctl_warm_reboot}, |
diff --git a/arch/arm/mach-davinci/include/mach/asp.h b/arch/arm/mach-davinci/include/mach/asp.h index 18e4ce34ece6..e07f70ed7c53 100644 --- a/arch/arm/mach-davinci/include/mach/asp.h +++ b/arch/arm/mach-davinci/include/mach/asp.h | |||
@@ -51,6 +51,14 @@ struct snd_platform_data { | |||
51 | u32 rx_dma_offset; | 51 | u32 rx_dma_offset; |
52 | enum dma_event_q eventq_no; /* event queue number */ | 52 | enum dma_event_q eventq_no; /* event queue number */ |
53 | unsigned int codec_fmt; | 53 | unsigned int codec_fmt; |
54 | /* | ||
55 | * Allowing this is more efficient and eliminates left and right swaps | ||
56 | * caused by underruns, but will swap the left and right channels | ||
57 | * when compared to previous behavior. | ||
58 | */ | ||
59 | unsigned enable_channel_combine:1; | ||
60 | unsigned sram_size_playback; | ||
61 | unsigned sram_size_capture; | ||
54 | 62 | ||
55 | /* McASP specific fields */ | 63 | /* McASP specific fields */ |
56 | int tdm_slots; | 64 | int tdm_slots; |
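The new snd_platform_data fields above are opt-ins, so existing boards keep their old behaviour. A hypothetical DaVinci board would enable them roughly like this (the structure name and sizes are illustrative, not taken from this diff):

    /* Only the newly added fields are shown. */
    static struct snd_platform_data my_board_snd_data = {
            .enable_channel_combine = 1,      /* accept the L/R swap noted in the comment above */
            .sram_size_playback     = 0x2000, /* illustrative on-chip SRAM buffer sizes */
            .sram_size_capture      = 0x2000,
    };
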
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c index 0acb5560229c..08e535d92c06 100644 --- a/arch/arm/mach-omap2/board-3430sdp.c +++ b/arch/arm/mach-omap2/board-3430sdp.c | |||
@@ -410,6 +410,15 @@ static struct regulator_init_data sdp3430_vpll2 = { | |||
410 | .consumer_supplies = &sdp3430_vdvi_supply, | 410 | .consumer_supplies = &sdp3430_vdvi_supply, |
411 | }; | 411 | }; |
412 | 412 | ||
413 | static struct twl4030_codec_audio_data sdp3430_audio = { | ||
414 | .audio_mclk = 26000000, | ||
415 | }; | ||
416 | |||
417 | static struct twl4030_codec_data sdp3430_codec = { | ||
418 | .audio_mclk = 26000000, | ||
419 | .audio = &sdp3430_audio, | ||
420 | }; | ||
421 | |||
413 | static struct twl4030_platform_data sdp3430_twldata = { | 422 | static struct twl4030_platform_data sdp3430_twldata = { |
414 | .irq_base = TWL4030_IRQ_BASE, | 423 | .irq_base = TWL4030_IRQ_BASE, |
415 | .irq_end = TWL4030_IRQ_END, | 424 | .irq_end = TWL4030_IRQ_END, |
@@ -420,6 +429,7 @@ static struct twl4030_platform_data sdp3430_twldata = { | |||
420 | .madc = &sdp3430_madc_data, | 429 | .madc = &sdp3430_madc_data, |
421 | .keypad = &sdp3430_kp_data, | 430 | .keypad = &sdp3430_kp_data, |
422 | .usb = &sdp3430_usb_data, | 431 | .usb = &sdp3430_usb_data, |
432 | .codec = &sdp3430_codec, | ||
423 | 433 | ||
424 | .vaux1 = &sdp3430_vaux1, | 434 | .vaux1 = &sdp3430_vaux1, |
425 | .vaux2 = &sdp3430_vaux2, | 435 | .vaux2 = &sdp3430_vaux2, |
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c index 08b0816afa61..af411e11dddf 100644 --- a/arch/arm/mach-omap2/board-omap3beagle.c +++ b/arch/arm/mach-omap2/board-omap3beagle.c | |||
@@ -254,6 +254,15 @@ static struct twl4030_usb_data beagle_usb_data = { | |||
254 | .usb_mode = T2_USB_MODE_ULPI, | 254 | .usb_mode = T2_USB_MODE_ULPI, |
255 | }; | 255 | }; |
256 | 256 | ||
257 | static struct twl4030_codec_audio_data beagle_audio_data = { | ||
258 | .audio_mclk = 26000000, | ||
259 | }; | ||
260 | |||
261 | static struct twl4030_codec_data beagle_codec_data = { | ||
262 | .audio_mclk = 26000000, | ||
263 | .audio = &beagle_audio_data, | ||
264 | }; | ||
265 | |||
257 | static struct twl4030_platform_data beagle_twldata = { | 266 | static struct twl4030_platform_data beagle_twldata = { |
258 | .irq_base = TWL4030_IRQ_BASE, | 267 | .irq_base = TWL4030_IRQ_BASE, |
259 | .irq_end = TWL4030_IRQ_END, | 268 | .irq_end = TWL4030_IRQ_END, |
@@ -261,6 +270,7 @@ static struct twl4030_platform_data beagle_twldata = { | |||
261 | /* platform_data for children goes here */ | 270 | /* platform_data for children goes here */ |
262 | .usb = &beagle_usb_data, | 271 | .usb = &beagle_usb_data, |
263 | .gpio = &beagle_gpio_data, | 272 | .gpio = &beagle_gpio_data, |
273 | .codec = &beagle_codec_data, | ||
264 | .vmmc1 = &beagle_vmmc1, | 274 | .vmmc1 = &beagle_vmmc1, |
265 | .vsim = &beagle_vsim, | 275 | .vsim = &beagle_vsim, |
266 | .vdac = &beagle_vdac, | 276 | .vdac = &beagle_vdac, |
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c index 4c4d7f8dbd72..25ca5f6a0d3d 100644 --- a/arch/arm/mach-omap2/board-omap3evm.c +++ b/arch/arm/mach-omap2/board-omap3evm.c | |||
@@ -194,6 +194,15 @@ static struct twl4030_madc_platform_data omap3evm_madc_data = { | |||
194 | .irq_line = 1, | 194 | .irq_line = 1, |
195 | }; | 195 | }; |
196 | 196 | ||
197 | static struct twl4030_codec_audio_data omap3evm_audio_data = { | ||
198 | .audio_mclk = 26000000, | ||
199 | }; | ||
200 | |||
201 | static struct twl4030_codec_data omap3evm_codec_data = { | ||
202 | .audio_mclk = 26000000, | ||
203 | .audio = &omap3evm_audio_data, | ||
204 | }; | ||
205 | |||
197 | static struct twl4030_platform_data omap3evm_twldata = { | 206 | static struct twl4030_platform_data omap3evm_twldata = { |
198 | .irq_base = TWL4030_IRQ_BASE, | 207 | .irq_base = TWL4030_IRQ_BASE, |
199 | .irq_end = TWL4030_IRQ_END, | 208 | .irq_end = TWL4030_IRQ_END, |
@@ -203,6 +212,7 @@ static struct twl4030_platform_data omap3evm_twldata = { | |||
203 | .madc = &omap3evm_madc_data, | 212 | .madc = &omap3evm_madc_data, |
204 | .usb = &omap3evm_usb_data, | 213 | .usb = &omap3evm_usb_data, |
205 | .gpio = &omap3evm_gpio_data, | 214 | .gpio = &omap3evm_gpio_data, |
215 | .codec = &omap3evm_codec_data, | ||
206 | }; | 216 | }; |
207 | 217 | ||
208 | static struct i2c_board_info __initdata omap3evm_i2c_boardinfo[] = { | 218 | static struct i2c_board_info __initdata omap3evm_i2c_boardinfo[] = { |
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c index 7519edb69155..c4be626c8422 100644 --- a/arch/arm/mach-omap2/board-omap3pandora.c +++ b/arch/arm/mach-omap2/board-omap3pandora.c | |||
@@ -281,11 +281,21 @@ static struct twl4030_usb_data omap3pandora_usb_data = { | |||
281 | .usb_mode = T2_USB_MODE_ULPI, | 281 | .usb_mode = T2_USB_MODE_ULPI, |
282 | }; | 282 | }; |
283 | 283 | ||
284 | static struct twl4030_codec_audio_data omap3pandora_audio_data = { | ||
285 | .audio_mclk = 26000000, | ||
286 | }; | ||
287 | |||
288 | static struct twl4030_codec_data omap3pandora_codec_data = { | ||
289 | .audio_mclk = 26000000, | ||
290 | .audio = &omap3pandora_audio_data, | ||
291 | }; | ||
292 | |||
284 | static struct twl4030_platform_data omap3pandora_twldata = { | 293 | static struct twl4030_platform_data omap3pandora_twldata = { |
285 | .irq_base = TWL4030_IRQ_BASE, | 294 | .irq_base = TWL4030_IRQ_BASE, |
286 | .irq_end = TWL4030_IRQ_END, | 295 | .irq_end = TWL4030_IRQ_END, |
287 | .gpio = &omap3pandora_gpio_data, | 296 | .gpio = &omap3pandora_gpio_data, |
288 | .usb = &omap3pandora_usb_data, | 297 | .usb = &omap3pandora_usb_data, |
298 | .codec = &omap3pandora_codec_data, | ||
289 | .vmmc1 = &pandora_vmmc1, | 299 | .vmmc1 = &pandora_vmmc1, |
290 | .vmmc2 = &pandora_vmmc2, | 300 | .vmmc2 = &pandora_vmmc2, |
291 | .keypad = &pandora_kp_data, | 301 | .keypad = &pandora_kp_data, |
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c index 9917d2fddc2f..e1fb50451e19 100644 --- a/arch/arm/mach-omap2/board-overo.c +++ b/arch/arm/mach-omap2/board-overo.c | |||
@@ -329,6 +329,15 @@ static struct regulator_init_data overo_vmmc1 = { | |||
329 | .consumer_supplies = &overo_vmmc1_supply, | 329 | .consumer_supplies = &overo_vmmc1_supply, |
330 | }; | 330 | }; |
331 | 331 | ||
332 | static struct twl4030_codec_audio_data overo_audio_data = { | ||
333 | .audio_mclk = 26000000, | ||
334 | }; | ||
335 | |||
336 | static struct twl4030_codec_data overo_codec_data = { | ||
337 | .audio_mclk = 26000000, | ||
338 | .audio = &overo_audio_data, | ||
339 | }; | ||
340 | |||
332 | /* mmc2 (WLAN) and Bluetooth don't use twl4030 regulators */ | 341 | /* mmc2 (WLAN) and Bluetooth don't use twl4030 regulators */ |
333 | 342 | ||
334 | static struct twl4030_platform_data overo_twldata = { | 343 | static struct twl4030_platform_data overo_twldata = { |
@@ -336,6 +345,7 @@ static struct twl4030_platform_data overo_twldata = { | |||
336 | .irq_end = TWL4030_IRQ_END, | 345 | .irq_end = TWL4030_IRQ_END, |
337 | .gpio = &overo_gpio_data, | 346 | .gpio = &overo_gpio_data, |
338 | .usb = &overo_usb_data, | 347 | .usb = &overo_usb_data, |
348 | .codec = &overo_codec_data, | ||
339 | .vmmc1 = &overo_vmmc1, | 349 | .vmmc1 = &overo_vmmc1, |
340 | }; | 350 | }; |
341 | 351 | ||
diff --git a/arch/arm/mach-omap2/board-zoom2.c b/arch/arm/mach-omap2/board-zoom2.c index 51e0b3ba5f3a..51df584728f6 100644 --- a/arch/arm/mach-omap2/board-zoom2.c +++ b/arch/arm/mach-omap2/board-zoom2.c | |||
@@ -230,6 +230,15 @@ static struct twl4030_madc_platform_data zoom2_madc_data = { | |||
230 | .irq_line = 1, | 230 | .irq_line = 1, |
231 | }; | 231 | }; |
232 | 232 | ||
233 | static struct twl4030_codec_audio_data zoom2_audio_data = { | ||
234 | .audio_mclk = 26000000, | ||
235 | }; | ||
236 | |||
237 | static struct twl4030_codec_data zoom2_codec_data = { | ||
238 | .audio_mclk = 26000000, | ||
239 | .audio = &zoom2_audio_data, | ||
240 | }; | ||
241 | |||
233 | static struct twl4030_platform_data zoom2_twldata = { | 242 | static struct twl4030_platform_data zoom2_twldata = { |
234 | .irq_base = TWL4030_IRQ_BASE, | 243 | .irq_base = TWL4030_IRQ_BASE, |
235 | .irq_end = TWL4030_IRQ_END, | 244 | .irq_end = TWL4030_IRQ_END, |
@@ -240,6 +249,7 @@ static struct twl4030_platform_data zoom2_twldata = { | |||
240 | .usb = &zoom2_usb_data, | 249 | .usb = &zoom2_usb_data, |
241 | .gpio = &zoom2_gpio_data, | 250 | .gpio = &zoom2_gpio_data, |
242 | .keypad = &zoom2_kp_twl4030_data, | 251 | .keypad = &zoom2_kp_twl4030_data, |
252 | .codec = &zoom2_codec_data, | ||
243 | .vmmc1 = &zoom2_vmmc1, | 253 | .vmmc1 = &zoom2_vmmc1, |
244 | .vmmc2 = &zoom2_vmmc2, | 254 | .vmmc2 = &zoom2_vmmc2, |
245 | .vsim = &zoom2_vsim, | 255 | .vsim = &zoom2_vsim, |
diff --git a/arch/arm/mach-s3c6400/include/mach/map.h b/arch/arm/mach-s3c6400/include/mach/map.h index f3b48f841d84..106ee13581e2 100644 --- a/arch/arm/mach-s3c6400/include/mach/map.h +++ b/arch/arm/mach-s3c6400/include/mach/map.h | |||
@@ -48,6 +48,8 @@ | |||
48 | #define S3C64XX_PA_IIS1 (0x7F003000) | 48 | #define S3C64XX_PA_IIS1 (0x7F003000) |
49 | #define S3C64XX_PA_TIMER (0x7F006000) | 49 | #define S3C64XX_PA_TIMER (0x7F006000) |
50 | #define S3C64XX_PA_IIC0 (0x7F004000) | 50 | #define S3C64XX_PA_IIC0 (0x7F004000) |
51 | #define S3C64XX_PA_PCM0 (0x7F009000) | ||
52 | #define S3C64XX_PA_PCM1 (0x7F00A000) | ||
51 | #define S3C64XX_PA_IISV4 (0x7F00D000) | 53 | #define S3C64XX_PA_IISV4 (0x7F00D000) |
52 | #define S3C64XX_PA_IIC1 (0x7F00F000) | 54 | #define S3C64XX_PA_IIC1 (0x7F00F000) |
53 | 55 | ||
diff --git a/arch/arm/plat-s3c/include/plat/audio.h b/arch/arm/plat-s3c/include/plat/audio.h index de0e8da48bc3..f22d23bb6271 100644 --- a/arch/arm/plat-s3c/include/plat/audio.h +++ b/arch/arm/plat-s3c/include/plat/audio.h | |||
@@ -1,45 +1,17 @@ | |||
1 | /* arch/arm/mach-s3c2410/include/mach/audio.h | 1 | /* arch/arm/plat-s3c/include/plat/audio.h |
2 | * | 2 | * |
3 | * Copyright (c) 2004-2005 Simtec Electronics | 3 | * Copyright (c) 2009 Samsung Electronics Co. Ltd |
4 | * http://www.simtec.co.uk/products/SWLINUX/ | 4 | * Author: Jaswinder Singh <jassi.brar@samsung.com> |
5 | * Ben Dooks <ben@simtec.co.uk> | ||
6 | * | ||
7 | * S3C24XX - Audio platfrom_device info | ||
8 | * | 5 | * |
9 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
11 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
12 | */ | 9 | */ |
13 | |||
14 | #ifndef __ASM_ARCH_AUDIO_H | ||
15 | #define __ASM_ARCH_AUDIO_H __FILE__ | ||
16 | |||
17 | /* struct s3c24xx_iis_ops | ||
18 | * | ||
19 | * called from the s3c24xx audio core to deal with the architecture | ||
20 | * or the codec's setup and control. | ||
21 | * | ||
22 | * the pointer to itself is passed through in case the caller wants to | ||
23 | * embed this in an larger structure for easy reference to it's context. | ||
24 | */ | ||
25 | 10 | ||
26 | struct s3c24xx_iis_ops { | 11 | /** |
27 | struct module *owner; | 12 | * struct s3c_audio_pdata - common platform data for audio device drivers |
28 | 13 | * @cfg_gpio: Callback function to setup mux'ed pins in I2S/PCM/AC97 mode | |
29 | int (*startup)(struct s3c24xx_iis_ops *me); | 14 | */ |
30 | void (*shutdown)(struct s3c24xx_iis_ops *me); | 15 | struct s3c_audio_pdata { |
31 | int (*suspend)(struct s3c24xx_iis_ops *me); | 16 | int (*cfg_gpio)(struct platform_device *); |
32 | int (*resume)(struct s3c24xx_iis_ops *me); | ||
33 | |||
34 | int (*open)(struct s3c24xx_iis_ops *me, struct snd_pcm_substream *strm); | ||
35 | int (*close)(struct s3c24xx_iis_ops *me, struct snd_pcm_substream *strm); | ||
36 | int (*prepare)(struct s3c24xx_iis_ops *me, struct snd_pcm_substream *strm, struct snd_pcm_runtime *rt); | ||
37 | }; | 17 | }; |
38 | |||
39 | struct s3c24xx_platdata_iis { | ||
40 | const char *codec_clk; | ||
41 | struct s3c24xx_iis_ops *ops; | ||
42 | int (*match_dev)(struct device *dev); | ||
43 | }; | ||
44 | |||
45 | #endif /* __ASM_ARCH_AUDIO_H */ | ||
diff --git a/arch/arm/plat-s3c/include/plat/devs.h b/arch/arm/plat-s3c/include/plat/devs.h index 0f540ea1e999..932cbbbb4273 100644 --- a/arch/arm/plat-s3c/include/plat/devs.h +++ b/arch/arm/plat-s3c/include/plat/devs.h | |||
@@ -28,6 +28,9 @@ extern struct platform_device s3c64xx_device_iis0; | |||
28 | extern struct platform_device s3c64xx_device_iis1; | 28 | extern struct platform_device s3c64xx_device_iis1; |
29 | extern struct platform_device s3c64xx_device_iisv4; | 29 | extern struct platform_device s3c64xx_device_iisv4; |
30 | 30 | ||
31 | extern struct platform_device s3c64xx_device_pcm0; | ||
32 | extern struct platform_device s3c64xx_device_pcm1; | ||
33 | |||
31 | extern struct platform_device s3c_device_fb; | 34 | extern struct platform_device s3c_device_fb; |
32 | extern struct platform_device s3c_device_usb; | 35 | extern struct platform_device s3c_device_usb; |
33 | extern struct platform_device s3c_device_lcd; | 36 | extern struct platform_device s3c_device_lcd; |
diff --git a/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h b/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h index 07659dad1748..abf2fbc2eb2f 100644 --- a/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h +++ b/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h | |||
@@ -67,6 +67,8 @@ | |||
67 | #define S3C2412_IISMOD_BCLK_MASK (3 << 1) | 67 | #define S3C2412_IISMOD_BCLK_MASK (3 << 1) |
68 | #define S3C2412_IISMOD_8BIT (1 << 0) | 68 | #define S3C2412_IISMOD_8BIT (1 << 0) |
69 | 69 | ||
70 | #define S3C64XX_IISMOD_CDCLKCON (1 << 12) | ||
71 | |||
70 | #define S3C2412_IISPSR_PSREN (1 << 15) | 72 | #define S3C2412_IISPSR_PSREN (1 << 15) |
71 | 73 | ||
72 | #define S3C2412_IISFIC_TXFLUSH (1 << 15) | 74 | #define S3C2412_IISFIC_TXFLUSH (1 << 15) |
diff --git a/arch/arm/plat-s3c64xx/dev-audio.c b/arch/arm/plat-s3c64xx/dev-audio.c index 1322beb40dd7..a21a88fbb7e3 100644 --- a/arch/arm/plat-s3c64xx/dev-audio.c +++ b/arch/arm/plat-s3c64xx/dev-audio.c | |||
@@ -15,9 +15,14 @@ | |||
15 | 15 | ||
16 | #include <mach/irqs.h> | 16 | #include <mach/irqs.h> |
17 | #include <mach/map.h> | 17 | #include <mach/map.h> |
18 | #include <mach/dma.h> | ||
19 | #include <mach/gpio.h> | ||
18 | 20 | ||
19 | #include <plat/devs.h> | 21 | #include <plat/devs.h> |
20 | 22 | #include <plat/audio.h> | |
23 | #include <plat/gpio-bank-d.h> | ||
24 | #include <plat/gpio-bank-e.h> | ||
25 | #include <plat/gpio-cfg.h> | ||
21 | 26 | ||
22 | static struct resource s3c64xx_iis0_resource[] = { | 27 | static struct resource s3c64xx_iis0_resource[] = { |
23 | [0] = { | 28 | [0] = { |
@@ -66,3 +71,97 @@ struct platform_device s3c64xx_device_iisv4 = { | |||
66 | .resource = s3c64xx_iisv4_resource, | 71 | .resource = s3c64xx_iisv4_resource, |
67 | }; | 72 | }; |
68 | EXPORT_SYMBOL(s3c64xx_device_iisv4); | 73 | EXPORT_SYMBOL(s3c64xx_device_iisv4); |
74 | |||
75 | |||
76 | /* PCM Controller platform_devices */ | ||
77 | |||
78 | static int s3c64xx_pcm_cfg_gpio(struct platform_device *pdev) | ||
79 | { | ||
80 | switch (pdev->id) { | ||
81 | case 0: | ||
82 | s3c_gpio_cfgpin(S3C64XX_GPD(0), S3C64XX_GPD0_PCM0_SCLK); | ||
83 | s3c_gpio_cfgpin(S3C64XX_GPD(1), S3C64XX_GPD1_PCM0_EXTCLK); | ||
84 | s3c_gpio_cfgpin(S3C64XX_GPD(2), S3C64XX_GPD2_PCM0_FSYNC); | ||
85 | s3c_gpio_cfgpin(S3C64XX_GPD(3), S3C64XX_GPD3_PCM0_SIN); | ||
86 | s3c_gpio_cfgpin(S3C64XX_GPD(4), S3C64XX_GPD4_PCM0_SOUT); | ||
87 | break; | ||
88 | case 1: | ||
89 | s3c_gpio_cfgpin(S3C64XX_GPE(0), S3C64XX_GPE0_PCM1_SCLK); | ||
90 | s3c_gpio_cfgpin(S3C64XX_GPE(1), S3C64XX_GPE1_PCM1_EXTCLK); | ||
91 | s3c_gpio_cfgpin(S3C64XX_GPE(2), S3C64XX_GPE2_PCM1_FSYNC); | ||
92 | s3c_gpio_cfgpin(S3C64XX_GPE(3), S3C64XX_GPE3_PCM1_SIN); | ||
93 | s3c_gpio_cfgpin(S3C64XX_GPE(4), S3C64XX_GPE4_PCM1_SOUT); | ||
94 | break; | ||
95 | default: | ||
96 | printk(KERN_DEBUG "Invalid PCM Controller number!"); | ||
97 | return -EINVAL; | ||
98 | } | ||
99 | |||
100 | return 0; | ||
101 | } | ||
102 | |||
103 | static struct resource s3c64xx_pcm0_resource[] = { | ||
104 | [0] = { | ||
105 | .start = S3C64XX_PA_PCM0, | ||
106 | .end = S3C64XX_PA_PCM0 + 0x100 - 1, | ||
107 | .flags = IORESOURCE_MEM, | ||
108 | }, | ||
109 | [1] = { | ||
110 | .start = DMACH_PCM0_TX, | ||
111 | .end = DMACH_PCM0_TX, | ||
112 | .flags = IORESOURCE_DMA, | ||
113 | }, | ||
114 | [2] = { | ||
115 | .start = DMACH_PCM0_RX, | ||
116 | .end = DMACH_PCM0_RX, | ||
117 | .flags = IORESOURCE_DMA, | ||
118 | }, | ||
119 | }; | ||
120 | |||
121 | static struct s3c_audio_pdata s3c_pcm0_pdata = { | ||
122 | .cfg_gpio = s3c64xx_pcm_cfg_gpio, | ||
123 | }; | ||
124 | |||
125 | struct platform_device s3c64xx_device_pcm0 = { | ||
126 | .name = "samsung-pcm", | ||
127 | .id = 0, | ||
128 | .num_resources = ARRAY_SIZE(s3c64xx_pcm0_resource), | ||
129 | .resource = s3c64xx_pcm0_resource, | ||
130 | .dev = { | ||
131 | .platform_data = &s3c_pcm0_pdata, | ||
132 | }, | ||
133 | }; | ||
134 | EXPORT_SYMBOL(s3c64xx_device_pcm0); | ||
135 | |||
136 | static struct resource s3c64xx_pcm1_resource[] = { | ||
137 | [0] = { | ||
138 | .start = S3C64XX_PA_PCM1, | ||
139 | .end = S3C64XX_PA_PCM1 + 0x100 - 1, | ||
140 | .flags = IORESOURCE_MEM, | ||
141 | }, | ||
142 | [1] = { | ||
143 | .start = DMACH_PCM1_TX, | ||
144 | .end = DMACH_PCM1_TX, | ||
145 | .flags = IORESOURCE_DMA, | ||
146 | }, | ||
147 | [2] = { | ||
148 | .start = DMACH_PCM1_RX, | ||
149 | .end = DMACH_PCM1_RX, | ||
150 | .flags = IORESOURCE_DMA, | ||
151 | }, | ||
152 | }; | ||
153 | |||
154 | static struct s3c_audio_pdata s3c_pcm1_pdata = { | ||
155 | .cfg_gpio = s3c64xx_pcm_cfg_gpio, | ||
156 | }; | ||
157 | |||
158 | struct platform_device s3c64xx_device_pcm1 = { | ||
159 | .name = "samsung-pcm", | ||
160 | .id = 1, | ||
161 | .num_resources = ARRAY_SIZE(s3c64xx_pcm1_resource), | ||
162 | .resource = s3c64xx_pcm1_resource, | ||
163 | .dev = { | ||
164 | .platform_data = &s3c_pcm1_pdata, | ||
165 | }, | ||
166 | }; | ||
167 | EXPORT_SYMBOL(s3c64xx_device_pcm1); | ||
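Both controllers bind to the same "samsung-pcm" driver and are told apart by .id; the driver gets its pin setup through the cfg_gpio callback in the platform data. A board file would typically publish them alongside its other devices, roughly as sketched here (the my_board_* names are hypothetical):

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/platform_device.h>

    #include <plat/devs.h>

    static struct platform_device *my_board_devices[] __initdata = {
            &s3c64xx_device_pcm0,
            &s3c64xx_device_pcm1,
    };

    static void __init my_board_machine_init(void)
    {
            /* Register both PCM controllers with the driver core. */
            platform_add_devices(my_board_devices, ARRAY_SIZE(my_board_devices));
    }
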
diff --git a/arch/avr32/include/asm/socket.h b/arch/avr32/include/asm/socket.h index fe863f9794d5..c8d1fae49476 100644 --- a/arch/avr32/include/asm/socket.h +++ b/arch/avr32/include/asm/socket.h | |||
@@ -60,4 +60,6 @@ | |||
60 | #define SO_PROTOCOL 38 | 60 | #define SO_PROTOCOL 38 |
61 | #define SO_DOMAIN 39 | 61 | #define SO_DOMAIN 39 |
62 | 62 | ||
63 | #define SO_RXQ_OVFL 40 | ||
64 | |||
63 | #endif /* __ASM_AVR32_SOCKET_H */ | 65 | #endif /* __ASM_AVR32_SOCKET_H */ |
diff --git a/arch/avr32/kernel/syscall_table.S b/arch/avr32/kernel/syscall_table.S index 7ee0057613b3..e76bad16b0f0 100644 --- a/arch/avr32/kernel/syscall_table.S +++ b/arch/avr32/kernel/syscall_table.S | |||
@@ -295,4 +295,5 @@ sys_call_table: | |||
295 | .long sys_signalfd | 295 | .long sys_signalfd |
296 | .long sys_ni_syscall /* 280, was sys_timerfd */ | 296 | .long sys_ni_syscall /* 280, was sys_timerfd */ |
297 | .long sys_eventfd | 297 | .long sys_eventfd |
298 | .long sys_recvmmsg | ||
298 | .long sys_ni_syscall /* r8 is saturated at nr_syscalls */ | 299 | .long sys_ni_syscall /* r8 is saturated at nr_syscalls */ |
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S index 94a0375cbdcf..a50637a8b9bd 100644 --- a/arch/blackfin/mach-common/entry.S +++ b/arch/blackfin/mach-common/entry.S | |||
@@ -1600,6 +1600,7 @@ ENTRY(_sys_call_table) | |||
1600 | .long _sys_pwritev | 1600 | .long _sys_pwritev |
1601 | .long _sys_rt_tgsigqueueinfo | 1601 | .long _sys_rt_tgsigqueueinfo |
1602 | .long _sys_perf_event_open | 1602 | .long _sys_perf_event_open |
1603 | .long _sys_recvmmsg /* 370 */ | ||
1603 | 1604 | ||
1604 | .rept NR_syscalls-(.-_sys_call_table)/4 | 1605 | .rept NR_syscalls-(.-_sys_call_table)/4 |
1605 | .long _sys_ni_syscall | 1606 | .long _sys_ni_syscall |
diff --git a/arch/cris/include/asm/socket.h b/arch/cris/include/asm/socket.h index 45ec49bdb7b1..1a4a61909ca8 100644 --- a/arch/cris/include/asm/socket.h +++ b/arch/cris/include/asm/socket.h | |||
@@ -62,6 +62,8 @@ | |||
62 | #define SO_PROTOCOL 38 | 62 | #define SO_PROTOCOL 38 |
63 | #define SO_DOMAIN 39 | 63 | #define SO_DOMAIN 39 |
64 | 64 | ||
65 | #define SO_RXQ_OVFL 40 | ||
66 | |||
65 | #endif /* _ASM_SOCKET_H */ | 67 | #endif /* _ASM_SOCKET_H */ |
66 | 68 | ||
67 | 69 | ||
diff --git a/arch/frv/include/asm/socket.h b/arch/frv/include/asm/socket.h index 2dea726095c2..a6b26880c1ec 100644 --- a/arch/frv/include/asm/socket.h +++ b/arch/frv/include/asm/socket.h | |||
@@ -60,5 +60,7 @@ | |||
60 | #define SO_PROTOCOL 38 | 60 | #define SO_PROTOCOL 38 |
61 | #define SO_DOMAIN 39 | 61 | #define SO_DOMAIN 39 |
62 | 62 | ||
63 | #define SO_RXQ_OVFL 40 | ||
64 | |||
63 | #endif /* _ASM_SOCKET_H */ | 65 | #endif /* _ASM_SOCKET_H */ |
64 | 66 | ||
diff --git a/arch/frv/kernel/pm.c b/arch/frv/kernel/pm.c index 0d4d3e3a4cfc..5fa3889d858b 100644 --- a/arch/frv/kernel/pm.c +++ b/arch/frv/kernel/pm.c | |||
@@ -211,37 +211,6 @@ static int cmode_procctl(ctl_table *ctl, int write, | |||
211 | return try_set_cmode(new_cmode)?:*lenp; | 211 | return try_set_cmode(new_cmode)?:*lenp; |
212 | } | 212 | } |
213 | 213 | ||
214 | static int cmode_sysctl(ctl_table *table, | ||
215 | void __user *oldval, size_t __user *oldlenp, | ||
216 | void __user *newval, size_t newlen) | ||
217 | { | ||
218 | if (oldval && oldlenp) { | ||
219 | size_t oldlen; | ||
220 | |||
221 | if (get_user(oldlen, oldlenp)) | ||
222 | return -EFAULT; | ||
223 | |||
224 | if (oldlen != sizeof(int)) | ||
225 | return -EINVAL; | ||
226 | |||
227 | if (put_user(clock_cmode_current, (unsigned __user *)oldval) || | ||
228 | put_user(sizeof(int), oldlenp)) | ||
229 | return -EFAULT; | ||
230 | } | ||
231 | if (newval && newlen) { | ||
232 | int new_cmode; | ||
233 | |||
234 | if (newlen != sizeof(int)) | ||
235 | return -EINVAL; | ||
236 | |||
237 | if (get_user(new_cmode, (int __user *)newval)) | ||
238 | return -EFAULT; | ||
239 | |||
240 | return try_set_cmode(new_cmode)?:1; | ||
241 | } | ||
242 | return 1; | ||
243 | } | ||
244 | |||
245 | static int try_set_p0(int new_p0) | 214 | static int try_set_p0(int new_p0) |
246 | { | 215 | { |
247 | unsigned long flags, clkc; | 216 | unsigned long flags, clkc; |
@@ -314,37 +283,6 @@ static int p0_procctl(ctl_table *ctl, int write, | |||
314 | return try_set_p0(new_p0)?:*lenp; | 283 | return try_set_p0(new_p0)?:*lenp; |
315 | } | 284 | } |
316 | 285 | ||
317 | static int p0_sysctl(ctl_table *table, | ||
318 | void __user *oldval, size_t __user *oldlenp, | ||
319 | void __user *newval, size_t newlen) | ||
320 | { | ||
321 | if (oldval && oldlenp) { | ||
322 | size_t oldlen; | ||
323 | |||
324 | if (get_user(oldlen, oldlenp)) | ||
325 | return -EFAULT; | ||
326 | |||
327 | if (oldlen != sizeof(int)) | ||
328 | return -EINVAL; | ||
329 | |||
330 | if (put_user(clock_p0_current, (unsigned __user *)oldval) || | ||
331 | put_user(sizeof(int), oldlenp)) | ||
332 | return -EFAULT; | ||
333 | } | ||
334 | if (newval && newlen) { | ||
335 | int new_p0; | ||
336 | |||
337 | if (newlen != sizeof(int)) | ||
338 | return -EINVAL; | ||
339 | |||
340 | if (get_user(new_p0, (int __user *)newval)) | ||
341 | return -EFAULT; | ||
342 | |||
343 | return try_set_p0(new_p0)?:1; | ||
344 | } | ||
345 | return 1; | ||
346 | } | ||
347 | |||
348 | static int cm_procctl(ctl_table *ctl, int write, | 286 | static int cm_procctl(ctl_table *ctl, int write, |
349 | void __user *buffer, size_t *lenp, loff_t *fpos) | 287 | void __user *buffer, size_t *lenp, loff_t *fpos) |
350 | { | 288 | { |
@@ -358,87 +296,47 @@ static int cm_procctl(ctl_table *ctl, int write, | |||
358 | return try_set_cm(new_cm)?:*lenp; | 296 | return try_set_cm(new_cm)?:*lenp; |
359 | } | 297 | } |
360 | 298 | ||
361 | static int cm_sysctl(ctl_table *table, | ||
362 | void __user *oldval, size_t __user *oldlenp, | ||
363 | void __user *newval, size_t newlen) | ||
364 | { | ||
365 | if (oldval && oldlenp) { | ||
366 | size_t oldlen; | ||
367 | |||
368 | if (get_user(oldlen, oldlenp)) | ||
369 | return -EFAULT; | ||
370 | |||
371 | if (oldlen != sizeof(int)) | ||
372 | return -EINVAL; | ||
373 | |||
374 | if (put_user(clock_cm_current, (unsigned __user *)oldval) || | ||
375 | put_user(sizeof(int), oldlenp)) | ||
376 | return -EFAULT; | ||
377 | } | ||
378 | if (newval && newlen) { | ||
379 | int new_cm; | ||
380 | |||
381 | if (newlen != sizeof(int)) | ||
382 | return -EINVAL; | ||
383 | |||
384 | if (get_user(new_cm, (int __user *)newval)) | ||
385 | return -EFAULT; | ||
386 | |||
387 | return try_set_cm(new_cm)?:1; | ||
388 | } | ||
389 | return 1; | ||
390 | } | ||
391 | |||
392 | |||
393 | static struct ctl_table pm_table[] = | 299 | static struct ctl_table pm_table[] = |
394 | { | 300 | { |
395 | { | 301 | { |
396 | .ctl_name = CTL_PM_SUSPEND, | ||
397 | .procname = "suspend", | 302 | .procname = "suspend", |
398 | .data = NULL, | 303 | .data = NULL, |
399 | .maxlen = 0, | 304 | .maxlen = 0, |
400 | .mode = 0200, | 305 | .mode = 0200, |
401 | .proc_handler = &sysctl_pm_do_suspend, | 306 | .proc_handler = sysctl_pm_do_suspend, |
402 | }, | 307 | }, |
403 | { | 308 | { |
404 | .ctl_name = CTL_PM_CMODE, | ||
405 | .procname = "cmode", | 309 | .procname = "cmode", |
406 | .data = &clock_cmode_current, | 310 | .data = &clock_cmode_current, |
407 | .maxlen = sizeof(int), | 311 | .maxlen = sizeof(int), |
408 | .mode = 0644, | 312 | .mode = 0644, |
409 | .proc_handler = &cmode_procctl, | 313 | .proc_handler = cmode_procctl, |
410 | .strategy = &cmode_sysctl, | ||
411 | }, | 314 | }, |
412 | { | 315 | { |
413 | .ctl_name = CTL_PM_P0, | ||
414 | .procname = "p0", | 316 | .procname = "p0", |
415 | .data = &clock_p0_current, | 317 | .data = &clock_p0_current, |
416 | .maxlen = sizeof(int), | 318 | .maxlen = sizeof(int), |
417 | .mode = 0644, | 319 | .mode = 0644, |
418 | .proc_handler = &p0_procctl, | 320 | .proc_handler = p0_procctl, |
419 | .strategy = &p0_sysctl, | ||
420 | }, | 321 | }, |
421 | { | 322 | { |
422 | .ctl_name = CTL_PM_CM, | ||
423 | .procname = "cm", | 323 | .procname = "cm", |
424 | .data = &clock_cm_current, | 324 | .data = &clock_cm_current, |
425 | .maxlen = sizeof(int), | 325 | .maxlen = sizeof(int), |
426 | .mode = 0644, | 326 | .mode = 0644, |
427 | .proc_handler = &cm_procctl, | 327 | .proc_handler = cm_procctl, |
428 | .strategy = &cm_sysctl, | ||
429 | }, | 328 | }, |
430 | { .ctl_name = 0} | 329 | { } |
431 | }; | 330 | }; |
432 | 331 | ||
433 | static struct ctl_table pm_dir_table[] = | 332 | static struct ctl_table pm_dir_table[] = |
434 | { | 333 | { |
435 | { | 334 | { |
436 | .ctl_name = CTL_PM, | ||
437 | .procname = "pm", | 335 | .procname = "pm", |
438 | .mode = 0555, | 336 | .mode = 0555, |
439 | .child = pm_table, | 337 | .child = pm_table, |
440 | }, | 338 | }, |
441 | { .ctl_name = 0} | 339 | { } |
442 | }; | 340 | }; |
443 | 341 | ||
444 | /* | 342 | /* |
diff --git a/arch/frv/kernel/sysctl.c b/arch/frv/kernel/sysctl.c index 3e9d7e03fb95..035516cb7a97 100644 --- a/arch/frv/kernel/sysctl.c +++ b/arch/frv/kernel/sysctl.c | |||
@@ -176,21 +176,19 @@ static int procctl_frv_pin_cxnr(ctl_table *table, int write, struct file *filp, | |||
176 | static struct ctl_table frv_table[] = | 176 | static struct ctl_table frv_table[] = |
177 | { | 177 | { |
178 | { | 178 | { |
179 | .ctl_name = 1, | ||
180 | .procname = "cache-mode", | 179 | .procname = "cache-mode", |
181 | .data = NULL, | 180 | .data = NULL, |
182 | .maxlen = 0, | 181 | .maxlen = 0, |
183 | .mode = 0644, | 182 | .mode = 0644, |
184 | .proc_handler = &procctl_frv_cachemode, | 183 | .proc_handler = procctl_frv_cachemode, |
185 | }, | 184 | }, |
186 | #ifdef CONFIG_MMU | 185 | #ifdef CONFIG_MMU |
187 | { | 186 | { |
188 | .ctl_name = 2, | ||
189 | .procname = "pin-cxnr", | 187 | .procname = "pin-cxnr", |
190 | .data = NULL, | 188 | .data = NULL, |
191 | .maxlen = 0, | 189 | .maxlen = 0, |
192 | .mode = 0644, | 190 | .mode = 0644, |
193 | .proc_handler = &procctl_frv_pin_cxnr | 191 | .proc_handler = procctl_frv_pin_cxnr |
194 | }, | 192 | }, |
195 | #endif | 193 | #endif |
196 | {} | 194 | {} |
@@ -203,7 +201,6 @@ static struct ctl_table frv_table[] = | |||
203 | static struct ctl_table frv_dir_table[] = | 201 | static struct ctl_table frv_dir_table[] = |
204 | { | 202 | { |
205 | { | 203 | { |
206 | .ctl_name = CTL_FRV, | ||
207 | .procname = "frv", | 204 | .procname = "frv", |
208 | .mode = 0555, | 205 | .mode = 0555, |
209 | .child = frv_table | 206 | .child = frv_table |
diff --git a/arch/h8300/include/asm/socket.h b/arch/h8300/include/asm/socket.h index 1547f01c8e22..04c0f4596eb5 100644 --- a/arch/h8300/include/asm/socket.h +++ b/arch/h8300/include/asm/socket.h | |||
@@ -60,4 +60,6 @@ | |||
60 | #define SO_PROTOCOL 38 | 60 | #define SO_PROTOCOL 38 |
61 | #define SO_DOMAIN 39 | 61 | #define SO_DOMAIN 39 |
62 | 62 | ||
63 | #define SO_RXQ_OVFL 40 | ||
64 | |||
63 | #endif /* _ASM_SOCKET_H */ | 65 | #endif /* _ASM_SOCKET_H */ |
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S index af9405cd70e5..10c37510f4b4 100644 --- a/arch/ia64/ia32/ia32_entry.S +++ b/arch/ia64/ia32/ia32_entry.S | |||
@@ -327,7 +327,7 @@ ia32_syscall_table: | |||
327 | data8 compat_sys_writev | 327 | data8 compat_sys_writev |
328 | data8 sys_getsid | 328 | data8 sys_getsid |
329 | data8 sys_fdatasync | 329 | data8 sys_fdatasync |
330 | data8 sys32_sysctl | 330 | data8 compat_sys_sysctl |
331 | data8 sys_mlock /* 150 */ | 331 | data8 sys_mlock /* 150 */ |
332 | data8 sys_munlock | 332 | data8 sys_munlock |
333 | data8 sys_mlockall | 333 | data8 sys_mlockall |
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index 625ed8f76fce..429ec968c9ee 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c | |||
@@ -1628,61 +1628,6 @@ sys32_msync (unsigned int start, unsigned int len, int flags) | |||
1628 | return sys_msync(addr, len + (start - addr), flags); | 1628 | return sys_msync(addr, len + (start - addr), flags); |
1629 | } | 1629 | } |
1630 | 1630 | ||
1631 | struct sysctl32 { | ||
1632 | unsigned int name; | ||
1633 | int nlen; | ||
1634 | unsigned int oldval; | ||
1635 | unsigned int oldlenp; | ||
1636 | unsigned int newval; | ||
1637 | unsigned int newlen; | ||
1638 | unsigned int __unused[4]; | ||
1639 | }; | ||
1640 | |||
1641 | #ifdef CONFIG_SYSCTL_SYSCALL | ||
1642 | asmlinkage long | ||
1643 | sys32_sysctl (struct sysctl32 __user *args) | ||
1644 | { | ||
1645 | struct sysctl32 a32; | ||
1646 | mm_segment_t old_fs = get_fs (); | ||
1647 | void __user *oldvalp, *newvalp; | ||
1648 | size_t oldlen; | ||
1649 | int __user *namep; | ||
1650 | long ret; | ||
1651 | |||
1652 | if (copy_from_user(&a32, args, sizeof(a32))) | ||
1653 | return -EFAULT; | ||
1654 | |||
1655 | /* | ||
1656 | * We need to pre-validate these because we have to disable address checking | ||
1657 | * before calling do_sysctl() because of OLDLEN but we can't run the risk of the | ||
1658 | * user specifying bad addresses here. Well, since we're dealing with 32 bit | ||
1659 | * addresses, we KNOW that access_ok() will always succeed, so this is an | ||
1660 | * expensive NOP, but so what... | ||
1661 | */ | ||
1662 | namep = (int __user *) compat_ptr(a32.name); | ||
1663 | oldvalp = compat_ptr(a32.oldval); | ||
1664 | newvalp = compat_ptr(a32.newval); | ||
1665 | |||
1666 | if ((oldvalp && get_user(oldlen, (int __user *) compat_ptr(a32.oldlenp))) | ||
1667 | || !access_ok(VERIFY_WRITE, namep, 0) | ||
1668 | || !access_ok(VERIFY_WRITE, oldvalp, 0) | ||
1669 | || !access_ok(VERIFY_WRITE, newvalp, 0)) | ||
1670 | return -EFAULT; | ||
1671 | |||
1672 | set_fs(KERNEL_DS); | ||
1673 | lock_kernel(); | ||
1674 | ret = do_sysctl(namep, a32.nlen, oldvalp, (size_t __user *) &oldlen, | ||
1675 | newvalp, (size_t) a32.newlen); | ||
1676 | unlock_kernel(); | ||
1677 | set_fs(old_fs); | ||
1678 | |||
1679 | if (oldvalp && put_user (oldlen, (int __user *) compat_ptr(a32.oldlenp))) | ||
1680 | return -EFAULT; | ||
1681 | |||
1682 | return ret; | ||
1683 | } | ||
1684 | #endif | ||
1685 | |||
1686 | asmlinkage long | 1631 | asmlinkage long |
1687 | sys32_newuname (struct new_utsname __user *name) | 1632 | sys32_newuname (struct new_utsname __user *name) |
1688 | { | 1633 | { |
diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h index 18a7e49abbc5..bc90c75adf67 100644 --- a/arch/ia64/include/asm/kvm.h +++ b/arch/ia64/include/asm/kvm.h | |||
@@ -60,6 +60,7 @@ struct kvm_ioapic_state { | |||
60 | #define KVM_IRQCHIP_PIC_MASTER 0 | 60 | #define KVM_IRQCHIP_PIC_MASTER 0 |
61 | #define KVM_IRQCHIP_PIC_SLAVE 1 | 61 | #define KVM_IRQCHIP_PIC_SLAVE 1 |
62 | #define KVM_IRQCHIP_IOAPIC 2 | 62 | #define KVM_IRQCHIP_IOAPIC 2 |
63 | #define KVM_NR_IRQCHIPS 3 | ||
63 | 64 | ||
64 | #define KVM_CONTEXT_SIZE 8*1024 | 65 | #define KVM_CONTEXT_SIZE 8*1024 |
65 | 66 | ||
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h index d9b6325a9328..a362e67e0ca6 100644 --- a/arch/ia64/include/asm/kvm_host.h +++ b/arch/ia64/include/asm/kvm_host.h | |||
@@ -475,7 +475,6 @@ struct kvm_arch { | |||
475 | struct list_head assigned_dev_head; | 475 | struct list_head assigned_dev_head; |
476 | struct iommu_domain *iommu_domain; | 476 | struct iommu_domain *iommu_domain; |
477 | int iommu_flags; | 477 | int iommu_flags; |
478 | struct hlist_head irq_ack_notifier_list; | ||
479 | 478 | ||
480 | unsigned long irq_sources_bitmap; | 479 | unsigned long irq_sources_bitmap; |
481 | unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; | 480 | unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; |
diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h index 0b0d5ff062e5..51427eaa51ba 100644 --- a/arch/ia64/include/asm/socket.h +++ b/arch/ia64/include/asm/socket.h | |||
@@ -69,4 +69,6 @@ | |||
69 | #define SO_PROTOCOL 38 | 69 | #define SO_PROTOCOL 38 |
70 | #define SO_DOMAIN 39 | 70 | #define SO_DOMAIN 39 |
71 | 71 | ||
72 | #define SO_RXQ_OVFL 40 | ||
73 | |||
72 | #endif /* _ASM_IA64_SOCKET_H */ | 74 | #endif /* _ASM_IA64_SOCKET_H */ |
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index 5a5347f5c4e4..9c72e36c5281 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h | |||
@@ -311,11 +311,12 @@ | |||
311 | #define __NR_preadv 1319 | 311 | #define __NR_preadv 1319 |
312 | #define __NR_pwritev 1320 | 312 | #define __NR_pwritev 1320 |
313 | #define __NR_rt_tgsigqueueinfo 1321 | 313 | #define __NR_rt_tgsigqueueinfo 1321 |
314 | #define __NR_rt_recvmmsg 1322 | ||
314 | 315 | ||
315 | #ifdef __KERNEL__ | 316 | #ifdef __KERNEL__ |
316 | 317 | ||
317 | 318 | ||
318 | #define NR_syscalls 298 /* length of syscall table */ | 319 | #define NR_syscalls 299 /* length of syscall table */ |
319 | 320 | ||
320 | /* | 321 | /* |
321 | * The following defines stop scripts/checksyscalls.sh from complaining about | 322 | * The following defines stop scripts/checksyscalls.sh from complaining about |
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c index 6631a9dfafdc..b942f4032d7a 100644 --- a/arch/ia64/kernel/crash.c +++ b/arch/ia64/kernel/crash.c | |||
@@ -239,32 +239,29 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data) | |||
239 | #ifdef CONFIG_SYSCTL | 239 | #ifdef CONFIG_SYSCTL |
240 | static ctl_table kdump_ctl_table[] = { | 240 | static ctl_table kdump_ctl_table[] = { |
241 | { | 241 | { |
242 | .ctl_name = CTL_UNNUMBERED, | ||
243 | .procname = "kdump_on_init", | 242 | .procname = "kdump_on_init", |
244 | .data = &kdump_on_init, | 243 | .data = &kdump_on_init, |
245 | .maxlen = sizeof(int), | 244 | .maxlen = sizeof(int), |
246 | .mode = 0644, | 245 | .mode = 0644, |
247 | .proc_handler = &proc_dointvec, | 246 | .proc_handler = proc_dointvec, |
248 | }, | 247 | }, |
249 | { | 248 | { |
250 | .ctl_name = CTL_UNNUMBERED, | ||
251 | .procname = "kdump_on_fatal_mca", | 249 | .procname = "kdump_on_fatal_mca", |
252 | .data = &kdump_on_fatal_mca, | 250 | .data = &kdump_on_fatal_mca, |
253 | .maxlen = sizeof(int), | 251 | .maxlen = sizeof(int), |
254 | .mode = 0644, | 252 | .mode = 0644, |
255 | .proc_handler = &proc_dointvec, | 253 | .proc_handler = proc_dointvec, |
256 | }, | 254 | }, |
257 | { .ctl_name = 0 } | 255 | { } |
258 | }; | 256 | }; |
259 | 257 | ||
260 | static ctl_table sys_table[] = { | 258 | static ctl_table sys_table[] = { |
261 | { | 259 | { |
262 | .ctl_name = CTL_KERN, | ||
263 | .procname = "kernel", | 260 | .procname = "kernel", |
264 | .mode = 0555, | 261 | .mode = 0555, |
265 | .child = kdump_ctl_table, | 262 | .child = kdump_ctl_table, |
266 | }, | 263 | }, |
267 | { .ctl_name = 0 } | 264 | { } |
268 | }; | 265 | }; |
269 | #endif | 266 | #endif |
270 | 267 | ||
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index d0e7d37017b4..d75b872ca4dc 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S | |||
@@ -1806,6 +1806,7 @@ sys_call_table: | |||
1806 | data8 sys_preadv | 1806 | data8 sys_preadv |
1807 | data8 sys_pwritev // 1320 | 1807 | data8 sys_pwritev // 1320 |
1808 | data8 sys_rt_tgsigqueueinfo | 1808 | data8 sys_rt_tgsigqueueinfo |
1809 | data8 sys_recvmmsg | ||
1809 | 1810 | ||
1810 | .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls | 1811 | .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls |
1811 | #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ | 1812 | #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ |
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index f1782705b1f7..402698b6689f 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c | |||
@@ -522,42 +522,37 @@ EXPORT_SYMBOL(pfm_sysctl); | |||
522 | 522 | ||
523 | static ctl_table pfm_ctl_table[]={ | 523 | static ctl_table pfm_ctl_table[]={ |
524 | { | 524 | { |
525 | .ctl_name = CTL_UNNUMBERED, | ||
526 | .procname = "debug", | 525 | .procname = "debug", |
527 | .data = &pfm_sysctl.debug, | 526 | .data = &pfm_sysctl.debug, |
528 | .maxlen = sizeof(int), | 527 | .maxlen = sizeof(int), |
529 | .mode = 0666, | 528 | .mode = 0666, |
530 | .proc_handler = &proc_dointvec, | 529 | .proc_handler = proc_dointvec, |
531 | }, | 530 | }, |
532 | { | 531 | { |
533 | .ctl_name = CTL_UNNUMBERED, | ||
534 | .procname = "debug_ovfl", | 532 | .procname = "debug_ovfl", |
535 | .data = &pfm_sysctl.debug_ovfl, | 533 | .data = &pfm_sysctl.debug_ovfl, |
536 | .maxlen = sizeof(int), | 534 | .maxlen = sizeof(int), |
537 | .mode = 0666, | 535 | .mode = 0666, |
538 | .proc_handler = &proc_dointvec, | 536 | .proc_handler = proc_dointvec, |
539 | }, | 537 | }, |
540 | { | 538 | { |
541 | .ctl_name = CTL_UNNUMBERED, | ||
542 | .procname = "fastctxsw", | 539 | .procname = "fastctxsw", |
543 | .data = &pfm_sysctl.fastctxsw, | 540 | .data = &pfm_sysctl.fastctxsw, |
544 | .maxlen = sizeof(int), | 541 | .maxlen = sizeof(int), |
545 | .mode = 0600, | 542 | .mode = 0600, |
546 | .proc_handler = &proc_dointvec, | 543 | .proc_handler = proc_dointvec, |
547 | }, | 544 | }, |
548 | { | 545 | { |
549 | .ctl_name = CTL_UNNUMBERED, | ||
550 | .procname = "expert_mode", | 546 | .procname = "expert_mode", |
551 | .data = &pfm_sysctl.expert_mode, | 547 | .data = &pfm_sysctl.expert_mode, |
552 | .maxlen = sizeof(int), | 548 | .maxlen = sizeof(int), |
553 | .mode = 0600, | 549 | .mode = 0600, |
554 | .proc_handler = &proc_dointvec, | 550 | .proc_handler = proc_dointvec, |
555 | }, | 551 | }, |
556 | {} | 552 | {} |
557 | }; | 553 | }; |
558 | static ctl_table pfm_sysctl_dir[] = { | 554 | static ctl_table pfm_sysctl_dir[] = { |
559 | { | 555 | { |
560 | .ctl_name = CTL_UNNUMBERED, | ||
561 | .procname = "perfmon", | 556 | .procname = "perfmon", |
562 | .mode = 0555, | 557 | .mode = 0555, |
563 | .child = pfm_ctl_table, | 558 | .child = pfm_ctl_table, |
@@ -566,7 +561,6 @@ static ctl_table pfm_sysctl_dir[] = { | |||
566 | }; | 561 | }; |
567 | static ctl_table pfm_sysctl_root[] = { | 562 | static ctl_table pfm_sysctl_root[] = { |
568 | { | 563 | { |
569 | .ctl_name = CTL_KERN, | ||
570 | .procname = "kernel", | 564 | .procname = "kernel", |
571 | .mode = 0555, | 565 | .mode = 0555, |
572 | .child = pfm_sysctl_dir, | 566 | .child = pfm_sysctl_dir, |
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile index 0bb99b732908..1089b3e918ac 100644 --- a/arch/ia64/kvm/Makefile +++ b/arch/ia64/kvm/Makefile | |||
@@ -49,7 +49,7 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/ | |||
49 | EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/ | 49 | EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/ |
50 | 50 | ||
51 | common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \ | 51 | common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \ |
52 | coalesced_mmio.o irq_comm.o) | 52 | coalesced_mmio.o irq_comm.o assigned-dev.o) |
53 | 53 | ||
54 | ifeq ($(CONFIG_IOMMU_API),y) | 54 | ifeq ($(CONFIG_IOMMU_API),y) |
55 | common-objs += $(addprefix ../../../virt/kvm/, iommu.o) | 55 | common-objs += $(addprefix ../../../virt/kvm/, iommu.o) |
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 0ad09f05efa9..5fdeec5fddcf 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c | |||
@@ -124,7 +124,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler) | |||
124 | 124 | ||
125 | static DEFINE_SPINLOCK(vp_lock); | 125 | static DEFINE_SPINLOCK(vp_lock); |
126 | 126 | ||
127 | void kvm_arch_hardware_enable(void *garbage) | 127 | int kvm_arch_hardware_enable(void *garbage) |
128 | { | 128 | { |
129 | long status; | 129 | long status; |
130 | long tmp_base; | 130 | long tmp_base; |
@@ -137,7 +137,7 @@ void kvm_arch_hardware_enable(void *garbage) | |||
137 | slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); | 137 | slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); |
138 | local_irq_restore(saved_psr); | 138 | local_irq_restore(saved_psr); |
139 | if (slot < 0) | 139 | if (slot < 0) |
140 | return; | 140 | return -EINVAL; |
141 | 141 | ||
142 | spin_lock(&vp_lock); | 142 | spin_lock(&vp_lock); |
143 | status = ia64_pal_vp_init_env(kvm_vsa_base ? | 143 | status = ia64_pal_vp_init_env(kvm_vsa_base ? |
@@ -145,7 +145,7 @@ void kvm_arch_hardware_enable(void *garbage) | |||
145 | __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); | 145 | __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); |
146 | if (status != 0) { | 146 | if (status != 0) { |
147 | printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n"); | 147 | printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n"); |
148 | return ; | 148 | return -EINVAL; |
149 | } | 149 | } |
150 | 150 | ||
151 | if (!kvm_vsa_base) { | 151 | if (!kvm_vsa_base) { |
@@ -154,6 +154,8 @@ void kvm_arch_hardware_enable(void *garbage) | |||
154 | } | 154 | } |
155 | spin_unlock(&vp_lock); | 155 | spin_unlock(&vp_lock); |
156 | ia64_ptr_entry(0x3, slot); | 156 | ia64_ptr_entry(0x3, slot); |
157 | |||
158 | return 0; | ||
157 | } | 159 | } |
158 | 160 | ||
159 | void kvm_arch_hardware_disable(void *garbage) | 161 | void kvm_arch_hardware_disable(void *garbage) |
@@ -851,8 +853,7 @@ static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, | |||
851 | r = 0; | 853 | r = 0; |
852 | switch (chip->chip_id) { | 854 | switch (chip->chip_id) { |
853 | case KVM_IRQCHIP_IOAPIC: | 855 | case KVM_IRQCHIP_IOAPIC: |
854 | memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm), | 856 | r = kvm_get_ioapic(kvm, &chip->chip.ioapic); |
855 | sizeof(struct kvm_ioapic_state)); | ||
856 | break; | 857 | break; |
857 | default: | 858 | default: |
858 | r = -EINVAL; | 859 | r = -EINVAL; |
@@ -868,9 +869,7 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) | |||
868 | r = 0; | 869 | r = 0; |
869 | switch (chip->chip_id) { | 870 | switch (chip->chip_id) { |
870 | case KVM_IRQCHIP_IOAPIC: | 871 | case KVM_IRQCHIP_IOAPIC: |
871 | memcpy(ioapic_irqchip(kvm), | 872 | r = kvm_set_ioapic(kvm, &chip->chip.ioapic); |
872 | &chip->chip.ioapic, | ||
873 | sizeof(struct kvm_ioapic_state)); | ||
874 | break; | 873 | break; |
875 | default: | 874 | default: |
876 | r = -EINVAL; | 875 | r = -EINVAL; |
@@ -944,7 +943,7 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
944 | { | 943 | { |
945 | struct kvm *kvm = filp->private_data; | 944 | struct kvm *kvm = filp->private_data; |
946 | void __user *argp = (void __user *)arg; | 945 | void __user *argp = (void __user *)arg; |
947 | int r = -EINVAL; | 946 | int r = -ENOTTY; |
948 | 947 | ||
949 | switch (ioctl) { | 948 | switch (ioctl) { |
950 | case KVM_SET_MEMORY_REGION: { | 949 | case KVM_SET_MEMORY_REGION: { |
@@ -985,10 +984,8 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
985 | goto out; | 984 | goto out; |
986 | if (irqchip_in_kernel(kvm)) { | 985 | if (irqchip_in_kernel(kvm)) { |
987 | __s32 status; | 986 | __s32 status; |
988 | mutex_lock(&kvm->irq_lock); | ||
989 | status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, | 987 | status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, |
990 | irq_event.irq, irq_event.level); | 988 | irq_event.irq, irq_event.level); |
991 | mutex_unlock(&kvm->irq_lock); | ||
992 | if (ioctl == KVM_IRQ_LINE_STATUS) { | 989 | if (ioctl == KVM_IRQ_LINE_STATUS) { |
993 | irq_event.status = status; | 990 | irq_event.status = status; |
994 | if (copy_to_user(argp, &irq_event, | 991 | if (copy_to_user(argp, &irq_event, |
diff --git a/arch/m32r/include/asm/socket.h b/arch/m32r/include/asm/socket.h index 3390a864f224..469787c30098 100644 --- a/arch/m32r/include/asm/socket.h +++ b/arch/m32r/include/asm/socket.h | |||
@@ -60,4 +60,6 @@ | |||
60 | #define SO_PROTOCOL 38 | 60 | #define SO_PROTOCOL 38 |
61 | #define SO_DOMAIN 39 | 61 | #define SO_DOMAIN 39 |
62 | 62 | ||
63 | #define SO_RXQ_OVFL 40 | ||
64 | |||
63 | #endif /* _ASM_M32R_SOCKET_H */ | 65 | #endif /* _ASM_M32R_SOCKET_H */ |
diff --git a/arch/m68k/include/asm/socket.h b/arch/m68k/include/asm/socket.h index eee01cce921b..9bf49c87d954 100644 --- a/arch/m68k/include/asm/socket.h +++ b/arch/m68k/include/asm/socket.h | |||
@@ -60,4 +60,6 @@ | |||
60 | #define SO_PROTOCOL 38 | 60 | #define SO_PROTOCOL 38 |
61 | #define SO_DOMAIN 39 | 61 | #define SO_DOMAIN 39 |
62 | 62 | ||
63 | #define SO_RXQ_OVFL 40 | ||
64 | |||
63 | #endif /* _ASM_SOCKET_H */ | 65 | #endif /* _ASM_SOCKET_H */ |
diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h index 37e6f305a68e..ef3ec1d6ceb3 100644 --- a/arch/microblaze/include/asm/prom.h +++ b/arch/microblaze/include/asm/prom.h | |||
@@ -12,23 +12,15 @@ | |||
12 | * 2 of the License, or (at your option) any later version. | 12 | * 2 of the License, or (at your option) any later version. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/of.h> /* linux/of.h gets to determine #include ordering */ | ||
16 | |||
15 | #ifndef _ASM_MICROBLAZE_PROM_H | 17 | #ifndef _ASM_MICROBLAZE_PROM_H |
16 | #define _ASM_MICROBLAZE_PROM_H | 18 | #define _ASM_MICROBLAZE_PROM_H |
17 | #ifdef __KERNEL__ | 19 | #ifdef __KERNEL__ |
18 | |||
19 | /* Definitions used by the flattened device tree */ | ||
20 | #define OF_DT_HEADER 0xd00dfeed /* marker */ | ||
21 | #define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */ | ||
22 | #define OF_DT_END_NODE 0x2 /* End node */ | ||
23 | #define OF_DT_PROP 0x3 /* Property: name off, size, content */ | ||
24 | #define OF_DT_NOP 0x4 /* nop */ | ||
25 | #define OF_DT_END 0x9 | ||
26 | |||
27 | #define OF_DT_VERSION 0x10 | ||
28 | |||
29 | #ifndef __ASSEMBLY__ | 20 | #ifndef __ASSEMBLY__ |
30 | 21 | ||
31 | #include <linux/types.h> | 22 | #include <linux/types.h> |
23 | #include <linux/of_fdt.h> | ||
32 | #include <linux/proc_fs.h> | 24 | #include <linux/proc_fs.h> |
33 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
34 | #include <asm/irq.h> | 26 | #include <asm/irq.h> |
@@ -41,122 +33,19 @@ | |||
41 | #define of_prop_cmp(s1, s2) strcmp((s1), (s2)) | 33 | #define of_prop_cmp(s1, s2) strcmp((s1), (s2)) |
42 | #define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) | 34 | #define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) |
43 | 35 | ||
44 | /* | ||
45 | * This is what gets passed to the kernel by prom_init or kexec | ||
46 | * | ||
47 | * The dt struct contains the device tree structure, full pathes and | ||
48 | * property contents. The dt strings contain a separate block with just | ||
49 | * the strings for the property names, and is fully page aligned and | ||
50 | * self contained in a page, so that it can be kept around by the kernel, | ||
51 | * each property name appears only once in this page (cheap compression) | ||
52 | * | ||
53 | * the mem_rsvmap contains a map of reserved ranges of physical memory, | ||
54 | * passing it here instead of in the device-tree itself greatly simplifies | ||
55 | * the job of everybody. It's just a list of u64 pairs (base/size) that | ||
56 | * ends when size is 0 | ||
57 | */ | ||
58 | struct boot_param_header { | ||
59 | u32 magic; /* magic word OF_DT_HEADER */ | ||
60 | u32 totalsize; /* total size of DT block */ | ||
61 | u32 off_dt_struct; /* offset to structure */ | ||
62 | u32 off_dt_strings; /* offset to strings */ | ||
63 | u32 off_mem_rsvmap; /* offset to memory reserve map */ | ||
64 | u32 version; /* format version */ | ||
65 | u32 last_comp_version; /* last compatible version */ | ||
66 | /* version 2 fields below */ | ||
67 | u32 boot_cpuid_phys; /* Physical CPU id we're booting on */ | ||
68 | /* version 3 fields below */ | ||
69 | u32 dt_strings_size; /* size of the DT strings block */ | ||
70 | /* version 17 fields below */ | ||
71 | u32 dt_struct_size; /* size of the DT structure block */ | ||
72 | }; | ||
73 | |||
74 | typedef u32 phandle; | ||
75 | typedef u32 ihandle; | ||
76 | |||
77 | struct property { | ||
78 | char *name; | ||
79 | int length; | ||
80 | void *value; | ||
81 | struct property *next; | ||
82 | }; | ||
83 | |||
84 | struct device_node { | ||
85 | const char *name; | ||
86 | const char *type; | ||
87 | phandle node; | ||
88 | phandle linux_phandle; | ||
89 | char *full_name; | ||
90 | |||
91 | struct property *properties; | ||
92 | struct property *deadprops; /* removed properties */ | ||
93 | struct device_node *parent; | ||
94 | struct device_node *child; | ||
95 | struct device_node *sibling; | ||
96 | struct device_node *next; /* next device of same type */ | ||
97 | struct device_node *allnext; /* next in list of all nodes */ | ||
98 | struct proc_dir_entry *pde; /* this node's proc directory */ | ||
99 | struct kref kref; | ||
100 | unsigned long _flags; | ||
101 | void *data; | ||
102 | }; | ||
103 | |||
104 | extern struct device_node *of_chosen; | 36 | extern struct device_node *of_chosen; |
105 | 37 | ||
106 | static inline int of_node_check_flag(struct device_node *n, unsigned long flag) | ||
107 | { | ||
108 | return test_bit(flag, &n->_flags); | ||
109 | } | ||
110 | |||
111 | static inline void of_node_set_flag(struct device_node *n, unsigned long flag) | ||
112 | { | ||
113 | set_bit(flag, &n->_flags); | ||
114 | } | ||
115 | |||
116 | #define HAVE_ARCH_DEVTREE_FIXUPS | 38 | #define HAVE_ARCH_DEVTREE_FIXUPS |
117 | 39 | ||
118 | static inline void set_node_proc_entry(struct device_node *dn, | ||
119 | struct proc_dir_entry *de) | ||
120 | { | ||
121 | dn->pde = de; | ||
122 | } | ||
123 | |||
124 | extern struct device_node *allnodes; /* temporary while merging */ | 40 | extern struct device_node *allnodes; /* temporary while merging */ |
125 | extern rwlock_t devtree_lock; /* temporary while merging */ | 41 | extern rwlock_t devtree_lock; /* temporary while merging */ |
126 | 42 | ||
127 | extern struct device_node *of_find_all_nodes(struct device_node *prev); | ||
128 | extern struct device_node *of_node_get(struct device_node *node); | ||
129 | extern void of_node_put(struct device_node *node); | ||
130 | |||
131 | /* For scanning the flat device-tree at boot time */ | ||
132 | extern int __init of_scan_flat_dt(int (*it)(unsigned long node, | ||
133 | const char *uname, int depth, | ||
134 | void *data), | ||
135 | void *data); | ||
136 | extern void *__init of_get_flat_dt_prop(unsigned long node, const char *name, | ||
137 | unsigned long *size); | ||
138 | extern int __init | ||
139 | of_flat_dt_is_compatible(unsigned long node, const char *name); | ||
140 | extern unsigned long __init of_get_flat_dt_root(void); | ||
141 | |||
142 | /* For updating the device tree at runtime */ | 43 | /* For updating the device tree at runtime */ |
143 | extern void of_attach_node(struct device_node *); | 44 | extern void of_attach_node(struct device_node *); |
144 | extern void of_detach_node(struct device_node *); | 45 | extern void of_detach_node(struct device_node *); |
145 | 46 | ||
146 | /* Other Prototypes */ | 47 | /* Other Prototypes */ |
147 | extern void finish_device_tree(void); | ||
148 | extern void unflatten_device_tree(void); | ||
149 | extern int early_uartlite_console(void); | 48 | extern int early_uartlite_console(void); |
150 | extern void early_init_devtree(void *); | ||
151 | extern int machine_is_compatible(const char *compat); | ||
152 | extern void print_properties(struct device_node *node); | ||
153 | extern int prom_n_intr_cells(struct device_node *np); | ||
154 | extern void prom_get_irq_senses(unsigned char *senses, int off, int max); | ||
155 | extern int prom_add_property(struct device_node *np, struct property *prop); | ||
156 | extern int prom_remove_property(struct device_node *np, struct property *prop); | ||
157 | extern int prom_update_property(struct device_node *np, | ||
158 | struct property *newprop, | ||
159 | struct property *oldprop); | ||
160 | 49 | ||
161 | extern struct resource *request_OF_resource(struct device_node *node, | 50 | extern struct resource *request_OF_resource(struct device_node *node, |
162 | int index, const char *name_postfix); | 51 | int index, const char *name_postfix); |
@@ -166,18 +55,6 @@ extern int release_OF_resource(struct device_node *node, int index); | |||
166 | * OF address retreival & translation | 55 | * OF address retreival & translation |
167 | */ | 56 | */ |
168 | 57 | ||
169 | /* Helper to read a big number; size is in cells (not bytes) */ | ||
170 | static inline u64 of_read_number(const u32 *cell, int size) | ||
171 | { | ||
172 | u64 r = 0; | ||
173 | while (size--) | ||
174 | r = (r << 32) | *(cell++); | ||
175 | return r; | ||
176 | } | ||
177 | |||
178 | /* Like of_read_number, but we want an unsigned long result */ | ||
179 | #define of_read_ulong(cell, size) of_read_number(cell, size) | ||
180 | |||
181 | /* Translate an OF address block into a CPU physical address | 58 | /* Translate an OF address block into a CPU physical address |
182 | */ | 59 | */ |
183 | extern u64 of_translate_address(struct device_node *np, const u32 *addr); | 60 | extern u64 of_translate_address(struct device_node *np, const u32 *addr); |
@@ -305,12 +182,6 @@ extern int of_irq_to_resource(struct device_node *dev, int index, | |||
305 | */ | 182 | */ |
306 | extern void __iomem *of_iomap(struct device_node *device, int index); | 183 | extern void __iomem *of_iomap(struct device_node *device, int index); |
307 | 184 | ||
308 | /* | ||
309 | * NB: This is here while we transition from using asm/prom.h | ||
310 | * to linux/of.h | ||
311 | */ | ||
312 | #include <linux/of.h> | ||
313 | |||
314 | #endif /* __ASSEMBLY__ */ | 185 | #endif /* __ASSEMBLY__ */ |
315 | #endif /* __KERNEL__ */ | 186 | #endif /* __KERNEL__ */ |
316 | #endif /* _ASM_MICROBLAZE_PROM_H */ | 187 | #endif /* _ASM_MICROBLAZE_PROM_H */ |
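With the flattened-device-tree definitions moved out of asm/prom.h into the shared <linux/of_fdt.h>, arch code keeps calling the same scan helpers, just from the common header. A hedged sketch of the usual early-boot pattern; the callback name, the cmd_line buffer and its size handling are illustrative assumptions, not code from this patch.

/* Sketch (illustrative, not from this patch): walk the flattened tree for
 * /chosen and copy out bootargs, using the of_scan_flat_dt() and
 * of_get_flat_dt_prop() helpers that now come from <linux/of_fdt.h>. */
#include <linux/of_fdt.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/init.h>

static char cmd_line[256] __initdata;        /* size is an assumption */

static int __init my_scan_chosen(unsigned long node, const char *uname,
                                 int depth, void *data)
{
        unsigned long size;
        const char *args;

        if (depth != 1 ||
            (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
                return 0;                    /* not it, keep scanning */

        args = of_get_flat_dt_prop(node, "bootargs", &size);
        if (args != NULL && size > 0)
                strlcpy(cmd_line, args,
                        min_t(unsigned long, size, sizeof(cmd_line)));
        return 1;                            /* found it, stop the scan */
}

/* typically invoked from early_init_devtree():
 *      of_scan_flat_dt(my_scan_chosen, NULL);
 */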
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S index 697ce3007f30..30916193fcc7 100644 --- a/arch/microblaze/kernel/head.S +++ b/arch/microblaze/kernel/head.S | |||
@@ -31,7 +31,7 @@ | |||
31 | #include <linux/linkage.h> | 31 | #include <linux/linkage.h> |
32 | #include <asm/thread_info.h> | 32 | #include <asm/thread_info.h> |
33 | #include <asm/page.h> | 33 | #include <asm/page.h> |
34 | #include <asm/prom.h> /* for OF_DT_HEADER */ | 34 | #include <linux/of_fdt.h> /* for OF_DT_HEADER */ |
35 | 35 | ||
36 | #ifdef CONFIG_MMU | 36 | #ifdef CONFIG_MMU |
37 | #include <asm/setup.h> /* COMMAND_LINE_SIZE */ | 37 | #include <asm/setup.h> /* COMMAND_LINE_SIZE */ |
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c index c005cc6f1aaf..b817df172aa9 100644 --- a/arch/microblaze/kernel/prom.c +++ b/arch/microblaze/kernel/prom.c | |||
@@ -860,29 +860,6 @@ struct device_node *of_find_node_by_phandle(phandle handle) | |||
860 | EXPORT_SYMBOL(of_find_node_by_phandle); | 860 | EXPORT_SYMBOL(of_find_node_by_phandle); |
861 | 861 | ||
862 | /** | 862 | /** |
863 | * of_find_all_nodes - Get next node in global list | ||
864 | * @prev: Previous node or NULL to start iteration | ||
865 | * of_node_put() will be called on it | ||
866 | * | ||
867 | * Returns a node pointer with refcount incremented, use | ||
868 | * of_node_put() on it when done. | ||
869 | */ | ||
870 | struct device_node *of_find_all_nodes(struct device_node *prev) | ||
871 | { | ||
872 | struct device_node *np; | ||
873 | |||
874 | read_lock(&devtree_lock); | ||
875 | np = prev ? prev->allnext : allnodes; | ||
876 | for (; np != NULL; np = np->allnext) | ||
877 | if (of_node_get(np)) | ||
878 | break; | ||
879 | of_node_put(prev); | ||
880 | read_unlock(&devtree_lock); | ||
881 | return np; | ||
882 | } | ||
883 | EXPORT_SYMBOL(of_find_all_nodes); | ||
884 | |||
885 | /** | ||
886 | * of_node_get - Increment refcount of a node | 863 | * of_node_get - Increment refcount of a node |
887 | * @node: Node to inc refcount, NULL is supported to | 864 | * @node: Node to inc refcount, NULL is supported to |
888 | * simplify writing of callers | 865 | * simplify writing of callers |
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S index ecec19155135..c1ab1dc10898 100644 --- a/arch/microblaze/kernel/syscall_table.S +++ b/arch/microblaze/kernel/syscall_table.S | |||
@@ -371,3 +371,4 @@ ENTRY(sys_call_table) | |||
371 | .long sys_ni_syscall | 371 | .long sys_ni_syscall |
372 | .long sys_rt_tgsigqueueinfo /* 365 */ | 372 | .long sys_rt_tgsigqueueinfo /* 365 */ |
373 | .long sys_perf_event_open | 373 | .long sys_perf_event_open |
374 | .long sys_recvmmsg | ||
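The new sys_recvmmsg entry wires up the batched receive syscall added in this series: one call drains several datagrams and reports a per-message byte count. A hedged userspace sketch follows; it assumes a libc that already exposes the recvmmsg() wrapper and struct mmsghdr (on older libcs the raw syscall with a locally declared struct works the same way), and the batch size and buffer length are arbitrary.

/* Sketch (illustrative, not from this patch): receive up to 8 datagrams
 * from one UDP socket per recvmmsg() call. */
#define _GNU_SOURCE
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>
#include <stdio.h>

#define BATCH 8

static void drain(int fd)                    /* fd: a bound SOCK_DGRAM socket */
{
        static char bufs[BATCH][1500];
        struct iovec iov[BATCH];
        struct mmsghdr msgs[BATCH];
        int i, n;

        memset(msgs, 0, sizeof(msgs));
        for (i = 0; i < BATCH; i++) {
                iov[i].iov_base            = bufs[i];
                iov[i].iov_len             = sizeof(bufs[i]);
                msgs[i].msg_hdr.msg_iov    = &iov[i];
                msgs[i].msg_hdr.msg_iovlen = 1;
        }

        /* flags = 0, no timeout; each msg_len is filled in on return */
        n = recvmmsg(fd, msgs, BATCH, 0, NULL);
        for (i = 0; i < n; i++)
                printf("datagram %d: %u bytes\n", i, msgs[i].msg_len);
}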
diff --git a/arch/mips/include/asm/socket.h b/arch/mips/include/asm/socket.h index ae05accd9fe4..9de5190f2487 100644 --- a/arch/mips/include/asm/socket.h +++ b/arch/mips/include/asm/socket.h | |||
@@ -80,6 +80,8 @@ To add: #define SO_REUSEPORT 0x0200 /* Allow local address and port reuse. */ | |||
80 | #define SO_TIMESTAMPING 37 | 80 | #define SO_TIMESTAMPING 37 |
81 | #define SCM_TIMESTAMPING SO_TIMESTAMPING | 81 | #define SCM_TIMESTAMPING SO_TIMESTAMPING |
82 | 82 | ||
83 | #define SO_RXQ_OVFL 40 | ||
84 | |||
83 | #ifdef __KERNEL__ | 85 | #ifdef __KERNEL__ |
84 | 86 | ||
85 | /** sock_type - Socket types | 87 | /** sock_type - Socket types |
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h index 8c9dfa9e9018..65c679ecbe6b 100644 --- a/arch/mips/include/asm/unistd.h +++ b/arch/mips/include/asm/unistd.h | |||
@@ -355,16 +355,17 @@ | |||
355 | #define __NR_rt_tgsigqueueinfo (__NR_Linux + 332) | 355 | #define __NR_rt_tgsigqueueinfo (__NR_Linux + 332) |
356 | #define __NR_perf_event_open (__NR_Linux + 333) | 356 | #define __NR_perf_event_open (__NR_Linux + 333) |
357 | #define __NR_accept4 (__NR_Linux + 334) | 357 | #define __NR_accept4 (__NR_Linux + 334) |
358 | #define __NR_recvmmsg (__NR_Linux + 335) | ||
358 | 359 | ||
359 | /* | 360 | /* |
360 | * Offset of the last Linux o32 flavoured syscall | 361 | * Offset of the last Linux o32 flavoured syscall |
361 | */ | 362 | */ |
362 | #define __NR_Linux_syscalls 334 | 363 | #define __NR_Linux_syscalls 335 |
363 | 364 | ||
364 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ | 365 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ |
365 | 366 | ||
366 | #define __NR_O32_Linux 4000 | 367 | #define __NR_O32_Linux 4000 |
367 | #define __NR_O32_Linux_syscalls 334 | 368 | #define __NR_O32_Linux_syscalls 335 |
368 | 369 | ||
369 | #if _MIPS_SIM == _MIPS_SIM_ABI64 | 370 | #if _MIPS_SIM == _MIPS_SIM_ABI64 |
370 | 371 | ||
@@ -666,16 +667,17 @@ | |||
666 | #define __NR_rt_tgsigqueueinfo (__NR_Linux + 291) | 667 | #define __NR_rt_tgsigqueueinfo (__NR_Linux + 291) |
667 | #define __NR_perf_event_open (__NR_Linux + 292) | 668 | #define __NR_perf_event_open (__NR_Linux + 292) |
668 | #define __NR_accept4 (__NR_Linux + 293) | 669 | #define __NR_accept4 (__NR_Linux + 293) |
670 | #define __NR_recvmmsg (__NR_Linux + 294) | ||
669 | 671 | ||
670 | /* | 672 | /* |
671 | * Offset of the last Linux 64-bit flavoured syscall | 673 | * Offset of the last Linux 64-bit flavoured syscall |
672 | */ | 674 | */ |
673 | #define __NR_Linux_syscalls 293 | 675 | #define __NR_Linux_syscalls 294 |
674 | 676 | ||
675 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ | 677 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ |
676 | 678 | ||
677 | #define __NR_64_Linux 5000 | 679 | #define __NR_64_Linux 5000 |
678 | #define __NR_64_Linux_syscalls 293 | 680 | #define __NR_64_Linux_syscalls 294 |
679 | 681 | ||
680 | #if _MIPS_SIM == _MIPS_SIM_NABI32 | 682 | #if _MIPS_SIM == _MIPS_SIM_NABI32 |
681 | 683 | ||
@@ -981,16 +983,17 @@ | |||
981 | #define __NR_rt_tgsigqueueinfo (__NR_Linux + 295) | 983 | #define __NR_rt_tgsigqueueinfo (__NR_Linux + 295) |
982 | #define __NR_perf_event_open (__NR_Linux + 296) | 984 | #define __NR_perf_event_open (__NR_Linux + 296) |
983 | #define __NR_accept4 (__NR_Linux + 297) | 985 | #define __NR_accept4 (__NR_Linux + 297) |
986 | #define __NR_recvmmsg (__NR_Linux + 298) | ||
984 | 987 | ||
985 | /* | 988 | /* |
986 | * Offset of the last N32 flavoured syscall | 989 | * Offset of the last N32 flavoured syscall |
987 | */ | 990 | */ |
988 | #define __NR_Linux_syscalls 297 | 991 | #define __NR_Linux_syscalls 298 |
989 | 992 | ||
990 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ | 993 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ |
991 | 994 | ||
992 | #define __NR_N32_Linux 6000 | 995 | #define __NR_N32_Linux 6000 |
993 | #define __NR_N32_Linux_syscalls 297 | 996 | #define __NR_N32_Linux_syscalls 298 |
994 | 997 | ||
995 | #ifdef __KERNEL__ | 998 | #ifdef __KERNEL__ |
996 | 999 | ||
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index b77fefaff9da..1a2793efdc4e 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c | |||
@@ -265,67 +265,6 @@ SYSCALL_DEFINE5(n32_msgrcv, int, msqid, u32, msgp, size_t, msgsz, | |||
265 | } | 265 | } |
266 | #endif | 266 | #endif |
267 | 267 | ||
268 | struct sysctl_args32 | ||
269 | { | ||
270 | compat_caddr_t name; | ||
271 | int nlen; | ||
272 | compat_caddr_t oldval; | ||
273 | compat_caddr_t oldlenp; | ||
274 | compat_caddr_t newval; | ||
275 | compat_size_t newlen; | ||
276 | unsigned int __unused[4]; | ||
277 | }; | ||
278 | |||
279 | #ifdef CONFIG_SYSCTL_SYSCALL | ||
280 | |||
281 | SYSCALL_DEFINE1(32_sysctl, struct sysctl_args32 __user *, args) | ||
282 | { | ||
283 | struct sysctl_args32 tmp; | ||
284 | int error; | ||
285 | size_t oldlen; | ||
286 | size_t __user *oldlenp = NULL; | ||
287 | unsigned long addr = (((unsigned long)&args->__unused[0]) + 7) & ~7; | ||
288 | |||
289 | if (copy_from_user(&tmp, args, sizeof(tmp))) | ||
290 | return -EFAULT; | ||
291 | |||
292 | if (tmp.oldval && tmp.oldlenp) { | ||
293 | /* Duh, this is ugly and might not work if sysctl_args | ||
294 | is in read-only memory, but do_sysctl does indirectly | ||
295 | a lot of uaccess in both directions and we'd have to | ||
296 | basically copy the whole sysctl.c here, and | ||
297 | glibc's __sysctl uses rw memory for the structure | ||
298 | anyway. */ | ||
299 | if (get_user(oldlen, (u32 __user *)A(tmp.oldlenp)) || | ||
300 | put_user(oldlen, (size_t __user *)addr)) | ||
301 | return -EFAULT; | ||
302 | oldlenp = (size_t __user *)addr; | ||
303 | } | ||
304 | |||
305 | lock_kernel(); | ||
306 | error = do_sysctl((int __user *)A(tmp.name), tmp.nlen, (void __user *)A(tmp.oldval), | ||
307 | oldlenp, (void __user *)A(tmp.newval), tmp.newlen); | ||
308 | unlock_kernel(); | ||
309 | if (oldlenp) { | ||
310 | if (!error) { | ||
311 | if (get_user(oldlen, (size_t __user *)addr) || | ||
312 | put_user(oldlen, (u32 __user *)A(tmp.oldlenp))) | ||
313 | error = -EFAULT; | ||
314 | } | ||
315 | copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)); | ||
316 | } | ||
317 | return error; | ||
318 | } | ||
319 | |||
320 | #else | ||
321 | |||
322 | SYSCALL_DEFINE1(32_sysctl, struct sysctl_args32 __user *, args) | ||
323 | { | ||
324 | return -ENOSYS; | ||
325 | } | ||
326 | |||
327 | #endif /* CONFIG_SYSCTL_SYSCALL */ | ||
328 | |||
329 | SYSCALL_DEFINE1(32_newuname, struct new_utsname __user *, name) | 268 | SYSCALL_DEFINE1(32_newuname, struct new_utsname __user *, name) |
330 | { | 269 | { |
331 | int ret = 0; | 270 | int ret = 0; |
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index fd2a9bb620d6..17202bbe843f 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S | |||
@@ -583,6 +583,7 @@ einval: li v0, -ENOSYS | |||
583 | sys sys_rt_tgsigqueueinfo 4 | 583 | sys sys_rt_tgsigqueueinfo 4 |
584 | sys sys_perf_event_open 5 | 584 | sys sys_perf_event_open 5 |
585 | sys sys_accept4 4 | 585 | sys sys_accept4 4 |
586 | sys sys_recvmmsg 5 | ||
586 | .endm | 587 | .endm |
587 | 588 | ||
588 | /* We pre-compute the number of _instruction_ bytes needed to | 589 | /* We pre-compute the number of _instruction_ bytes needed to |
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 18bf7f32c5e4..a8a6c596eb04 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S | |||
@@ -420,4 +420,5 @@ sys_call_table: | |||
420 | PTR sys_rt_tgsigqueueinfo | 420 | PTR sys_rt_tgsigqueueinfo |
421 | PTR sys_perf_event_open | 421 | PTR sys_perf_event_open |
422 | PTR sys_accept4 | 422 | PTR sys_accept4 |
423 | PTR sys_recvmmsg | ||
423 | .size sys_call_table,.-sys_call_table | 424 | .size sys_call_table,.-sys_call_table |
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 6ebc07976694..66b5a48676dd 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -272,7 +272,7 @@ EXPORT(sysn32_call_table) | |||
272 | PTR sys_munlockall | 272 | PTR sys_munlockall |
273 | PTR sys_vhangup /* 6150 */ | 273 | PTR sys_vhangup /* 6150 */ |
274 | PTR sys_pivot_root | 274 | PTR sys_pivot_root |
275 | PTR sys_32_sysctl | 275 | PTR compat_sys_sysctl |
276 | PTR sys_prctl | 276 | PTR sys_prctl |
277 | PTR compat_sys_adjtimex | 277 | PTR compat_sys_adjtimex |
278 | PTR compat_sys_setrlimit /* 6155 */ | 278 | PTR compat_sys_setrlimit /* 6155 */ |
@@ -418,4 +418,5 @@ EXPORT(sysn32_call_table) | |||
418 | PTR compat_sys_rt_tgsigqueueinfo /* 5295 */ | 418 | PTR compat_sys_rt_tgsigqueueinfo /* 5295 */ |
419 | PTR sys_perf_event_open | 419 | PTR sys_perf_event_open |
420 | PTR sys_accept4 | 420 | PTR sys_accept4 |
421 | PTR compat_sys_recvmmsg | ||
421 | .size sysn32_call_table,.-sysn32_call_table | 422 | .size sysn32_call_table,.-sysn32_call_table |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 14dde4ca932e..515f9eab2b28 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -356,7 +356,7 @@ sys_call_table: | |||
356 | PTR sys_ni_syscall /* 4150 */ | 356 | PTR sys_ni_syscall /* 4150 */ |
357 | PTR sys_getsid | 357 | PTR sys_getsid |
358 | PTR sys_fdatasync | 358 | PTR sys_fdatasync |
359 | PTR sys_32_sysctl | 359 | PTR compat_sys_sysctl |
360 | PTR sys_mlock | 360 | PTR sys_mlock |
361 | PTR sys_munlock /* 4155 */ | 361 | PTR sys_munlock /* 4155 */ |
362 | PTR sys_mlockall | 362 | PTR sys_mlockall |
@@ -538,4 +538,5 @@ sys_call_table: | |||
538 | PTR compat_sys_rt_tgsigqueueinfo | 538 | PTR compat_sys_rt_tgsigqueueinfo |
539 | PTR sys_perf_event_open | 539 | PTR sys_perf_event_open |
540 | PTR sys_accept4 | 540 | PTR sys_accept4 |
541 | PTR compat_sys_recvmmsg | ||
541 | .size sys_call_table,.-sys_call_table | 542 | .size sys_call_table,.-sys_call_table |
diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c index b3deed8db619..14b9a28a4aec 100644 --- a/arch/mips/lasat/sysctl.c +++ b/arch/mips/lasat/sysctl.c | |||
@@ -37,23 +37,6 @@ | |||
37 | #include "ds1603.h" | 37 | #include "ds1603.h" |
38 | #endif | 38 | #endif |
39 | 39 | ||
40 | /* Strategy function to write EEPROM after changing string entry */ | ||
41 | int sysctl_lasatstring(ctl_table *table, | ||
42 | void *oldval, size_t *oldlenp, | ||
43 | void *newval, size_t newlen) | ||
44 | { | ||
45 | int r; | ||
46 | |||
47 | r = sysctl_string(table, oldval, oldlenp, newval, newlen); | ||
48 | if (r < 0) | ||
49 | return r; | ||
50 | |||
51 | if (newval && newlen) | ||
52 | lasat_write_eeprom_info(); | ||
53 | |||
54 | return 0; | ||
55 | } | ||
56 | |||
57 | 40 | ||
58 | /* And the same for proc */ | 41 | /* And the same for proc */ |
59 | int proc_dolasatstring(ctl_table *table, int write, | 42 | int proc_dolasatstring(ctl_table *table, int write, |
@@ -113,46 +96,6 @@ int proc_dolasatrtc(ctl_table *table, int write, | |||
113 | } | 96 | } |
114 | #endif | 97 | #endif |
115 | 98 | ||
116 | /* Sysctl for setting the IP addresses */ | ||
117 | int sysctl_lasat_intvec(ctl_table *table, | ||
118 | void *oldval, size_t *oldlenp, | ||
119 | void *newval, size_t newlen) | ||
120 | { | ||
121 | int r; | ||
122 | |||
123 | r = sysctl_intvec(table, oldval, oldlenp, newval, newlen); | ||
124 | if (r < 0) | ||
125 | return r; | ||
126 | |||
127 | if (newval && newlen) | ||
128 | lasat_write_eeprom_info(); | ||
129 | |||
130 | return 0; | ||
131 | } | ||
132 | |||
133 | #ifdef CONFIG_DS1603 | ||
134 | /* Same for RTC */ | ||
135 | int sysctl_lasat_rtc(ctl_table *table, | ||
136 | void *oldval, size_t *oldlenp, | ||
137 | void *newval, size_t newlen) | ||
138 | { | ||
139 | struct timespec ts; | ||
140 | int r; | ||
141 | |||
142 | read_persistent_clock(&ts); | ||
143 | rtctmp = ts.tv_sec; | ||
144 | if (rtctmp < 0) | ||
145 | rtctmp = 0; | ||
146 | r = sysctl_intvec(table, oldval, oldlenp, newval, newlen); | ||
147 | if (r < 0) | ||
148 | return r; | ||
149 | if (newval && newlen) | ||
150 | rtc_mips_set_mmss(rtctmp); | ||
151 | |||
152 | return r; | ||
153 | } | ||
154 | #endif | ||
155 | |||
156 | #ifdef CONFIG_INET | 99 | #ifdef CONFIG_INET |
157 | int proc_lasat_ip(ctl_table *table, int write, | 100 | int proc_lasat_ip(ctl_table *table, int write, |
158 | void *buffer, size_t *lenp, loff_t *ppos) | 101 | void *buffer, size_t *lenp, loff_t *ppos) |
@@ -214,23 +157,6 @@ int proc_lasat_ip(ctl_table *table, int write, | |||
214 | } | 157 | } |
215 | #endif | 158 | #endif |
216 | 159 | ||
217 | static int sysctl_lasat_prid(ctl_table *table, | ||
218 | void *oldval, size_t *oldlenp, | ||
219 | void *newval, size_t newlen) | ||
220 | { | ||
221 | int r; | ||
222 | |||
223 | r = sysctl_intvec(table, oldval, oldlenp, newval, newlen); | ||
224 | if (r < 0) | ||
225 | return r; | ||
226 | if (newval && newlen) { | ||
227 | lasat_board_info.li_eeprom_info.prid = *(int *)newval; | ||
228 | lasat_write_eeprom_info(); | ||
229 | lasat_init_board_info(); | ||
230 | } | ||
231 | return 0; | ||
232 | } | ||
233 | |||
234 | int proc_lasat_prid(ctl_table *table, int write, | 160 | int proc_lasat_prid(ctl_table *table, int write, |
235 | void *buffer, size_t *lenp, loff_t *ppos) | 161 | void *buffer, size_t *lenp, loff_t *ppos) |
236 | { | 162 | { |
@@ -252,115 +178,92 @@ extern int lasat_boot_to_service; | |||
252 | 178 | ||
253 | static ctl_table lasat_table[] = { | 179 | static ctl_table lasat_table[] = { |
254 | { | 180 | { |
255 | .ctl_name = CTL_UNNUMBERED, | ||
256 | .procname = "cpu-hz", | 181 | .procname = "cpu-hz", |
257 | .data = &lasat_board_info.li_cpu_hz, | 182 | .data = &lasat_board_info.li_cpu_hz, |
258 | .maxlen = sizeof(int), | 183 | .maxlen = sizeof(int), |
259 | .mode = 0444, | 184 | .mode = 0444, |
260 | .proc_handler = &proc_dointvec, | 185 | .proc_handler = proc_dointvec, |
261 | .strategy = &sysctl_intvec | ||
262 | }, | 186 | }, |
263 | { | 187 | { |
264 | .ctl_name = CTL_UNNUMBERED, | ||
265 | .procname = "bus-hz", | 188 | .procname = "bus-hz", |
266 | .data = &lasat_board_info.li_bus_hz, | 189 | .data = &lasat_board_info.li_bus_hz, |
267 | .maxlen = sizeof(int), | 190 | .maxlen = sizeof(int), |
268 | .mode = 0444, | 191 | .mode = 0444, |
269 | .proc_handler = &proc_dointvec, | 192 | .proc_handler = proc_dointvec, |
270 | .strategy = &sysctl_intvec | ||
271 | }, | 193 | }, |
272 | { | 194 | { |
273 | .ctl_name = CTL_UNNUMBERED, | ||
274 | .procname = "bmid", | 195 | .procname = "bmid", |
275 | .data = &lasat_board_info.li_bmid, | 196 | .data = &lasat_board_info.li_bmid, |
276 | .maxlen = sizeof(int), | 197 | .maxlen = sizeof(int), |
277 | .mode = 0444, | 198 | .mode = 0444, |
278 | .proc_handler = &proc_dointvec, | 199 | .proc_handler = proc_dointvec, |
279 | .strategy = &sysctl_intvec | ||
280 | }, | 200 | }, |
281 | { | 201 | { |
282 | .ctl_name = CTL_UNNUMBERED, | ||
283 | .procname = "prid", | 202 | .procname = "prid", |
284 | .data = &lasat_board_info.li_prid, | 203 | .data = &lasat_board_info.li_prid, |
285 | .maxlen = sizeof(int), | 204 | .maxlen = sizeof(int), |
286 | .mode = 0644, | 205 | .mode = 0644, |
287 | .proc_handler = &proc_lasat_prid, | 206 | .proc_handler = proc_lasat_prid, |
288 | .strategy = &sysctl_lasat_prid | ||
289 | }, | 207 | }, |
290 | #ifdef CONFIG_INET | 208 | #ifdef CONFIG_INET |
291 | { | 209 | { |
292 | .ctl_name = CTL_UNNUMBERED, | ||
293 | .procname = "ipaddr", | 210 | .procname = "ipaddr", |
294 | .data = &lasat_board_info.li_eeprom_info.ipaddr, | 211 | .data = &lasat_board_info.li_eeprom_info.ipaddr, |
295 | .maxlen = sizeof(int), | 212 | .maxlen = sizeof(int), |
296 | .mode = 0644, | 213 | .mode = 0644, |
297 | .proc_handler = &proc_lasat_ip, | 214 | .proc_handler = proc_lasat_ip, |
298 | .strategy = &sysctl_lasat_intvec | ||
299 | }, | 215 | }, |
300 | { | 216 | { |
301 | .ctl_name = CTL_UNNUMBERED, | ||
302 | .procname = "netmask", | 217 | .procname = "netmask", |
303 | .data = &lasat_board_info.li_eeprom_info.netmask, | 218 | .data = &lasat_board_info.li_eeprom_info.netmask, |
304 | .maxlen = sizeof(int), | 219 | .maxlen = sizeof(int), |
305 | .mode = 0644, | 220 | .mode = 0644, |
306 | .proc_handler = &proc_lasat_ip, | 221 | .proc_handler = proc_lasat_ip, |
307 | .strategy = &sysctl_lasat_intvec | ||
308 | }, | 222 | }, |
309 | #endif | 223 | #endif |
310 | { | 224 | { |
311 | .ctl_name = CTL_UNNUMBERED, | ||
312 | .procname = "passwd_hash", | 225 | .procname = "passwd_hash", |
313 | .data = &lasat_board_info.li_eeprom_info.passwd_hash, | 226 | .data = &lasat_board_info.li_eeprom_info.passwd_hash, |
314 | .maxlen = | 227 | .maxlen = |
315 | sizeof(lasat_board_info.li_eeprom_info.passwd_hash), | 228 | sizeof(lasat_board_info.li_eeprom_info.passwd_hash), |
316 | .mode = 0600, | 229 | .mode = 0600, |
317 | .proc_handler = &proc_dolasatstring, | 230 | .proc_handler = proc_dolasatstring, |
318 | .strategy = &sysctl_lasatstring | ||
319 | }, | 231 | }, |
320 | { | 232 | { |
321 | .ctl_name = CTL_UNNUMBERED, | ||
322 | .procname = "boot-service", | 233 | .procname = "boot-service", |
323 | .data = &lasat_boot_to_service, | 234 | .data = &lasat_boot_to_service, |
324 | .maxlen = sizeof(int), | 235 | .maxlen = sizeof(int), |
325 | .mode = 0644, | 236 | .mode = 0644, |
326 | .proc_handler = &proc_dointvec, | 237 | .proc_handler = proc_dointvec, |
327 | .strategy = &sysctl_intvec | ||
328 | }, | 238 | }, |
329 | #ifdef CONFIG_DS1603 | 239 | #ifdef CONFIG_DS1603 |
330 | { | 240 | { |
331 | .ctl_name = CTL_UNNUMBERED, | ||
332 | .procname = "rtc", | 241 | .procname = "rtc", |
333 | .data = &rtctmp, | 242 | .data = &rtctmp, |
334 | .maxlen = sizeof(int), | 243 | .maxlen = sizeof(int), |
335 | .mode = 0644, | 244 | .mode = 0644, |
336 | .proc_handler = &proc_dolasatrtc, | 245 | .proc_handler = proc_dolasatrtc, |
337 | .strategy = &sysctl_lasat_rtc | ||
338 | }, | 246 | }, |
339 | #endif | 247 | #endif |
340 | { | 248 | { |
341 | .ctl_name = CTL_UNNUMBERED, | ||
342 | .procname = "namestr", | 249 | .procname = "namestr", |
343 | .data = &lasat_board_info.li_namestr, | 250 | .data = &lasat_board_info.li_namestr, |
344 | .maxlen = sizeof(lasat_board_info.li_namestr), | 251 | .maxlen = sizeof(lasat_board_info.li_namestr), |
345 | .mode = 0444, | 252 | .mode = 0444, |
346 | .proc_handler = &proc_dostring, | 253 | .proc_handler = proc_dostring, |
347 | .strategy = &sysctl_string | ||
348 | }, | 254 | }, |
349 | { | 255 | { |
350 | .ctl_name = CTL_UNNUMBERED, | ||
351 | .procname = "typestr", | 256 | .procname = "typestr", |
352 | .data = &lasat_board_info.li_typestr, | 257 | .data = &lasat_board_info.li_typestr, |
353 | .maxlen = sizeof(lasat_board_info.li_typestr), | 258 | .maxlen = sizeof(lasat_board_info.li_typestr), |
354 | .mode = 0444, | 259 | .mode = 0444, |
355 | .proc_handler = &proc_dostring, | 260 | .proc_handler = proc_dostring, |
356 | .strategy = &sysctl_string | ||
357 | }, | 261 | }, |
358 | {} | 262 | {} |
359 | }; | 263 | }; |
360 | 264 | ||
361 | static ctl_table lasat_root_table[] = { | 265 | static ctl_table lasat_root_table[] = { |
362 | { | 266 | { |
363 | .ctl_name = CTL_UNNUMBERED, | ||
364 | .procname = "lasat", | 267 | .procname = "lasat", |
365 | .mode = 0555, | 268 | .mode = 0555, |
366 | .child = lasat_table | 269 | .child = lasat_table |
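The lasat table above is part of a tree-wide move away from the binary sysctl interface: the .ctl_name and .strategy members disappear and only the /proc/sys handlers remain. A hedged sketch of a minimal table in the new style; the "board"/"example-value" names and the int tunable are illustrative, not from this patch.

/* Sketch (illustrative, not from this patch): a /proc/sys-only tunable in
 * the post-.ctl_name style. */
#include <linux/sysctl.h>
#include <linux/errno.h>
#include <linux/init.h>

static int example_value;

static ctl_table example_table[] = {
        {
                .procname     = "example-value",
                .data         = &example_value,
                .maxlen       = sizeof(int),
                .mode         = 0644,
                .proc_handler = proc_dointvec,  /* no .ctl_name, no .strategy */
        },
        {}
};

static ctl_table example_root[] = {
        {
                .procname = "board",            /* becomes /proc/sys/board/ */
                .mode     = 0555,
                .child    = example_table,
        },
        {}
};

static struct ctl_table_header *example_header;

static int __init example_sysctl_init(void)
{
        example_header = register_sysctl_table(example_root);
        return example_header ? 0 : -ENOMEM;
}
__initcall(example_sysctl_init);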
diff --git a/arch/mn10300/include/asm/socket.h b/arch/mn10300/include/asm/socket.h index 4df75af29d76..4e60c4281288 100644 --- a/arch/mn10300/include/asm/socket.h +++ b/arch/mn10300/include/asm/socket.h | |||
@@ -60,4 +60,6 @@ | |||
60 | #define SO_PROTOCOL 38 | 60 | #define SO_PROTOCOL 38 |
61 | #define SO_DOMAIN 39 | 61 | #define SO_DOMAIN 39 |
62 | 62 | ||
63 | #define SO_RXQ_OVFL 40 | ||
64 | |||
63 | #endif /* _ASM_SOCKET_H */ | 65 | #endif /* _ASM_SOCKET_H */ |
diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h index 960b1e5d8e16..225b7d6a1a0a 100644 --- a/arch/parisc/include/asm/socket.h +++ b/arch/parisc/include/asm/socket.h | |||
@@ -59,6 +59,8 @@ | |||
59 | #define SO_TIMESTAMPING 0x4020 | 59 | #define SO_TIMESTAMPING 0x4020 |
60 | #define SCM_TIMESTAMPING SO_TIMESTAMPING | 60 | #define SCM_TIMESTAMPING SO_TIMESTAMPING |
61 | 61 | ||
62 | #define SO_RXQ_OVFL 0x4021 | ||
63 | |||
62 | /* O_NONBLOCK clashes with the bits used for socket types. Therefore we | 64 | /* O_NONBLOCK clashes with the bits used for socket types. Therefore we |
63 | * have to define SOCK_NONBLOCK to a different value here. | 65 | * have to define SOCK_NONBLOCK to a different value here. |
64 | */ | 66 | */ |
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c index 561388b17c91..76d23ec8dfaa 100644 --- a/arch/parisc/kernel/sys_parisc32.c +++ b/arch/parisc/kernel/sys_parisc32.c | |||
@@ -90,77 +90,6 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23, | |||
90 | return -ENOSYS; | 90 | return -ENOSYS; |
91 | } | 91 | } |
92 | 92 | ||
93 | #ifdef CONFIG_SYSCTL | ||
94 | |||
95 | struct __sysctl_args32 { | ||
96 | u32 name; | ||
97 | int nlen; | ||
98 | u32 oldval; | ||
99 | u32 oldlenp; | ||
100 | u32 newval; | ||
101 | u32 newlen; | ||
102 | u32 __unused[4]; | ||
103 | }; | ||
104 | |||
105 | asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args) | ||
106 | { | ||
107 | #ifndef CONFIG_SYSCTL_SYSCALL | ||
108 | return -ENOSYS; | ||
109 | #else | ||
110 | struct __sysctl_args32 tmp; | ||
111 | int error; | ||
112 | unsigned int oldlen32; | ||
113 | size_t oldlen, __user *oldlenp = NULL; | ||
114 | unsigned long addr = (((long __force)&args->__unused[0]) + 7) & ~7; | ||
115 | |||
116 | DBG(("sysctl32(%p)\n", args)); | ||
117 | |||
118 | if (copy_from_user(&tmp, args, sizeof(tmp))) | ||
119 | return -EFAULT; | ||
120 | |||
121 | if (tmp.oldval && tmp.oldlenp) { | ||
122 | /* Duh, this is ugly and might not work if sysctl_args | ||
123 | is in read-only memory, but do_sysctl does indirectly | ||
124 | a lot of uaccess in both directions and we'd have to | ||
125 | basically copy the whole sysctl.c here, and | ||
126 | glibc's __sysctl uses rw memory for the structure | ||
127 | anyway. */ | ||
128 | /* a possibly better hack than this, which will avoid the | ||
129 | * problem if the struct is read only, is to push the | ||
130 | * 'oldlen' value out to the user's stack instead. -PB | ||
131 | */ | ||
132 | if (get_user(oldlen32, (u32 *)(u64)tmp.oldlenp)) | ||
133 | return -EFAULT; | ||
134 | oldlen = oldlen32; | ||
135 | if (put_user(oldlen, (size_t *)addr)) | ||
136 | return -EFAULT; | ||
137 | oldlenp = (size_t *)addr; | ||
138 | } | ||
139 | |||
140 | lock_kernel(); | ||
141 | error = do_sysctl((int __user *)(u64)tmp.name, tmp.nlen, | ||
142 | (void __user *)(u64)tmp.oldval, oldlenp, | ||
143 | (void __user *)(u64)tmp.newval, tmp.newlen); | ||
144 | unlock_kernel(); | ||
145 | if (oldlenp) { | ||
146 | if (!error) { | ||
147 | if (get_user(oldlen, (size_t *)addr)) { | ||
148 | error = -EFAULT; | ||
149 | } else { | ||
150 | oldlen32 = oldlen; | ||
151 | if (put_user(oldlen32, (u32 *)(u64)tmp.oldlenp)) | ||
152 | error = -EFAULT; | ||
153 | } | ||
154 | } | ||
155 | if (copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused))) | ||
156 | error = -EFAULT; | ||
157 | } | ||
158 | return error; | ||
159 | #endif | ||
160 | } | ||
161 | |||
162 | #endif /* CONFIG_SYSCTL */ | ||
163 | |||
164 | asmlinkage long sys32_sched_rr_get_interval(pid_t pid, | 93 | asmlinkage long sys32_sched_rr_get_interval(pid_t pid, |
165 | struct compat_timespec __user *interval) | 94 | struct compat_timespec __user *interval) |
166 | { | 95 | { |
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 843f423dec67..01c4fcf8f481 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S | |||
@@ -234,7 +234,7 @@ | |||
234 | ENTRY_SAME(getsid) | 234 | ENTRY_SAME(getsid) |
235 | ENTRY_SAME(fdatasync) | 235 | ENTRY_SAME(fdatasync) |
236 | /* struct __sysctl_args is a mess */ | 236 | /* struct __sysctl_args is a mess */ |
237 | ENTRY_DIFF(sysctl) | 237 | ENTRY_COMP(sysctl) |
238 | ENTRY_SAME(mlock) /* 150 */ | 238 | ENTRY_SAME(mlock) /* 150 */ |
239 | ENTRY_SAME(munlock) | 239 | ENTRY_SAME(munlock) |
240 | ENTRY_SAME(mlockall) | 240 | ENTRY_SAME(mlockall) |
diff --git a/arch/powerpc/include/asm/pmac_low_i2c.h b/arch/powerpc/include/asm/pmac_low_i2c.h index 131011bd7e76..01d71826d92f 100644 --- a/arch/powerpc/include/asm/pmac_low_i2c.h +++ b/arch/powerpc/include/asm/pmac_low_i2c.h | |||
@@ -72,11 +72,7 @@ extern int pmac_i2c_get_type(struct pmac_i2c_bus *bus); | |||
72 | extern int pmac_i2c_get_flags(struct pmac_i2c_bus *bus); | 72 | extern int pmac_i2c_get_flags(struct pmac_i2c_bus *bus); |
73 | extern int pmac_i2c_get_channel(struct pmac_i2c_bus *bus); | 73 | extern int pmac_i2c_get_channel(struct pmac_i2c_bus *bus); |
74 | 74 | ||
75 | /* i2c layer adapter attach/detach */ | 75 | /* i2c layer adapter helpers */ |
76 | extern void pmac_i2c_attach_adapter(struct pmac_i2c_bus *bus, | ||
77 | struct i2c_adapter *adapter); | ||
78 | extern void pmac_i2c_detach_adapter(struct pmac_i2c_bus *bus, | ||
79 | struct i2c_adapter *adapter); | ||
80 | extern struct i2c_adapter *pmac_i2c_get_adapter(struct pmac_i2c_bus *bus); | 76 | extern struct i2c_adapter *pmac_i2c_get_adapter(struct pmac_i2c_bus *bus); |
81 | extern struct pmac_i2c_bus *pmac_i2c_adapter_to_bus(struct i2c_adapter *adapter); | 77 | extern struct pmac_i2c_bus *pmac_i2c_adapter_to_bus(struct i2c_adapter *adapter); |
82 | 78 | ||
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index 6ff04185d2aa..2ab9cbd98826 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h | |||
@@ -1,3 +1,4 @@ | |||
1 | #include <linux/of.h> /* linux/of.h gets to determine #include ordering */ | ||
1 | #ifndef _POWERPC_PROM_H | 2 | #ifndef _POWERPC_PROM_H |
2 | #define _POWERPC_PROM_H | 3 | #define _POWERPC_PROM_H |
3 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
@@ -16,6 +17,7 @@ | |||
16 | * 2 of the License, or (at your option) any later version. | 17 | * 2 of the License, or (at your option) any later version. |
17 | */ | 18 | */ |
18 | #include <linux/types.h> | 19 | #include <linux/types.h> |
20 | #include <linux/of_fdt.h> | ||
19 | #include <linux/proc_fs.h> | 21 | #include <linux/proc_fs.h> |
20 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
21 | #include <asm/irq.h> | 23 | #include <asm/irq.h> |
@@ -28,133 +30,14 @@ | |||
28 | #define of_prop_cmp(s1, s2) strcmp((s1), (s2)) | 30 | #define of_prop_cmp(s1, s2) strcmp((s1), (s2)) |
29 | #define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) | 31 | #define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) |
30 | 32 | ||
31 | /* Definitions used by the flattened device tree */ | ||
32 | #define OF_DT_HEADER 0xd00dfeed /* marker */ | ||
33 | #define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */ | ||
34 | #define OF_DT_END_NODE 0x2 /* End node */ | ||
35 | #define OF_DT_PROP 0x3 /* Property: name off, size, | ||
36 | * content */ | ||
37 | #define OF_DT_NOP 0x4 /* nop */ | ||
38 | #define OF_DT_END 0x9 | ||
39 | |||
40 | #define OF_DT_VERSION 0x10 | ||
41 | |||
42 | /* | ||
43 | * This is what gets passed to the kernel by prom_init or kexec | ||
44 | * | ||
45 | * The dt struct contains the device tree structure, full pathes and | ||
46 | * property contents. The dt strings contain a separate block with just | ||
47 | * the strings for the property names, and is fully page aligned and | ||
48 | * self contained in a page, so that it can be kept around by the kernel, | ||
49 | * each property name appears only once in this page (cheap compression) | ||
50 | * | ||
51 | * the mem_rsvmap contains a map of reserved ranges of physical memory, | ||
52 | * passing it here instead of in the device-tree itself greatly simplifies | ||
53 | * the job of everybody. It's just a list of u64 pairs (base/size) that | ||
54 | * ends when size is 0 | ||
55 | */ | ||
56 | struct boot_param_header | ||
57 | { | ||
58 | u32 magic; /* magic word OF_DT_HEADER */ | ||
59 | u32 totalsize; /* total size of DT block */ | ||
60 | u32 off_dt_struct; /* offset to structure */ | ||
61 | u32 off_dt_strings; /* offset to strings */ | ||
62 | u32 off_mem_rsvmap; /* offset to memory reserve map */ | ||
63 | u32 version; /* format version */ | ||
64 | u32 last_comp_version; /* last compatible version */ | ||
65 | /* version 2 fields below */ | ||
66 | u32 boot_cpuid_phys; /* Physical CPU id we're booting on */ | ||
67 | /* version 3 fields below */ | ||
68 | u32 dt_strings_size; /* size of the DT strings block */ | ||
69 | /* version 17 fields below */ | ||
70 | u32 dt_struct_size; /* size of the DT structure block */ | ||
71 | }; | ||
72 | |||
73 | |||
74 | |||
75 | typedef u32 phandle; | ||
76 | typedef u32 ihandle; | ||
77 | |||
78 | struct property { | ||
79 | char *name; | ||
80 | int length; | ||
81 | void *value; | ||
82 | struct property *next; | ||
83 | }; | ||
84 | |||
85 | struct device_node { | ||
86 | const char *name; | ||
87 | const char *type; | ||
88 | phandle node; | ||
89 | phandle linux_phandle; | ||
90 | char *full_name; | ||
91 | |||
92 | struct property *properties; | ||
93 | struct property *deadprops; /* removed properties */ | ||
94 | struct device_node *parent; | ||
95 | struct device_node *child; | ||
96 | struct device_node *sibling; | ||
97 | struct device_node *next; /* next device of same type */ | ||
98 | struct device_node *allnext; /* next in list of all nodes */ | ||
99 | struct proc_dir_entry *pde; /* this node's proc directory */ | ||
100 | struct kref kref; | ||
101 | unsigned long _flags; | ||
102 | void *data; | ||
103 | }; | ||
104 | |||
105 | extern struct device_node *of_chosen; | 33 | extern struct device_node *of_chosen; |
106 | 34 | ||
107 | static inline int of_node_check_flag(struct device_node *n, unsigned long flag) | ||
108 | { | ||
109 | return test_bit(flag, &n->_flags); | ||
110 | } | ||
111 | |||
112 | static inline void of_node_set_flag(struct device_node *n, unsigned long flag) | ||
113 | { | ||
114 | set_bit(flag, &n->_flags); | ||
115 | } | ||
116 | |||
117 | |||
118 | #define HAVE_ARCH_DEVTREE_FIXUPS | 35 | #define HAVE_ARCH_DEVTREE_FIXUPS |
119 | 36 | ||
120 | static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de) | ||
121 | { | ||
122 | dn->pde = de; | ||
123 | } | ||
124 | |||
125 | |||
126 | extern struct device_node *of_find_all_nodes(struct device_node *prev); | ||
127 | extern struct device_node *of_node_get(struct device_node *node); | ||
128 | extern void of_node_put(struct device_node *node); | ||
129 | |||
130 | /* For scanning the flat device-tree at boot time */ | ||
131 | extern int __init of_scan_flat_dt(int (*it)(unsigned long node, | ||
132 | const char *uname, int depth, | ||
133 | void *data), | ||
134 | void *data); | ||
135 | extern void* __init of_get_flat_dt_prop(unsigned long node, const char *name, | ||
136 | unsigned long *size); | ||
137 | extern int __init of_flat_dt_is_compatible(unsigned long node, const char *name); | ||
138 | extern unsigned long __init of_get_flat_dt_root(void); | ||
139 | |||
140 | /* For updating the device tree at runtime */ | 37 | /* For updating the device tree at runtime */ |
141 | extern void of_attach_node(struct device_node *); | 38 | extern void of_attach_node(struct device_node *); |
142 | extern void of_detach_node(struct device_node *); | 39 | extern void of_detach_node(struct device_node *); |
143 | 40 | ||
144 | /* Other Prototypes */ | ||
145 | extern void finish_device_tree(void); | ||
146 | extern void unflatten_device_tree(void); | ||
147 | extern void early_init_devtree(void *); | ||
148 | extern int machine_is_compatible(const char *compat); | ||
149 | extern void print_properties(struct device_node *node); | ||
150 | extern int prom_n_intr_cells(struct device_node* np); | ||
151 | extern void prom_get_irq_senses(unsigned char *senses, int off, int max); | ||
152 | extern int prom_add_property(struct device_node* np, struct property* prop); | ||
153 | extern int prom_remove_property(struct device_node *np, struct property *prop); | ||
154 | extern int prom_update_property(struct device_node *np, | ||
155 | struct property *newprop, | ||
156 | struct property *oldprop); | ||
157 | |||
158 | #ifdef CONFIG_PPC32 | 41 | #ifdef CONFIG_PPC32 |
159 | /* | 42 | /* |
160 | * PCI <-> OF matching functions | 43 | * PCI <-> OF matching functions |
@@ -178,26 +61,6 @@ extern int release_OF_resource(struct device_node* node, int index); | |||
178 | * OF address retreival & translation | 61 | * OF address retreival & translation |
179 | */ | 62 | */ |
180 | 63 | ||
181 | |||
182 | /* Helper to read a big number; size is in cells (not bytes) */ | ||
183 | static inline u64 of_read_number(const u32 *cell, int size) | ||
184 | { | ||
185 | u64 r = 0; | ||
186 | while (size--) | ||
187 | r = (r << 32) | *(cell++); | ||
188 | return r; | ||
189 | } | ||
190 | |||
191 | /* Like of_read_number, but we want an unsigned long result */ | ||
192 | #ifdef CONFIG_PPC32 | ||
193 | static inline unsigned long of_read_ulong(const u32 *cell, int size) | ||
194 | { | ||
195 | return cell[size-1]; | ||
196 | } | ||
197 | #else | ||
198 | #define of_read_ulong(cell, size) of_read_number(cell, size) | ||
199 | #endif | ||
200 | |||
201 | /* Translate an OF address block into a CPU physical address | 64 | /* Translate an OF address block into a CPU physical address |
202 | */ | 65 | */ |
203 | extern u64 of_translate_address(struct device_node *np, const u32 *addr); | 66 | extern u64 of_translate_address(struct device_node *np, const u32 *addr); |
@@ -349,11 +212,5 @@ extern int of_irq_to_resource(struct device_node *dev, int index, | |||
349 | */ | 212 | */ |
350 | extern void __iomem *of_iomap(struct device_node *device, int index); | 213 | extern void __iomem *of_iomap(struct device_node *device, int index); |
351 | 214 | ||
352 | /* | ||
353 | * NB: This is here while we transition from using asm/prom.h | ||
354 | * to linux/of.h | ||
355 | */ | ||
356 | #include <linux/of.h> | ||
357 | |||
358 | #endif /* __KERNEL__ */ | 215 | #endif /* __KERNEL__ */ |
359 | #endif /* _POWERPC_PROM_H */ | 216 | #endif /* _POWERPC_PROM_H */ |
diff --git a/arch/powerpc/include/asm/socket.h b/arch/powerpc/include/asm/socket.h index 3ab8b3e6feb0..866f7606da68 100644 --- a/arch/powerpc/include/asm/socket.h +++ b/arch/powerpc/include/asm/socket.h | |||
@@ -67,4 +67,6 @@ | |||
67 | #define SO_PROTOCOL 38 | 67 | #define SO_PROTOCOL 38 |
68 | #define SO_DOMAIN 39 | 68 | #define SO_DOMAIN 39 |
69 | 69 | ||
70 | #define SO_RXQ_OVFL 40 | ||
71 | |||
70 | #endif /* _ASM_POWERPC_SOCKET_H */ | 72 | #endif /* _ASM_POWERPC_SOCKET_H */ |
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index 88d9c1d5e5fb..049dda60e475 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c | |||
@@ -110,18 +110,16 @@ int powersave_nap; | |||
110 | */ | 110 | */ |
111 | static ctl_table powersave_nap_ctl_table[]={ | 111 | static ctl_table powersave_nap_ctl_table[]={ |
112 | { | 112 | { |
113 | .ctl_name = KERN_PPC_POWERSAVE_NAP, | ||
114 | .procname = "powersave-nap", | 113 | .procname = "powersave-nap", |
115 | .data = &powersave_nap, | 114 | .data = &powersave_nap, |
116 | .maxlen = sizeof(int), | 115 | .maxlen = sizeof(int), |
117 | .mode = 0644, | 116 | .mode = 0644, |
118 | .proc_handler = &proc_dointvec, | 117 | .proc_handler = proc_dointvec, |
119 | }, | 118 | }, |
120 | {} | 119 | {} |
121 | }; | 120 | }; |
122 | static ctl_table powersave_nap_sysctl_root[] = { | 121 | static ctl_table powersave_nap_sysctl_root[] = { |
123 | { | 122 | { |
124 | .ctl_name = CTL_KERN, | ||
125 | .procname = "kernel", | 123 | .procname = "kernel", |
126 | .mode = 0555, | 124 | .mode = 0555, |
127 | .child = powersave_nap_ctl_table, | 125 | .child = powersave_nap_ctl_table, |
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index d4405b95bfaa..4ec300862466 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c | |||
@@ -1317,29 +1317,6 @@ struct device_node *of_find_next_cache_node(struct device_node *np) | |||
1317 | } | 1317 | } |
1318 | 1318 | ||
1319 | /** | 1319 | /** |
1320 | * of_find_all_nodes - Get next node in global list | ||
1321 | * @prev: Previous node or NULL to start iteration | ||
1322 | * of_node_put() will be called on it | ||
1323 | * | ||
1324 | * Returns a node pointer with refcount incremented, use | ||
1325 | * of_node_put() on it when done. | ||
1326 | */ | ||
1327 | struct device_node *of_find_all_nodes(struct device_node *prev) | ||
1328 | { | ||
1329 | struct device_node *np; | ||
1330 | |||
1331 | read_lock(&devtree_lock); | ||
1332 | np = prev ? prev->allnext : allnodes; | ||
1333 | for (; np != 0; np = np->allnext) | ||
1334 | if (of_node_get(np)) | ||
1335 | break; | ||
1336 | of_node_put(prev); | ||
1337 | read_unlock(&devtree_lock); | ||
1338 | return np; | ||
1339 | } | ||
1340 | EXPORT_SYMBOL(of_find_all_nodes); | ||
1341 | |||
1342 | /** | ||
1343 | * of_node_get - Increment refcount of a node | 1320 | * of_node_get - Increment refcount of a node |
1344 | * @node: Node to inc refcount, NULL is supported to | 1321 | * @node: Node to inc refcount, NULL is supported to |
1345 | * simplify writing of callers | 1322 | * simplify writing of callers |
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index b97c2d67f4ac..c5a4732bcc48 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c | |||
@@ -520,58 +520,6 @@ asmlinkage long compat_sys_umask(u32 mask) | |||
520 | return sys_umask((int)mask); | 520 | return sys_umask((int)mask); |
521 | } | 521 | } |
522 | 522 | ||
523 | #ifdef CONFIG_SYSCTL_SYSCALL | ||
524 | struct __sysctl_args32 { | ||
525 | u32 name; | ||
526 | int nlen; | ||
527 | u32 oldval; | ||
528 | u32 oldlenp; | ||
529 | u32 newval; | ||
530 | u32 newlen; | ||
531 | u32 __unused[4]; | ||
532 | }; | ||
533 | |||
534 | asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args) | ||
535 | { | ||
536 | struct __sysctl_args32 tmp; | ||
537 | int error; | ||
538 | size_t oldlen; | ||
539 | size_t __user *oldlenp = NULL; | ||
540 | unsigned long addr = (((unsigned long)&args->__unused[0]) + 7) & ~7; | ||
541 | |||
542 | if (copy_from_user(&tmp, args, sizeof(tmp))) | ||
543 | return -EFAULT; | ||
544 | |||
545 | if (tmp.oldval && tmp.oldlenp) { | ||
546 | /* Duh, this is ugly and might not work if sysctl_args | ||
547 | is in read-only memory, but do_sysctl does indirectly | ||
548 | a lot of uaccess in both directions and we'd have to | ||
549 | basically copy the whole sysctl.c here, and | ||
550 | glibc's __sysctl uses rw memory for the structure | ||
551 | anyway. */ | ||
552 | oldlenp = (size_t __user *)addr; | ||
553 | if (get_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) || | ||
554 | put_user(oldlen, oldlenp)) | ||
555 | return -EFAULT; | ||
556 | } | ||
557 | |||
558 | lock_kernel(); | ||
559 | error = do_sysctl(compat_ptr(tmp.name), tmp.nlen, | ||
560 | compat_ptr(tmp.oldval), oldlenp, | ||
561 | compat_ptr(tmp.newval), tmp.newlen); | ||
562 | unlock_kernel(); | ||
563 | if (oldlenp) { | ||
564 | if (!error) { | ||
565 | if (get_user(oldlen, oldlenp) || | ||
566 | put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp))) | ||
567 | error = -EFAULT; | ||
568 | } | ||
569 | copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)); | ||
570 | } | ||
571 | return error; | ||
572 | } | ||
573 | #endif | ||
574 | |||
575 | unsigned long compat_sys_mmap2(unsigned long addr, size_t len, | 523 | unsigned long compat_sys_mmap2(unsigned long addr, size_t len, |
576 | unsigned long prot, unsigned long flags, | 524 | unsigned long prot, unsigned long flags, |
577 | unsigned long fd, unsigned long pgoff) | 525 | unsigned long fd, unsigned long pgoff) |
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 2a4551f78f60..5902bbc2411e 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
@@ -78,8 +78,9 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
78 | return r; | 78 | return r; |
79 | } | 79 | } |
80 | 80 | ||
81 | void kvm_arch_hardware_enable(void *garbage) | 81 | int kvm_arch_hardware_enable(void *garbage) |
82 | { | 82 | { |
83 | return 0; | ||
83 | } | 84 | } |
84 | 85 | ||
85 | void kvm_arch_hardware_disable(void *garbage) | 86 | void kvm_arch_hardware_disable(void *garbage) |
@@ -421,7 +422,7 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
421 | 422 | ||
422 | switch (ioctl) { | 423 | switch (ioctl) { |
423 | default: | 424 | default: |
424 | r = -EINVAL; | 425 | r = -ENOTTY; |
425 | } | 426 | } |
426 | 427 | ||
427 | return r; | 428 | return r; |
diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h index 806ef67868bd..8167d42a776f 100644 --- a/arch/powerpc/kvm/timing.h +++ b/arch/powerpc/kvm/timing.h | |||
@@ -51,7 +51,7 @@ static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type) | |||
51 | 51 | ||
52 | /* The BUILD_BUG_ON below breaks in funny ways, commented out | 52 | /* The BUILD_BUG_ON below breaks in funny ways, commented out |
53 | * for now ... -BenH | 53 | * for now ... -BenH |
54 | BUILD_BUG_ON(__builtin_constant_p(type)); | 54 | BUILD_BUG_ON(!__builtin_constant_p(type)); |
55 | */ | 55 | */ |
56 | switch (type) { | 56 | switch (type) { |
57 | case EXT_INTR_EXITS: | 57 | case EXT_INTR_EXITS: |
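The one-character fix above corrects the polarity of the (still commented-out) assertion: BUILD_BUG_ON(cond) breaks the build when cond is nonzero, so demanding that type be a compile-time constant means asserting !__builtin_constant_p(type). A toy illustration of the macro's polarity, not taken from the patch:

/* Sketch (illustrative, not from this patch): BUILD_BUG_ON() only compiles
 * while its argument evaluates to a constant zero. */
#include <linux/kernel.h>

static inline void sanity_checks(void)
{
        BUILD_BUG_ON(sizeof(long) < sizeof(int));  /* false, so this passes */
        /* BUILD_BUG_ON(1);                           would fail the build  */
}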
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index 414ca9849f23..345e2da56767 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/interrupt.h> | 42 | #include <linux/interrupt.h> |
43 | #include <linux/timer.h> | 43 | #include <linux/timer.h> |
44 | #include <linux/mutex.h> | 44 | #include <linux/mutex.h> |
45 | #include <linux/i2c.h> | ||
45 | #include <asm/keylargo.h> | 46 | #include <asm/keylargo.h> |
46 | #include <asm/uninorth.h> | 47 | #include <asm/uninorth.h> |
47 | #include <asm/io.h> | 48 | #include <asm/io.h> |
@@ -80,7 +81,7 @@ struct pmac_i2c_bus | |||
80 | struct device_node *busnode; | 81 | struct device_node *busnode; |
81 | int type; | 82 | int type; |
82 | int flags; | 83 | int flags; |
83 | struct i2c_adapter *adapter; | 84 | struct i2c_adapter adapter; |
84 | void *hostdata; | 85 | void *hostdata; |
85 | int channel; /* some hosts have multiple */ | 86 | int channel; /* some hosts have multiple */ |
86 | int mode; /* current mode */ | 87 | int mode; /* current mode */ |
@@ -1014,25 +1015,9 @@ int pmac_i2c_get_channel(struct pmac_i2c_bus *bus) | |||
1014 | EXPORT_SYMBOL_GPL(pmac_i2c_get_channel); | 1015 | EXPORT_SYMBOL_GPL(pmac_i2c_get_channel); |
1015 | 1016 | ||
1016 | 1017 | ||
1017 | void pmac_i2c_attach_adapter(struct pmac_i2c_bus *bus, | ||
1018 | struct i2c_adapter *adapter) | ||
1019 | { | ||
1020 | WARN_ON(bus->adapter != NULL); | ||
1021 | bus->adapter = adapter; | ||
1022 | } | ||
1023 | EXPORT_SYMBOL_GPL(pmac_i2c_attach_adapter); | ||
1024 | |||
1025 | void pmac_i2c_detach_adapter(struct pmac_i2c_bus *bus, | ||
1026 | struct i2c_adapter *adapter) | ||
1027 | { | ||
1028 | WARN_ON(bus->adapter != adapter); | ||
1029 | bus->adapter = NULL; | ||
1030 | } | ||
1031 | EXPORT_SYMBOL_GPL(pmac_i2c_detach_adapter); | ||
1032 | |||
1033 | struct i2c_adapter *pmac_i2c_get_adapter(struct pmac_i2c_bus *bus) | 1018 | struct i2c_adapter *pmac_i2c_get_adapter(struct pmac_i2c_bus *bus) |
1034 | { | 1019 | { |
1035 | return bus->adapter; | 1020 | return &bus->adapter; |
1036 | } | 1021 | } |
1037 | EXPORT_SYMBOL_GPL(pmac_i2c_get_adapter); | 1022 | EXPORT_SYMBOL_GPL(pmac_i2c_get_adapter); |
1038 | 1023 | ||
@@ -1041,7 +1026,7 @@ struct pmac_i2c_bus *pmac_i2c_adapter_to_bus(struct i2c_adapter *adapter) | |||
1041 | struct pmac_i2c_bus *bus; | 1026 | struct pmac_i2c_bus *bus; |
1042 | 1027 | ||
1043 | list_for_each_entry(bus, &pmac_i2c_busses, link) | 1028 | list_for_each_entry(bus, &pmac_i2c_busses, link) |
1044 | if (bus->adapter == adapter) | 1029 | if (&bus->adapter == adapter) |
1045 | return bus; | 1030 | return bus; |
1046 | return NULL; | 1031 | return NULL; |
1047 | } | 1032 | } |
@@ -1053,7 +1038,7 @@ int pmac_i2c_match_adapter(struct device_node *dev, struct i2c_adapter *adapter) | |||
1053 | 1038 | ||
1054 | if (bus == NULL) | 1039 | if (bus == NULL) |
1055 | return 0; | 1040 | return 0; |
1056 | return (bus->adapter == adapter); | 1041 | return (&bus->adapter == adapter); |
1057 | } | 1042 | } |
1058 | EXPORT_SYMBOL_GPL(pmac_i2c_match_adapter); | 1043 | EXPORT_SYMBOL_GPL(pmac_i2c_match_adapter); |
1059 | 1044 | ||
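Embedding struct i2c_adapter directly in struct pmac_i2c_bus removes the attach/detach bookkeeping: matching an adapter back to its bus becomes an address comparison, and the two objects share one lifetime. A hedged sketch of the general embedded-adapter pattern; the my_bus names are illustrative, and the real powermac code above still walks its bus list rather than using container_of.

/* Sketch (illustrative, not from this patch) of the embedded-adapter
 * pattern the hunk above adopts. */
#include <linux/i2c.h>
#include <linux/kernel.h>        /* container_of(), snprintf() */

struct my_bus {
        int                channel;
        struct i2c_adapter adapter;      /* embedded, not a pointer */
};

static struct my_bus *adapter_to_my_bus(struct i2c_adapter *adapter)
{
        /* The adapter lives inside the bus, so no registry walk is needed. */
        return container_of(adapter, struct my_bus, adapter);
}

static int my_bus_register(struct my_bus *bus)
{
        /* .owner/.algo/.dev setup omitted for brevity */
        snprintf(bus->adapter.name, sizeof(bus->adapter.name),
                 "my-bus %d", bus->channel);
        return i2c_add_adapter(&bus->adapter);
}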
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index b55fd7ed1c31..495589950dc7 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c | |||
@@ -61,12 +61,12 @@ static struct ctl_table appldata_table[] = { | |||
61 | { | 61 | { |
62 | .procname = "timer", | 62 | .procname = "timer", |
63 | .mode = S_IRUGO | S_IWUSR, | 63 | .mode = S_IRUGO | S_IWUSR, |
64 | .proc_handler = &appldata_timer_handler, | 64 | .proc_handler = appldata_timer_handler, |
65 | }, | 65 | }, |
66 | { | 66 | { |
67 | .procname = "interval", | 67 | .procname = "interval", |
68 | .mode = S_IRUGO | S_IWUSR, | 68 | .mode = S_IRUGO | S_IWUSR, |
69 | .proc_handler = &appldata_interval_handler, | 69 | .proc_handler = appldata_interval_handler, |
70 | }, | 70 | }, |
71 | { }, | 71 | { }, |
72 | }; | 72 | }; |
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c index fa741f84c5b9..4ce7fa95880f 100644 --- a/arch/s390/appldata/appldata_net_sum.c +++ b/arch/s390/appldata/appldata_net_sum.c | |||
@@ -83,8 +83,9 @@ static void appldata_get_net_sum_data(void *data) | |||
83 | rx_dropped = 0; | 83 | rx_dropped = 0; |
84 | tx_dropped = 0; | 84 | tx_dropped = 0; |
85 | collisions = 0; | 85 | collisions = 0; |
86 | read_lock(&dev_base_lock); | 86 | |
87 | for_each_netdev(&init_net, dev) { | 87 | rcu_read_lock(); |
88 | for_each_netdev_rcu(&init_net, dev) { | ||
88 | const struct net_device_stats *stats = dev_get_stats(dev); | 89 | const struct net_device_stats *stats = dev_get_stats(dev); |
89 | 90 | ||
90 | rx_packets += stats->rx_packets; | 91 | rx_packets += stats->rx_packets; |
@@ -98,7 +99,8 @@ static void appldata_get_net_sum_data(void *data) | |||
98 | collisions += stats->collisions; | 99 | collisions += stats->collisions; |
99 | i++; | 100 | i++; |
100 | } | 101 | } |
101 | read_unlock(&dev_base_lock); | 102 | rcu_read_unlock(); |
103 | |||
102 | net_data->nr_interfaces = i; | 104 | net_data->nr_interfaces = i; |
103 | net_data->rx_packets = rx_packets; | 105 | net_data->rx_packets = rx_packets; |
104 | net_data->tx_packets = tx_packets; | 106 | net_data->tx_packets = tx_packets; |
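The appldata hunk above trades the dev_base_lock read lock for an RCU read-side section with for_each_netdev_rcu(). A condensed sketch of the resulting pattern, showing only one counter; nothing inside the RCU section may sleep:

	static unsigned long count_rx_packets(void)
	{
		struct net_device *dev;
		unsigned long rx_packets = 0;

		rcu_read_lock();
		for_each_netdev_rcu(&init_net, dev) {
			const struct net_device_stats *stats = dev_get_stats(dev);

			rx_packets += stats->rx_packets;
			/* the real code accumulates the remaining counters here */
		}
		rcu_read_unlock();

		return rx_packets;
	}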
diff --git a/arch/s390/include/asm/kvm.h b/arch/s390/include/asm/kvm.h index 3dfcaeb5d7f4..82b32a100c7d 100644 --- a/arch/s390/include/asm/kvm.h +++ b/arch/s390/include/asm/kvm.h | |||
@@ -1,6 +1,5 @@ | |||
1 | #ifndef __LINUX_KVM_S390_H | 1 | #ifndef __LINUX_KVM_S390_H |
2 | #define __LINUX_KVM_S390_H | 2 | #define __LINUX_KVM_S390_H |
3 | |||
4 | /* | 3 | /* |
5 | * asm-s390/kvm.h - KVM s390 specific structures and definitions | 4 | * asm-s390/kvm.h - KVM s390 specific structures and definitions |
6 | * | 5 | * |
@@ -15,6 +14,8 @@ | |||
15 | */ | 14 | */ |
16 | #include <linux/types.h> | 15 | #include <linux/types.h> |
17 | 16 | ||
17 | #define __KVM_S390 | ||
18 | |||
18 | /* for KVM_GET_REGS and KVM_SET_REGS */ | 19 | /* for KVM_GET_REGS and KVM_SET_REGS */ |
19 | struct kvm_regs { | 20 | struct kvm_regs { |
20 | /* general purpose regs for s390 */ | 21 | /* general purpose regs for s390 */ |
diff --git a/arch/s390/include/asm/socket.h b/arch/s390/include/asm/socket.h index e42df89a0b85..fdff1e995c73 100644 --- a/arch/s390/include/asm/socket.h +++ b/arch/s390/include/asm/socket.h | |||
@@ -68,4 +68,6 @@ | |||
68 | #define SO_PROTOCOL 38 | 68 | #define SO_PROTOCOL 38 |
69 | #define SO_DOMAIN 39 | 69 | #define SO_DOMAIN 39 |
70 | 70 | ||
71 | #define SO_RXQ_OVFL 40 | ||
72 | |||
71 | #endif /* _ASM_SOCKET_H */ | 73 | #endif /* _ASM_SOCKET_H */ |
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 0debcec23a39..fda1a8123f9b 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c | |||
@@ -527,59 +527,6 @@ asmlinkage long sys32_sendfile64(int out_fd, int in_fd, | |||
527 | return ret; | 527 | return ret; |
528 | } | 528 | } |
529 | 529 | ||
530 | #ifdef CONFIG_SYSCTL_SYSCALL | ||
531 | struct __sysctl_args32 { | ||
532 | u32 name; | ||
533 | int nlen; | ||
534 | u32 oldval; | ||
535 | u32 oldlenp; | ||
536 | u32 newval; | ||
537 | u32 newlen; | ||
538 | u32 __unused[4]; | ||
539 | }; | ||
540 | |||
541 | asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args) | ||
542 | { | ||
543 | struct __sysctl_args32 tmp; | ||
544 | int error; | ||
545 | size_t oldlen; | ||
546 | size_t __user *oldlenp = NULL; | ||
547 | unsigned long addr = (((unsigned long)&args->__unused[0]) + 7) & ~7; | ||
548 | |||
549 | if (copy_from_user(&tmp, args, sizeof(tmp))) | ||
550 | return -EFAULT; | ||
551 | |||
552 | if (tmp.oldval && tmp.oldlenp) { | ||
553 | /* Duh, this is ugly and might not work if sysctl_args | ||
554 | is in read-only memory, but do_sysctl does indirectly | ||
555 | a lot of uaccess in both directions and we'd have to | ||
556 | basically copy the whole sysctl.c here, and | ||
557 | glibc's __sysctl uses rw memory for the structure | ||
558 | anyway. */ | ||
559 | if (get_user(oldlen, (u32 __user *)compat_ptr(tmp.oldlenp)) || | ||
560 | put_user(oldlen, (size_t __user *)addr)) | ||
561 | return -EFAULT; | ||
562 | oldlenp = (size_t __user *)addr; | ||
563 | } | ||
564 | |||
565 | lock_kernel(); | ||
566 | error = do_sysctl(compat_ptr(tmp.name), tmp.nlen, compat_ptr(tmp.oldval), | ||
567 | oldlenp, compat_ptr(tmp.newval), tmp.newlen); | ||
568 | unlock_kernel(); | ||
569 | if (oldlenp) { | ||
570 | if (!error) { | ||
571 | if (get_user(oldlen, (size_t __user *)addr) || | ||
572 | put_user(oldlen, (u32 __user *)compat_ptr(tmp.oldlenp))) | ||
573 | error = -EFAULT; | ||
574 | } | ||
575 | if (copy_to_user(args->__unused, tmp.__unused, | ||
576 | sizeof(tmp.__unused))) | ||
577 | error = -EFAULT; | ||
578 | } | ||
579 | return error; | ||
580 | } | ||
581 | #endif | ||
582 | |||
583 | struct stat64_emu31 { | 530 | struct stat64_emu31 { |
584 | unsigned long long st_dev; | 531 | unsigned long long st_dev; |
585 | unsigned int __pad1; | 532 | unsigned int __pad1; |
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h index c07f9ca05ade..45e9092b3aad 100644 --- a/arch/s390/kernel/compat_linux.h +++ b/arch/s390/kernel/compat_linux.h | |||
@@ -162,7 +162,6 @@ struct ucontext32 { | |||
162 | compat_sigset_t uc_sigmask; /* mask last for extensibility */ | 162 | compat_sigset_t uc_sigmask; /* mask last for extensibility */ |
163 | }; | 163 | }; |
164 | 164 | ||
165 | struct __sysctl_args32; | ||
166 | struct stat64_emu31; | 165 | struct stat64_emu31; |
167 | struct mmap_arg_struct_emu31; | 166 | struct mmap_arg_struct_emu31; |
168 | struct fadvise64_64_args; | 167 | struct fadvise64_64_args; |
@@ -212,7 +211,6 @@ long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, | |||
212 | size_t count); | 211 | size_t count); |
213 | long sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, | 212 | long sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, |
214 | s32 count); | 213 | s32 count); |
215 | long sys32_sysctl(struct __sysctl_args32 __user *args); | ||
216 | long sys32_stat64(char __user * filename, struct stat64_emu31 __user * statbuf); | 214 | long sys32_stat64(char __user * filename, struct stat64_emu31 __user * statbuf); |
217 | long sys32_lstat64(char __user * filename, | 215 | long sys32_lstat64(char __user * filename, |
218 | struct stat64_emu31 __user * statbuf); | 216 | struct stat64_emu31 __user * statbuf); |
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index cbd9901dc0f8..30de2d0e52bb 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S | |||
@@ -689,8 +689,6 @@ sys32_fdatasync_wrapper: | |||
689 | llgfr %r2,%r2 # unsigned int | 689 | llgfr %r2,%r2 # unsigned int |
690 | jg sys_fdatasync # branch to system call | 690 | jg sys_fdatasync # branch to system call |
691 | 691 | ||
692 | #sys32_sysctl_wrapper # tbd | ||
693 | |||
694 | .globl sys32_mlock_wrapper | 692 | .globl sys32_mlock_wrapper |
695 | sys32_mlock_wrapper: | 693 | sys32_mlock_wrapper: |
696 | llgfr %r2,%r2 # unsigned long | 694 | llgfr %r2,%r2 # unsigned long |
@@ -1087,8 +1085,8 @@ sys32_stime_wrapper: | |||
1087 | 1085 | ||
1088 | .globl sys32_sysctl_wrapper | 1086 | .globl sys32_sysctl_wrapper |
1089 | sys32_sysctl_wrapper: | 1087 | sys32_sysctl_wrapper: |
1090 | llgtr %r2,%r2 # struct __sysctl_args32 * | 1088 | llgtr %r2,%r2 # struct compat_sysctl_args * |
1091 | jg sys32_sysctl | 1089 | jg compat_sys_sysctl |
1092 | 1090 | ||
1093 | .globl sys32_fstat64_wrapper | 1091 | .globl sys32_fstat64_wrapper |
1094 | sys32_fstat64_wrapper: | 1092 | sys32_fstat64_wrapper: |
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index 20f282c911c2..071c81f179ef 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c | |||
@@ -893,35 +893,30 @@ s390dbf_procactive(ctl_table *table, int write, | |||
893 | 893 | ||
894 | static struct ctl_table s390dbf_table[] = { | 894 | static struct ctl_table s390dbf_table[] = { |
895 | { | 895 | { |
896 | .ctl_name = CTL_S390DBF_STOPPABLE, | ||
897 | .procname = "debug_stoppable", | 896 | .procname = "debug_stoppable", |
898 | .data = &debug_stoppable, | 897 | .data = &debug_stoppable, |
899 | .maxlen = sizeof(int), | 898 | .maxlen = sizeof(int), |
900 | .mode = S_IRUGO | S_IWUSR, | 899 | .mode = S_IRUGO | S_IWUSR, |
901 | .proc_handler = &proc_dointvec, | 900 | .proc_handler = proc_dointvec, |
902 | .strategy = &sysctl_intvec, | ||
903 | }, | 901 | }, |
904 | { | 902 | { |
905 | .ctl_name = CTL_S390DBF_ACTIVE, | ||
906 | .procname = "debug_active", | 903 | .procname = "debug_active", |
907 | .data = &debug_active, | 904 | .data = &debug_active, |
908 | .maxlen = sizeof(int), | 905 | .maxlen = sizeof(int), |
909 | .mode = S_IRUGO | S_IWUSR, | 906 | .mode = S_IRUGO | S_IWUSR, |
910 | .proc_handler = &s390dbf_procactive, | 907 | .proc_handler = s390dbf_procactive, |
911 | .strategy = &sysctl_intvec, | ||
912 | }, | 908 | }, |
913 | { .ctl_name = 0 } | 909 | { } |
914 | }; | 910 | }; |
915 | 911 | ||
916 | static struct ctl_table s390dbf_dir_table[] = { | 912 | static struct ctl_table s390dbf_dir_table[] = { |
917 | { | 913 | { |
918 | .ctl_name = CTL_S390DBF, | ||
919 | .procname = "s390dbf", | 914 | .procname = "s390dbf", |
920 | .maxlen = 0, | 915 | .maxlen = 0, |
921 | .mode = S_IRUGO | S_IXUGO, | 916 | .mode = S_IRUGO | S_IXUGO, |
922 | .child = s390dbf_table, | 917 | .child = s390dbf_table, |
923 | }, | 918 | }, |
924 | { .ctl_name = 0 } | 919 | { } |
925 | }; | 920 | }; |
926 | 921 | ||
927 | static struct ctl_table_header *s390dbf_sysctl_header; | 922 | static struct ctl_table_header *s390dbf_sysctl_header; |
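The s390dbf hunk above is part of the tree-wide sysctl cleanup: the binary-sysctl fields (.ctl_name, .strategy) are dropped, .proc_handler is assigned without the address-of operator, and the table is terminated by an empty entry. A minimal sketch of a table in the new style, with a hypothetical knob name:

	static int example_knob;			/* hypothetical variable */

	static struct ctl_table example_table[] = {
		{
			.procname	= "example_knob",	/* hypothetical name */
			.data		= &example_knob,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }					/* empty entry terminates the table */
	};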
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 07ced89740d7..f8bcaefd7d34 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -74,9 +74,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
74 | static unsigned long long *facilities; | 74 | static unsigned long long *facilities; |
75 | 75 | ||
76 | /* Section: not file related */ | 76 | /* Section: not file related */ |
77 | void kvm_arch_hardware_enable(void *garbage) | 77 | int kvm_arch_hardware_enable(void *garbage) |
78 | { | 78 | { |
79 | /* every s390 is virtualization enabled ;-) */ | 79 | /* every s390 is virtualization enabled ;-) */ |
80 | return 0; | ||
80 | } | 81 | } |
81 | 82 | ||
82 | void kvm_arch_hardware_disable(void *garbage) | 83 | void kvm_arch_hardware_disable(void *garbage) |
@@ -116,10 +117,16 @@ long kvm_arch_dev_ioctl(struct file *filp, | |||
116 | 117 | ||
117 | int kvm_dev_ioctl_check_extension(long ext) | 118 | int kvm_dev_ioctl_check_extension(long ext) |
118 | { | 119 | { |
120 | int r; | ||
121 | |||
119 | switch (ext) { | 122 | switch (ext) { |
123 | case KVM_CAP_S390_PSW: | ||
124 | r = 1; | ||
125 | break; | ||
120 | default: | 126 | default: |
121 | return 0; | 127 | r = 0; |
122 | } | 128 | } |
129 | return r; | ||
123 | } | 130 | } |
124 | 131 | ||
125 | /* Section: vm related */ | 132 | /* Section: vm related */ |
@@ -150,7 +157,7 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
150 | break; | 157 | break; |
151 | } | 158 | } |
152 | default: | 159 | default: |
153 | r = -EINVAL; | 160 | r = -ENOTTY; |
154 | } | 161 | } |
155 | 162 | ||
156 | return r; | 163 | return r; |
@@ -419,8 +426,10 @@ static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw) | |||
419 | vcpu_load(vcpu); | 426 | vcpu_load(vcpu); |
420 | if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING) | 427 | if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING) |
421 | rc = -EBUSY; | 428 | rc = -EBUSY; |
422 | else | 429 | else { |
423 | vcpu->arch.sie_block->gpsw = psw; | 430 | vcpu->run->psw_mask = psw.mask; |
431 | vcpu->run->psw_addr = psw.addr; | ||
432 | } | ||
424 | vcpu_put(vcpu); | 433 | vcpu_put(vcpu); |
425 | return rc; | 434 | return rc; |
426 | } | 435 | } |
@@ -508,9 +517,6 @@ rerun_vcpu: | |||
508 | 517 | ||
509 | switch (kvm_run->exit_reason) { | 518 | switch (kvm_run->exit_reason) { |
510 | case KVM_EXIT_S390_SIEIC: | 519 | case KVM_EXIT_S390_SIEIC: |
511 | vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask; | ||
512 | vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr; | ||
513 | break; | ||
514 | case KVM_EXIT_UNKNOWN: | 520 | case KVM_EXIT_UNKNOWN: |
515 | case KVM_EXIT_INTR: | 521 | case KVM_EXIT_INTR: |
516 | case KVM_EXIT_S390_RESET: | 522 | case KVM_EXIT_S390_RESET: |
@@ -519,6 +525,9 @@ rerun_vcpu: | |||
519 | BUG(); | 525 | BUG(); |
520 | } | 526 | } |
521 | 527 | ||
528 | vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; | ||
529 | vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; | ||
530 | |||
522 | might_fault(); | 531 | might_fault(); |
523 | 532 | ||
524 | do { | 533 | do { |
@@ -538,8 +547,6 @@ rerun_vcpu: | |||
538 | /* intercept cannot be handled in-kernel, prepare kvm-run */ | 547 | /* intercept cannot be handled in-kernel, prepare kvm-run */ |
539 | kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; | 548 | kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; |
540 | kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; | 549 | kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; |
541 | kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask; | ||
542 | kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr; | ||
543 | kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; | 550 | kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; |
544 | kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; | 551 | kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; |
545 | rc = 0; | 552 | rc = 0; |
@@ -551,6 +558,9 @@ rerun_vcpu: | |||
551 | rc = 0; | 558 | rc = 0; |
552 | } | 559 | } |
553 | 560 | ||
561 | kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; | ||
562 | kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; | ||
563 | |||
554 | if (vcpu->sigset_active) | 564 | if (vcpu->sigset_active) |
555 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); | 565 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); |
556 | 566 | ||
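In the kvm-s390 hunk above the guest PSW moves out of the KVM_EXIT_S390_SIEIC payload and is mirrored in the psw_mask/psw_addr fields of struct kvm_run on every entry and exit. A hedged, s390-only userspace sketch of reading it back after KVM_RUN; the helper is illustrative and 'run' is assumed to be the mmap()ed vcpu run structure:

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static void dump_guest_psw(int vcpu_fd, struct kvm_run *run)
	{
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			perror("KVM_RUN");

		/* Valid for every exit reason now, not just KVM_EXIT_S390_SIEIC. */
		printf("guest PSW: mask=%016llx addr=%016llx\n",
		       (unsigned long long)run->psw_mask,
		       (unsigned long long)run->psw_addr);
	}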
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 40c8c6748cfe..15ee1111de58 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c | |||
@@ -188,9 +188,9 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, | |||
188 | 188 | ||
189 | /* make sure that the new value is valid memory */ | 189 | /* make sure that the new value is valid memory */ |
190 | address = address & 0x7fffe000u; | 190 | address = address & 0x7fffe000u; |
191 | if ((copy_from_guest(vcpu, &tmp, | 191 | if ((copy_from_user(&tmp, (void __user *) |
192 | (u64) (address + vcpu->arch.sie_block->gmsor) , 1)) || | 192 | (address + vcpu->arch.sie_block->gmsor) , 1)) || |
193 | (copy_from_guest(vcpu, &tmp, (u64) (address + | 193 | (copy_from_user(&tmp, (void __user *)(address + |
194 | vcpu->arch.sie_block->gmsor + PAGE_SIZE), 1))) { | 194 | vcpu->arch.sie_block->gmsor + PAGE_SIZE), 1))) { |
195 | *reg |= SIGP_STAT_INVALID_PARAMETER; | 195 | *reg |= SIGP_STAT_INVALID_PARAMETER; |
196 | return 1; /* invalid parameter */ | 196 | return 1; /* invalid parameter */ |
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index b201135cc18c..ff58779bf7e9 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c | |||
@@ -343,30 +343,29 @@ static struct ctl_table cmm_table[] = { | |||
343 | { | 343 | { |
344 | .procname = "cmm_pages", | 344 | .procname = "cmm_pages", |
345 | .mode = 0644, | 345 | .mode = 0644, |
346 | .proc_handler = &cmm_pages_handler, | 346 | .proc_handler = cmm_pages_handler, |
347 | }, | 347 | }, |
348 | { | 348 | { |
349 | .procname = "cmm_timed_pages", | 349 | .procname = "cmm_timed_pages", |
350 | .mode = 0644, | 350 | .mode = 0644, |
351 | .proc_handler = &cmm_pages_handler, | 351 | .proc_handler = cmm_pages_handler, |
352 | }, | 352 | }, |
353 | { | 353 | { |
354 | .procname = "cmm_timeout", | 354 | .procname = "cmm_timeout", |
355 | .mode = 0644, | 355 | .mode = 0644, |
356 | .proc_handler = &cmm_timeout_handler, | 356 | .proc_handler = cmm_timeout_handler, |
357 | }, | 357 | }, |
358 | { .ctl_name = 0 } | 358 | { } |
359 | }; | 359 | }; |
360 | 360 | ||
361 | static struct ctl_table cmm_dir_table[] = { | 361 | static struct ctl_table cmm_dir_table[] = { |
362 | { | 362 | { |
363 | .ctl_name = CTL_VM, | ||
364 | .procname = "vm", | 363 | .procname = "vm", |
365 | .maxlen = 0, | 364 | .maxlen = 0, |
366 | .mode = 0555, | 365 | .mode = 0555, |
367 | .child = cmm_table, | 366 | .child = cmm_table, |
368 | }, | 367 | }, |
369 | { .ctl_name = 0 } | 368 | { } |
370 | }; | 369 | }; |
371 | #endif | 370 | #endif |
372 | 371 | ||
diff --git a/arch/sh/boards/mach-hp6xx/setup.c b/arch/sh/boards/mach-hp6xx/setup.c index 8f305b36358b..e6dd5e96321e 100644 --- a/arch/sh/boards/mach-hp6xx/setup.c +++ b/arch/sh/boards/mach-hp6xx/setup.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
15 | #include <linux/irq.h> | 15 | #include <linux/irq.h> |
16 | #include <sound/sh_dac_audio.h> | ||
16 | #include <asm/hd64461.h> | 17 | #include <asm/hd64461.h> |
17 | #include <asm/io.h> | 18 | #include <asm/io.h> |
18 | #include <mach/hp6xx.h> | 19 | #include <mach/hp6xx.h> |
@@ -51,9 +52,63 @@ static struct platform_device jornadakbd_device = { | |||
51 | .id = -1, | 52 | .id = -1, |
52 | }; | 53 | }; |
53 | 54 | ||
55 | static void dac_audio_start(struct dac_audio_pdata *pdata) | ||
56 | { | ||
57 | u16 v; | ||
58 | u8 v8; | ||
59 | |||
60 | /* HP Jornada 680/690 speaker on */ | ||
61 | v = inw(HD64461_GPADR); | ||
62 | v &= ~HD64461_GPADR_SPEAKER; | ||
63 | outw(v, HD64461_GPADR); | ||
64 | |||
65 | /* HP Palmtop 620lx/660lx speaker on */ | ||
66 | v8 = inb(PKDR); | ||
67 | v8 &= ~PKDR_SPEAKER; | ||
68 | outb(v8, PKDR); | ||
69 | |||
70 | sh_dac_enable(pdata->channel); | ||
71 | } | ||
72 | |||
73 | static void dac_audio_stop(struct dac_audio_pdata *pdata) | ||
74 | { | ||
75 | u16 v; | ||
76 | u8 v8; | ||
77 | |||
78 | /* HP Jornada 680/690 speaker off */ | ||
79 | v = inw(HD64461_GPADR); | ||
80 | v |= HD64461_GPADR_SPEAKER; | ||
81 | outw(v, HD64461_GPADR); | ||
82 | |||
83 | /* HP Palmtop 620lx/660lx speaker off */ | ||
84 | v8 = inb(PKDR); | ||
85 | v8 |= PKDR_SPEAKER; | ||
86 | outb(v8, PKDR); | ||
87 | |||
88 | sh_dac_output(0, pdata->channel); | ||
89 | sh_dac_disable(pdata->channel); | ||
90 | } | ||
91 | |||
92 | static struct dac_audio_pdata dac_audio_platform_data = { | ||
93 | .buffer_size = 64000, | ||
94 | .channel = 1, | ||
95 | .start = dac_audio_start, | ||
96 | .stop = dac_audio_stop, | ||
97 | }; | ||
98 | |||
99 | static struct platform_device dac_audio_device = { | ||
100 | .name = "dac_audio", | ||
101 | .id = -1, | ||
102 | .dev = { | ||
103 | .platform_data = &dac_audio_platform_data, | ||
104 | } | ||
105 | |||
106 | }; | ||
107 | |||
54 | static struct platform_device *hp6xx_devices[] __initdata = { | 108 | static struct platform_device *hp6xx_devices[] __initdata = { |
55 | &cf_ide_device, | 109 | &cf_ide_device, |
56 | &jornadakbd_device, | 110 | &jornadakbd_device, |
111 | &dac_audio_device, | ||
57 | }; | 112 | }; |
58 | 113 | ||
59 | static void __init hp6xx_init_irq(void) | 114 | static void __init hp6xx_init_irq(void) |
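The hp6xx hunk above registers a "dac_audio" platform device whose platform data carries the board-specific speaker start/stop hooks and DAC channel. A hedged sketch of how the matching driver might consume that data; the probe body is illustrative only:

	#include <linux/platform_device.h>
	#include <sound/sh_dac_audio.h>

	static int dac_audio_probe(struct platform_device *pdev)
	{
		struct dac_audio_pdata *pdata = pdev->dev.platform_data;

		/* Board hook: speaker on, DAC channel enabled. */
		pdata->start(pdata);

		/* the real driver would register a sound device here, using
		 * pdata->channel and pdata->buffer_size */
		return 0;
	}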
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c index e78c3be8ad2f..0894bba9fade 100644 --- a/arch/sh/boards/mach-se/7724/setup.c +++ b/arch/sh/boards/mach-se/7724/setup.c | |||
@@ -313,6 +313,9 @@ static struct platform_device fsi_device = { | |||
313 | .dev = { | 313 | .dev = { |
314 | .platform_data = &fsi_info, | 314 | .platform_data = &fsi_info, |
315 | }, | 315 | }, |
316 | .archdata = { | ||
317 | .hwblk_id = HWBLK_SPU, /* FSI needs SPU hwblk */ | ||
318 | }, | ||
316 | }; | 319 | }; |
317 | 320 | ||
318 | /* KEYSC in SoC (Needs SW33-2 set to ON) */ | 321 | /* KEYSC in SoC (Needs SW33-2 set to ON) */ |
diff --git a/arch/sh/include/asm/sh_eth.h b/arch/sh/include/asm/sh_eth.h index acf99700deed..f739061e2ee4 100644 --- a/arch/sh/include/asm/sh_eth.h +++ b/arch/sh/include/asm/sh_eth.h | |||
@@ -7,6 +7,7 @@ struct sh_eth_plat_data { | |||
7 | int phy; | 7 | int phy; |
8 | int edmac_endian; | 8 | int edmac_endian; |
9 | 9 | ||
10 | unsigned char mac_addr[6]; | ||
10 | unsigned no_ether_link:1; | 11 | unsigned no_ether_link:1; |
11 | unsigned ether_link_active_low:1; | 12 | unsigned ether_link_active_low:1; |
12 | }; | 13 | }; |
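The sh_eth_plat_data change above lets a board pass a fixed MAC address to the sh_eth driver instead of relying on whatever the bootloader left in the hardware registers. A hedged board-side sketch; the PHY address and MAC bytes are placeholders, and the endian constant is assumed from the sh_eth header:

	#include <asm/sh_eth.h>

	static struct sh_eth_plat_data sh_eth_pdata = {
		.phy		= 0x1f,			/* placeholder PHY address */
		.edmac_endian	= EDMAC_LITTLE_ENDIAN,	/* assumed constant */
		.mac_addr	= { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	};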
diff --git a/arch/sh/include/mach-common/mach/hp6xx.h b/arch/sh/include/mach-common/mach/hp6xx.h index 0d4165a32dcd..bcc301ac12f4 100644 --- a/arch/sh/include/mach-common/mach/hp6xx.h +++ b/arch/sh/include/mach-common/mach/hp6xx.h | |||
@@ -29,6 +29,9 @@ | |||
29 | 29 | ||
30 | #define PKDR_LED_GREEN 0x10 | 30 | #define PKDR_LED_GREEN 0x10 |
31 | 31 | ||
32 | /* HP Palmtop 620lx/660lx speaker on/off */ | ||
33 | #define PKDR_SPEAKER 0x20 | ||
34 | |||
32 | #define SCPDR_TS_SCAN_ENABLE 0x20 | 35 | #define SCPDR_TS_SCAN_ENABLE 0x20 |
33 | #define SCPDR_TS_SCAN_Y 0x02 | 36 | #define SCPDR_TS_SCAN_Y 0x02 |
34 | #define SCPDR_TS_SCAN_X 0x01 | 37 | #define SCPDR_TS_SCAN_X 0x01 |
@@ -42,6 +45,7 @@ | |||
42 | #define ADC_CHANNEL_BACKUP 4 | 45 | #define ADC_CHANNEL_BACKUP 4 |
43 | #define ADC_CHANNEL_CHARGE 5 | 46 | #define ADC_CHANNEL_CHARGE 5 |
44 | 47 | ||
48 | /* HP Jornada 680/690 speaker on/off */ | ||
45 | #define HD64461_GPADR_SPEAKER 0x01 | 49 | #define HD64461_GPADR_SPEAKER 0x01 |
46 | #define HD64461_GPADR_PCMCIA0 (0x02|0x08) | 50 | #define HD64461_GPADR_PCMCIA0 (0x02|0x08) |
47 | 51 | ||
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S index 5bfde6c77498..07d2aaea9ae8 100644 --- a/arch/sh/kernel/syscalls_64.S +++ b/arch/sh/kernel/syscalls_64.S | |||
@@ -391,3 +391,4 @@ sys_call_table: | |||
391 | .long sys_pwritev | 391 | .long sys_pwritev |
392 | .long sys_rt_tgsigqueueinfo | 392 | .long sys_rt_tgsigqueueinfo |
393 | .long sys_perf_event_open | 393 | .long sys_perf_event_open |
394 | .long sys_recvmmsg /* 365 */ | ||
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c index 267e5ebbb475..75c0cbe2eda0 100644 --- a/arch/sh/kernel/traps_64.c +++ b/arch/sh/kernel/traps_64.c | |||
@@ -877,44 +877,39 @@ static int misaligned_fixup(struct pt_regs *regs) | |||
877 | 877 | ||
878 | static ctl_table unaligned_table[] = { | 878 | static ctl_table unaligned_table[] = { |
879 | { | 879 | { |
880 | .ctl_name = CTL_UNNUMBERED, | ||
881 | .procname = "kernel_reports", | 880 | .procname = "kernel_reports", |
882 | .data = &kernel_mode_unaligned_fixup_count, | 881 | .data = &kernel_mode_unaligned_fixup_count, |
883 | .maxlen = sizeof(int), | 882 | .maxlen = sizeof(int), |
884 | .mode = 0644, | 883 | .mode = 0644, |
885 | .proc_handler = &proc_dointvec | 884 | .proc_handler = proc_dointvec |
886 | }, | 885 | }, |
887 | { | 886 | { |
888 | .ctl_name = CTL_UNNUMBERED, | ||
889 | .procname = "user_reports", | 887 | .procname = "user_reports", |
890 | .data = &user_mode_unaligned_fixup_count, | 888 | .data = &user_mode_unaligned_fixup_count, |
891 | .maxlen = sizeof(int), | 889 | .maxlen = sizeof(int), |
892 | .mode = 0644, | 890 | .mode = 0644, |
893 | .proc_handler = &proc_dointvec | 891 | .proc_handler = proc_dointvec |
894 | }, | 892 | }, |
895 | { | 893 | { |
896 | .ctl_name = CTL_UNNUMBERED, | ||
897 | .procname = "user_enable", | 894 | .procname = "user_enable", |
898 | .data = &user_mode_unaligned_fixup_enable, | 895 | .data = &user_mode_unaligned_fixup_enable, |
899 | .maxlen = sizeof(int), | 896 | .maxlen = sizeof(int), |
900 | .mode = 0644, | 897 | .mode = 0644, |
901 | .proc_handler = &proc_dointvec}, | 898 | .proc_handler = proc_dointvec}, |
902 | {} | 899 | {} |
903 | }; | 900 | }; |
904 | 901 | ||
905 | static ctl_table unaligned_root[] = { | 902 | static ctl_table unaligned_root[] = { |
906 | { | 903 | { |
907 | .ctl_name = CTL_UNNUMBERED, | ||
908 | .procname = "unaligned_fixup", | 904 | .procname = "unaligned_fixup", |
909 | .mode = 0555, | 905 | .mode = 0555, |
910 | unaligned_table | 906 | .child = unaligned_table |
911 | }, | 907 | }, |
912 | {} | 908 | {} |
913 | }; | 909 | }; |
914 | 910 | ||
915 | static ctl_table sh64_root[] = { | 911 | static ctl_table sh64_root[] = { |
916 | { | 912 | { |
917 | .ctl_name = CTL_UNNUMBERED, | ||
918 | .procname = "sh64", | 913 | .procname = "sh64", |
919 | .mode = 0555, | 914 | .mode = 0555, |
920 | .child = unaligned_root | 915 | .child = unaligned_root |
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 05ef5380a687..33ac1a9ac881 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -221,6 +221,13 @@ config SPARC64_SMP | |||
221 | default y | 221 | default y |
222 | depends on SPARC64 && SMP | 222 | depends on SPARC64 && SMP |
223 | 223 | ||
224 | config EARLYFB | ||
225 | bool "Support for early boot text console" | ||
226 | default y | ||
227 | depends on SPARC64 | ||
228 | help | ||
229 | Say Y here to enable a faster early framebuffer boot console. | ||
230 | |||
224 | choice | 231 | choice |
225 | prompt "Kernel page size" if SPARC64 | 232 | prompt "Kernel page size" if SPARC64 |
226 | default SPARC64_PAGE_SIZE_8KB | 233 | default SPARC64_PAGE_SIZE_8KB |
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile index dfe272d14465..113225b241e0 100644 --- a/arch/sparc/Makefile +++ b/arch/sparc/Makefile | |||
@@ -27,6 +27,7 @@ AS := $(AS) -32 | |||
27 | LDFLAGS := -m elf32_sparc | 27 | LDFLAGS := -m elf32_sparc |
28 | CHECKFLAGS += -D__sparc__ | 28 | CHECKFLAGS += -D__sparc__ |
29 | export BITS := 32 | 29 | export BITS := 32 |
30 | UTS_MACHINE := sparc | ||
30 | 31 | ||
31 | #KBUILD_CFLAGS += -g -pipe -fcall-used-g5 -fcall-used-g7 | 32 | #KBUILD_CFLAGS += -g -pipe -fcall-used-g5 -fcall-used-g7 |
32 | KBUILD_CFLAGS += -m32 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7 | 33 | KBUILD_CFLAGS += -m32 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7 |
@@ -46,6 +47,7 @@ CHECKFLAGS += -D__sparc__ -D__sparc_v9__ -D__arch64__ -m64 | |||
46 | 47 | ||
47 | LDFLAGS := -m elf64_sparc | 48 | LDFLAGS := -m elf64_sparc |
48 | export BITS := 64 | 49 | export BITS := 64 |
50 | UTS_MACHINE := sparc64 | ||
49 | 51 | ||
50 | KBUILD_CFLAGS += -m64 -pipe -mno-fpu -mcpu=ultrasparc -mcmodel=medlow \ | 52 | KBUILD_CFLAGS += -m64 -pipe -mno-fpu -mcpu=ultrasparc -mcmodel=medlow \ |
51 | -ffixed-g4 -ffixed-g5 -fcall-used-g7 -Wno-sign-compare \ | 53 | -ffixed-g4 -ffixed-g5 -fcall-used-g7 -Wno-sign-compare \ |
diff --git a/arch/sparc/include/asm/btext.h b/arch/sparc/include/asm/btext.h new file mode 100644 index 000000000000..9b2bc6b6ed0a --- /dev/null +++ b/arch/sparc/include/asm/btext.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _SPARC_BTEXT_H | ||
2 | #define _SPARC_BTEXT_H | ||
3 | |||
4 | extern int btext_find_display(void); | ||
5 | |||
6 | #endif /* _SPARC_BTEXT_H */ | ||
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h index 28a42b73f64f..3ea5964c43b4 100644 --- a/arch/sparc/include/asm/leon.h +++ b/arch/sparc/include/asm/leon.h | |||
@@ -148,7 +148,7 @@ static inline unsigned long leon_load_reg(unsigned long paddr) | |||
148 | return retval; | 148 | return retval; |
149 | } | 149 | } |
150 | 150 | ||
151 | extern inline void leon_srmmu_disabletlb(void) | 151 | static inline void leon_srmmu_disabletlb(void) |
152 | { | 152 | { |
153 | unsigned int retval; | 153 | unsigned int retval; |
154 | __asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0), | 154 | __asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0), |
@@ -158,7 +158,7 @@ extern inline void leon_srmmu_disabletlb(void) | |||
158 | "i"(ASI_LEON_MMUREGS) : "memory"); | 158 | "i"(ASI_LEON_MMUREGS) : "memory"); |
159 | } | 159 | } |
160 | 160 | ||
161 | extern inline void leon_srmmu_enabletlb(void) | 161 | static inline void leon_srmmu_enabletlb(void) |
162 | { | 162 | { |
163 | unsigned int retval; | 163 | unsigned int retval; |
164 | __asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0), | 164 | __asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0), |
@@ -190,7 +190,7 @@ extern void leon_init_IRQ(void); | |||
190 | 190 | ||
191 | extern unsigned long last_valid_pfn; | 191 | extern unsigned long last_valid_pfn; |
192 | 192 | ||
193 | extern inline unsigned long sparc_leon3_get_dcachecfg(void) | 193 | static inline unsigned long sparc_leon3_get_dcachecfg(void) |
194 | { | 194 | { |
195 | unsigned int retval; | 195 | unsigned int retval; |
196 | __asm__ __volatile__("lda [%1] %2, %0\n\t" : | 196 | __asm__ __volatile__("lda [%1] %2, %0\n\t" : |
@@ -201,7 +201,7 @@ extern inline unsigned long sparc_leon3_get_dcachecfg(void) | |||
201 | } | 201 | } |
202 | 202 | ||
203 | /* enable snooping */ | 203 | /* enable snooping */ |
204 | extern inline void sparc_leon3_enable_snooping(void) | 204 | static inline void sparc_leon3_enable_snooping(void) |
205 | { | 205 | { |
206 | __asm__ __volatile__ ("lda [%%g0] 2, %%l1\n\t" | 206 | __asm__ __volatile__ ("lda [%%g0] 2, %%l1\n\t" |
207 | "set 0x800000, %%l2\n\t" | 207 | "set 0x800000, %%l2\n\t" |
@@ -209,7 +209,14 @@ extern inline void sparc_leon3_enable_snooping(void) | |||
209 | "sta %%l2, [%%g0] 2\n\t" : : : "l1", "l2"); | 209 | "sta %%l2, [%%g0] 2\n\t" : : : "l1", "l2"); |
210 | }; | 210 | }; |
211 | 211 | ||
212 | extern inline void sparc_leon3_disable_cache(void) | 212 | static inline int sparc_leon3_snooping_enabled(void) |
213 | { | ||
214 | u32 cctrl; | ||
215 | __asm__ __volatile__("lda [%%g0] 2, %0\n\t" : "=r"(cctrl)); | ||
216 | return (cctrl >> 23) & 1; | ||
217 | }; | ||
218 | |||
219 | static inline void sparc_leon3_disable_cache(void) | ||
213 | { | 220 | { |
214 | __asm__ __volatile__ ("lda [%%g0] 2, %%l1\n\t" | 221 | __asm__ __volatile__ ("lda [%%g0] 2, %%l1\n\t" |
215 | "set 0x00000f, %%l2\n\t" | 222 | "set 0x00000f, %%l2\n\t" |
@@ -340,6 +347,30 @@ extern int leon_flush_needed(void); | |||
340 | extern void leon_switch_mm(void); | 347 | extern void leon_switch_mm(void); |
341 | extern int srmmu_swprobe_trace; | 348 | extern int srmmu_swprobe_trace; |
342 | 349 | ||
350 | #ifdef CONFIG_SMP | ||
351 | extern int leon_smp_nrcpus(void); | ||
352 | extern void leon_clear_profile_irq(int cpu); | ||
353 | extern void leon_smp_done(void); | ||
354 | extern void leon_boot_cpus(void); | ||
355 | extern int leon_boot_one_cpu(int i); | ||
356 | void leon_init_smp(void); | ||
357 | extern void cpu_probe(void); | ||
358 | extern void cpu_idle(void); | ||
359 | extern void init_IRQ(void); | ||
360 | extern void cpu_panic(void); | ||
361 | extern int __leon_processor_id(void); | ||
362 | void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu); | ||
363 | |||
364 | extern unsigned int real_irq_entry[], smpleon_ticker[]; | ||
365 | extern unsigned int patchme_maybe_smp_msg[]; | ||
366 | extern unsigned long trapbase_cpu1[]; | ||
367 | extern unsigned long trapbase_cpu2[]; | ||
368 | extern unsigned long trapbase_cpu3[]; | ||
369 | extern unsigned int t_nmi[], linux_trap_ipi15_leon[]; | ||
370 | extern unsigned int linux_trap_ipi15_sun4m[]; | ||
371 | |||
372 | #endif /* CONFIG_SMP */ | ||
373 | |||
343 | #endif /* __KERNEL__ */ | 374 | #endif /* __KERNEL__ */ |
344 | 375 | ||
345 | #endif /* __ASSEMBLY__ */ | 376 | #endif /* __ASSEMBLY__ */ |
@@ -356,6 +387,10 @@ extern int srmmu_swprobe_trace; | |||
356 | #define leon_switch_mm() do {} while (0) | 387 | #define leon_switch_mm() do {} while (0) |
357 | #define leon_init_IRQ() do {} while (0) | 388 | #define leon_init_IRQ() do {} while (0) |
358 | #define init_leon() do {} while (0) | 389 | #define init_leon() do {} while (0) |
390 | #define leon_smp_done() do {} while (0) | ||
391 | #define leon_boot_cpus() do {} while (0) | ||
392 | #define leon_boot_one_cpu(i) 1 | ||
393 | #define leon_init_smp() do {} while (0) | ||
359 | 394 | ||
360 | #endif /* !defined(CONFIG_SPARC_LEON) */ | 395 | #endif /* !defined(CONFIG_SPARC_LEON) */ |
361 | 396 | ||
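The leon.h hunk above converts the 'extern inline' helpers to 'static inline' and adds sparc_leon3_snooping_enabled(), which tests bit 23 of the LEON3 cache control register. An illustrative use of the new helper; the messages and function name are placeholders:

	static void __init leon_report_snooping(void)
	{
		if (sparc_leon3_snooping_enabled())
			printk(KERN_INFO "LEON: D-cache snooping enabled, "
			       "DMA buffers are coherent\n");
		else
			printk(KERN_INFO "LEON: no snooping, software cache "
			       "flushes are needed around DMA\n");
	}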
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h index 82a190d7efc1..f845828ca4c6 100644 --- a/arch/sparc/include/asm/prom.h +++ b/arch/sparc/include/asm/prom.h | |||
@@ -1,3 +1,4 @@ | |||
1 | #include <linux/of.h> /* linux/of.h gets to determine #include ordering */ | ||
1 | #ifndef _SPARC_PROM_H | 2 | #ifndef _SPARC_PROM_H |
2 | #define _SPARC_PROM_H | 3 | #define _SPARC_PROM_H |
3 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
@@ -28,50 +29,11 @@ | |||
28 | #define of_prop_cmp(s1, s2) strcasecmp((s1), (s2)) | 29 | #define of_prop_cmp(s1, s2) strcasecmp((s1), (s2)) |
29 | #define of_node_cmp(s1, s2) strcmp((s1), (s2)) | 30 | #define of_node_cmp(s1, s2) strcmp((s1), (s2)) |
30 | 31 | ||
31 | typedef u32 phandle; | ||
32 | typedef u32 ihandle; | ||
33 | |||
34 | struct property { | ||
35 | char *name; | ||
36 | int length; | ||
37 | void *value; | ||
38 | struct property *next; | ||
39 | unsigned long _flags; | ||
40 | unsigned int unique_id; | ||
41 | }; | ||
42 | |||
43 | struct of_irq_controller; | ||
44 | struct device_node { | ||
45 | const char *name; | ||
46 | const char *type; | ||
47 | phandle node; | ||
48 | char *path_component_name; | ||
49 | char *full_name; | ||
50 | |||
51 | struct property *properties; | ||
52 | struct property *deadprops; /* removed properties */ | ||
53 | struct device_node *parent; | ||
54 | struct device_node *child; | ||
55 | struct device_node *sibling; | ||
56 | struct device_node *next; /* next device of same type */ | ||
57 | struct device_node *allnext; /* next in list of all nodes */ | ||
58 | struct proc_dir_entry *pde; /* this node's proc directory */ | ||
59 | struct kref kref; | ||
60 | unsigned long _flags; | ||
61 | void *data; | ||
62 | unsigned int unique_id; | ||
63 | |||
64 | struct of_irq_controller *irq_trans; | ||
65 | }; | ||
66 | |||
67 | struct of_irq_controller { | 32 | struct of_irq_controller { |
68 | unsigned int (*irq_build)(struct device_node *, unsigned int, void *); | 33 | unsigned int (*irq_build)(struct device_node *, unsigned int, void *); |
69 | void *data; | 34 | void *data; |
70 | }; | 35 | }; |
71 | 36 | ||
72 | #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) | ||
73 | #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) | ||
74 | |||
75 | extern struct device_node *of_find_node_by_cpuid(int cpuid); | 37 | extern struct device_node *of_find_node_by_cpuid(int cpuid); |
76 | extern int of_set_property(struct device_node *node, const char *name, void *val, int len); | 38 | extern int of_set_property(struct device_node *node, const char *name, void *val, int len); |
77 | extern struct mutex of_set_property_mutex; | 39 | extern struct mutex of_set_property_mutex; |
@@ -89,15 +51,6 @@ extern void prom_build_devicetree(void); | |||
89 | extern void of_populate_present_mask(void); | 51 | extern void of_populate_present_mask(void); |
90 | extern void of_fill_in_cpu_data(void); | 52 | extern void of_fill_in_cpu_data(void); |
91 | 53 | ||
92 | /* Dummy ref counting routines - to be implemented later */ | ||
93 | static inline struct device_node *of_node_get(struct device_node *node) | ||
94 | { | ||
95 | return node; | ||
96 | } | ||
97 | static inline void of_node_put(struct device_node *node) | ||
98 | { | ||
99 | } | ||
100 | |||
101 | /* These routines are here to provide compatibility with how powerpc | 54 | /* These routines are here to provide compatibility with how powerpc |
102 | * handles IRQ mapping for OF device nodes. We precompute and permanently | 55 | * handles IRQ mapping for OF device nodes. We precompute and permanently |
103 | * register them in the of_device objects, whereas powerpc computes them | 56 | * register them in the of_device objects, whereas powerpc computes them |
@@ -108,12 +61,6 @@ static inline void irq_dispose_mapping(unsigned int virq) | |||
108 | { | 61 | { |
109 | } | 62 | } |
110 | 63 | ||
111 | /* | ||
112 | * NB: This is here while we transition from using asm/prom.h | ||
113 | * to linux/of.h | ||
114 | */ | ||
115 | #include <linux/of.h> | ||
116 | |||
117 | extern struct device_node *of_console_device; | 64 | extern struct device_node *of_console_device; |
118 | extern char *of_console_path; | 65 | extern char *of_console_path; |
119 | extern char *of_console_options; | 66 | extern char *of_console_options; |
diff --git a/arch/sparc/include/asm/rwsem.h b/arch/sparc/include/asm/rwsem.h index 1dc129ac2feb..6e5621006f85 100644 --- a/arch/sparc/include/asm/rwsem.h +++ b/arch/sparc/include/asm/rwsem.h | |||
@@ -35,8 +35,8 @@ struct rw_semaphore { | |||
35 | #endif | 35 | #endif |
36 | 36 | ||
37 | #define __RWSEM_INITIALIZER(name) \ | 37 | #define __RWSEM_INITIALIZER(name) \ |
38 | { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \ | 38 | { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ |
39 | __RWSEM_DEP_MAP_INIT(name) } | 39 | LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } |
40 | 40 | ||
41 | #define DECLARE_RWSEM(name) \ | 41 | #define DECLARE_RWSEM(name) \ |
42 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) | 42 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) |
diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h index 58101dc70493..841905c10215 100644 --- a/arch/sparc/include/asm/smp_32.h +++ b/arch/sparc/include/asm/smp_32.h | |||
@@ -106,6 +106,15 @@ static inline int hard_smp4d_processor_id(void) | |||
106 | return cpuid; | 106 | return cpuid; |
107 | } | 107 | } |
108 | 108 | ||
109 | extern inline int hard_smpleon_processor_id(void) | ||
110 | { | ||
111 | int cpuid; | ||
112 | __asm__ __volatile__("rd %%asr17,%0\n\t" | ||
113 | "srl %0,28,%0" : | ||
114 | "=&r" (cpuid) : ); | ||
115 | return cpuid; | ||
116 | } | ||
117 | |||
109 | #ifndef MODULE | 118 | #ifndef MODULE |
110 | static inline int hard_smp_processor_id(void) | 119 | static inline int hard_smp_processor_id(void) |
111 | { | 120 | { |
diff --git a/arch/sparc/include/asm/socket.h b/arch/sparc/include/asm/socket.h index 3a5ae3d12088..9d3fefcff2f5 100644 --- a/arch/sparc/include/asm/socket.h +++ b/arch/sparc/include/asm/socket.h | |||
@@ -56,6 +56,8 @@ | |||
56 | #define SO_TIMESTAMPING 0x0023 | 56 | #define SO_TIMESTAMPING 0x0023 |
57 | #define SCM_TIMESTAMPING SO_TIMESTAMPING | 57 | #define SCM_TIMESTAMPING SO_TIMESTAMPING |
58 | 58 | ||
59 | #define SO_RXQ_OVFL 0x0024 | ||
60 | |||
59 | /* Security levels - as per NRL IPv6 - don't actually do anything */ | 61 | /* Security levels - as per NRL IPv6 - don't actually do anything */ |
60 | #define SO_SECURITY_AUTHENTICATION 0x5001 | 62 | #define SO_SECURITY_AUTHENTICATION 0x5001 |
61 | #define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002 | 63 | #define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002 |
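SO_RXQ_OVFL is added with a per-architecture value (40 on alpha and s390 above, 0x0024 on sparc). A hedged userspace sketch of turning it on; once enabled, the receive-queue drop count is delivered as a SOL_SOCKET/SO_RXQ_OVFL control message alongside each datagram. On a libc that predates this patch the constant would have to come from the kernel headers shown above:

	#include <sys/socket.h>

	static int enable_drop_count(int fd)
	{
		int one = 1;

		/* Ask the kernel to attach the number of dropped packets as
		 * ancillary data on every recvmsg(). */
		return setsockopt(fd, SOL_SOCKET, SO_RXQ_OVFL, &one, sizeof(one));
	}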
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h index 42f2316c3eaa..d8d25bd97121 100644 --- a/arch/sparc/include/asm/unistd.h +++ b/arch/sparc/include/asm/unistd.h | |||
@@ -396,8 +396,9 @@ | |||
396 | #define __NR_pwritev 325 | 396 | #define __NR_pwritev 325 |
397 | #define __NR_rt_tgsigqueueinfo 326 | 397 | #define __NR_rt_tgsigqueueinfo 326 |
398 | #define __NR_perf_event_open 327 | 398 | #define __NR_perf_event_open 327 |
399 | #define __NR_recvmmsg 328 | ||
399 | 400 | ||
400 | #define NR_SYSCALLS 328 | 401 | #define NR_SYSCALLS 329 |
401 | 402 | ||
402 | #ifdef __32bit_syscall_numbers__ | 403 | #ifdef __32bit_syscall_numbers__ |
403 | /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, | 404 | /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, |
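The new recvmmsg() system call is wired up here for sparc (328), as it was for alpha (479) and sh64 (365) earlier in the patch. A hedged userspace sketch of batching datagram reception with it; the buffer sizes and batch count are arbitrary, and an older libc without the wrapper would need syscall(__NR_recvmmsg, ...) instead:

	#define _GNU_SOURCE
	#include <sys/socket.h>
	#include <string.h>

	#define VLEN 8

	static int recv_batch(int fd)
	{
		struct mmsghdr msgs[VLEN];
		struct iovec iovecs[VLEN];
		char bufs[VLEN][1500];
		int i;

		memset(msgs, 0, sizeof(msgs));
		for (i = 0; i < VLEN; i++) {
			iovecs[i].iov_base = bufs[i];
			iovecs[i].iov_len = sizeof(bufs[i]);
			msgs[i].msg_hdr.msg_iov = &iovecs[i];
			msgs[i].msg_hdr.msg_iovlen = 1;
		}

		/* Returns the number of messages received, up to VLEN. */
		return recvmmsg(fd, msgs, VLEN, 0, NULL);
	}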
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 5b47fab9966e..c6316142db4e 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile | |||
@@ -72,7 +72,7 @@ obj-y += dma.o | |||
72 | obj-$(CONFIG_SPARC32_PCI) += pcic.o | 72 | obj-$(CONFIG_SPARC32_PCI) += pcic.o |
73 | 73 | ||
74 | obj-$(CONFIG_SMP) += trampoline_$(BITS).o smp_$(BITS).o | 74 | obj-$(CONFIG_SMP) += trampoline_$(BITS).o smp_$(BITS).o |
75 | obj-$(CONFIG_SPARC32_SMP) += sun4m_smp.o sun4d_smp.o | 75 | obj-$(CONFIG_SPARC32_SMP) += sun4m_smp.o sun4d_smp.o leon_smp.o |
76 | obj-$(CONFIG_SPARC64_SMP) += hvtramp.o | 76 | obj-$(CONFIG_SPARC64_SMP) += hvtramp.o |
77 | 77 | ||
78 | obj-y += auxio_$(BITS).o | 78 | obj-y += auxio_$(BITS).o |
@@ -87,6 +87,7 @@ obj-$(CONFIG_KGDB) += kgdb_$(BITS).o | |||
87 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | 87 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o |
88 | CFLAGS_REMOVE_ftrace.o := -pg | 88 | CFLAGS_REMOVE_ftrace.o := -pg |
89 | 89 | ||
90 | obj-$(CONFIG_EARLYFB) += btext.o | ||
90 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 91 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
91 | # sparc64 PCI | 92 | # sparc64 PCI |
92 | obj-$(CONFIG_SPARC64_PCI) += pci.o pci_common.o psycho_common.o | 93 | obj-$(CONFIG_SPARC64_PCI) += pci.o pci_common.o psycho_common.o |
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c index 9c115823c4b5..71ec90b9e316 100644 --- a/arch/sparc/kernel/apc.c +++ b/arch/sparc/kernel/apc.c | |||
@@ -10,7 +10,6 @@ | |||
10 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/miscdevice.h> | 12 | #include <linux/miscdevice.h> |
13 | #include <linux/smp_lock.h> | ||
14 | #include <linux/pm.h> | 13 | #include <linux/pm.h> |
15 | #include <linux/of.h> | 14 | #include <linux/of.h> |
16 | #include <linux/of_device.h> | 15 | #include <linux/of_device.h> |
@@ -76,7 +75,6 @@ static inline void apc_free(struct of_device *op) | |||
76 | 75 | ||
77 | static int apc_open(struct inode *inode, struct file *f) | 76 | static int apc_open(struct inode *inode, struct file *f) |
78 | { | 77 | { |
79 | cycle_kernel_lock(); | ||
80 | return 0; | 78 | return 0; |
81 | } | 79 | } |
82 | 80 | ||
@@ -87,61 +85,46 @@ static int apc_release(struct inode *inode, struct file *f) | |||
87 | 85 | ||
88 | static long apc_ioctl(struct file *f, unsigned int cmd, unsigned long __arg) | 86 | static long apc_ioctl(struct file *f, unsigned int cmd, unsigned long __arg) |
89 | { | 87 | { |
90 | __u8 inarg, __user *arg; | 88 | __u8 inarg, __user *arg = (__u8 __user *) __arg; |
91 | |||
92 | arg = (__u8 __user *) __arg; | ||
93 | |||
94 | lock_kernel(); | ||
95 | 89 | ||
96 | switch (cmd) { | 90 | switch (cmd) { |
97 | case APCIOCGFANCTL: | 91 | case APCIOCGFANCTL: |
98 | if (put_user(apc_readb(APC_FANCTL_REG) & APC_REGMASK, arg)) { | 92 | if (put_user(apc_readb(APC_FANCTL_REG) & APC_REGMASK, arg)) |
99 | unlock_kernel(); | ||
100 | return -EFAULT; | 93 | return -EFAULT; |
101 | } | ||
102 | break; | 94 | break; |
103 | 95 | ||
104 | case APCIOCGCPWR: | 96 | case APCIOCGCPWR: |
105 | if (put_user(apc_readb(APC_CPOWER_REG) & APC_REGMASK, arg)) { | 97 | if (put_user(apc_readb(APC_CPOWER_REG) & APC_REGMASK, arg)) |
106 | unlock_kernel(); | ||
107 | return -EFAULT; | 98 | return -EFAULT; |
108 | } | ||
109 | break; | 99 | break; |
110 | 100 | ||
111 | case APCIOCGBPORT: | 101 | case APCIOCGBPORT: |
112 | if (put_user(apc_readb(APC_BPORT_REG) & APC_BPMASK, arg)) { | 102 | if (put_user(apc_readb(APC_BPORT_REG) & APC_BPMASK, arg)) |
113 | unlock_kernel(); | ||
114 | return -EFAULT; | 103 | return -EFAULT; |
115 | } | ||
116 | break; | 104 | break; |
117 | 105 | ||
118 | case APCIOCSFANCTL: | 106 | case APCIOCSFANCTL: |
119 | if (get_user(inarg, arg)) { | 107 | if (get_user(inarg, arg)) |
120 | unlock_kernel(); | ||
121 | return -EFAULT; | 108 | return -EFAULT; |
122 | } | ||
123 | apc_writeb(inarg & APC_REGMASK, APC_FANCTL_REG); | 109 | apc_writeb(inarg & APC_REGMASK, APC_FANCTL_REG); |
124 | break; | 110 | break; |
111 | |||
125 | case APCIOCSCPWR: | 112 | case APCIOCSCPWR: |
126 | if (get_user(inarg, arg)) { | 113 | if (get_user(inarg, arg)) |
127 | unlock_kernel(); | ||
128 | return -EFAULT; | 114 | return -EFAULT; |
129 | } | ||
130 | apc_writeb(inarg & APC_REGMASK, APC_CPOWER_REG); | 115 | apc_writeb(inarg & APC_REGMASK, APC_CPOWER_REG); |
131 | break; | 116 | break; |
117 | |||
132 | case APCIOCSBPORT: | 118 | case APCIOCSBPORT: |
133 | if (get_user(inarg, arg)) { | 119 | if (get_user(inarg, arg)) |
134 | unlock_kernel(); | ||
135 | return -EFAULT; | 120 | return -EFAULT; |
136 | } | ||
137 | apc_writeb(inarg & APC_BPMASK, APC_BPORT_REG); | 121 | apc_writeb(inarg & APC_BPMASK, APC_BPORT_REG); |
138 | break; | 122 | break; |
123 | |||
139 | default: | 124 | default: |
140 | unlock_kernel(); | ||
141 | return -EINVAL; | 125 | return -EINVAL; |
142 | }; | 126 | }; |
143 | 127 | ||
144 | unlock_kernel(); | ||
145 | return 0; | 128 | return 0; |
146 | } | 129 | } |
147 | 130 | ||
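The apc.c hunk above drops lock_kernel()/unlock_kernel() (and cycle_kernel_lock() in open) from the ioctl path: each command is a single get_user()/put_user() on one byte, so there is nothing for the big kernel lock to serialize. A condensed sketch of the resulting unlocked_ioctl pattern; the command names and register helpers are hypothetical:

	static long example_ioctl(struct file *f, unsigned int cmd, unsigned long __arg)
	{
		__u8 val, __user *arg = (__u8 __user *) __arg;

		switch (cmd) {
		case EXAMPLE_IOCGET:			/* hypothetical command */
			if (put_user(example_read_reg(), arg))
				return -EFAULT;
			break;
		case EXAMPLE_IOCSET:			/* hypothetical command */
			if (get_user(val, arg))
				return -EFAULT;
			example_write_reg(val);
			break;
		default:
			return -EINVAL;
		}
		return 0;
	}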
diff --git a/arch/sparc/kernel/auxio_32.c b/arch/sparc/kernel/auxio_32.c index 45c41232fc4c..ee8d214cae1e 100644 --- a/arch/sparc/kernel/auxio_32.c +++ b/arch/sparc/kernel/auxio_32.c | |||
@@ -28,6 +28,7 @@ void __init auxio_probe(void) | |||
28 | struct resource r; | 28 | struct resource r; |
29 | 29 | ||
30 | switch (sparc_cpu_model) { | 30 | switch (sparc_cpu_model) { |
31 | case sparc_leon: | ||
31 | case sun4d: | 32 | case sun4d: |
32 | case sun4: | 33 | case sun4: |
33 | return; | 34 | return; |
diff --git a/arch/sparc/kernel/btext.c b/arch/sparc/kernel/btext.c new file mode 100644 index 000000000000..8cc2d56ffe9a --- /dev/null +++ b/arch/sparc/kernel/btext.c | |||
@@ -0,0 +1,673 @@ | |||
1 | /* | ||
2 | * Procedures for drawing on the screen early on in the boot process. | ||
3 | * | ||
4 | * Benjamin Herrenschmidt <benh@kernel.crashing.org> | ||
5 | */ | ||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/string.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/console.h> | ||
11 | |||
12 | #include <asm/btext.h> | ||
13 | #include <asm/oplib.h> | ||
14 | #include <asm/io.h> | ||
15 | |||
16 | #define NO_SCROLL | ||
17 | |||
18 | #ifndef NO_SCROLL | ||
19 | static void scrollscreen(void); | ||
20 | #endif | ||
21 | |||
22 | static void draw_byte(unsigned char c, long locX, long locY); | ||
23 | static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb); | ||
24 | static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb); | ||
25 | static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb); | ||
26 | |||
27 | #define __force_data __attribute__((__section__(".data"))) | ||
28 | |||
29 | static int g_loc_X __force_data; | ||
30 | static int g_loc_Y __force_data; | ||
31 | static int g_max_loc_X __force_data; | ||
32 | static int g_max_loc_Y __force_data; | ||
33 | |||
34 | static int dispDeviceRowBytes __force_data; | ||
35 | static int dispDeviceDepth __force_data; | ||
36 | static int dispDeviceRect[4] __force_data; | ||
37 | static unsigned char *dispDeviceBase __force_data; | ||
38 | |||
39 | #define cmapsz (16*256) | ||
40 | |||
41 | static unsigned char vga_font[cmapsz]; | ||
42 | |||
43 | static int __init btext_initialize(unsigned int node) | ||
44 | { | ||
45 | unsigned int width, height, depth, pitch; | ||
46 | unsigned long address = 0; | ||
47 | u32 prop; | ||
48 | |||
49 | if (prom_getproperty(node, "width", (char *)&width, 4) < 0) | ||
50 | return -EINVAL; | ||
51 | if (prom_getproperty(node, "height", (char *)&height, 4) < 0) | ||
52 | return -EINVAL; | ||
53 | if (prom_getproperty(node, "depth", (char *)&depth, 4) < 0) | ||
54 | return -EINVAL; | ||
55 | pitch = width * ((depth + 7) / 8); | ||
56 | |||
57 | if (prom_getproperty(node, "linebytes", (char *)&prop, 4) >= 0 && | ||
58 | prop != 0xffffffffu) | ||
59 | pitch = prop; | ||
60 | |||
61 | if (pitch == 1) | ||
62 | pitch = 0x1000; | ||
63 | |||
64 | if (prom_getproperty(node, "address", (char *)&prop, 4) >= 0) | ||
65 | address = prop; | ||
66 | |||
67 | /* FIXME: Add support for PCI reg properties. Right now, only | ||
68 | * reliable on macs | ||
69 | */ | ||
70 | if (address == 0) | ||
71 | return -EINVAL; | ||
72 | |||
73 | g_loc_X = 0; | ||
74 | g_loc_Y = 0; | ||
75 | g_max_loc_X = width / 8; | ||
76 | g_max_loc_Y = height / 16; | ||
77 | dispDeviceBase = (unsigned char *)address; | ||
78 | dispDeviceRowBytes = pitch; | ||
79 | dispDeviceDepth = depth == 15 ? 16 : depth; | ||
80 | dispDeviceRect[0] = dispDeviceRect[1] = 0; | ||
81 | dispDeviceRect[2] = width; | ||
82 | dispDeviceRect[3] = height; | ||
83 | |||
84 | return 0; | ||
85 | } | ||
86 | |||
87 | /* Calc the base address of a given point (x,y) */ | ||
88 | static unsigned char * calc_base(int x, int y) | ||
89 | { | ||
90 | unsigned char *base = dispDeviceBase; | ||
91 | |||
92 | base += (x + dispDeviceRect[0]) * (dispDeviceDepth >> 3); | ||
93 | base += (y + dispDeviceRect[1]) * dispDeviceRowBytes; | ||
94 | return base; | ||
95 | } | ||
96 | |||
97 | static void btext_clearscreen(void) | ||
98 | { | ||
99 | unsigned int *base = (unsigned int *)calc_base(0, 0); | ||
100 | unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) * | ||
101 | (dispDeviceDepth >> 3)) >> 2; | ||
102 | int i,j; | ||
103 | |||
104 | for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1]); i++) | ||
105 | { | ||
106 | unsigned int *ptr = base; | ||
107 | for(j=width; j; --j) | ||
108 | *(ptr++) = 0; | ||
109 | base += (dispDeviceRowBytes >> 2); | ||
110 | } | ||
111 | } | ||
112 | |||
113 | #ifndef NO_SCROLL | ||
114 | static void scrollscreen(void) | ||
115 | { | ||
116 | unsigned int *src = (unsigned int *)calc_base(0,16); | ||
117 | unsigned int *dst = (unsigned int *)calc_base(0,0); | ||
118 | unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) * | ||
119 | (dispDeviceDepth >> 3)) >> 2; | ||
120 | int i,j; | ||
121 | |||
122 | for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1] - 16); i++) | ||
123 | { | ||
124 | unsigned int *src_ptr = src; | ||
125 | unsigned int *dst_ptr = dst; | ||
126 | for(j=width; j; --j) | ||
127 | *(dst_ptr++) = *(src_ptr++); | ||
128 | src += (dispDeviceRowBytes >> 2); | ||
129 | dst += (dispDeviceRowBytes >> 2); | ||
130 | } | ||
131 | for (i=0; i<16; i++) | ||
132 | { | ||
133 | unsigned int *dst_ptr = dst; | ||
134 | for(j=width; j; --j) | ||
135 | *(dst_ptr++) = 0; | ||
136 | dst += (dispDeviceRowBytes >> 2); | ||
137 | } | ||
138 | } | ||
139 | #endif /* ndef NO_SCROLL */ | ||
140 | |||
141 | void btext_drawchar(char c) | ||
142 | { | ||
143 | int cline = 0; | ||
144 | #ifdef NO_SCROLL | ||
145 | int x; | ||
146 | #endif | ||
147 | switch (c) { | ||
148 | case '\b': | ||
149 | if (g_loc_X > 0) | ||
150 | --g_loc_X; | ||
151 | break; | ||
152 | case '\t': | ||
153 | g_loc_X = (g_loc_X & -8) + 8; | ||
154 | break; | ||
155 | case '\r': | ||
156 | g_loc_X = 0; | ||
157 | break; | ||
158 | case '\n': | ||
159 | g_loc_X = 0; | ||
160 | g_loc_Y++; | ||
161 | cline = 1; | ||
162 | break; | ||
163 | default: | ||
164 | draw_byte(c, g_loc_X++, g_loc_Y); | ||
165 | } | ||
166 | if (g_loc_X >= g_max_loc_X) { | ||
167 | g_loc_X = 0; | ||
168 | g_loc_Y++; | ||
169 | cline = 1; | ||
170 | } | ||
171 | #ifndef NO_SCROLL | ||
172 | while (g_loc_Y >= g_max_loc_Y) { | ||
173 | scrollscreen(); | ||
174 | g_loc_Y--; | ||
175 | } | ||
176 | #else | ||
177 | /* wrap around from bottom to top of screen so we don't | ||
178 | waste time scrolling each line. -- paulus. */ | ||
179 | if (g_loc_Y >= g_max_loc_Y) | ||
180 | g_loc_Y = 0; | ||
181 | if (cline) { | ||
182 | for (x = 0; x < g_max_loc_X; ++x) | ||
183 | draw_byte(' ', x, g_loc_Y); | ||
184 | } | ||
185 | #endif | ||
186 | } | ||
187 | |||
188 | static void btext_drawtext(const char *c, unsigned int len) | ||
189 | { | ||
190 | while (len--) | ||
191 | btext_drawchar(*c++); | ||
192 | } | ||
193 | |||
194 | static void draw_byte(unsigned char c, long locX, long locY) | ||
195 | { | ||
196 | unsigned char *base = calc_base(locX << 3, locY << 4); | ||
197 | unsigned char *font = &vga_font[((unsigned int)c) * 16]; | ||
198 | int rb = dispDeviceRowBytes; | ||
199 | |||
200 | switch(dispDeviceDepth) { | ||
201 | case 24: | ||
202 | case 32: | ||
203 | draw_byte_32(font, (unsigned int *)base, rb); | ||
204 | break; | ||
205 | case 15: | ||
206 | case 16: | ||
207 | draw_byte_16(font, (unsigned int *)base, rb); | ||
208 | break; | ||
209 | case 8: | ||
210 | draw_byte_8(font, (unsigned int *)base, rb); | ||
211 | break; | ||
212 | } | ||
213 | } | ||
214 | |||
215 | static unsigned int expand_bits_8[16] = { | ||
216 | 0x00000000, | ||
217 | 0x000000ff, | ||
218 | 0x0000ff00, | ||
219 | 0x0000ffff, | ||
220 | 0x00ff0000, | ||
221 | 0x00ff00ff, | ||
222 | 0x00ffff00, | ||
223 | 0x00ffffff, | ||
224 | 0xff000000, | ||
225 | 0xff0000ff, | ||
226 | 0xff00ff00, | ||
227 | 0xff00ffff, | ||
228 | 0xffff0000, | ||
229 | 0xffff00ff, | ||
230 | 0xffffff00, | ||
231 | 0xffffffff | ||
232 | }; | ||
233 | |||
234 | static unsigned int expand_bits_16[4] = { | ||
235 | 0x00000000, | ||
236 | 0x0000ffff, | ||
237 | 0xffff0000, | ||
238 | 0xffffffff | ||
239 | }; | ||
240 | |||
241 | |||
242 | static void draw_byte_32(unsigned char *font, unsigned int *base, int rb) | ||
243 | { | ||
244 | int l, bits; | ||
245 | int fg = 0xFFFFFFFFUL; | ||
246 | int bg = 0x00000000UL; | ||
247 | |||
248 | for (l = 0; l < 16; ++l) | ||
249 | { | ||
250 | bits = *font++; | ||
251 | base[0] = (-(bits >> 7) & fg) ^ bg; | ||
252 | base[1] = (-((bits >> 6) & 1) & fg) ^ bg; | ||
253 | base[2] = (-((bits >> 5) & 1) & fg) ^ bg; | ||
254 | base[3] = (-((bits >> 4) & 1) & fg) ^ bg; | ||
255 | base[4] = (-((bits >> 3) & 1) & fg) ^ bg; | ||
256 | base[5] = (-((bits >> 2) & 1) & fg) ^ bg; | ||
257 | base[6] = (-((bits >> 1) & 1) & fg) ^ bg; | ||
258 | base[7] = (-(bits & 1) & fg) ^ bg; | ||
259 | base = (unsigned int *) ((char *)base + rb); | ||
260 | } | ||
261 | } | ||
262 | |||
263 | static void draw_byte_16(unsigned char *font, unsigned int *base, int rb) | ||
264 | { | ||
265 | int l, bits; | ||
266 | int fg = 0xFFFFFFFFUL; | ||
267 | int bg = 0x00000000UL; | ||
268 | unsigned int *eb = (int *)expand_bits_16; | ||
269 | |||
270 | for (l = 0; l < 16; ++l) | ||
271 | { | ||
272 | bits = *font++; | ||
273 | base[0] = (eb[bits >> 6] & fg) ^ bg; | ||
274 | base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg; | ||
275 | base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg; | ||
276 | base[3] = (eb[bits & 3] & fg) ^ bg; | ||
277 | base = (unsigned int *) ((char *)base + rb); | ||
278 | } | ||
279 | } | ||
280 | |||
281 | static void draw_byte_8(unsigned char *font, unsigned int *base, int rb) | ||
282 | { | ||
283 | int l, bits; | ||
284 | int fg = 0x0F0F0F0FUL; | ||
285 | int bg = 0x00000000UL; | ||
286 | unsigned int *eb = (int *)expand_bits_8; | ||
287 | |||
288 | for (l = 0; l < 16; ++l) | ||
289 | { | ||
290 | bits = *font++; | ||
291 | base[0] = (eb[bits >> 4] & fg) ^ bg; | ||
292 | base[1] = (eb[bits & 0xf] & fg) ^ bg; | ||
293 | base = (unsigned int *) ((char *)base + rb); | ||
294 | } | ||
295 | } | ||
296 | |||
297 | static void btext_console_write(struct console *con, const char *s, | ||
298 | unsigned int n) | ||
299 | { | ||
300 | btext_drawtext(s, n); | ||
301 | } | ||
302 | |||
303 | static struct console btext_console = { | ||
304 | .name = "btext", | ||
305 | .write = btext_console_write, | ||
306 | .flags = CON_PRINTBUFFER | CON_ENABLED | CON_BOOT | CON_ANYTIME, | ||
307 | .index = 0, | ||
308 | }; | ||
309 | |||
310 | int __init btext_find_display(void) | ||
311 | { | ||
312 | unsigned int node; | ||
313 | char type[32]; | ||
314 | int ret; | ||
315 | |||
316 | node = prom_inst2pkg(prom_stdout); | ||
317 | if (prom_getproperty(node, "device_type", type, 32) < 0) | ||
318 | return -ENODEV; | ||
319 | if (strcmp(type, "display")) | ||
320 | return -ENODEV; | ||
321 | |||
322 | ret = btext_initialize(node); | ||
323 | if (!ret) { | ||
324 | btext_clearscreen(); | ||
325 | register_console(&btext_console); | ||
326 | } | ||
327 | return ret; | ||
328 | } | ||
329 | |||
330 | static unsigned char vga_font[cmapsz] = { | ||
331 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
332 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x81, 0xa5, 0x81, 0x81, 0xbd, | ||
333 | 0x99, 0x81, 0x81, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xff, | ||
334 | 0xdb, 0xff, 0xff, 0xc3, 0xe7, 0xff, 0xff, 0x7e, 0x00, 0x00, 0x00, 0x00, | ||
335 | 0x00, 0x00, 0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10, | ||
336 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe, | ||
337 | 0x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, | ||
338 | 0x3c, 0x3c, 0xe7, 0xe7, 0xe7, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, | ||
339 | 0x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x18, 0x18, 0x3c, | ||
340 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, | ||
341 | 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, | ||
342 | 0xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, | ||
343 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00, | ||
344 | 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd, | ||
345 | 0xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0x0e, | ||
346 | 0x1a, 0x32, 0x78, 0xcc, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00, | ||
347 | 0x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x18, 0x18, | ||
348 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x33, 0x3f, 0x30, 0x30, 0x30, | ||
349 | 0x30, 0x70, 0xf0, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x63, | ||
350 | 0x7f, 0x63, 0x63, 0x63, 0x63, 0x67, 0xe7, 0xe6, 0xc0, 0x00, 0x00, 0x00, | ||
351 | 0x00, 0x00, 0x00, 0x18, 0x18, 0xdb, 0x3c, 0xe7, 0x3c, 0xdb, 0x18, 0x18, | ||
352 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfe, 0xf8, | ||
353 | 0xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e, | ||
354 | 0x1e, 0x3e, 0xfe, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, | ||
355 | 0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00, | ||
356 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, | ||
357 | 0x66, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xdb, | ||
358 | 0xdb, 0xdb, 0x7b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x00, 0x00, 0x00, 0x00, | ||
359 | 0x00, 0x7c, 0xc6, 0x60, 0x38, 0x6c, 0xc6, 0xc6, 0x6c, 0x38, 0x0c, 0xc6, | ||
360 | 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
361 | 0xfe, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, | ||
362 | 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, | ||
363 | 0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, | ||
364 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, | ||
365 | 0x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
366 | 0x00, 0x18, 0x0c, 0xfe, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
367 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x60, 0xfe, 0x60, 0x30, 0x00, 0x00, | ||
368 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0, | ||
369 | 0xc0, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
370 | 0x00, 0x24, 0x66, 0xff, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
371 | 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00, | ||
372 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c, | ||
373 | 0x38, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
374 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
375 | 0x00, 0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18, | ||
376 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x24, 0x00, 0x00, 0x00, | ||
377 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c, | ||
378 | 0x6c, 0xfe, 0x6c, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, | ||
379 | 0x18, 0x18, 0x7c, 0xc6, 0xc2, 0xc0, 0x7c, 0x06, 0x06, 0x86, 0xc6, 0x7c, | ||
380 | 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc6, 0x0c, 0x18, | ||
381 | 0x30, 0x60, 0xc6, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, | ||
382 | 0x6c, 0x38, 0x76, 0xdc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, | ||
383 | 0x00, 0x30, 0x30, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
384 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30, | ||
385 | 0x30, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18, | ||
386 | 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00, | ||
387 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0xff, 0x3c, 0x66, 0x00, 0x00, | ||
388 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, | ||
389 | 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
390 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, | ||
391 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00, | ||
392 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
393 | 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
394 | 0x02, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, | ||
395 | 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6, 0xe6, 0xc6, 0xc6, 0x7c, | ||
396 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x78, 0x18, 0x18, 0x18, | ||
397 | 0x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, | ||
398 | 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, | ||
399 | 0x00, 0x00, 0x7c, 0xc6, 0x06, 0x06, 0x3c, 0x06, 0x06, 0x06, 0xc6, 0x7c, | ||
400 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x6c, 0xcc, 0xfe, | ||
401 | 0x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0, | ||
402 | 0xc0, 0xc0, 0xfc, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, | ||
403 | 0x00, 0x00, 0x38, 0x60, 0xc0, 0xc0, 0xfc, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, | ||
404 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x06, 0x06, 0x0c, 0x18, | ||
405 | 0x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, | ||
406 | 0xc6, 0xc6, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, | ||
407 | 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x06, 0x06, 0x0c, 0x78, | ||
408 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, | ||
409 | 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
410 | 0x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00, | ||
411 | 0x00, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x06, | ||
412 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, | ||
413 | 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, | ||
414 | 0x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, | ||
415 | 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0x0c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18, | ||
416 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xde, 0xde, | ||
417 | 0xde, 0xdc, 0xc0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, | ||
418 | 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, | ||
419 | 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0x66, 0xfc, | ||
420 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0, | ||
421 | 0xc0, 0xc2, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x6c, | ||
422 | 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x6c, 0xf8, 0x00, 0x00, 0x00, 0x00, | ||
423 | 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x62, 0x66, 0xfe, | ||
424 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, | ||
425 | 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, | ||
426 | 0xc2, 0xc0, 0xc0, 0xde, 0xc6, 0xc6, 0x66, 0x3a, 0x00, 0x00, 0x00, 0x00, | ||
427 | 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, | ||
428 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18, | ||
429 | 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x0c, | ||
430 | 0x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00, | ||
431 | 0x00, 0x00, 0xe6, 0x66, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0x66, 0xe6, | ||
432 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x60, 0x60, 0x60, 0x60, 0x60, | ||
433 | 0x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe7, | ||
434 | 0xff, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, | ||
435 | 0x00, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0xc6, | ||
436 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, | ||
437 | 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, | ||
438 | 0x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, | ||
439 | 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c, | ||
440 | 0x0c, 0x0e, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c, | ||
441 | 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, | ||
442 | 0xc6, 0x60, 0x38, 0x0c, 0x06, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, | ||
443 | 0x00, 0x00, 0xff, 0xdb, 0x99, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, | ||
444 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, | ||
445 | 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, | ||
446 | 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, | ||
447 | 0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x66, | ||
448 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18, | ||
449 | 0x3c, 0x66, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, | ||
450 | 0xc3, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, | ||
451 | 0x00, 0x00, 0xff, 0xc3, 0x86, 0x0c, 0x18, 0x30, 0x60, 0xc1, 0xc3, 0xff, | ||
452 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30, | ||
453 | 0x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, | ||
454 | 0xc0, 0xe0, 0x70, 0x38, 0x1c, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, | ||
455 | 0x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c, | ||
456 | 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0x00, 0x00, 0x00, 0x00, | ||
457 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
458 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, | ||
459 | 0x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
460 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c, | ||
461 | 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x60, | ||
462 | 0x60, 0x78, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00, | ||
463 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0x7c, | ||
464 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x0c, 0x0c, 0x3c, 0x6c, 0xcc, | ||
465 | 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
466 | 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, | ||
467 | 0x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xf0, | ||
468 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc, | ||
469 | 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78, 0x00, 0x00, 0x00, 0xe0, 0x60, | ||
470 | 0x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, | ||
471 | 0x00, 0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, | ||
472 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x0e, 0x06, 0x06, | ||
473 | 0x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0xe0, 0x60, | ||
474 | 0x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, | ||
475 | 0x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, | ||
476 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0xff, 0xdb, | ||
477 | 0xdb, 0xdb, 0xdb, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
478 | 0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, | ||
479 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, | ||
480 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66, | ||
481 | 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
482 | 0x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e, 0x00, | ||
483 | 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x76, 0x66, 0x60, 0x60, 0x60, 0xf0, | ||
484 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60, | ||
485 | 0x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x30, | ||
486 | 0x30, 0xfc, 0x30, 0x30, 0x30, 0x30, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00, | ||
487 | 0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76, | ||
488 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3, | ||
489 | 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
490 | 0x00, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00, | ||
491 | 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3, | ||
492 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, | ||
493 | 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
494 | 0x00, 0xfe, 0xcc, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, | ||
495 | 0x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e, | ||
496 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18, | ||
497 | 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x18, | ||
498 | 0x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00, | ||
499 | 0x00, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
500 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, | ||
501 | 0xc6, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, | ||
502 | 0xc2, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x0c, 0x06, 0x7c, 0x00, 0x00, | ||
503 | 0x00, 0x00, 0xcc, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76, | ||
504 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x7c, 0xc6, 0xfe, | ||
505 | 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, | ||
506 | 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, | ||
507 | 0x00, 0x00, 0xcc, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, | ||
508 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x78, 0x0c, 0x7c, | ||
509 | 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, | ||
510 | 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, | ||
511 | 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x0c, 0x06, | ||
512 | 0x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xfe, | ||
513 | 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, | ||
514 | 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, | ||
515 | 0x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, | ||
516 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x38, 0x18, 0x18, | ||
517 | 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x66, | ||
518 | 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, | ||
519 | 0x00, 0x60, 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, | ||
520 | 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6, | ||
521 | 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00, | ||
522 | 0x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, | ||
523 | 0x18, 0x30, 0x60, 0x00, 0xfe, 0x66, 0x60, 0x7c, 0x60, 0x60, 0x66, 0xfe, | ||
524 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x1b, | ||
525 | 0x7e, 0xd8, 0xdc, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x6c, | ||
526 | 0xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00, | ||
527 | 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, | ||
528 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6, | ||
529 | 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, | ||
530 | 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, | ||
531 | 0x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76, | ||
532 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc, | ||
533 | 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, | ||
534 | 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00, | ||
535 | 0x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, | ||
536 | 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, | ||
537 | 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, | ||
538 | 0xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, | ||
539 | 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc, | ||
540 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18, | ||
541 | 0xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, | ||
542 | 0x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00, | ||
543 | 0x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, | ||
544 | 0xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c, | ||
545 | 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, | ||
546 | 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, | ||
547 | 0x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, | ||
548 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc, | ||
549 | 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, | ||
550 | 0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, | ||
551 | 0x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, | ||
552 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00, | ||
553 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c, | ||
554 | 0x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
555 | 0x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c, | ||
556 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0, | ||
557 | 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
558 | 0x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
559 | 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06, | ||
560 | 0x0c, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, | ||
561 | 0x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, | ||
562 | 0x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, | ||
563 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00, | ||
564 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36, | ||
565 | 0x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44, | ||
566 | 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, | ||
567 | 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, | ||
568 | 0x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, | ||
569 | 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18, | ||
570 | 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, | ||
571 | 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18, | ||
572 | 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8, | ||
573 | 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, | ||
574 | 0x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, | ||
575 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36, | ||
576 | 0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8, | ||
577 | 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, | ||
578 | 0x36, 0xf6, 0x06, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, | ||
579 | 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, | ||
580 | 0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6, | ||
581 | 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, | ||
582 | 0x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
583 | 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00, | ||
584 | 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8, | ||
585 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
586 | 0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, | ||
587 | 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, | ||
588 | 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff, | ||
589 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
590 | 0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, | ||
591 | 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18, | ||
592 | 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, | ||
593 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, | ||
594 | 0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, | ||
595 | 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18, | ||
596 | 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37, | ||
597 | 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, | ||
598 | 0x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
599 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36, | ||
600 | 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff, | ||
601 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
602 | 0x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, | ||
603 | 0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36, | ||
604 | 0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff, | ||
605 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, | ||
606 | 0x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, | ||
607 | 0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, | ||
608 | 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff, | ||
609 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
610 | 0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, | ||
611 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36, | ||
612 | 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f, | ||
613 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, | ||
614 | 0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
615 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18, | ||
616 | 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, | ||
617 | 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, | ||
618 | 0x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, | ||
619 | 0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, | ||
620 | 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, | ||
621 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
622 | 0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, | ||
623 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, | ||
624 | 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, | ||
625 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0, | ||
626 | 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, | ||
627 | 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, | ||
628 | 0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, | ||
629 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
630 | 0x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00, | ||
631 | 0x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc, | ||
632 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0, | ||
633 | 0xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
634 | 0xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, | ||
635 | 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe, | ||
636 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8, | ||
637 | 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
638 | 0x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00, | ||
639 | 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, | ||
640 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66, | ||
641 | 0x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, | ||
642 | 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00, | ||
643 | 0x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee, | ||
644 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66, | ||
645 | 0x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
646 | 0x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
647 | 0x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0, | ||
648 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60, | ||
649 | 0x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, | ||
650 | 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, | ||
651 | 0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, | ||
652 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18, | ||
653 | 0x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, | ||
654 | 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00, | ||
655 | 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e, | ||
656 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18, | ||
657 | 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, | ||
658 | 0x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, | ||
659 | 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00, | ||
660 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00, | ||
661 | 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c, | ||
662 | 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
663 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, | ||
664 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
665 | 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c, | ||
666 | 0x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00, | ||
667 | 0x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
668 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00, | ||
669 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
670 | 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
671 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
672 | 0x00, 0x00, 0x00, 0x00, | ||
673 | }; | ||
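The draw_byte_8() routine above expands each half of a font byte through the expand_bits_8[] lookup table (defined earlier in the file, outside this excerpt): every set font bit becomes one byte-wide pixel, masked with fg = 0x0F0F0F0F. A minimal sketch of how such a table could be generated is shown below; the helper name and the big-endian pixel ordering are illustrative assumptions, since the driver itself simply hard-codes the sixteen entries.

	/* Sketch only: build a 16-entry table mapping a 4-bit font nibble to
	 * four byte-wide pixels (0xff per set bit).  A big-endian framebuffer
	 * layout is assumed here; it is not taken from the patch itself. */
	static unsigned int expand_bits_8_sketch[16];

	static void build_expand_bits_8(void)
	{
		int n, b;

		for (n = 0; n < 16; n++) {
			unsigned int w = 0;

			for (b = 0; b < 4; b++)
				if (n & (8 >> b))	/* MSB of the nibble is the leftmost pixel */
					w |= 0xffu << (24 - 8 * b);
			expand_bits_8_sketch[n] = w;
		}
	}

draw_byte_16() works the same way two bits at a time, each expand_bits_16[] entry evidently covering two 16-bit pixels, with fg = 0xFFFFFFFF selecting white.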
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c index 1446df90ef85..e447938d39cf 100644 --- a/arch/sparc/kernel/cpu.c +++ b/arch/sparc/kernel/cpu.c | |||
@@ -185,6 +185,17 @@ static const struct manufacturer_info __initconst manufacturer_info[] = { | |||
185 | FPU(-1, NULL) | 185 | FPU(-1, NULL) |
186 | } | 186 | } |
187 | },{ | 187 | },{ |
188 | 0xF, /* Aeroflex Gaisler */ | ||
189 | .cpu_info = { | ||
190 | CPU(3, "LEON"), | ||
191 | CPU(-1, NULL) | ||
192 | }, | ||
193 | .fpu_info = { | ||
194 | FPU(2, "GRFPU"), | ||
195 | FPU(3, "GRFPU-Lite"), | ||
196 | FPU(-1, NULL) | ||
197 | } | ||
198 | },{ | ||
188 | 0x17, | 199 | 0x17, |
189 | .cpu_info = { | 200 | .cpu_info = { |
190 | CPU_PMU(0x10, "TI UltraSparc I (SpitFire)", "ultra12"), | 201 | CPU_PMU(0x10, "TI UltraSparc I (SpitFire)", "ultra12"), |
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S index f41ecc5ac0b4..ec9c7bc67d21 100644 --- a/arch/sparc/kernel/entry.S +++ b/arch/sparc/kernel/entry.S | |||
@@ -400,6 +400,39 @@ linux_trap_ipi15_sun4d: | |||
400 | /* FIXME */ | 400 | /* FIXME */ |
401 | 1: b,a 1b | 401 | 1: b,a 1b |
402 | 402 | ||
403 | #ifdef CONFIG_SPARC_LEON | ||
404 | |||
405 | .globl smpleon_ticker | ||
406 | /* SMP per-cpu ticker interrupts are handled specially. */ | ||
407 | smpleon_ticker: | ||
408 | SAVE_ALL | ||
409 | or %l0, PSR_PIL, %g2 | ||
410 | wr %g2, 0x0, %psr | ||
411 | WRITE_PAUSE | ||
412 | wr %g2, PSR_ET, %psr | ||
413 | WRITE_PAUSE | ||
414 | call leon_percpu_timer_interrupt | ||
415 | add %sp, STACKFRAME_SZ, %o0 | ||
416 | wr %l0, PSR_ET, %psr | ||
417 | WRITE_PAUSE | ||
418 | RESTORE_ALL | ||
419 | |||
420 | .align 4 | ||
421 | .globl linux_trap_ipi15_leon | ||
422 | linux_trap_ipi15_leon: | ||
423 | SAVE_ALL | ||
424 | or %l0, PSR_PIL, %l4 | ||
425 | wr %l4, 0x0, %psr | ||
426 | WRITE_PAUSE | ||
427 | wr %l4, PSR_ET, %psr | ||
428 | WRITE_PAUSE | ||
429 | call leon_cross_call_irq | ||
430 | nop | ||
431 | b ret_trap_lockless_ipi | ||
432 | clr %l6 | ||
433 | |||
434 | #endif /* CONFIG_SPARC_LEON */ | ||
435 | |||
403 | #endif /* CONFIG_SMP */ | 436 | #endif /* CONFIG_SMP */ |
404 | 437 | ||
405 | /* This routine handles illegal instructions and privileged | 438 | /* This routine handles illegal instructions and privileged |
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S index 439d82a95ac9..21bb2590d4ae 100644 --- a/arch/sparc/kernel/head_32.S +++ b/arch/sparc/kernel/head_32.S | |||
@@ -811,9 +811,31 @@ found_version: | |||
811 | got_prop: | 811 | got_prop: |
812 | #ifdef CONFIG_SPARC_LEON | 812 | #ifdef CONFIG_SPARC_LEON |
813 | /* no cpu-type check is needed, it is a SPARC-LEON */ | 813 | /* no cpu-type check is needed, it is a SPARC-LEON */ |
814 | #ifdef CONFIG_SMP | ||
815 | ba leon_smp_init | ||
816 | nop | ||
817 | |||
818 | .global leon_smp_init | ||
819 | leon_smp_init: | ||
820 | sethi %hi(boot_cpu_id), %g1 ! master always 0 | ||
821 | stb %g0, [%g1 + %lo(boot_cpu_id)] | ||
822 | sethi %hi(boot_cpu_id4), %g1 ! master always 0 | ||
823 | stb %g0, [%g1 + %lo(boot_cpu_id4)] | ||
824 | |||
825 | rd %asr17,%g1 | ||
826 | srl %g1,28,%g1 | ||
827 | |||
828 | cmp %g0,%g1 | ||
829 | beq sun4c_continue_boot !continue with master | ||
830 | nop | ||
831 | |||
832 | ba leon_smp_cpu_startup | ||
833 | nop | ||
834 | #else | ||
814 | ba sun4c_continue_boot | 835 | ba sun4c_continue_boot |
815 | nop | 836 | nop |
816 | #endif | 837 | #endif |
838 | #endif | ||
817 | set cputypval, %o2 | 839 | set cputypval, %o2 |
818 | ldub [%o2 + 0x4], %l1 | 840 | ldub [%o2 + 0x4], %l1 |
819 | 841 | ||
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 9f61fd8cbb7b..3c8c44f6a41c 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c | |||
@@ -48,8 +48,13 @@ | |||
48 | #include <asm/dma.h> | 48 | #include <asm/dma.h> |
49 | #include <asm/iommu.h> | 49 | #include <asm/iommu.h> |
50 | #include <asm/io-unit.h> | 50 | #include <asm/io-unit.h> |
51 | #include <asm/leon.h> | ||
51 | 52 | ||
53 | #ifdef CONFIG_SPARC_LEON | ||
54 | #define mmu_inval_dma_area(p, l) leon_flush_dcache_all() | ||
55 | #else | ||
52 | #define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */ | 56 | #define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */ |
57 | #endif | ||
53 | 58 | ||
54 | static struct resource *_sparc_find_resource(struct resource *r, | 59 | static struct resource *_sparc_find_resource(struct resource *r, |
55 | unsigned long); | 60 | unsigned long); |
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index 8ab1d4728a4b..ce996f97855f 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c | |||
@@ -187,7 +187,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
187 | for_each_online_cpu(j) | 187 | for_each_online_cpu(j) |
188 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | 188 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); |
189 | #endif | 189 | #endif |
190 | seq_printf(p, " %9s", irq_desc[i].chip->typename); | 190 | seq_printf(p, " %9s", irq_desc[i].chip->name); |
191 | seq_printf(p, " %s", action->name); | 191 | seq_printf(p, " %s", action->name); |
192 | 192 | ||
193 | for (action=action->next; action; action = action->next) | 193 | for (action=action->next; action; action = action->next) |
@@ -484,7 +484,7 @@ static void sun4v_virq_eoi(unsigned int virt_irq) | |||
484 | } | 484 | } |
485 | 485 | ||
486 | static struct irq_chip sun4u_irq = { | 486 | static struct irq_chip sun4u_irq = { |
487 | .typename = "sun4u", | 487 | .name = "sun4u", |
488 | .enable = sun4u_irq_enable, | 488 | .enable = sun4u_irq_enable, |
489 | .disable = sun4u_irq_disable, | 489 | .disable = sun4u_irq_disable, |
490 | .eoi = sun4u_irq_eoi, | 490 | .eoi = sun4u_irq_eoi, |
@@ -492,7 +492,7 @@ static struct irq_chip sun4u_irq = { | |||
492 | }; | 492 | }; |
493 | 493 | ||
494 | static struct irq_chip sun4v_irq = { | 494 | static struct irq_chip sun4v_irq = { |
495 | .typename = "sun4v", | 495 | .name = "sun4v", |
496 | .enable = sun4v_irq_enable, | 496 | .enable = sun4v_irq_enable, |
497 | .disable = sun4v_irq_disable, | 497 | .disable = sun4v_irq_disable, |
498 | .eoi = sun4v_irq_eoi, | 498 | .eoi = sun4v_irq_eoi, |
@@ -500,7 +500,7 @@ static struct irq_chip sun4v_irq = { | |||
500 | }; | 500 | }; |
501 | 501 | ||
502 | static struct irq_chip sun4v_virq = { | 502 | static struct irq_chip sun4v_virq = { |
503 | .typename = "vsun4v", | 503 | .name = "vsun4v", |
504 | .enable = sun4v_virq_enable, | 504 | .enable = sun4v_virq_enable, |
505 | .disable = sun4v_virq_disable, | 505 | .disable = sun4v_virq_disable, |
506 | .eoi = sun4v_virq_eoi, | 506 | .eoi = sun4v_virq_eoi, |
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c index 54d8a5bd4824..87f1760c0aa2 100644 --- a/arch/sparc/kernel/leon_kernel.c +++ b/arch/sparc/kernel/leon_kernel.c | |||
@@ -12,11 +12,14 @@ | |||
12 | #include <linux/of_platform.h> | 12 | #include <linux/of_platform.h> |
13 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
14 | #include <linux/of_device.h> | 14 | #include <linux/of_device.h> |
15 | |||
15 | #include <asm/oplib.h> | 16 | #include <asm/oplib.h> |
16 | #include <asm/timer.h> | 17 | #include <asm/timer.h> |
17 | #include <asm/prom.h> | 18 | #include <asm/prom.h> |
18 | #include <asm/leon.h> | 19 | #include <asm/leon.h> |
19 | #include <asm/leon_amba.h> | 20 | #include <asm/leon_amba.h> |
21 | #include <asm/traps.h> | ||
22 | #include <asm/cacheflush.h> | ||
20 | 23 | ||
21 | #include "prom.h" | 24 | #include "prom.h" |
22 | #include "irq.h" | 25 | #include "irq.h" |
@@ -115,6 +118,21 @@ void __init leon_init_timers(irq_handler_t counter_fn) | |||
115 | (((1000000 / 100) - 1))); | 118 | (((1000000 / 100) - 1))); |
116 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, 0); | 119 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, 0); |
117 | 120 | ||
121 | #ifdef CONFIG_SMP | ||
122 | leon_percpu_timer_dev[0].start = (int)leon3_gptimer_regs; | ||
123 | leon_percpu_timer_dev[0].irq = leon3_gptimer_irq+1; | ||
124 | |||
125 | if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) & | ||
126 | (1<<LEON3_GPTIMER_SEPIRQ))) { | ||
127 | prom_printf("irq timer not configured with separate irqs\n"); | ||
128 | BUG(); | ||
129 | } | ||
130 | |||
131 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].val, 0); | ||
132 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].rld, (((1000000/100) - 1))); | ||
133 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].ctrl, 0); | ||
134 | # endif | ||
135 | |||
118 | } else { | 136 | } else { |
119 | printk(KERN_ERR "No Timer/irqctrl found\n"); | 137 | printk(KERN_ERR "No Timer/irqctrl found\n"); |
120 | BUG(); | 138 | BUG(); |
@@ -130,11 +148,41 @@ void __init leon_init_timers(irq_handler_t counter_fn) | |||
130 | prom_halt(); | 148 | prom_halt(); |
131 | } | 149 | } |
132 | 150 | ||
151 | # ifdef CONFIG_SMP | ||
152 | { | ||
153 | unsigned long flags; | ||
154 | struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (leon_percpu_timer_dev[0].irq - 1)]; | ||
155 | |||
156 | /* For SMP we use the level 14 ticker, however the bootup code | ||
157 | * has copied the firmware's level 14 vector into the boot cpu's | ||
158 | * trap table; we must fix this now or we get squashed. | ||
159 | */ | ||
160 | local_irq_save(flags); | ||
161 | |||
162 | patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */ | ||
163 | |||
164 | /* Adjust so that we jump directly to smpleon_ticker */ | ||
165 | trap_table->inst_three += smpleon_ticker - real_irq_entry; | ||
166 | |||
167 | local_flush_cache_all(); | ||
168 | local_irq_restore(flags); | ||
169 | } | ||
170 | # endif | ||
171 | |||
133 | if (leon3_gptimer_regs) { | 172 | if (leon3_gptimer_regs) { |
134 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, | 173 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, |
135 | LEON3_GPTIMER_EN | | 174 | LEON3_GPTIMER_EN | |
136 | LEON3_GPTIMER_RL | | 175 | LEON3_GPTIMER_RL | |
137 | LEON3_GPTIMER_LD | LEON3_GPTIMER_IRQEN); | 176 | LEON3_GPTIMER_LD | LEON3_GPTIMER_IRQEN); |
177 | |||
178 | #ifdef CONFIG_SMP | ||
179 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].ctrl, | ||
180 | LEON3_GPTIMER_EN | | ||
181 | LEON3_GPTIMER_RL | | ||
182 | LEON3_GPTIMER_LD | | ||
183 | LEON3_GPTIMER_IRQEN); | ||
184 | #endif | ||
185 | |||
138 | } | 186 | } |
139 | } | 187 | } |
140 | 188 | ||
@@ -175,6 +223,42 @@ void __init leon_node_init(struct device_node *dp, struct device_node ***nextp) | |||
175 | } | 223 | } |
176 | } | 224 | } |
177 | 225 | ||
226 | #ifdef CONFIG_SMP | ||
227 | |||
228 | void leon_set_cpu_int(int cpu, int level) | ||
229 | { | ||
230 | unsigned long mask; | ||
231 | mask = get_irqmask(level); | ||
232 | LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->force[cpu], mask); | ||
233 | } | ||
234 | |||
235 | static void leon_clear_ipi(int cpu, int level) | ||
236 | { | ||
237 | unsigned long mask; | ||
238 | mask = get_irqmask(level); | ||
239 | LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->force[cpu], mask<<16); | ||
240 | } | ||
241 | |||
242 | static void leon_set_udt(int cpu) | ||
243 | { | ||
244 | } | ||
245 | |||
246 | void leon_clear_profile_irq(int cpu) | ||
247 | { | ||
248 | } | ||
249 | |||
250 | void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu) | ||
251 | { | ||
252 | unsigned long mask, flags, *addr; | ||
253 | mask = get_irqmask(irq_nr); | ||
254 | local_irq_save(flags); | ||
255 | addr = (unsigned long *)&(leon3_irqctrl_regs->mask[cpu]); | ||
256 | LEON3_BYPASS_STORE_PA(addr, (LEON3_BYPASS_LOAD_PA(addr) | (mask))); | ||
257 | local_irq_restore(flags); | ||
258 | } | ||
259 | |||
260 | #endif | ||
261 | |||
178 | void __init leon_init_IRQ(void) | 262 | void __init leon_init_IRQ(void) |
179 | { | 263 | { |
180 | sparc_init_timers = leon_init_timers; | 264 | sparc_init_timers = leon_init_timers; |
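The helpers added to leon_kernel.c above raise an inter-processor interrupt by writing the interrupt's mask bit into the target CPU's force register (leon_set_cpu_int) and clear a forced interrupt by writing the same bit shifted into the upper half-word (leon_clear_ipi). A hedged usage sketch, with a hypothetical wrapper name:

	/* Sketch only: kick another cpu with the cross-call interrupt, then
	 * drop the forced bit again.  LEON3_IRQ_CROSS_CALL and the
	 * force-register convention (low bits force, mask << 16 clears) are
	 * taken from the hunks in this patch; whether an explicit clear is
	 * needed depends on the irqmp controller configuration. */
	static void leon_ipi_ping(int cpu)
	{
		leon_set_cpu_int(cpu, LEON3_IRQ_CROSS_CALL);
		/* the target cpu enters linux_trap_ipi15_leon and runs
		 * leon_cross_call_irq() */
		leon_clear_ipi(cpu, LEON3_IRQ_CROSS_CALL);
	}

leon_cross_call() in leon_smp.c below fires its IPIs the same way, through the set_cpu_int() hook.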
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c new file mode 100644 index 000000000000..05c0dadd6371 --- /dev/null +++ b/arch/sparc/kernel/leon_smp.c | |||
@@ -0,0 +1,468 @@ | |||
1 | /* leon_smp.c: Sparc-Leon SMP support. | ||
2 | * | ||
3 | * based on sun4m_smp.c | ||
4 | * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) | ||
5 | * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB | ||
6 | * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB | ||
7 | */ | ||
8 | |||
9 | #include <asm/head.h> | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/threads.h> | ||
14 | #include <linux/smp.h> | ||
15 | #include <linux/smp_lock.h> | ||
16 | #include <linux/interrupt.h> | ||
17 | #include <linux/kernel_stat.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/spinlock.h> | ||
20 | #include <linux/mm.h> | ||
21 | #include <linux/swap.h> | ||
22 | #include <linux/profile.h> | ||
23 | #include <linux/pm.h> | ||
24 | #include <linux/delay.h> | ||
25 | |||
26 | #include <asm/cacheflush.h> | ||
27 | #include <asm/tlbflush.h> | ||
28 | |||
29 | #include <asm/ptrace.h> | ||
30 | #include <asm/atomic.h> | ||
31 | #include <asm/irq_regs.h> | ||
32 | |||
33 | #include <asm/delay.h> | ||
34 | #include <asm/irq.h> | ||
35 | #include <asm/page.h> | ||
36 | #include <asm/pgalloc.h> | ||
37 | #include <asm/pgtable.h> | ||
38 | #include <asm/oplib.h> | ||
39 | #include <asm/cpudata.h> | ||
40 | #include <asm/asi.h> | ||
41 | #include <asm/leon.h> | ||
42 | #include <asm/leon_amba.h> | ||
43 | |||
44 | #ifdef CONFIG_SPARC_LEON | ||
45 | |||
46 | #include "irq.h" | ||
47 | |||
48 | extern ctxd_t *srmmu_ctx_table_phys; | ||
49 | static int smp_processors_ready; | ||
50 | extern volatile unsigned long cpu_callin_map[NR_CPUS]; | ||
51 | extern unsigned char boot_cpu_id; | ||
52 | extern cpumask_t smp_commenced_mask; | ||
53 | void __init leon_configure_cache_smp(void); | ||
54 | |||
55 | static inline unsigned long do_swap(volatile unsigned long *ptr, | ||
56 | unsigned long val) | ||
57 | { | ||
58 | __asm__ __volatile__("swapa [%1] %2, %0\n\t" : "=&r"(val) | ||
59 | : "r"(ptr), "i"(ASI_LEON_DCACHE_MISS) | ||
60 | : "memory"); | ||
61 | return val; | ||
62 | } | ||
63 | |||
64 | static void smp_setup_percpu_timer(void); | ||
65 | |||
66 | void __cpuinit leon_callin(void) | ||
67 | { | ||
68 | int cpuid = hard_smpleon_processor_id(); | ||
69 | |||
70 | local_flush_cache_all(); | ||
71 | local_flush_tlb_all(); | ||
72 | leon_configure_cache_smp(); | ||
73 | |||
74 | /* Get our local ticker going. */ | ||
75 | smp_setup_percpu_timer(); | ||
76 | |||
77 | calibrate_delay(); | ||
78 | smp_store_cpu_info(cpuid); | ||
79 | |||
80 | local_flush_cache_all(); | ||
81 | local_flush_tlb_all(); | ||
82 | |||
83 | /* | ||
84 | * Unblock the master CPU _only_ when the scheduler state | ||
85 | * of all secondary CPUs will be up-to-date, so after | ||
86 | * the SMP initialization the master will be just allowed | ||
87 | * to call the scheduler code. | ||
88 | * Allow master to continue. | ||
89 | */ | ||
90 | do_swap(&cpu_callin_map[cpuid], 1); | ||
91 | |||
92 | local_flush_cache_all(); | ||
93 | local_flush_tlb_all(); | ||
94 | |||
95 | cpu_probe(); | ||
96 | |||
97 | /* Fix idle thread fields. */ | ||
98 | __asm__ __volatile__("ld [%0], %%g6\n\t" : : "r"(&current_set[cpuid]) | ||
99 | : "memory" /* paranoid */); | ||
100 | |||
101 | /* Attach to the address space of init_task. */ | ||
102 | atomic_inc(&init_mm.mm_count); | ||
103 | current->active_mm = &init_mm; | ||
104 | |||
105 | while (!cpu_isset(cpuid, smp_commenced_mask)) | ||
106 | mb(); | ||
107 | |||
108 | local_irq_enable(); | ||
109 | cpu_set(cpuid, cpu_online_map); | ||
110 | } | ||
111 | |||
112 | /* | ||
113 | * Cycle through the processors asking the PROM to start each one. | ||
114 | */ | ||
115 | |||
116 | extern struct linux_prom_registers smp_penguin_ctable; | ||
117 | |||
118 | void __init leon_configure_cache_smp(void) | ||
119 | { | ||
120 | unsigned long cfg = sparc_leon3_get_dcachecfg(); | ||
121 | int me = smp_processor_id(); | ||
122 | |||
123 | if (ASI_LEON3_SYSCTRL_CFG_SSIZE(cfg) > 4) { | ||
124 | printk(KERN_INFO "Note: SMP with snooping only works on 4k cache, found %dk(0x%x) on cpu %d, disabling caches\n", | ||
125 | (unsigned int)ASI_LEON3_SYSCTRL_CFG_SSIZE(cfg), | ||
126 | (unsigned int)cfg, (unsigned int)me); | ||
127 | sparc_leon3_disable_cache(); | ||
128 | } else { | ||
129 | if (cfg & ASI_LEON3_SYSCTRL_CFG_SNOOPING) { | ||
130 | sparc_leon3_enable_snooping(); | ||
131 | } else { | ||
132 | printk(KERN_INFO "Note: You have to enable snooping in the vhdl model cpu %d, disabling caches\n", | ||
133 | me); | ||
134 | sparc_leon3_disable_cache(); | ||
135 | } | ||
136 | } | ||
137 | |||
138 | local_flush_cache_all(); | ||
139 | local_flush_tlb_all(); | ||
140 | } | ||
141 | |||
142 | void leon_smp_setbroadcast(unsigned int mask) | ||
143 | { | ||
144 | int broadcast = | ||
145 | ((LEON3_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpstatus)) >> | ||
146 | LEON3_IRQMPSTATUS_BROADCAST) & 1); | ||
147 | if (!broadcast) { | ||
148 | prom_printf("######## !!!! The irqmp-ctrl must have broadcast enabled, SMP won't work !!!!! ####### nr cpus: %d\n", | ||
149 | leon_smp_nrcpus()); | ||
150 | if (leon_smp_nrcpus() > 1) { | ||
151 | BUG(); | ||
152 | } else { | ||
153 | prom_printf("continue anyway\n"); | ||
154 | return; | ||
155 | } | ||
156 | } | ||
157 | LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpbroadcast), mask); | ||
158 | } | ||
159 | |||
160 | unsigned int leon_smp_getbroadcast(void) | ||
161 | { | ||
162 | unsigned int mask; | ||
163 | mask = LEON_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpbroadcast)); | ||
164 | return mask; | ||
165 | } | ||
166 | |||
167 | int leon_smp_nrcpus(void) | ||
168 | { | ||
169 | int nrcpu = | ||
170 | ((LEON3_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpstatus)) >> | ||
171 | LEON3_IRQMPSTATUS_CPUNR) & 0xf) + 1; | ||
172 | return nrcpu; | ||
173 | } | ||
174 | |||
175 | void __init leon_boot_cpus(void) | ||
176 | { | ||
177 | int nrcpu = leon_smp_nrcpus(); | ||
178 | int me = smp_processor_id(); | ||
179 | |||
180 | printk(KERN_INFO "%d:(%d:%d) cpus mpirq at 0x%x \n", (unsigned int)me, | ||
181 | (unsigned int)nrcpu, (unsigned int)NR_CPUS, | ||
182 | (unsigned int)&(leon3_irqctrl_regs->mpstatus)); | ||
183 | |||
184 | leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, me); | ||
185 | leon_enable_irq_cpu(LEON3_IRQ_TICKER, me); | ||
186 | leon_enable_irq_cpu(LEON3_IRQ_RESCHEDULE, me); | ||
187 | |||
188 | leon_smp_setbroadcast(1 << LEON3_IRQ_TICKER); | ||
189 | |||
190 | leon_configure_cache_smp(); | ||
191 | smp_setup_percpu_timer(); | ||
192 | local_flush_cache_all(); | ||
193 | |||
194 | } | ||
195 | |||
196 | int __cpuinit leon_boot_one_cpu(int i) | ||
197 | { | ||
198 | |||
199 | struct task_struct *p; | ||
200 | int timeout; | ||
201 | |||
202 | /* Cook up an idler for this guy. */ | ||
203 | p = fork_idle(i); | ||
204 | |||
205 | current_set[i] = task_thread_info(p); | ||
206 | |||
207 | /* See trampoline.S:leon_smp_cpu_startup for details... | ||
208 | * Initialize the contexts table | ||
209 | * Since the call to prom_startcpu() trashes the structure, | ||
210 | * we need to re-initialize it for each cpu | ||
211 | */ | ||
212 | smp_penguin_ctable.which_io = 0; | ||
213 | smp_penguin_ctable.phys_addr = (unsigned int)srmmu_ctx_table_phys; | ||
214 | smp_penguin_ctable.reg_size = 0; | ||
215 | |||
216 | /* whirrr, whirrr, whirrrrrrrrr... */ | ||
217 | printk(KERN_INFO "Starting CPU %d : (irqmp: 0x%x)\n", (unsigned int)i, | ||
218 | (unsigned int)&leon3_irqctrl_regs->mpstatus); | ||
219 | local_flush_cache_all(); | ||
220 | |||
221 | LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpstatus), 1 << i); | ||
222 | |||
223 | /* wheee... it's going... */ | ||
224 | for (timeout = 0; timeout < 10000; timeout++) { | ||
225 | if (cpu_callin_map[i]) | ||
226 | break; | ||
227 | udelay(200); | ||
228 | } | ||
229 | printk(KERN_INFO "Started CPU %d \n", (unsigned int)i); | ||
230 | |||
231 | if (!(cpu_callin_map[i])) { | ||
232 | printk(KERN_ERR "Processor %d is stuck.\n", i); | ||
233 | return -ENODEV; | ||
234 | } else { | ||
235 | leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, i); | ||
236 | leon_enable_irq_cpu(LEON3_IRQ_TICKER, i); | ||
237 | leon_enable_irq_cpu(LEON3_IRQ_RESCHEDULE, i); | ||
238 | } | ||
239 | |||
240 | local_flush_cache_all(); | ||
241 | return 0; | ||
242 | } | ||
243 | |||
244 | void __init leon_smp_done(void) | ||
245 | { | ||
246 | |||
247 | int i, first; | ||
248 | int *prev; | ||
249 | |||
250 | /* setup cpu list for irq rotation */ | ||
251 | first = 0; | ||
252 | prev = &first; | ||
253 | for (i = 0; i < NR_CPUS; i++) { | ||
254 | if (cpu_online(i)) { | ||
255 | *prev = i; | ||
256 | prev = &cpu_data(i).next; | ||
257 | } | ||
258 | } | ||
259 | *prev = first; | ||
260 | local_flush_cache_all(); | ||
261 | |||
262 | /* Free unneeded trap tables */ | ||
263 | if (!cpu_isset(1, cpu_present_map)) { | ||
264 | ClearPageReserved(virt_to_page(trapbase_cpu1)); | ||
265 | init_page_count(virt_to_page(trapbase_cpu1)); | ||
266 | free_page((unsigned long)trapbase_cpu1); | ||
267 | totalram_pages++; | ||
268 | num_physpages++; | ||
269 | } | ||
270 | if (!cpu_isset(2, cpu_present_map)) { | ||
271 | ClearPageReserved(virt_to_page(trapbase_cpu2)); | ||
272 | init_page_count(virt_to_page(trapbase_cpu2)); | ||
273 | free_page((unsigned long)trapbase_cpu2); | ||
274 | totalram_pages++; | ||
275 | num_physpages++; | ||
276 | } | ||
277 | if (!cpu_isset(3, cpu_present_map)) { | ||
278 | ClearPageReserved(virt_to_page(trapbase_cpu3)); | ||
279 | init_page_count(virt_to_page(trapbase_cpu3)); | ||
280 | free_page((unsigned long)trapbase_cpu3); | ||
281 | totalram_pages++; | ||
282 | num_physpages++; | ||
283 | } | ||
284 | /* Ok, they are spinning and ready to go. */ | ||
285 | smp_processors_ready = 1; | ||
286 | |||
287 | } | ||
288 | |||
289 | void leon_irq_rotate(int cpu) | ||
290 | { | ||
291 | } | ||
292 | |||
293 | static struct smp_funcall { | ||
294 | smpfunc_t func; | ||
295 | unsigned long arg1; | ||
296 | unsigned long arg2; | ||
297 | unsigned long arg3; | ||
298 | unsigned long arg4; | ||
299 | unsigned long arg5; | ||
300 | unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. */ | ||
301 | unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */ | ||
302 | } ccall_info; | ||
303 | |||
304 | static DEFINE_SPINLOCK(cross_call_lock); | ||
305 | |||
306 | /* Cross calls must be serialized, at least currently. */ | ||
307 | static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, | ||
308 | unsigned long arg2, unsigned long arg3, | ||
309 | unsigned long arg4) | ||
310 | { | ||
311 | if (smp_processors_ready) { | ||
312 | register int high = NR_CPUS - 1; | ||
313 | unsigned long flags; | ||
314 | |||
315 | spin_lock_irqsave(&cross_call_lock, flags); | ||
316 | |||
317 | { | ||
318 | /* If you make changes here, make sure gcc generates proper code... */ | ||
319 | register smpfunc_t f asm("i0") = func; | ||
320 | register unsigned long a1 asm("i1") = arg1; | ||
321 | register unsigned long a2 asm("i2") = arg2; | ||
322 | register unsigned long a3 asm("i3") = arg3; | ||
323 | register unsigned long a4 asm("i4") = arg4; | ||
324 | register unsigned long a5 asm("i5") = 0; | ||
325 | |||
326 | __asm__ __volatile__("std %0, [%6]\n\t" | ||
327 | "std %2, [%6 + 8]\n\t" | ||
328 | "std %4, [%6 + 16]\n\t" : : | ||
329 | "r"(f), "r"(a1), "r"(a2), "r"(a3), | ||
330 | "r"(a4), "r"(a5), | ||
331 | "r"(&ccall_info.func)); | ||
332 | } | ||
333 | |||
334 | /* Init receive/complete mapping, plus fire the IPI's off. */ | ||
335 | { | ||
336 | register int i; | ||
337 | |||
338 | cpu_clear(smp_processor_id(), mask); | ||
339 | cpus_and(mask, cpu_online_map, mask); | ||
340 | for (i = 0; i <= high; i++) { | ||
341 | if (cpu_isset(i, mask)) { | ||
342 | ccall_info.processors_in[i] = 0; | ||
343 | ccall_info.processors_out[i] = 0; | ||
344 | set_cpu_int(i, LEON3_IRQ_CROSS_CALL); | ||
345 | |||
346 | } | ||
347 | } | ||
348 | } | ||
349 | |||
350 | { | ||
351 | register int i; | ||
352 | |||
353 | i = 0; | ||
354 | do { | ||
355 | if (!cpu_isset(i, mask)) | ||
356 | continue; | ||
357 | |||
358 | while (!ccall_info.processors_in[i]) | ||
359 | barrier(); | ||
360 | } while (++i <= high); | ||
361 | |||
362 | i = 0; | ||
363 | do { | ||
364 | if (!cpu_isset(i, mask)) | ||
365 | continue; | ||
366 | |||
367 | while (!ccall_info.processors_out[i]) | ||
368 | barrier(); | ||
369 | } while (++i <= high); | ||
370 | } | ||
371 | |||
372 | spin_unlock_irqrestore(&cross_call_lock, flags); | ||
373 | } | ||
374 | } | ||
375 | |||
376 | /* Running cross calls. */ | ||
377 | void leon_cross_call_irq(void) | ||
378 | { | ||
379 | int i = smp_processor_id(); | ||
380 | |||
381 | ccall_info.processors_in[i] = 1; | ||
382 | ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, | ||
383 | ccall_info.arg4, ccall_info.arg5); | ||
384 | ccall_info.processors_out[i] = 1; | ||
385 | } | ||
386 | |||
387 | void leon_percpu_timer_interrupt(struct pt_regs *regs) | ||
388 | { | ||
389 | struct pt_regs *old_regs; | ||
390 | int cpu = smp_processor_id(); | ||
391 | |||
392 | old_regs = set_irq_regs(regs); | ||
393 | |||
394 | leon_clear_profile_irq(cpu); | ||
395 | |||
396 | profile_tick(CPU_PROFILING); | ||
397 | |||
398 | if (!--prof_counter(cpu)) { | ||
399 | int user = user_mode(regs); | ||
400 | |||
401 | irq_enter(); | ||
402 | update_process_times(user); | ||
403 | irq_exit(); | ||
404 | |||
405 | prof_counter(cpu) = prof_multiplier(cpu); | ||
406 | } | ||
407 | set_irq_regs(old_regs); | ||
408 | } | ||
409 | |||
410 | static void __init smp_setup_percpu_timer(void) | ||
411 | { | ||
412 | int cpu = smp_processor_id(); | ||
413 | |||
414 | prof_counter(cpu) = prof_multiplier(cpu) = 1; | ||
415 | } | ||
416 | |||
417 | void __init leon_blackbox_id(unsigned *addr) | ||
418 | { | ||
419 | int rd = *addr & 0x3e000000; | ||
420 | int rs1 = rd >> 11; | ||
421 | |||
422 | /* patch places where ___b_hard_smp_processor_id appears */ | ||
423 | addr[0] = 0x81444000 | rd; /* rd %asr17, reg */ | ||
424 | addr[1] = 0x8130201c | rd | rs1; /* srl reg, 0x1c, reg */ | ||
425 | addr[2] = 0x01000000; /* nop */ | ||
426 | } | ||
427 | |||
428 | void __init leon_blackbox_current(unsigned *addr) | ||
429 | { | ||
430 | int rd = *addr & 0x3e000000; | ||
431 | int rs1 = rd >> 11; | ||
432 | |||
433 | /* patch LOAD_CURRENT macro where ___b_load_current appears */ | ||
434 | addr[0] = 0x81444000 | rd; /* rd %asr17, reg */ | ||
435 | addr[2] = 0x8130201c | rd | rs1; /* srl reg, 0x1c, reg */ | ||
436 | addr[4] = 0x81282002 | rd | rs1; /* sll reg, 0x2, reg */ | ||
437 | |||
438 | } | ||
439 | |||
440 | /* | ||
441 | * CPU idle callback function | ||
442 | * See .../arch/sparc/kernel/process.c | ||
443 | */ | ||
444 | void pmc_leon_idle(void) | ||
445 | { | ||
446 | __asm__ volatile ("mov %g0, %asr19"); | ||
447 | } | ||
448 | |||
449 | void __init leon_init_smp(void) | ||
450 | { | ||
451 | /* Patch ipi15 trap table */ | ||
452 | t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_leon - linux_trap_ipi15_sun4m); | ||
453 | |||
454 | BTFIXUPSET_BLACKBOX(hard_smp_processor_id, leon_blackbox_id); | ||
455 | BTFIXUPSET_BLACKBOX(load_current, leon_blackbox_current); | ||
456 | BTFIXUPSET_CALL(smp_cross_call, leon_cross_call, BTFIXUPCALL_NORM); | ||
457 | BTFIXUPSET_CALL(__hard_smp_processor_id, __leon_processor_id, | ||
458 | BTFIXUPCALL_NORM); | ||
459 | |||
460 | #ifndef PMC_NO_IDLE | ||
461 | /* Assign power management IDLE handler */ | ||
462 | pm_idle = pmc_leon_idle; | ||
463 | printk(KERN_INFO "leon: power management initialized\n"); | ||
464 | #endif | ||
465 | |||
466 | } | ||
467 | |||
468 | #endif /* CONFIG_SPARC_LEON */ | ||
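leon_blackbox_id() and leon_blackbox_current() in the leon_smp.c hunk above patch call sites at boot by splicing the destination-register field of the original instruction into pre-encoded SPARC opcodes. A worked example, assuming the patched instruction's destination is %g1, so rd = 1 << 25 = 0x02000000 and rs1 = rd >> 11 = 0x00004000:

	addr[0] = 0x81444000 | 0x02000000              = 0x83444000	/* rd  %asr17, %g1    */
	addr[1] = 0x8130201c | 0x02000000 | 0x00004000 = 0x8330601c	/* srl %g1, 0x1c, %g1 */
	addr[2] = 0x01000000                                        	/* nop                */

This leaves %g1 holding the CPU index from %asr17 bits 31:28, the same value the head_32.S boot code computes.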
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c index f1be37a7b123..e1b0541feb19 100644 --- a/arch/sparc/kernel/pci_msi.c +++ b/arch/sparc/kernel/pci_msi.c | |||
@@ -112,7 +112,7 @@ static void free_msi(struct pci_pbm_info *pbm, int msi_num) | |||
112 | } | 112 | } |
113 | 113 | ||
114 | static struct irq_chip msi_irq = { | 114 | static struct irq_chip msi_irq = { |
115 | .typename = "PCI-MSI", | 115 | .name = "PCI-MSI", |
116 | .mask = mask_msi_irq, | 116 | .mask = mask_msi_irq, |
117 | .unmask = unmask_msi_irq, | 117 | .unmask = unmask_msi_irq, |
118 | .enable = unmask_msi_irq, | 118 | .enable = unmask_msi_irq, |
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index 21180339cb09..a2a79e76344f 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <asm/setup.h> | 46 | #include <asm/setup.h> |
47 | #include <asm/mmu.h> | 47 | #include <asm/mmu.h> |
48 | #include <asm/ns87303.h> | 48 | #include <asm/ns87303.h> |
49 | #include <asm/btext.h> | ||
49 | 50 | ||
50 | #ifdef CONFIG_IP_PNP | 51 | #ifdef CONFIG_IP_PNP |
51 | #include <net/ipconfig.h> | 52 | #include <net/ipconfig.h> |
@@ -286,7 +287,10 @@ void __init setup_arch(char **cmdline_p) | |||
286 | parse_early_param(); | 287 | parse_early_param(); |
287 | 288 | ||
288 | boot_flags_init(*cmdline_p); | 289 | boot_flags_init(*cmdline_p); |
289 | register_console(&prom_early_console); | 290 | #ifdef CONFIG_EARLYFB |
291 | if (btext_find_display()) | ||
292 | #endif | ||
293 | register_console(&prom_early_console); | ||
290 | 294 | ||
291 | if (tlb_type == hypervisor) | 295 | if (tlb_type == hypervisor) |
292 | printk("ARCH: SUN4V\n"); | 296 | printk("ARCH: SUN4V\n"); |
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c index 132d81fb2616..91c10fb70858 100644 --- a/arch/sparc/kernel/smp_32.c +++ b/arch/sparc/kernel/smp_32.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <asm/cacheflush.h> | 32 | #include <asm/cacheflush.h> |
33 | #include <asm/tlbflush.h> | 33 | #include <asm/tlbflush.h> |
34 | #include <asm/cpudata.h> | 34 | #include <asm/cpudata.h> |
35 | #include <asm/leon.h> | ||
35 | 36 | ||
36 | #include "irq.h" | 37 | #include "irq.h" |
37 | 38 | ||
@@ -96,6 +97,9 @@ void __init smp_cpus_done(unsigned int max_cpus) | |||
96 | case sun4d: | 97 | case sun4d: |
97 | smp4d_smp_done(); | 98 | smp4d_smp_done(); |
98 | break; | 99 | break; |
100 | case sparc_leon: | ||
101 | leon_smp_done(); | ||
102 | break; | ||
99 | case sun4e: | 103 | case sun4e: |
100 | printk("SUN4E\n"); | 104 | printk("SUN4E\n"); |
101 | BUG(); | 105 | BUG(); |
@@ -306,6 +310,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
306 | case sun4d: | 310 | case sun4d: |
307 | smp4d_boot_cpus(); | 311 | smp4d_boot_cpus(); |
308 | break; | 312 | break; |
313 | case sparc_leon: | ||
314 | leon_boot_cpus(); | ||
315 | break; | ||
309 | case sun4e: | 316 | case sun4e: |
310 | printk("SUN4E\n"); | 317 | printk("SUN4E\n"); |
311 | BUG(); | 318 | BUG(); |
@@ -376,6 +383,9 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
376 | case sun4d: | 383 | case sun4d: |
377 | ret = smp4d_boot_one_cpu(cpu); | 384 | ret = smp4d_boot_one_cpu(cpu); |
378 | break; | 385 | break; |
386 | case sparc_leon: | ||
387 | ret = leon_boot_one_cpu(cpu); | ||
388 | break; | ||
379 | case sun4e: | 389 | case sun4e: |
380 | printk("SUN4E\n"); | 390 | printk("SUN4E\n"); |
381 | BUG(); | 391 | BUG(); |
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c index 04e28b2671c8..00abe87e5b51 100644 --- a/arch/sparc/kernel/sys_sparc32.c +++ b/arch/sparc/kernel/sys_sparc32.c | |||
@@ -26,11 +26,6 @@ | |||
26 | #include <linux/nfs_fs.h> | 26 | #include <linux/nfs_fs.h> |
27 | #include <linux/quota.h> | 27 | #include <linux/quota.h> |
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/sunrpc/svc.h> | ||
30 | #include <linux/nfsd/nfsd.h> | ||
31 | #include <linux/nfsd/cache.h> | ||
32 | #include <linux/nfsd/xdr.h> | ||
33 | #include <linux/nfsd/syscall.h> | ||
34 | #include <linux/poll.h> | 29 | #include <linux/poll.h> |
35 | #include <linux/personality.h> | 30 | #include <linux/personality.h> |
36 | #include <linux/stat.h> | 31 | #include <linux/stat.h> |
@@ -591,63 +586,6 @@ out: | |||
591 | return ret; | 586 | return ret; |
592 | } | 587 | } |
593 | 588 | ||
594 | struct __sysctl_args32 { | ||
595 | u32 name; | ||
596 | int nlen; | ||
597 | u32 oldval; | ||
598 | u32 oldlenp; | ||
599 | u32 newval; | ||
600 | u32 newlen; | ||
601 | u32 __unused[4]; | ||
602 | }; | ||
603 | |||
604 | asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args) | ||
605 | { | ||
606 | #ifndef CONFIG_SYSCTL_SYSCALL | ||
607 | return -ENOSYS; | ||
608 | #else | ||
609 | struct __sysctl_args32 tmp; | ||
610 | int error; | ||
611 | size_t oldlen, __user *oldlenp = NULL; | ||
612 | unsigned long addr = (((unsigned long)&args->__unused[0]) + 7UL) & ~7UL; | ||
613 | |||
614 | if (copy_from_user(&tmp, args, sizeof(tmp))) | ||
615 | return -EFAULT; | ||
616 | |||
617 | if (tmp.oldval && tmp.oldlenp) { | ||
618 | /* Duh, this is ugly and might not work if sysctl_args | ||
619 | is in read-only memory, but do_sysctl does indirectly | ||
620 | a lot of uaccess in both directions and we'd have to | ||
621 | basically copy the whole sysctl.c here, and | ||
622 | glibc's __sysctl uses rw memory for the structure | ||
623 | anyway. */ | ||
624 | if (get_user(oldlen, (u32 __user *)(unsigned long)tmp.oldlenp) || | ||
625 | put_user(oldlen, (size_t __user *)addr)) | ||
626 | return -EFAULT; | ||
627 | oldlenp = (size_t __user *)addr; | ||
628 | } | ||
629 | |||
630 | lock_kernel(); | ||
631 | error = do_sysctl((int __user *)(unsigned long) tmp.name, | ||
632 | tmp.nlen, | ||
633 | (void __user *)(unsigned long) tmp.oldval, | ||
634 | oldlenp, | ||
635 | (void __user *)(unsigned long) tmp.newval, | ||
636 | tmp.newlen); | ||
637 | unlock_kernel(); | ||
638 | if (oldlenp) { | ||
639 | if (!error) { | ||
640 | if (get_user(oldlen, (size_t __user *)addr) || | ||
641 | put_user(oldlen, (u32 __user *)(unsigned long) tmp.oldlenp)) | ||
642 | error = -EFAULT; | ||
643 | } | ||
644 | if (copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused))) | ||
645 | error = -EFAULT; | ||
646 | } | ||
647 | return error; | ||
648 | #endif | ||
649 | } | ||
650 | |||
651 | long sys32_lookup_dcookie(unsigned long cookie_high, | 589 | long sys32_lookup_dcookie(unsigned long cookie_high, |
652 | unsigned long cookie_low, | 590 | unsigned long cookie_low, |
653 | char __user *buf, size_t len) | 591 | char __user *buf, size_t len) |
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index 0f1658d37490..ceb1530f8aa6 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S | |||
@@ -82,5 +82,5 @@ sys_call_table: | |||
82 | /*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate | 82 | /*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate |
83 | /*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 | 83 | /*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 |
84 | /*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv | 84 | /*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv |
85 | /*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open | 85 | /*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg |
86 | 86 | ||
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index 009825f6e73c..cc8e7862e95a 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S | |||
@@ -68,7 +68,7 @@ sys_call_table32: | |||
68 | .word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys32_mlockall | 68 | .word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys32_mlockall |
69 | /*240*/ .word sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler | 69 | /*240*/ .word sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler |
70 | .word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep | 70 | .word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep |
71 | /*250*/ .word sys32_mremap, sys32_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl | 71 | /*250*/ .word sys32_mremap, compat_sys_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl |
72 | .word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep | 72 | .word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep |
73 | /*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun | 73 | /*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun |
74 | .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy | 74 | .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy |
@@ -83,7 +83,7 @@ sys_call_table32: | |||
83 | /*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate | 83 | /*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate |
84 | .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 | 84 | .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 |
85 | /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv | 85 | /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv |
86 | .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open | 86 | .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg |
87 | 87 | ||
88 | #endif /* CONFIG_COMPAT */ | 88 | #endif /* CONFIG_COMPAT */ |
89 | 89 | ||
@@ -158,4 +158,4 @@ sys_call_table: | |||
158 | /*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate | 158 | /*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate |
159 | .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 | 159 | .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 |
160 | /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv | 160 | /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv |
161 | .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open | 161 | .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg |
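Editor's note (illustration only, not part of the patch): the table entries above, together with the unistd changes further down, wire sys_recvmmsg into the syscall tables. A hedged userspace sketch of batching datagram reception through the new call, invoking it via syscall() since no libc wrapper exists yet; the local mmsghdr layout mirrors the kernel's, and the number 337 is the x86-32 value from this series (other architectures use their own numbers):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef __NR_recvmmsg
#define __NR_recvmmsg 337		/* x86-32 value; arch-specific */
#endif

struct mmsghdr_sketch {			/* mirrors the kernel's struct mmsghdr */
	struct msghdr msg_hdr;		/* per-message header, as for recvmsg() */
	unsigned int msg_len;		/* bytes received, filled in by the kernel */
};

/* Receive up to 8 datagrams from a bound UDP socket in one system call. */
static int recv_batch(int fd)
{
	static char bufs[8][1500];
	struct iovec iov[8];
	struct mmsghdr_sketch msgs[8];
	int i, n;

	memset(msgs, 0, sizeof(msgs));
	for (i = 0; i < 8; i++) {
		iov[i].iov_base = bufs[i];
		iov[i].iov_len  = sizeof(bufs[i]);
		msgs[i].msg_hdr.msg_iov    = &iov[i];
		msgs[i].msg_hdr.msg_iovlen = 1;
	}
	/* flags = 0, timeout = NULL: a plain batched recvmsg() */
	n = syscall(__NR_recvmmsg, fd, msgs, 8, 0, NULL);
	for (i = 0; i < n; i++)
		printf("datagram %d: %u bytes\n", i, msgs[i].msg_len);
	return n;
}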
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c index 614ac7b4a9dd..5b2f595fe65b 100644 --- a/arch/sparc/kernel/time_32.c +++ b/arch/sparc/kernel/time_32.c | |||
@@ -210,9 +210,6 @@ static void __init sbus_time_init(void) | |||
210 | btfixup(); | 210 | btfixup(); |
211 | 211 | ||
212 | sparc_init_timers(timer_interrupt); | 212 | sparc_init_timers(timer_interrupt); |
213 | |||
214 | /* Now that OBP ticker has been silenced, it is safe to enable IRQ. */ | ||
215 | local_irq_enable(); | ||
216 | } | 213 | } |
217 | 214 | ||
218 | void __init time_init(void) | 215 | void __init time_init(void) |
diff --git a/arch/sparc/kernel/trampoline_32.S b/arch/sparc/kernel/trampoline_32.S index 5e235c52d667..691f484e03b3 100644 --- a/arch/sparc/kernel/trampoline_32.S +++ b/arch/sparc/kernel/trampoline_32.S | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <asm/contregs.h> | 15 | #include <asm/contregs.h> |
16 | #include <asm/thread_info.h> | 16 | #include <asm/thread_info.h> |
17 | 17 | ||
18 | .globl sun4m_cpu_startup, __smp4m_processor_id | 18 | .globl sun4m_cpu_startup, __smp4m_processor_id, __leon_processor_id |
19 | .globl sun4d_cpu_startup, __smp4d_processor_id | 19 | .globl sun4d_cpu_startup, __smp4d_processor_id |
20 | 20 | ||
21 | __CPUINIT | 21 | __CPUINIT |
@@ -106,6 +106,12 @@ __smp4d_processor_id: | |||
106 | retl | 106 | retl |
107 | mov %g1, %o7 | 107 | mov %g1, %o7 |
108 | 108 | ||
109 | __leon_processor_id: | ||
110 | rd %asr17,%g2 | ||
111 | srl %g2,28,%g2 | ||
112 | retl | ||
113 | mov %g1, %o7 | ||
114 | |||
109 | /* CPUID in bootbus can be found at PA 0xff0140000 */ | 115 | /* CPUID in bootbus can be found at PA 0xff0140000 */ |
110 | #define SUN4D_BOOTBUS_CPUID 0xf0140000 | 116 | #define SUN4D_BOOTBUS_CPUID 0xf0140000 |
111 | 117 | ||
@@ -160,3 +166,64 @@ sun4d_cpu_startup: | |||
160 | nop | 166 | nop |
161 | 167 | ||
162 | b,a smp_do_cpu_idle | 168 | b,a smp_do_cpu_idle |
169 | |||
170 | #ifdef CONFIG_SPARC_LEON | ||
171 | |||
172 | __CPUINIT | ||
173 | .align 4 | ||
174 | .global leon_smp_cpu_startup, smp_penguin_ctable | ||
175 | |||
176 | leon_smp_cpu_startup: | ||
177 | |||
178 | set smp_penguin_ctable,%g1 | ||
179 | ld [%g1+4],%g1 | ||
180 | srl %g1,4,%g1 | ||
181 | set 0x00000100,%g5 /* SRMMU_CTXTBL_PTR */ | ||
182 | sta %g1, [%g5] ASI_M_MMUREGS | ||
183 | |||
184 | /* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */ | ||
185 | set (PSR_PIL | PSR_S | PSR_PS), %g1 | ||
186 | wr %g1, 0x0, %psr ! traps off though | ||
187 | WRITE_PAUSE | ||
188 | |||
189 | /* Our %wim is one behind CWP */ | ||
190 | mov 2, %g1 | ||
191 | wr %g1, 0x0, %wim | ||
192 | WRITE_PAUSE | ||
193 | |||
194 | /* Set tbr - we use just one trap table. */ | ||
195 | set trapbase, %g1 | ||
196 | wr %g1, 0x0, %tbr | ||
197 | WRITE_PAUSE | ||
198 | |||
199 | /* Get our CPU id */ | ||
200 | rd %asr17,%g3 | ||
201 | |||
202 | /* Give ourselves a stack and curptr. */ | ||
203 | set current_set, %g5 | ||
204 | srl %g3, 28, %g4 | ||
205 | sll %g4, 2, %g4 | ||
206 | ld [%g5 + %g4], %g6 | ||
207 | |||
208 | sethi %hi(THREAD_SIZE - STACKFRAME_SZ), %sp | ||
209 | or %sp, %lo(THREAD_SIZE - STACKFRAME_SZ), %sp | ||
210 | add %g6, %sp, %sp | ||
211 | |||
212 | /* Turn on traps (PSR_ET). */ | ||
213 | rd %psr, %g1 | ||
214 | wr %g1, PSR_ET, %psr ! traps on | ||
215 | WRITE_PAUSE | ||
216 | |||
217 | /* Init our caches, etc. */ | ||
218 | set poke_srmmu, %g5 | ||
219 | ld [%g5], %g5 | ||
220 | call %g5 | ||
221 | nop | ||
222 | |||
223 | /* Start this processor. */ | ||
224 | call leon_callin | ||
225 | nop | ||
226 | |||
227 | b,a smp_do_cpu_idle | ||
228 | |||
229 | #endif | ||
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index 509b1ffeba66..367321a030dd 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c | |||
@@ -1990,7 +1990,7 @@ void __init poke_leonsparc(void) | |||
1990 | void __init init_leon(void) | 1990 | void __init init_leon(void) |
1991 | { | 1991 | { |
1992 | 1992 | ||
1993 | srmmu_name = "Leon"; | 1993 | srmmu_name = "LEON"; |
1994 | 1994 | ||
1995 | BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all, | 1995 | BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all, |
1996 | BTFIXUPCALL_NORM); | 1996 | BTFIXUPCALL_NORM); |
@@ -2037,8 +2037,6 @@ static void __init get_srmmu_type(void) | |||
2037 | 2037 | ||
2038 | /* First, check for sparc-leon. */ | 2038 | /* First, check for sparc-leon. */ |
2039 | if (sparc_cpu_model == sparc_leon) { | 2039 | if (sparc_cpu_model == sparc_leon) { |
2040 | psr_typ = 0xf; /* hardcoded ids for older models/simulators */ | ||
2041 | psr_vers = 2; | ||
2042 | init_leon(); | 2040 | init_leon(); |
2043 | return; | 2041 | return; |
2044 | } | 2042 | } |
@@ -2301,7 +2299,8 @@ void __init ld_mmu_srmmu(void) | |||
2301 | BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM); | 2299 | BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM); |
2302 | BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM); | 2300 | BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM); |
2303 | BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM); | 2301 | BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM); |
2304 | if (sparc_cpu_model != sun4d) { | 2302 | if (sparc_cpu_model != sun4d && |
2303 | sparc_cpu_model != sparc_leon) { | ||
2305 | BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM); | 2304 | BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM); |
2306 | BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM); | 2305 | BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM); |
2307 | BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM); | 2306 | BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM); |
@@ -2330,6 +2329,8 @@ void __init ld_mmu_srmmu(void) | |||
2330 | #ifdef CONFIG_SMP | 2329 | #ifdef CONFIG_SMP |
2331 | if (sparc_cpu_model == sun4d) | 2330 | if (sparc_cpu_model == sun4d) |
2332 | sun4d_init_smp(); | 2331 | sun4d_init_smp(); |
2332 | else if (sparc_cpu_model == sparc_leon) | ||
2333 | leon_init_smp(); | ||
2333 | else | 2334 | else |
2334 | sun4m_init_smp(); | 2335 | sun4m_init_smp(); |
2335 | #endif | 2336 | #endif |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 178084b4377c..1b2182b4d5c8 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -51,6 +51,7 @@ config X86 | |||
51 | select HAVE_KERNEL_LZMA | 51 | select HAVE_KERNEL_LZMA |
52 | select HAVE_HW_BREAKPOINT | 52 | select HAVE_HW_BREAKPOINT |
53 | select HAVE_ARCH_KMEMCHECK | 53 | select HAVE_ARCH_KMEMCHECK |
54 | select HAVE_USER_RETURN_NOTIFIER | ||
54 | 55 | ||
55 | config OUTPUT_FORMAT | 56 | config OUTPUT_FORMAT |
56 | string | 57 | string |
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 581b0568fe19..4eefdca9832b 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S | |||
@@ -653,7 +653,7 @@ ia32_sys_call_table: | |||
653 | .quad compat_sys_writev | 653 | .quad compat_sys_writev |
654 | .quad sys_getsid | 654 | .quad sys_getsid |
655 | .quad sys_fdatasync | 655 | .quad sys_fdatasync |
656 | .quad sys32_sysctl /* sysctl */ | 656 | .quad compat_sys_sysctl /* sysctl */ |
657 | .quad sys_mlock /* 150 */ | 657 | .quad sys_mlock /* 150 */ |
658 | .quad sys_munlock | 658 | .quad sys_munlock |
659 | .quad sys_mlockall | 659 | .quad sys_mlockall |
@@ -841,4 +841,5 @@ ia32_sys_call_table: | |||
841 | .quad compat_sys_pwritev | 841 | .quad compat_sys_pwritev |
842 | .quad compat_sys_rt_tgsigqueueinfo /* 335 */ | 842 | .quad compat_sys_rt_tgsigqueueinfo /* 335 */ |
843 | .quad sys_perf_event_open | 843 | .quad sys_perf_event_open |
844 | .quad compat_sys_recvmmsg | ||
844 | ia32_syscall_end: | 845 | ia32_syscall_end: |
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c index 9f5527198825..df82c0e48ded 100644 --- a/arch/x86/ia32/sys_ia32.c +++ b/arch/x86/ia32/sys_ia32.c | |||
@@ -434,62 +434,6 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig, | |||
434 | return ret; | 434 | return ret; |
435 | } | 435 | } |
436 | 436 | ||
437 | #ifdef CONFIG_SYSCTL_SYSCALL | ||
438 | struct sysctl_ia32 { | ||
439 | unsigned int name; | ||
440 | int nlen; | ||
441 | unsigned int oldval; | ||
442 | unsigned int oldlenp; | ||
443 | unsigned int newval; | ||
444 | unsigned int newlen; | ||
445 | unsigned int __unused[4]; | ||
446 | }; | ||
447 | |||
448 | |||
449 | asmlinkage long sys32_sysctl(struct sysctl_ia32 __user *args32) | ||
450 | { | ||
451 | struct sysctl_ia32 a32; | ||
452 | mm_segment_t old_fs = get_fs(); | ||
453 | void __user *oldvalp, *newvalp; | ||
454 | size_t oldlen; | ||
455 | int __user *namep; | ||
456 | long ret; | ||
457 | |||
458 | if (copy_from_user(&a32, args32, sizeof(a32))) | ||
459 | return -EFAULT; | ||
460 | |||
461 | /* | ||
462 | * We need to pre-validate these because we have to disable | ||
463 | * address checking before calling do_sysctl() because of | ||
464 | * OLDLEN but we can't run the risk of the user specifying bad | ||
465 | * addresses here. Well, since we're dealing with 32 bit | ||
466 | * addresses, we KNOW that access_ok() will always succeed, so | ||
467 | * this is an expensive NOP, but so what... | ||
468 | */ | ||
469 | namep = compat_ptr(a32.name); | ||
470 | oldvalp = compat_ptr(a32.oldval); | ||
471 | newvalp = compat_ptr(a32.newval); | ||
472 | |||
473 | if ((oldvalp && get_user(oldlen, (int __user *)compat_ptr(a32.oldlenp))) | ||
474 | || !access_ok(VERIFY_WRITE, namep, 0) | ||
475 | || !access_ok(VERIFY_WRITE, oldvalp, 0) | ||
476 | || !access_ok(VERIFY_WRITE, newvalp, 0)) | ||
477 | return -EFAULT; | ||
478 | |||
479 | set_fs(KERNEL_DS); | ||
480 | lock_kernel(); | ||
481 | ret = do_sysctl(namep, a32.nlen, oldvalp, (size_t __user *)&oldlen, | ||
482 | newvalp, (size_t) a32.newlen); | ||
483 | unlock_kernel(); | ||
484 | set_fs(old_fs); | ||
485 | |||
486 | if (oldvalp && put_user(oldlen, (int __user *)compat_ptr(a32.oldlenp))) | ||
487 | return -EFAULT; | ||
488 | |||
489 | return ret; | ||
490 | } | ||
491 | #endif | ||
492 | |||
493 | /* warning: next two assume little endian */ | 437 | /* warning: next two assume little endian */ |
494 | asmlinkage long sys32_pread(unsigned int fd, char __user *ubuf, u32 count, | 438 | asmlinkage long sys32_pread(unsigned int fd, char __user *ubuf, u32 count, |
495 | u32 poslo, u32 poshi) | 439 | u32 poslo, u32 poshi) |
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h index 4a5fe914dc59..950df434763f 100644 --- a/arch/x86/include/asm/kvm.h +++ b/arch/x86/include/asm/kvm.h | |||
@@ -19,6 +19,8 @@ | |||
19 | #define __KVM_HAVE_MSIX | 19 | #define __KVM_HAVE_MSIX |
20 | #define __KVM_HAVE_MCE | 20 | #define __KVM_HAVE_MCE |
21 | #define __KVM_HAVE_PIT_STATE2 | 21 | #define __KVM_HAVE_PIT_STATE2 |
22 | #define __KVM_HAVE_XEN_HVM | ||
23 | #define __KVM_HAVE_VCPU_EVENTS | ||
22 | 24 | ||
23 | /* Architectural interrupt line count. */ | 25 | /* Architectural interrupt line count. */ |
24 | #define KVM_NR_INTERRUPTS 256 | 26 | #define KVM_NR_INTERRUPTS 256 |
@@ -79,6 +81,7 @@ struct kvm_ioapic_state { | |||
79 | #define KVM_IRQCHIP_PIC_MASTER 0 | 81 | #define KVM_IRQCHIP_PIC_MASTER 0 |
80 | #define KVM_IRQCHIP_PIC_SLAVE 1 | 82 | #define KVM_IRQCHIP_PIC_SLAVE 1 |
81 | #define KVM_IRQCHIP_IOAPIC 2 | 83 | #define KVM_IRQCHIP_IOAPIC 2 |
84 | #define KVM_NR_IRQCHIPS 3 | ||
82 | 85 | ||
83 | /* for KVM_GET_REGS and KVM_SET_REGS */ | 86 | /* for KVM_GET_REGS and KVM_SET_REGS */ |
84 | struct kvm_regs { | 87 | struct kvm_regs { |
@@ -250,4 +253,31 @@ struct kvm_reinject_control { | |||
250 | __u8 pit_reinject; | 253 | __u8 pit_reinject; |
251 | __u8 reserved[31]; | 254 | __u8 reserved[31]; |
252 | }; | 255 | }; |
256 | |||
257 | /* for KVM_GET/SET_VCPU_EVENTS */ | ||
258 | struct kvm_vcpu_events { | ||
259 | struct { | ||
260 | __u8 injected; | ||
261 | __u8 nr; | ||
262 | __u8 has_error_code; | ||
263 | __u8 pad; | ||
264 | __u32 error_code; | ||
265 | } exception; | ||
266 | struct { | ||
267 | __u8 injected; | ||
268 | __u8 nr; | ||
269 | __u8 soft; | ||
270 | __u8 pad; | ||
271 | } interrupt; | ||
272 | struct { | ||
273 | __u8 injected; | ||
274 | __u8 pending; | ||
275 | __u8 masked; | ||
276 | __u8 pad; | ||
277 | } nmi; | ||
278 | __u32 sipi_vector; | ||
279 | __u32 flags; | ||
280 | __u32 reserved[10]; | ||
281 | }; | ||
282 | |||
253 | #endif /* _ASM_X86_KVM_H */ | 283 | #endif /* _ASM_X86_KVM_H */ |
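Editor's note (illustration only, not part of the patch): struct kvm_vcpu_events above is the payload of the new vcpu-events interface advertised by __KVM_HAVE_VCPU_EVENTS. A hedged userspace sketch of reading it, assuming an already-created vcpu fd and that linux/kvm.h carries the matching KVM_GET_VCPU_EVENTS ioctl definition:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Dump the pending exception/interrupt/NMI state of one vcpu. */
static int dump_vcpu_events(int vcpu_fd)
{
	struct kvm_vcpu_events ev;

	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &ev) < 0) {
		perror("KVM_GET_VCPU_EVENTS");
		return -1;
	}
	printf("exception: injected=%u nr=%u error_code=%u\n",
	       ev.exception.injected, ev.exception.nr, ev.exception.error_code);
	printf("interrupt: injected=%u nr=%u soft=%u\n",
	       ev.interrupt.injected, ev.interrupt.nr, ev.interrupt.soft);
	printf("nmi:       injected=%u pending=%u masked=%u\n",
	       ev.nmi.injected, ev.nmi.pending, ev.nmi.masked);
	printf("sipi_vector=%u flags=0x%x\n", ev.sipi_vector, ev.flags);
	return 0;
}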
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index b7ed2c423116..7c18e1230f54 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h | |||
@@ -129,7 +129,7 @@ struct decode_cache { | |||
129 | u8 seg_override; | 129 | u8 seg_override; |
130 | unsigned int d; | 130 | unsigned int d; |
131 | unsigned long regs[NR_VCPU_REGS]; | 131 | unsigned long regs[NR_VCPU_REGS]; |
132 | unsigned long eip; | 132 | unsigned long eip, eip_orig; |
133 | /* modrm */ | 133 | /* modrm */ |
134 | u8 modrm; | 134 | u8 modrm; |
135 | u8 modrm_mod; | 135 | u8 modrm_mod; |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index d83892226f73..4f865e8b8540 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -354,7 +354,6 @@ struct kvm_vcpu_arch { | |||
354 | unsigned int time_offset; | 354 | unsigned int time_offset; |
355 | struct page *time_page; | 355 | struct page *time_page; |
356 | 356 | ||
357 | bool singlestep; /* guest is single stepped by KVM */ | ||
358 | bool nmi_pending; | 357 | bool nmi_pending; |
359 | bool nmi_injected; | 358 | bool nmi_injected; |
360 | 359 | ||
@@ -371,6 +370,10 @@ struct kvm_vcpu_arch { | |||
371 | u64 mcg_status; | 370 | u64 mcg_status; |
372 | u64 mcg_ctl; | 371 | u64 mcg_ctl; |
373 | u64 *mce_banks; | 372 | u64 *mce_banks; |
373 | |||
374 | /* used for guest single stepping over the given code position */ | ||
375 | u16 singlestep_cs; | ||
376 | unsigned long singlestep_rip; | ||
374 | }; | 377 | }; |
375 | 378 | ||
376 | struct kvm_mem_alias { | 379 | struct kvm_mem_alias { |
@@ -397,7 +400,6 @@ struct kvm_arch{ | |||
397 | struct kvm_pic *vpic; | 400 | struct kvm_pic *vpic; |
398 | struct kvm_ioapic *vioapic; | 401 | struct kvm_ioapic *vioapic; |
399 | struct kvm_pit *vpit; | 402 | struct kvm_pit *vpit; |
400 | struct hlist_head irq_ack_notifier_list; | ||
401 | int vapics_in_nmi_mode; | 403 | int vapics_in_nmi_mode; |
402 | 404 | ||
403 | unsigned int tss_addr; | 405 | unsigned int tss_addr; |
@@ -410,8 +412,10 @@ struct kvm_arch{ | |||
410 | gpa_t ept_identity_map_addr; | 412 | gpa_t ept_identity_map_addr; |
411 | 413 | ||
412 | unsigned long irq_sources_bitmap; | 414 | unsigned long irq_sources_bitmap; |
413 | unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; | ||
414 | u64 vm_init_tsc; | 415 | u64 vm_init_tsc; |
416 | s64 kvmclock_offset; | ||
417 | |||
418 | struct kvm_xen_hvm_config xen_hvm_config; | ||
415 | }; | 419 | }; |
416 | 420 | ||
417 | struct kvm_vm_stat { | 421 | struct kvm_vm_stat { |
@@ -461,7 +465,7 @@ struct descriptor_table { | |||
461 | struct kvm_x86_ops { | 465 | struct kvm_x86_ops { |
462 | int (*cpu_has_kvm_support)(void); /* __init */ | 466 | int (*cpu_has_kvm_support)(void); /* __init */ |
463 | int (*disabled_by_bios)(void); /* __init */ | 467 | int (*disabled_by_bios)(void); /* __init */ |
464 | void (*hardware_enable)(void *dummy); /* __init */ | 468 | int (*hardware_enable)(void *dummy); |
465 | void (*hardware_disable)(void *dummy); | 469 | void (*hardware_disable)(void *dummy); |
466 | void (*check_processor_compatibility)(void *rtn); | 470 | void (*check_processor_compatibility)(void *rtn); |
467 | int (*hardware_setup)(void); /* __init */ | 471 | int (*hardware_setup)(void); /* __init */ |
@@ -477,8 +481,8 @@ struct kvm_x86_ops { | |||
477 | void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); | 481 | void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); |
478 | void (*vcpu_put)(struct kvm_vcpu *vcpu); | 482 | void (*vcpu_put)(struct kvm_vcpu *vcpu); |
479 | 483 | ||
480 | int (*set_guest_debug)(struct kvm_vcpu *vcpu, | 484 | void (*set_guest_debug)(struct kvm_vcpu *vcpu, |
481 | struct kvm_guest_debug *dbg); | 485 | struct kvm_guest_debug *dbg); |
482 | int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata); | 486 | int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata); |
483 | int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); | 487 | int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); |
484 | u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); | 488 | u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); |
@@ -506,8 +510,8 @@ struct kvm_x86_ops { | |||
506 | 510 | ||
507 | void (*tlb_flush)(struct kvm_vcpu *vcpu); | 511 | void (*tlb_flush)(struct kvm_vcpu *vcpu); |
508 | 512 | ||
509 | void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run); | 513 | void (*run)(struct kvm_vcpu *vcpu); |
510 | int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu); | 514 | int (*handle_exit)(struct kvm_vcpu *vcpu); |
511 | void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); | 515 | void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); |
512 | void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); | 516 | void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); |
513 | u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); | 517 | u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); |
@@ -519,6 +523,8 @@ struct kvm_x86_ops { | |||
519 | bool has_error_code, u32 error_code); | 523 | bool has_error_code, u32 error_code); |
520 | int (*interrupt_allowed)(struct kvm_vcpu *vcpu); | 524 | int (*interrupt_allowed)(struct kvm_vcpu *vcpu); |
521 | int (*nmi_allowed)(struct kvm_vcpu *vcpu); | 525 | int (*nmi_allowed)(struct kvm_vcpu *vcpu); |
526 | bool (*get_nmi_mask)(struct kvm_vcpu *vcpu); | ||
527 | void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked); | ||
522 | void (*enable_nmi_window)(struct kvm_vcpu *vcpu); | 528 | void (*enable_nmi_window)(struct kvm_vcpu *vcpu); |
523 | void (*enable_irq_window)(struct kvm_vcpu *vcpu); | 529 | void (*enable_irq_window)(struct kvm_vcpu *vcpu); |
524 | void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr); | 530 | void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr); |
@@ -568,7 +574,7 @@ enum emulation_result { | |||
568 | #define EMULTYPE_NO_DECODE (1 << 0) | 574 | #define EMULTYPE_NO_DECODE (1 << 0) |
569 | #define EMULTYPE_TRAP_UD (1 << 1) | 575 | #define EMULTYPE_TRAP_UD (1 << 1) |
570 | #define EMULTYPE_SKIP (1 << 2) | 576 | #define EMULTYPE_SKIP (1 << 2) |
571 | int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run, | 577 | int emulate_instruction(struct kvm_vcpu *vcpu, |
572 | unsigned long cr2, u16 error_code, int emulation_type); | 578 | unsigned long cr2, u16 error_code, int emulation_type); |
573 | void kvm_report_emulation_failure(struct kvm_vcpu *cvpu, const char *context); | 579 | void kvm_report_emulation_failure(struct kvm_vcpu *cvpu, const char *context); |
574 | void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address); | 580 | void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address); |
@@ -585,9 +591,9 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); | |||
585 | 591 | ||
586 | struct x86_emulate_ctxt; | 592 | struct x86_emulate_ctxt; |
587 | 593 | ||
588 | int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, | 594 | int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, |
589 | int size, unsigned port); | 595 | int size, unsigned port); |
590 | int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, | 596 | int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in, |
591 | int size, unsigned long count, int down, | 597 | int size, unsigned long count, int down, |
592 | gva_t address, int rep, unsigned port); | 598 | gva_t address, int rep, unsigned port); |
593 | void kvm_emulate_cpuid(struct kvm_vcpu *vcpu); | 599 | void kvm_emulate_cpuid(struct kvm_vcpu *vcpu); |
@@ -616,6 +622,9 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l); | |||
616 | int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); | 622 | int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); |
617 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data); | 623 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data); |
618 | 624 | ||
625 | unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu); | ||
626 | void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); | ||
627 | |||
619 | void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr); | 628 | void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr); |
620 | void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); | 629 | void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); |
621 | void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2, | 630 | void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2, |
@@ -802,4 +811,7 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); | |||
802 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); | 811 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); |
803 | int kvm_cpu_get_interrupt(struct kvm_vcpu *v); | 812 | int kvm_cpu_get_interrupt(struct kvm_vcpu *v); |
804 | 813 | ||
814 | void kvm_define_shared_msr(unsigned index, u32 msr); | ||
815 | void kvm_set_shared_msr(unsigned index, u64 val, u64 mask); | ||
816 | |||
805 | #endif /* _ASM_X86_KVM_HOST_H */ | 817 | #endif /* _ASM_X86_KVM_HOST_H */ |
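Editor's note (illustration only, not part of the patch): kvm_define_shared_msr() and kvm_set_shared_msr(), declared above, let KVM defer restoring host MSR values until the CPU actually returns to user mode, via the user-return notifiers selected elsewhere in this series. A hedged sketch of the intended call pattern; the slot number and the choice of MSR_STAR are illustrative, loosely modeled on the VMX code:

#include <asm/kvm_host.h>
#include <asm/msr-index.h>

#define SHARED_MSR_STAR_SLOT 0		/* illustrative slot index */

/* Once, at hardware-setup time: declare which host MSR this slot shadows. */
static void example_define_shared_msrs(void)
{
	kvm_define_shared_msr(SHARED_MSR_STAR_SLOT, MSR_STAR);
}

/* Per guest entry: load the guest value now; the host value is written
 * back lazily, just before the CPU next returns to user mode. */
static void example_load_guest_star(u64 guest_star)
{
	kvm_set_shared_msr(SHARED_MSR_STAR_SLOT, guest_star, -1ull);
}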
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 85574b7c1bc1..1fecb7e61130 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h | |||
@@ -57,7 +57,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area { | |||
57 | u16 intercept_dr_write; | 57 | u16 intercept_dr_write; |
58 | u32 intercept_exceptions; | 58 | u32 intercept_exceptions; |
59 | u64 intercept; | 59 | u64 intercept; |
60 | u8 reserved_1[44]; | 60 | u8 reserved_1[42]; |
61 | u16 pause_filter_count; | ||
61 | u64 iopm_base_pa; | 62 | u64 iopm_base_pa; |
62 | u64 msrpm_base_pa; | 63 | u64 msrpm_base_pa; |
63 | u64 tsc_offset; | 64 | u64 tsc_offset; |
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h index 72a6dcd1299b..9af9decb38c3 100644 --- a/arch/x86/include/asm/sys_ia32.h +++ b/arch/x86/include/asm/sys_ia32.h | |||
@@ -51,11 +51,6 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t, | |||
51 | asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *, compat_size_t); | 51 | asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *, compat_size_t); |
52 | asmlinkage long sys32_rt_sigqueueinfo(int, int, compat_siginfo_t __user *); | 52 | asmlinkage long sys32_rt_sigqueueinfo(int, int, compat_siginfo_t __user *); |
53 | 53 | ||
54 | #ifdef CONFIG_SYSCTL_SYSCALL | ||
55 | struct sysctl_ia32; | ||
56 | asmlinkage long sys32_sysctl(struct sysctl_ia32 __user *); | ||
57 | #endif | ||
58 | |||
59 | asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32); | 54 | asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32); |
60 | asmlinkage long sys32_pwrite(unsigned int, char __user *, u32, u32, u32); | 55 | asmlinkage long sys32_pwrite(unsigned int, char __user *, u32, u32, u32); |
61 | 56 | ||
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index d27d0a2fec4c..375c917c37d2 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h | |||
@@ -83,6 +83,7 @@ struct thread_info { | |||
83 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ | 83 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ |
84 | #define TIF_SECCOMP 8 /* secure computing */ | 84 | #define TIF_SECCOMP 8 /* secure computing */ |
85 | #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ | 85 | #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ |
86 | #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ | ||
86 | #define TIF_NOTSC 16 /* TSC is not accessible in userland */ | 87 | #define TIF_NOTSC 16 /* TSC is not accessible in userland */ |
87 | #define TIF_IA32 17 /* 32bit process */ | 88 | #define TIF_IA32 17 /* 32bit process */ |
88 | #define TIF_FORK 18 /* ret_from_fork */ | 89 | #define TIF_FORK 18 /* ret_from_fork */ |
@@ -107,6 +108,7 @@ struct thread_info { | |||
107 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) | 108 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) |
108 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) | 109 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) |
109 | #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) | 110 | #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) |
111 | #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) | ||
110 | #define _TIF_NOTSC (1 << TIF_NOTSC) | 112 | #define _TIF_NOTSC (1 << TIF_NOTSC) |
111 | #define _TIF_IA32 (1 << TIF_IA32) | 113 | #define _TIF_IA32 (1 << TIF_IA32) |
112 | #define _TIF_FORK (1 << TIF_FORK) | 114 | #define _TIF_FORK (1 << TIF_FORK) |
@@ -142,13 +144,14 @@ struct thread_info { | |||
142 | 144 | ||
143 | /* Only used for 64 bit */ | 145 | /* Only used for 64 bit */ |
144 | #define _TIF_DO_NOTIFY_MASK \ | 146 | #define _TIF_DO_NOTIFY_MASK \ |
145 | (_TIF_SIGPENDING|_TIF_MCE_NOTIFY|_TIF_NOTIFY_RESUME) | 147 | (_TIF_SIGPENDING | _TIF_MCE_NOTIFY | _TIF_NOTIFY_RESUME | \ |
148 | _TIF_USER_RETURN_NOTIFY) | ||
146 | 149 | ||
147 | /* flags to check in __switch_to() */ | 150 | /* flags to check in __switch_to() */ |
148 | #define _TIF_WORK_CTXSW \ | 151 | #define _TIF_WORK_CTXSW \ |
149 | (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_NOTSC) | 152 | (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_NOTSC) |
150 | 153 | ||
151 | #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW | 154 | #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) |
152 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) | 155 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) |
153 | 156 | ||
154 | #define PREEMPT_ACTIVE 0x10000000 | 157 | #define PREEMPT_ACTIVE 0x10000000 |
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h index 6fb3c209a7e3..3baf379fa840 100644 --- a/arch/x86/include/asm/unistd_32.h +++ b/arch/x86/include/asm/unistd_32.h | |||
@@ -342,10 +342,11 @@ | |||
342 | #define __NR_pwritev 334 | 342 | #define __NR_pwritev 334 |
343 | #define __NR_rt_tgsigqueueinfo 335 | 343 | #define __NR_rt_tgsigqueueinfo 335 |
344 | #define __NR_perf_event_open 336 | 344 | #define __NR_perf_event_open 336 |
345 | #define __NR_recvmmsg 337 | ||
345 | 346 | ||
346 | #ifdef __KERNEL__ | 347 | #ifdef __KERNEL__ |
347 | 348 | ||
348 | #define NR_syscalls 337 | 349 | #define NR_syscalls 338 |
349 | 350 | ||
350 | #define __ARCH_WANT_IPC_PARSE_VERSION | 351 | #define __ARCH_WANT_IPC_PARSE_VERSION |
351 | #define __ARCH_WANT_OLD_READDIR | 352 | #define __ARCH_WANT_OLD_READDIR |
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h index 8d3ad0adbc68..4843f7ba754a 100644 --- a/arch/x86/include/asm/unistd_64.h +++ b/arch/x86/include/asm/unistd_64.h | |||
@@ -661,6 +661,8 @@ __SYSCALL(__NR_pwritev, sys_pwritev) | |||
661 | __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) | 661 | __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) |
662 | #define __NR_perf_event_open 298 | 662 | #define __NR_perf_event_open 298 |
663 | __SYSCALL(__NR_perf_event_open, sys_perf_event_open) | 663 | __SYSCALL(__NR_perf_event_open, sys_perf_event_open) |
664 | #define __NR_recvmmsg 299 | ||
665 | __SYSCALL(__NR_recvmmsg, sys_recvmmsg) | ||
664 | 666 | ||
665 | #ifndef __NO_STUBS | 667 | #ifndef __NO_STUBS |
666 | #define __ARCH_WANT_OLD_READDIR | 668 | #define __ARCH_WANT_OLD_READDIR |
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 272514c2d456..2b4945419a84 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h | |||
@@ -56,6 +56,7 @@ | |||
56 | #define SECONDARY_EXEC_ENABLE_VPID 0x00000020 | 56 | #define SECONDARY_EXEC_ENABLE_VPID 0x00000020 |
57 | #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 | 57 | #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 |
58 | #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 | 58 | #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 |
59 | #define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 | ||
59 | 60 | ||
60 | 61 | ||
61 | #define PIN_BASED_EXT_INTR_MASK 0x00000001 | 62 | #define PIN_BASED_EXT_INTR_MASK 0x00000001 |
@@ -144,6 +145,8 @@ enum vmcs_field { | |||
144 | VM_ENTRY_INSTRUCTION_LEN = 0x0000401a, | 145 | VM_ENTRY_INSTRUCTION_LEN = 0x0000401a, |
145 | TPR_THRESHOLD = 0x0000401c, | 146 | TPR_THRESHOLD = 0x0000401c, |
146 | SECONDARY_VM_EXEC_CONTROL = 0x0000401e, | 147 | SECONDARY_VM_EXEC_CONTROL = 0x0000401e, |
148 | PLE_GAP = 0x00004020, | ||
149 | PLE_WINDOW = 0x00004022, | ||
147 | VM_INSTRUCTION_ERROR = 0x00004400, | 150 | VM_INSTRUCTION_ERROR = 0x00004400, |
148 | VM_EXIT_REASON = 0x00004402, | 151 | VM_EXIT_REASON = 0x00004402, |
149 | VM_EXIT_INTR_INFO = 0x00004404, | 152 | VM_EXIT_INTR_INFO = 0x00004404, |
@@ -248,6 +251,7 @@ enum vmcs_field { | |||
248 | #define EXIT_REASON_MSR_READ 31 | 251 | #define EXIT_REASON_MSR_READ 31 |
249 | #define EXIT_REASON_MSR_WRITE 32 | 252 | #define EXIT_REASON_MSR_WRITE 32 |
250 | #define EXIT_REASON_MWAIT_INSTRUCTION 36 | 253 | #define EXIT_REASON_MWAIT_INSTRUCTION 36 |
254 | #define EXIT_REASON_PAUSE_INSTRUCTION 40 | ||
251 | #define EXIT_REASON_MCE_DURING_VMENTRY 41 | 255 | #define EXIT_REASON_MCE_DURING_VMENTRY 41 |
252 | #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 | 256 | #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 |
253 | #define EXIT_REASON_APIC_ACCESS 44 | 257 | #define EXIT_REASON_APIC_ACCESS 44 |
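Editor's note (illustration only, not part of the patch): the new SECONDARY_EXEC_PAUSE_LOOP_EXITING control, the PLE_GAP/PLE_WINDOW VMCS fields, and EXIT_REASON_PAUSE_INSTRUCTION together implement pause-loop exiting. A hedged sketch of how the pieces fit; vmcs_write32() and the two default values are assumptions modeled on the VMX code, not definitions made by this header:

#include <linux/types.h>
#include <asm/vmx.h>

/* Assumed defaults: successive PAUSEs closer than ple_gap cycles count as
 * one spin loop; a loop spinning longer than ple_window cycles exits with
 * EXIT_REASON_PAUSE_INSTRUCTION so the host can schedule another vcpu. */
static const u32 ple_gap_sketch    = 41;
static const u32 ple_window_sketch = 4096;

void vmcs_write32(unsigned long field, u32 value);	/* assumed VMX helper */

static void enable_ple_sketch(u32 secondary_exec_ctl)
{
	if (secondary_exec_ctl & SECONDARY_EXEC_PAUSE_LOOP_EXITING) {
		vmcs_write32(PLE_GAP, ple_gap_sketch);
		vmcs_write32(PLE_WINDOW, ple_window_sketch);
	}
}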
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 744508e7cfdd..5e2ba634ea15 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/pm.h> | 9 | #include <linux/pm.h> |
10 | #include <linux/clockchips.h> | 10 | #include <linux/clockchips.h> |
11 | #include <linux/random.h> | 11 | #include <linux/random.h> |
12 | #include <linux/user-return-notifier.h> | ||
12 | #include <trace/events/power.h> | 13 | #include <trace/events/power.h> |
13 | #include <linux/hw_breakpoint.h> | 14 | #include <linux/hw_breakpoint.h> |
14 | #include <asm/system.h> | 15 | #include <asm/system.h> |
@@ -209,6 +210,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | |||
209 | */ | 210 | */ |
210 | memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); | 211 | memset(tss->io_bitmap, 0xff, prev->io_bitmap_max); |
211 | } | 212 | } |
213 | propagate_user_return_notify(prev_p, next_p); | ||
212 | } | 214 | } |
213 | 215 | ||
214 | int sys_fork(struct pt_regs *regs) | 216 | int sys_fork(struct pt_regs *regs) |
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index fbf3b07c8567..74fe6d86dc5d 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/stddef.h> | 19 | #include <linux/stddef.h> |
20 | #include <linux/personality.h> | 20 | #include <linux/personality.h> |
21 | #include <linux/uaccess.h> | 21 | #include <linux/uaccess.h> |
22 | #include <linux/user-return-notifier.h> | ||
22 | 23 | ||
23 | #include <asm/processor.h> | 24 | #include <asm/processor.h> |
24 | #include <asm/ucontext.h> | 25 | #include <asm/ucontext.h> |
@@ -863,6 +864,8 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) | |||
863 | if (current->replacement_session_keyring) | 864 | if (current->replacement_session_keyring) |
864 | key_replace_session_keyring(); | 865 | key_replace_session_keyring(); |
865 | } | 866 | } |
867 | if (thread_info_flags & _TIF_USER_RETURN_NOTIFY) | ||
868 | fire_user_return_notifiers(); | ||
866 | 869 | ||
867 | #ifdef CONFIG_X86_32 | 870 | #ifdef CONFIG_X86_32 |
868 | clear_thread_flag(TIF_IRET); | 871 | clear_thread_flag(TIF_IRET); |
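Editor's note (illustration only, not part of the patch): the TIF_USER_RETURN_NOTIFY flag and the fire_user_return_notifiers() call above are the arch side of the new user-return notifiers. A hedged kernel-side sketch of how a subsystem (KVM is the intended user) registers one, assuming the <linux/user-return-notifier.h> API added by this series:

#include <linux/kernel.h>
#include <linux/user-return-notifier.h>

static void on_user_return_sketch(struct user_return_notifier *urn)
{
	/* Runs on the current CPU just before it drops back to user mode,
	 * e.g. to restore host MSR values that the guest had clobbered. */
	pr_debug("restoring per-cpu state before returning to user mode\n");
}

static struct user_return_notifier my_urn_sketch = {
	.on_user_return = on_user_return_sketch,
};

/* Call on the CPU whose next return to user mode should be intercepted;
 * registration sets TIF_USER_RETURN_NOTIFY, so do_notify_resume() above
 * ends up calling fire_user_return_notifiers(). */
static void arm_user_return_notifier_sketch(void)
{
	user_return_notifier_register(&my_urn_sketch);
}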
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index 0157cd26d7cc..70c2125d55b9 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S | |||
@@ -336,3 +336,4 @@ ENTRY(sys_call_table) | |||
336 | .long sys_pwritev | 336 | .long sys_pwritev |
337 | .long sys_rt_tgsigqueueinfo /* 335 */ | 337 | .long sys_rt_tgsigqueueinfo /* 335 */ |
338 | .long sys_perf_event_open | 338 | .long sys_perf_event_open |
339 | .long sys_recvmmsg | ||
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 8cb4974ff599..e02d92d12bcd 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c | |||
@@ -237,7 +237,7 @@ static ctl_table kernel_table2[] = { | |||
237 | }; | 237 | }; |
238 | 238 | ||
239 | static ctl_table kernel_root_table2[] = { | 239 | static ctl_table kernel_root_table2[] = { |
240 | { .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555, | 240 | { .procname = "kernel", .mode = 0555, |
241 | .child = kernel_table2 }, | 241 | .child = kernel_table2 }, |
242 | {} | 242 | {} |
243 | }; | 243 | }; |
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index b84e571f4175..4cd498332466 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig | |||
@@ -28,6 +28,7 @@ config KVM | |||
28 | select HAVE_KVM_IRQCHIP | 28 | select HAVE_KVM_IRQCHIP |
29 | select HAVE_KVM_EVENTFD | 29 | select HAVE_KVM_EVENTFD |
30 | select KVM_APIC_ARCHITECTURE | 30 | select KVM_APIC_ARCHITECTURE |
31 | select USER_RETURN_NOTIFIER | ||
31 | ---help--- | 32 | ---help--- |
32 | Support hosting fully virtualized guest machines using hardware | 33 | Support hosting fully virtualized guest machines using hardware |
33 | virtualization extensions. You will need a fairly recent | 34 | virtualization extensions. You will need a fairly recent |
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 0e7fe78d0f74..31a7035c4bd9 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile | |||
@@ -6,7 +6,8 @@ CFLAGS_svm.o := -I. | |||
6 | CFLAGS_vmx.o := -I. | 6 | CFLAGS_vmx.o := -I. |
7 | 7 | ||
8 | kvm-y += $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \ | 8 | kvm-y += $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \ |
9 | coalesced_mmio.o irq_comm.o eventfd.o) | 9 | coalesced_mmio.o irq_comm.o eventfd.o \ |
10 | assigned-dev.o) | ||
10 | kvm-$(CONFIG_IOMMU_API) += $(addprefix ../../../virt/kvm/, iommu.o) | 11 | kvm-$(CONFIG_IOMMU_API) += $(addprefix ../../../virt/kvm/, iommu.o) |
11 | 12 | ||
12 | kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \ | 13 | kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \ |
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 1be5cd640e93..7e8faea4651e 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c | |||
@@ -75,6 +75,8 @@ | |||
75 | #define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */ | 75 | #define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */ |
76 | #define GroupDual (1<<15) /* Alternate decoding of mod == 3 */ | 76 | #define GroupDual (1<<15) /* Alternate decoding of mod == 3 */ |
77 | #define GroupMask 0xff /* Group number stored in bits 0:7 */ | 77 | #define GroupMask 0xff /* Group number stored in bits 0:7 */ |
78 | /* Misc flags */ | ||
79 | #define No64 (1<<28) | ||
78 | /* Source 2 operand type */ | 80 | /* Source 2 operand type */ |
79 | #define Src2None (0<<29) | 81 | #define Src2None (0<<29) |
80 | #define Src2CL (1<<29) | 82 | #define Src2CL (1<<29) |
@@ -92,19 +94,23 @@ static u32 opcode_table[256] = { | |||
92 | /* 0x00 - 0x07 */ | 94 | /* 0x00 - 0x07 */ |
93 | ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, | 95 | ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, |
94 | ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, | 96 | ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, |
95 | ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, 0, 0, | 97 | ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, |
98 | ImplicitOps | Stack | No64, ImplicitOps | Stack | No64, | ||
96 | /* 0x08 - 0x0F */ | 99 | /* 0x08 - 0x0F */ |
97 | ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, | 100 | ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, |
98 | ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, | 101 | ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, |
99 | 0, 0, 0, 0, | 102 | ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, |
103 | ImplicitOps | Stack | No64, 0, | ||
100 | /* 0x10 - 0x17 */ | 104 | /* 0x10 - 0x17 */ |
101 | ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, | 105 | ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, |
102 | ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, | 106 | ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, |
103 | ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, 0, 0, | 107 | ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, |
108 | ImplicitOps | Stack | No64, ImplicitOps | Stack | No64, | ||
104 | /* 0x18 - 0x1F */ | 109 | /* 0x18 - 0x1F */ |
105 | ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, | 110 | ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, |
106 | ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, | 111 | ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, |
107 | ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, 0, 0, | 112 | ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, |
113 | ImplicitOps | Stack | No64, ImplicitOps | Stack | No64, | ||
108 | /* 0x20 - 0x27 */ | 114 | /* 0x20 - 0x27 */ |
109 | ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, | 115 | ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, |
110 | ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, | 116 | ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, |
@@ -133,7 +139,8 @@ static u32 opcode_table[256] = { | |||
133 | DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack, | 139 | DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack, |
134 | DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack, | 140 | DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack, |
135 | /* 0x60 - 0x67 */ | 141 | /* 0x60 - 0x67 */ |
136 | 0, 0, 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ , | 142 | ImplicitOps | Stack | No64, ImplicitOps | Stack | No64, |
143 | 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ , | ||
137 | 0, 0, 0, 0, | 144 | 0, 0, 0, 0, |
138 | /* 0x68 - 0x6F */ | 145 | /* 0x68 - 0x6F */ |
139 | SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0, | 146 | SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0, |
@@ -158,7 +165,7 @@ static u32 opcode_table[256] = { | |||
158 | /* 0x90 - 0x97 */ | 165 | /* 0x90 - 0x97 */ |
159 | DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, | 166 | DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, |
160 | /* 0x98 - 0x9F */ | 167 | /* 0x98 - 0x9F */ |
161 | 0, 0, SrcImm | Src2Imm16, 0, | 168 | 0, 0, SrcImm | Src2Imm16 | No64, 0, |
162 | ImplicitOps | Stack, ImplicitOps | Stack, 0, 0, | 169 | ImplicitOps | Stack, ImplicitOps | Stack, 0, 0, |
163 | /* 0xA0 - 0xA7 */ | 170 | /* 0xA0 - 0xA7 */ |
164 | ByteOp | DstReg | SrcMem | Mov | MemAbs, DstReg | SrcMem | Mov | MemAbs, | 171 | ByteOp | DstReg | SrcMem | Mov | MemAbs, DstReg | SrcMem | Mov | MemAbs, |
@@ -185,7 +192,7 @@ static u32 opcode_table[256] = { | |||
185 | ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov, | 192 | ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov, |
186 | /* 0xC8 - 0xCF */ | 193 | /* 0xC8 - 0xCF */ |
187 | 0, 0, 0, ImplicitOps | Stack, | 194 | 0, 0, 0, ImplicitOps | Stack, |
188 | ImplicitOps, SrcImmByte, ImplicitOps, ImplicitOps, | 195 | ImplicitOps, SrcImmByte, ImplicitOps | No64, ImplicitOps, |
189 | /* 0xD0 - 0xD7 */ | 196 | /* 0xD0 - 0xD7 */ |
190 | ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM, | 197 | ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM, |
191 | ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM, | 198 | ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM, |
@@ -198,7 +205,7 @@ static u32 opcode_table[256] = { | |||
198 | ByteOp | SrcImmUByte, SrcImmUByte, | 205 | ByteOp | SrcImmUByte, SrcImmUByte, |
199 | /* 0xE8 - 0xEF */ | 206 | /* 0xE8 - 0xEF */ |
200 | SrcImm | Stack, SrcImm | ImplicitOps, | 207 | SrcImm | Stack, SrcImm | ImplicitOps, |
201 | SrcImmU | Src2Imm16, SrcImmByte | ImplicitOps, | 208 | SrcImmU | Src2Imm16 | No64, SrcImmByte | ImplicitOps, |
202 | SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, | 209 | SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, |
203 | SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, | 210 | SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, |
204 | /* 0xF0 - 0xF7 */ | 211 | /* 0xF0 - 0xF7 */ |
@@ -244,11 +251,13 @@ static u32 twobyte_table[256] = { | |||
244 | /* 0x90 - 0x9F */ | 251 | /* 0x90 - 0x9F */ |
245 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | 252 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
246 | /* 0xA0 - 0xA7 */ | 253 | /* 0xA0 - 0xA7 */ |
247 | 0, 0, 0, DstMem | SrcReg | ModRM | BitOp, | 254 | ImplicitOps | Stack, ImplicitOps | Stack, |
255 | 0, DstMem | SrcReg | ModRM | BitOp, | ||
248 | DstMem | SrcReg | Src2ImmByte | ModRM, | 256 | DstMem | SrcReg | Src2ImmByte | ModRM, |
249 | DstMem | SrcReg | Src2CL | ModRM, 0, 0, | 257 | DstMem | SrcReg | Src2CL | ModRM, 0, 0, |
250 | /* 0xA8 - 0xAF */ | 258 | /* 0xA8 - 0xAF */ |
251 | 0, 0, 0, DstMem | SrcReg | ModRM | BitOp, | 259 | ImplicitOps | Stack, ImplicitOps | Stack, |
260 | 0, DstMem | SrcReg | ModRM | BitOp, | ||
252 | DstMem | SrcReg | Src2ImmByte | ModRM, | 261 | DstMem | SrcReg | Src2ImmByte | ModRM, |
253 | DstMem | SrcReg | Src2CL | ModRM, | 262 | DstMem | SrcReg | Src2CL | ModRM, |
254 | ModRM, 0, | 263 | ModRM, 0, |
@@ -613,6 +622,9 @@ static int do_insn_fetch(struct x86_emulate_ctxt *ctxt, | |||
613 | { | 622 | { |
614 | int rc = 0; | 623 | int rc = 0; |
615 | 624 | ||
625 | /* x86 instructions are limited to 15 bytes. */ | ||
626 | if (eip + size - ctxt->decode.eip_orig > 15) | ||
627 | return X86EMUL_UNHANDLEABLE; | ||
616 | eip += ctxt->cs_base; | 628 | eip += ctxt->cs_base; |
617 | while (size--) { | 629 | while (size--) { |
618 | rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++); | 630 | rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++); |
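Editor's note (illustration only, not part of the patch): the bounds check added to do_insn_fetch() above, restated as a standalone predicate. eip_orig is the instruction's starting IP recorded at decode time (see the x86_decode_insn hunk below); a fetch reaching more than 15 bytes past it cannot belong to a legal x86 instruction:

#include <stdbool.h>

static bool insn_fetch_in_bounds(unsigned long eip_orig,
				 unsigned long fetch_eip, int size)
{
	/* x86 instructions are at most 15 bytes long, so any fetch that
	 * would extend further than that is rejected as unhandleable. */
	return fetch_eip + size - eip_orig <= 15;
}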
@@ -871,7 +883,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | |||
871 | /* Shadow copy of register state. Committed on successful emulation. */ | 883 | /* Shadow copy of register state. Committed on successful emulation. */ |
872 | 884 | ||
873 | memset(c, 0, sizeof(struct decode_cache)); | 885 | memset(c, 0, sizeof(struct decode_cache)); |
874 | c->eip = kvm_rip_read(ctxt->vcpu); | 886 | c->eip = c->eip_orig = kvm_rip_read(ctxt->vcpu); |
875 | ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS); | 887 | ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS); |
876 | memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs); | 888 | memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs); |
877 | 889 | ||
@@ -962,6 +974,11 @@ done_prefixes: | |||
962 | } | 974 | } |
963 | } | 975 | } |
964 | 976 | ||
977 | if (mode == X86EMUL_MODE_PROT64 && (c->d & No64)) { | ||
978 | kvm_report_emulation_failure(ctxt->vcpu, "invalid x86/64 instruction"); | ||
979 | return -1; | ||
980 | } | ||
981 | |||
965 | if (c->d & Group) { | 982 | if (c->d & Group) { |
966 | group = c->d & GroupMask; | 983 | group = c->d & GroupMask; |
967 | c->modrm = insn_fetch(u8, 1, c->eip); | 984 | c->modrm = insn_fetch(u8, 1, c->eip); |
@@ -1186,6 +1203,69 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt, | |||
1186 | return rc; | 1203 | return rc; |
1187 | } | 1204 | } |
1188 | 1205 | ||
1206 | static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg) | ||
1207 | { | ||
1208 | struct decode_cache *c = &ctxt->decode; | ||
1209 | struct kvm_segment segment; | ||
1210 | |||
1211 | kvm_x86_ops->get_segment(ctxt->vcpu, &segment, seg); | ||
1212 | |||
1213 | c->src.val = segment.selector; | ||
1214 | emulate_push(ctxt); | ||
1215 | } | ||
1216 | |||
1217 | static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt, | ||
1218 | struct x86_emulate_ops *ops, int seg) | ||
1219 | { | ||
1220 | struct decode_cache *c = &ctxt->decode; | ||
1221 | unsigned long selector; | ||
1222 | int rc; | ||
1223 | |||
1224 | rc = emulate_pop(ctxt, ops, &selector, c->op_bytes); | ||
1225 | if (rc != 0) | ||
1226 | return rc; | ||
1227 | |||
1228 | rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)selector, 1, seg); | ||
1229 | return rc; | ||
1230 | } | ||
1231 | |||
1232 | static void emulate_pusha(struct x86_emulate_ctxt *ctxt) | ||
1233 | { | ||
1234 | struct decode_cache *c = &ctxt->decode; | ||
1235 | unsigned long old_esp = c->regs[VCPU_REGS_RSP]; | ||
1236 | int reg = VCPU_REGS_RAX; | ||
1237 | |||
1238 | while (reg <= VCPU_REGS_RDI) { | ||
1239 | (reg == VCPU_REGS_RSP) ? | ||
1240 | (c->src.val = old_esp) : (c->src.val = c->regs[reg]); | ||
1241 | |||
1242 | emulate_push(ctxt); | ||
1243 | ++reg; | ||
1244 | } | ||
1245 | } | ||
1246 | |||
1247 | static int emulate_popa(struct x86_emulate_ctxt *ctxt, | ||
1248 | struct x86_emulate_ops *ops) | ||
1249 | { | ||
1250 | struct decode_cache *c = &ctxt->decode; | ||
1251 | int rc = 0; | ||
1252 | int reg = VCPU_REGS_RDI; | ||
1253 | |||
1254 | while (reg >= VCPU_REGS_RAX) { | ||
1255 | if (reg == VCPU_REGS_RSP) { | ||
1256 | register_address_increment(c, &c->regs[VCPU_REGS_RSP], | ||
1257 | c->op_bytes); | ||
1258 | --reg; | ||
1259 | } | ||
1260 | |||
1261 | rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes); | ||
1262 | if (rc != 0) | ||
1263 | break; | ||
1264 | --reg; | ||
1265 | } | ||
1266 | return rc; | ||
1267 | } | ||
1268 | |||
1189 | static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt, | 1269 | static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt, |
1190 | struct x86_emulate_ops *ops) | 1270 | struct x86_emulate_ops *ops) |
1191 | { | 1271 | { |
@@ -1707,18 +1787,45 @@ special_insn: | |||
1707 | add: /* add */ | 1787 | add: /* add */ |
1708 | emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags); | 1788 | emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags); |
1709 | break; | 1789 | break; |
1790 | case 0x06: /* push es */ | ||
1791 | emulate_push_sreg(ctxt, VCPU_SREG_ES); | ||
1792 | break; | ||
1793 | case 0x07: /* pop es */ | ||
1794 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES); | ||
1795 | if (rc != 0) | ||
1796 | goto done; | ||
1797 | break; | ||
1710 | case 0x08 ... 0x0d: | 1798 | case 0x08 ... 0x0d: |
1711 | or: /* or */ | 1799 | or: /* or */ |
1712 | emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags); | 1800 | emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags); |
1713 | break; | 1801 | break; |
1802 | case 0x0e: /* push cs */ | ||
1803 | emulate_push_sreg(ctxt, VCPU_SREG_CS); | ||
1804 | break; | ||
1714 | case 0x10 ... 0x15: | 1805 | case 0x10 ... 0x15: |
1715 | adc: /* adc */ | 1806 | adc: /* adc */ |
1716 | emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags); | 1807 | emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags); |
1717 | break; | 1808 | break; |
1809 | case 0x16: /* push ss */ | ||
1810 | emulate_push_sreg(ctxt, VCPU_SREG_SS); | ||
1811 | break; | ||
1812 | case 0x17: /* pop ss */ | ||
1813 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS); | ||
1814 | if (rc != 0) | ||
1815 | goto done; | ||
1816 | break; | ||
1718 | case 0x18 ... 0x1d: | 1817 | case 0x18 ... 0x1d: |
1719 | sbb: /* sbb */ | 1818 | sbb: /* sbb */ |
1720 | emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags); | 1819 | emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags); |
1721 | break; | 1820 | break; |
1821 | case 0x1e: /* push ds */ | ||
1822 | emulate_push_sreg(ctxt, VCPU_SREG_DS); | ||
1823 | break; | ||
1824 | case 0x1f: /* pop ds */ | ||
1825 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS); | ||
1826 | if (rc != 0) | ||
1827 | goto done; | ||
1828 | break; | ||
1722 | case 0x20 ... 0x25: | 1829 | case 0x20 ... 0x25: |
1723 | and: /* and */ | 1830 | and: /* and */ |
1724 | emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags); | 1831 | emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags); |
@@ -1750,6 +1857,14 @@ special_insn: | |||
1750 | if (rc != 0) | 1857 | if (rc != 0) |
1751 | goto done; | 1858 | goto done; |
1752 | break; | 1859 | break; |
1860 | case 0x60: /* pusha */ | ||
1861 | emulate_pusha(ctxt); | ||
1862 | break; | ||
1863 | case 0x61: /* popa */ | ||
1864 | rc = emulate_popa(ctxt, ops); | ||
1865 | if (rc != 0) | ||
1866 | goto done; | ||
1867 | break; | ||
1753 | case 0x63: /* movsxd */ | 1868 | case 0x63: /* movsxd */ |
1754 | if (ctxt->mode != X86EMUL_MODE_PROT64) | 1869 | if (ctxt->mode != X86EMUL_MODE_PROT64) |
1755 | goto cannot_emulate; | 1870 | goto cannot_emulate; |
@@ -1761,7 +1876,7 @@ special_insn: | |||
1761 | break; | 1876 | break; |
1762 | case 0x6c: /* insb */ | 1877 | case 0x6c: /* insb */ |
1763 | case 0x6d: /* insw/insd */ | 1878 | case 0x6d: /* insw/insd */ |
1764 | if (kvm_emulate_pio_string(ctxt->vcpu, NULL, | 1879 | if (kvm_emulate_pio_string(ctxt->vcpu, |
1765 | 1, | 1880 | 1, |
1766 | (c->d & ByteOp) ? 1 : c->op_bytes, | 1881 | (c->d & ByteOp) ? 1 : c->op_bytes, |
1767 | c->rep_prefix ? | 1882 | c->rep_prefix ? |
@@ -1777,7 +1892,7 @@ special_insn: | |||
1777 | return 0; | 1892 | return 0; |
1778 | case 0x6e: /* outsb */ | 1893 | case 0x6e: /* outsb */ |
1779 | case 0x6f: /* outsw/outsd */ | 1894 | case 0x6f: /* outsw/outsd */ |
1780 | if (kvm_emulate_pio_string(ctxt->vcpu, NULL, | 1895 | if (kvm_emulate_pio_string(ctxt->vcpu, |
1781 | 0, | 1896 | 0, |
1782 | (c->d & ByteOp) ? 1 : c->op_bytes, | 1897 | (c->d & ByteOp) ? 1 : c->op_bytes, |
1783 | c->rep_prefix ? | 1898 | c->rep_prefix ? |
@@ -2070,7 +2185,7 @@ special_insn: | |||
2070 | case 0xef: /* out (e/r)ax,dx */ | 2185 | case 0xef: /* out (e/r)ax,dx */ |
2071 | port = c->regs[VCPU_REGS_RDX]; | 2186 | port = c->regs[VCPU_REGS_RDX]; |
2072 | io_dir_in = 0; | 2187 | io_dir_in = 0; |
2073 | do_io: if (kvm_emulate_pio(ctxt->vcpu, NULL, io_dir_in, | 2188 | do_io: if (kvm_emulate_pio(ctxt->vcpu, io_dir_in, |
2074 | (c->d & ByteOp) ? 1 : c->op_bytes, | 2189 | (c->d & ByteOp) ? 1 : c->op_bytes, |
2075 | port) != 0) { | 2190 | port) != 0) { |
2076 | c->eip = saved_eip; | 2191 | c->eip = saved_eip; |
@@ -2297,6 +2412,14 @@ twobyte_insn: | |||
2297 | jmp_rel(c, c->src.val); | 2412 | jmp_rel(c, c->src.val); |
2298 | c->dst.type = OP_NONE; | 2413 | c->dst.type = OP_NONE; |
2299 | break; | 2414 | break; |
2415 | case 0xa0: /* push fs */ | ||
2416 | emulate_push_sreg(ctxt, VCPU_SREG_FS); | ||
2417 | break; | ||
2418 | case 0xa1: /* pop fs */ | ||
2419 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS); | ||
2420 | if (rc != 0) | ||
2421 | goto done; | ||
2422 | break; | ||
2300 | case 0xa3: | 2423 | case 0xa3: |
2301 | bt: /* bt */ | 2424 | bt: /* bt */ |
2302 | c->dst.type = OP_NONE; | 2425 | c->dst.type = OP_NONE; |
@@ -2308,6 +2431,14 @@ twobyte_insn: | |||
2308 | case 0xa5: /* shld cl, r, r/m */ | 2431 | case 0xa5: /* shld cl, r, r/m */ |
2309 | emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags); | 2432 | emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags); |
2310 | break; | 2433 | break; |
2434 | case 0xa8: /* push gs */ | ||
2435 | emulate_push_sreg(ctxt, VCPU_SREG_GS); | ||
2436 | break; | ||
2437 | case 0xa9: /* pop gs */ | ||
2438 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS); | ||
2439 | if (rc != 0) | ||
2440 | goto done; | ||
2441 | break; | ||
2311 | case 0xab: | 2442 | case 0xab: |
2312 | bts: /* bts */ | 2443 | bts: /* bts */ |
2313 | /* only subword offset */ | 2444 | /* only subword offset */ |
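The emulate_pusha()/emulate_popa() helpers added above implement the classic 0x60/0x61 semantics: PUSHA pushes AX, CX, DX, BX, the pre-PUSHA SP, BP, SI and DI in that order, and POPA restores them in reverse while discarding the stacked SP. A minimal user-space model of that ordering, illustrative only and not the emulator code itself:

#include <stdio.h>
#include <stdint.h>

enum { RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI, NREGS };

/* Illustrative model: PUSHA stores the pre-PUSHA SP value, POPA skips SP. */
static void model_pusha(uint32_t *regs, uint32_t *stack, int *top)
{
    uint32_t old_sp = regs[RSP];
    for (int reg = RAX; reg <= RDI; reg++)
        stack[(*top)++] = (reg == RSP) ? old_sp : regs[reg];
}

static void model_popa(uint32_t *regs, uint32_t *stack, int *top)
{
    for (int reg = RDI; reg >= RAX; reg--) {
        uint32_t val = stack[--(*top)];
        if (reg != RSP)            /* the stacked SP value is discarded */
            regs[reg] = val;
    }
}

int main(void)
{
    uint32_t regs[NREGS] = { 1, 2, 3, 4, 0x1000, 5, 6, 7 };
    uint32_t stack[NREGS];
    int top = 0;

    model_pusha(regs, stack, &top);
    regs[RAX] = 0xdead;
    model_popa(regs, stack, &top);
    printf("rax restored to %u\n", regs[RAX]);   /* prints 1 */
    return 0;
}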
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 144e7f60b5e2..fab7440c9bb2 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c | |||
@@ -688,10 +688,8 @@ static void __inject_pit_timer_intr(struct kvm *kvm) | |||
688 | struct kvm_vcpu *vcpu; | 688 | struct kvm_vcpu *vcpu; |
689 | int i; | 689 | int i; |
690 | 690 | ||
691 | mutex_lock(&kvm->irq_lock); | ||
692 | kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1); | 691 | kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1); |
693 | kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0); | 692 | kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0); |
694 | mutex_unlock(&kvm->irq_lock); | ||
695 | 693 | ||
696 | /* | 694 | /* |
697 | * Provides NMI watchdog support via Virtual Wire mode. | 695 | * Provides NMI watchdog support via Virtual Wire mode. |
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 01f151682802..d057c0cbd245 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c | |||
@@ -38,7 +38,15 @@ static void pic_clear_isr(struct kvm_kpic_state *s, int irq) | |||
38 | s->isr_ack |= (1 << irq); | 38 | s->isr_ack |= (1 << irq); |
39 | if (s != &s->pics_state->pics[0]) | 39 | if (s != &s->pics_state->pics[0]) |
40 | irq += 8; | 40 | irq += 8; |
41 | /* | ||
42 | * We are dropping the lock while calling the ack notifiers, since ack | ||
43 | * notifier callbacks for assigned devices call into the PIC recursively. | ||
44 | * Other interrupts may be delivered to the PIC while the lock is dropped, | ||
45 | * but that is safe since the PIC state is already updated at this stage. | ||
46 | */ | ||
47 | spin_unlock(&s->pics_state->lock); | ||
41 | kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq); | 48 | kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq); |
49 | spin_lock(&s->pics_state->lock); | ||
42 | } | 50 | } |
43 | 51 | ||
44 | void kvm_pic_clear_isr_ack(struct kvm *kvm) | 52 | void kvm_pic_clear_isr_ack(struct kvm *kvm) |
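The unlock/relock around kvm_notify_acked_irq() is a common pattern for invoking callbacks that may re-enter the locked object: complete all state updates first, then drop the lock for the callback. A stand-alone sketch of the pattern with hypothetical names, using a pthread mutex as a stand-in for the kernel spinlock:

#include <pthread.h>

struct dev_state {
    pthread_mutex_t lock;
    int state;
    void (*notify)(struct dev_state *);   /* callback may re-enter dev_update() */
};

/* Sketch only: the state is made consistent before the lock is dropped, so a
 * recursive call from the callback sees valid state and cannot deadlock. */
void dev_update(struct dev_state *d, int new_state)
{
    pthread_mutex_lock(&d->lock);
    d->state = new_state;             /* all updates done under the lock */
    pthread_mutex_unlock(&d->lock);
    d->notify(d);                     /* may call back into dev_update() */
    pthread_mutex_lock(&d->lock);
    /* continue under the lock if more work remains */
    pthread_mutex_unlock(&d->lock);
}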
@@ -176,16 +184,18 @@ int kvm_pic_set_irq(void *opaque, int irq, int level) | |||
176 | static inline void pic_intack(struct kvm_kpic_state *s, int irq) | 184 | static inline void pic_intack(struct kvm_kpic_state *s, int irq) |
177 | { | 185 | { |
178 | s->isr |= 1 << irq; | 186 | s->isr |= 1 << irq; |
179 | if (s->auto_eoi) { | ||
180 | if (s->rotate_on_auto_eoi) | ||
181 | s->priority_add = (irq + 1) & 7; | ||
182 | pic_clear_isr(s, irq); | ||
183 | } | ||
184 | /* | 187 | /* |
185 | * We don't clear a level sensitive interrupt here | 188 | * We don't clear a level sensitive interrupt here |
186 | */ | 189 | */ |
187 | if (!(s->elcr & (1 << irq))) | 190 | if (!(s->elcr & (1 << irq))) |
188 | s->irr &= ~(1 << irq); | 191 | s->irr &= ~(1 << irq); |
192 | |||
193 | if (s->auto_eoi) { | ||
194 | if (s->rotate_on_auto_eoi) | ||
195 | s->priority_add = (irq + 1) & 7; | ||
196 | pic_clear_isr(s, irq); | ||
197 | } | ||
198 | |||
189 | } | 199 | } |
190 | 200 | ||
191 | int kvm_pic_read_irq(struct kvm *kvm) | 201 | int kvm_pic_read_irq(struct kvm *kvm) |
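Clearing the IRR bit before the auto-EOI path matters now that pic_clear_isr() drops the lock and runs the ack notifiers, which may immediately re-raise the same edge-triggered line; if IRR were still set, the re-raised interrupt would be coalesced and lost. A compact model of the acknowledge order, for illustration only:

#include <stdint.h>

struct pic_model {
    uint8_t irr, isr, elcr;    /* request, in-service, level-trigger mask */
    int auto_eoi;
};

/* Sketch of the acknowledge order after this patch: set ISR, clear IRR for
 * edge-triggered lines, and only then run the auto-EOI path (which may
 * invoke ack notifiers that raise the same line again). */
static void model_intack(struct pic_model *s, int irq)
{
    s->isr |= 1u << irq;
    if (!(s->elcr & (1u << irq)))     /* edge triggered: consume the request */
        s->irr &= ~(1u << irq);
    if (s->auto_eoi)
        s->isr &= ~(1u << irq);       /* stands in for pic_clear_isr() */
}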
@@ -225,22 +235,11 @@ int kvm_pic_read_irq(struct kvm *kvm) | |||
225 | 235 | ||
226 | void kvm_pic_reset(struct kvm_kpic_state *s) | 236 | void kvm_pic_reset(struct kvm_kpic_state *s) |
227 | { | 237 | { |
228 | int irq, irqbase, n; | 238 | int irq; |
229 | struct kvm *kvm = s->pics_state->irq_request_opaque; | 239 | struct kvm *kvm = s->pics_state->irq_request_opaque; |
230 | struct kvm_vcpu *vcpu0 = kvm->bsp_vcpu; | 240 | struct kvm_vcpu *vcpu0 = kvm->bsp_vcpu; |
241 | u8 irr = s->irr, isr = s->isr; | ||
231 | 242 | ||
232 | if (s == &s->pics_state->pics[0]) | ||
233 | irqbase = 0; | ||
234 | else | ||
235 | irqbase = 8; | ||
236 | |||
237 | for (irq = 0; irq < PIC_NUM_PINS/2; irq++) { | ||
238 | if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0)) | ||
239 | if (s->irr & (1 << irq) || s->isr & (1 << irq)) { | ||
240 | n = irq + irqbase; | ||
241 | kvm_notify_acked_irq(kvm, SELECT_PIC(n), n); | ||
242 | } | ||
243 | } | ||
244 | s->last_irr = 0; | 243 | s->last_irr = 0; |
245 | s->irr = 0; | 244 | s->irr = 0; |
246 | s->imr = 0; | 245 | s->imr = 0; |
@@ -256,6 +255,13 @@ void kvm_pic_reset(struct kvm_kpic_state *s) | |||
256 | s->rotate_on_auto_eoi = 0; | 255 | s->rotate_on_auto_eoi = 0; |
257 | s->special_fully_nested_mode = 0; | 256 | s->special_fully_nested_mode = 0; |
258 | s->init4 = 0; | 257 | s->init4 = 0; |
258 | |||
259 | for (irq = 0; irq < PIC_NUM_PINS/2; irq++) { | ||
260 | if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0)) | ||
261 | if (irr & (1 << irq) || isr & (1 << irq)) { | ||
262 | pic_clear_isr(s, irq); | ||
263 | } | ||
264 | } | ||
259 | } | 265 | } |
260 | 266 | ||
261 | static void pic_ioport_write(void *opaque, u32 addr, u32 val) | 267 | static void pic_ioport_write(void *opaque, u32 addr, u32 val) |
@@ -298,9 +304,9 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val) | |||
298 | priority = get_priority(s, s->isr); | 304 | priority = get_priority(s, s->isr); |
299 | if (priority != 8) { | 305 | if (priority != 8) { |
300 | irq = (priority + s->priority_add) & 7; | 306 | irq = (priority + s->priority_add) & 7; |
301 | pic_clear_isr(s, irq); | ||
302 | if (cmd == 5) | 307 | if (cmd == 5) |
303 | s->priority_add = (irq + 1) & 7; | 308 | s->priority_add = (irq + 1) & 7; |
309 | pic_clear_isr(s, irq); | ||
304 | pic_update_irq(s->pics_state); | 310 | pic_update_irq(s->pics_state); |
305 | } | 311 | } |
306 | break; | 312 | break; |
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index 7d6058a2fd38..be399e207d57 100644 --- a/arch/x86/kvm/irq.h +++ b/arch/x86/kvm/irq.h | |||
@@ -71,6 +71,7 @@ struct kvm_pic { | |||
71 | int output; /* intr from master PIC */ | 71 | int output; /* intr from master PIC */ |
72 | struct kvm_io_device dev; | 72 | struct kvm_io_device dev; |
73 | void (*ack_notifier)(void *opaque, int irq); | 73 | void (*ack_notifier)(void *opaque, int irq); |
74 | unsigned long irq_states[16]; | ||
74 | }; | 75 | }; |
75 | 76 | ||
76 | struct kvm_pic *kvm_create_pic(struct kvm *kvm); | 77 | struct kvm_pic *kvm_create_pic(struct kvm *kvm); |
@@ -85,7 +86,11 @@ static inline struct kvm_pic *pic_irqchip(struct kvm *kvm) | |||
85 | 86 | ||
86 | static inline int irqchip_in_kernel(struct kvm *kvm) | 87 | static inline int irqchip_in_kernel(struct kvm *kvm) |
87 | { | 88 | { |
88 | return pic_irqchip(kvm) != NULL; | 89 | int ret; |
90 | |||
91 | ret = (pic_irqchip(kvm) != NULL); | ||
92 | smp_rmb(); | ||
93 | return ret; | ||
89 | } | 94 | } |
90 | 95 | ||
91 | void kvm_pic_reset(struct kvm_kpic_state *s); | 96 | void kvm_pic_reset(struct kvm_kpic_state *s); |
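The smp_rmb() in irqchip_in_kernel() is the read half of a publish/subscribe pairing: the creation path is expected to fully initialize the irqchip and issue a write barrier before storing the pointer, so a reader that sees a non-NULL pointer also sees initialized state. A self-contained sketch of the pairing with hypothetical names, using C11 acquire/release atomics as stand-ins for smp_wmb()/smp_rmb():

#include <stdatomic.h>
#include <stdbool.h>

struct chip { int ready_state; };

static _Atomic(struct chip *) published_chip;   /* NULL until fully set up */

/* Writer: initialize, then publish with release ordering (the smp_wmb()
 * side of the pairing in kernel terms). */
void publish_chip(struct chip *c)
{
    c->ready_state = 1;
    atomic_store_explicit(&published_chip, c, memory_order_release);
}

/* Reader: observe the pointer with acquire ordering (the smp_rmb() side),
 * so a non-NULL result implies the initialization above is visible. */
bool chip_in_kernel(void)
{
    struct chip *c = atomic_load_explicit(&published_chip,
                                          memory_order_acquire);
    return c != NULL;
}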
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 23c217692ea9..cd60c0bd1b32 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -32,7 +32,6 @@ | |||
32 | #include <asm/current.h> | 32 | #include <asm/current.h> |
33 | #include <asm/apicdef.h> | 33 | #include <asm/apicdef.h> |
34 | #include <asm/atomic.h> | 34 | #include <asm/atomic.h> |
35 | #include <asm/apicdef.h> | ||
36 | #include "kvm_cache_regs.h" | 35 | #include "kvm_cache_regs.h" |
37 | #include "irq.h" | 36 | #include "irq.h" |
38 | #include "trace.h" | 37 | #include "trace.h" |
@@ -471,11 +470,8 @@ static void apic_set_eoi(struct kvm_lapic *apic) | |||
471 | trigger_mode = IOAPIC_LEVEL_TRIG; | 470 | trigger_mode = IOAPIC_LEVEL_TRIG; |
472 | else | 471 | else |
473 | trigger_mode = IOAPIC_EDGE_TRIG; | 472 | trigger_mode = IOAPIC_EDGE_TRIG; |
474 | if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)) { | 473 | if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)) |
475 | mutex_lock(&apic->vcpu->kvm->irq_lock); | ||
476 | kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode); | 474 | kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode); |
477 | mutex_unlock(&apic->vcpu->kvm->irq_lock); | ||
478 | } | ||
479 | } | 475 | } |
480 | 476 | ||
481 | static void apic_send_ipi(struct kvm_lapic *apic) | 477 | static void apic_send_ipi(struct kvm_lapic *apic) |
@@ -504,9 +500,7 @@ static void apic_send_ipi(struct kvm_lapic *apic) | |||
504 | irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode, | 500 | irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode, |
505 | irq.vector); | 501 | irq.vector); |
506 | 502 | ||
507 | mutex_lock(&apic->vcpu->kvm->irq_lock); | ||
508 | kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq); | 503 | kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq); |
509 | mutex_unlock(&apic->vcpu->kvm->irq_lock); | ||
510 | } | 504 | } |
511 | 505 | ||
512 | static u32 apic_get_tmcct(struct kvm_lapic *apic) | 506 | static u32 apic_get_tmcct(struct kvm_lapic *apic) |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 818b92ad82cf..4c3e5b2314cb 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -2789,7 +2789,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code) | |||
2789 | if (r) | 2789 | if (r) |
2790 | goto out; | 2790 | goto out; |
2791 | 2791 | ||
2792 | er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0); | 2792 | er = emulate_instruction(vcpu, cr2, error_code, 0); |
2793 | 2793 | ||
2794 | switch (er) { | 2794 | switch (er) { |
2795 | case EMULATE_DONE: | 2795 | case EMULATE_DONE: |
@@ -2800,6 +2800,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code) | |||
2800 | case EMULATE_FAIL: | 2800 | case EMULATE_FAIL: |
2801 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 2801 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
2802 | vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; | 2802 | vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; |
2803 | vcpu->run->internal.ndata = 0; | ||
2803 | return 0; | 2804 | return 0; |
2804 | default: | 2805 | default: |
2805 | BUG(); | 2806 | BUG(); |
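Zeroing run->internal.ndata keeps userspace from interpreting stale data[] entries when emulation fails. Roughly, a VMM's run loop might report such an exit as below; the field names follow the KVM userspace ABI, while the handling itself is only a sketch:

#include <stdio.h>
#include <linux/kvm.h>

/* Illustrative only: report an emulation failure surfaced by the kernel. */
static void handle_internal_error(struct kvm_run *run)
{
    if (run->exit_reason != KVM_EXIT_INTERNAL_ERROR)
        return;
    fprintf(stderr, "internal error, suberror %u\n", run->internal.suberror);
    for (__u32 i = 0; i < run->internal.ndata; i++)
        fprintf(stderr, "  data[%u] = 0x%llx\n", i,
                (unsigned long long)run->internal.data[i]);
}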
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 72558f8ff3f5..a6017132fba8 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h | |||
@@ -467,7 +467,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) | |||
467 | level = iterator.level; | 467 | level = iterator.level; |
468 | sptep = iterator.sptep; | 468 | sptep = iterator.sptep; |
469 | 469 | ||
470 | /* FIXME: properly handle invlpg on large guest pages */ | ||
471 | if (level == PT_PAGE_TABLE_LEVEL || | 470 | if (level == PT_PAGE_TABLE_LEVEL || |
472 | ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) || | 471 | ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) || |
473 | ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) { | 472 | ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) { |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index c17404add91f..3de0b37ec038 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -46,6 +46,7 @@ MODULE_LICENSE("GPL"); | |||
46 | #define SVM_FEATURE_NPT (1 << 0) | 46 | #define SVM_FEATURE_NPT (1 << 0) |
47 | #define SVM_FEATURE_LBRV (1 << 1) | 47 | #define SVM_FEATURE_LBRV (1 << 1) |
48 | #define SVM_FEATURE_SVML (1 << 2) | 48 | #define SVM_FEATURE_SVML (1 << 2) |
49 | #define SVM_FEATURE_PAUSE_FILTER (1 << 10) | ||
49 | 50 | ||
50 | #define NESTED_EXIT_HOST 0 /* Exit handled on host level */ | 51 | #define NESTED_EXIT_HOST 0 /* Exit handled on host level */ |
51 | #define NESTED_EXIT_DONE 1 /* Exit caused nested vmexit */ | 52 | #define NESTED_EXIT_DONE 1 /* Exit caused nested vmexit */ |
@@ -53,15 +54,6 @@ MODULE_LICENSE("GPL"); | |||
53 | 54 | ||
54 | #define DEBUGCTL_RESERVED_BITS (~(0x3fULL)) | 55 | #define DEBUGCTL_RESERVED_BITS (~(0x3fULL)) |
55 | 56 | ||
56 | /* Turn on to get debugging output*/ | ||
57 | /* #define NESTED_DEBUG */ | ||
58 | |||
59 | #ifdef NESTED_DEBUG | ||
60 | #define nsvm_printk(fmt, args...) printk(KERN_INFO fmt, ## args) | ||
61 | #else | ||
62 | #define nsvm_printk(fmt, args...) do {} while(0) | ||
63 | #endif | ||
64 | |||
65 | static const u32 host_save_user_msrs[] = { | 57 | static const u32 host_save_user_msrs[] = { |
66 | #ifdef CONFIG_X86_64 | 58 | #ifdef CONFIG_X86_64 |
67 | MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE, | 59 | MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE, |
@@ -85,6 +77,9 @@ struct nested_state { | |||
85 | /* gpa pointers to the real vectors */ | 77 | /* gpa pointers to the real vectors */ |
86 | u64 vmcb_msrpm; | 78 | u64 vmcb_msrpm; |
87 | 79 | ||
80 | /* A VMEXIT is required but not yet emulated */ | ||
81 | bool exit_required; | ||
82 | |||
88 | /* cache for intercepts of the guest */ | 83 | /* cache for intercepts of the guest */ |
89 | u16 intercept_cr_read; | 84 | u16 intercept_cr_read; |
90 | u16 intercept_cr_write; | 85 | u16 intercept_cr_write; |
@@ -112,6 +107,8 @@ struct vcpu_svm { | |||
112 | u32 *msrpm; | 107 | u32 *msrpm; |
113 | 108 | ||
114 | struct nested_state nested; | 109 | struct nested_state nested; |
110 | |||
111 | bool nmi_singlestep; | ||
115 | }; | 112 | }; |
116 | 113 | ||
117 | /* enable NPT for AMD64 and X86 with PAE */ | 114 | /* enable NPT for AMD64 and X86 with PAE */ |
@@ -286,7 +283,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) | |||
286 | struct vcpu_svm *svm = to_svm(vcpu); | 283 | struct vcpu_svm *svm = to_svm(vcpu); |
287 | 284 | ||
288 | if (!svm->next_rip) { | 285 | if (!svm->next_rip) { |
289 | if (emulate_instruction(vcpu, vcpu->run, 0, 0, EMULTYPE_SKIP) != | 286 | if (emulate_instruction(vcpu, 0, 0, EMULTYPE_SKIP) != |
290 | EMULATE_DONE) | 287 | EMULATE_DONE) |
291 | printk(KERN_DEBUG "%s: NOP\n", __func__); | 288 | printk(KERN_DEBUG "%s: NOP\n", __func__); |
292 | return; | 289 | return; |
@@ -316,7 +313,7 @@ static void svm_hardware_disable(void *garbage) | |||
316 | cpu_svm_disable(); | 313 | cpu_svm_disable(); |
317 | } | 314 | } |
318 | 315 | ||
319 | static void svm_hardware_enable(void *garbage) | 316 | static int svm_hardware_enable(void *garbage) |
320 | { | 317 | { |
321 | 318 | ||
322 | struct svm_cpu_data *svm_data; | 319 | struct svm_cpu_data *svm_data; |
@@ -325,16 +322,21 @@ static void svm_hardware_enable(void *garbage) | |||
325 | struct desc_struct *gdt; | 322 | struct desc_struct *gdt; |
326 | int me = raw_smp_processor_id(); | 323 | int me = raw_smp_processor_id(); |
327 | 324 | ||
325 | rdmsrl(MSR_EFER, efer); | ||
326 | if (efer & EFER_SVME) | ||
327 | return -EBUSY; | ||
328 | |||
328 | if (!has_svm()) { | 329 | if (!has_svm()) { |
329 | printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me); | 330 | printk(KERN_ERR "svm_hardware_enable: err EOPNOTSUPP on %d\n", |
330 | return; | 331 | me); |
332 | return -EINVAL; | ||
331 | } | 333 | } |
332 | svm_data = per_cpu(svm_data, me); | 334 | svm_data = per_cpu(svm_data, me); |
333 | 335 | ||
334 | if (!svm_data) { | 336 | if (!svm_data) { |
335 | printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n", | 337 | printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n", |
336 | me); | 338 | me); |
337 | return; | 339 | return -EINVAL; |
338 | } | 340 | } |
339 | 341 | ||
340 | svm_data->asid_generation = 1; | 342 | svm_data->asid_generation = 1; |
@@ -345,11 +347,12 @@ static void svm_hardware_enable(void *garbage) | |||
345 | gdt = (struct desc_struct *)gdt_descr.base; | 347 | gdt = (struct desc_struct *)gdt_descr.base; |
346 | svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS); | 348 | svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS); |
347 | 349 | ||
348 | rdmsrl(MSR_EFER, efer); | ||
349 | wrmsrl(MSR_EFER, efer | EFER_SVME); | 350 | wrmsrl(MSR_EFER, efer | EFER_SVME); |
350 | 351 | ||
351 | wrmsrl(MSR_VM_HSAVE_PA, | 352 | wrmsrl(MSR_VM_HSAVE_PA, |
352 | page_to_pfn(svm_data->save_area) << PAGE_SHIFT); | 353 | page_to_pfn(svm_data->save_area) << PAGE_SHIFT); |
354 | |||
355 | return 0; | ||
353 | } | 356 | } |
354 | 357 | ||
355 | static void svm_cpu_uninit(int cpu) | 358 | static void svm_cpu_uninit(int cpu) |
@@ -476,7 +479,7 @@ static __init int svm_hardware_setup(void) | |||
476 | kvm_enable_efer_bits(EFER_SVME); | 479 | kvm_enable_efer_bits(EFER_SVME); |
477 | } | 480 | } |
478 | 481 | ||
479 | for_each_online_cpu(cpu) { | 482 | for_each_possible_cpu(cpu) { |
480 | r = svm_cpu_init(cpu); | 483 | r = svm_cpu_init(cpu); |
481 | if (r) | 484 | if (r) |
482 | goto err; | 485 | goto err; |
@@ -510,7 +513,7 @@ static __exit void svm_hardware_unsetup(void) | |||
510 | { | 513 | { |
511 | int cpu; | 514 | int cpu; |
512 | 515 | ||
513 | for_each_online_cpu(cpu) | 516 | for_each_possible_cpu(cpu) |
514 | svm_cpu_uninit(cpu); | 517 | svm_cpu_uninit(cpu); |
515 | 518 | ||
516 | __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER); | 519 | __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER); |
@@ -625,11 +628,12 @@ static void init_vmcb(struct vcpu_svm *svm) | |||
625 | save->rip = 0x0000fff0; | 628 | save->rip = 0x0000fff0; |
626 | svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip; | 629 | svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip; |
627 | 630 | ||
628 | /* | 631 | /* This is the guest-visible cr0 value. |
629 | * cr0 val on cpu init should be 0x60000010, we enable cpu | 632 | * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0. |
630 | * cache by default. the orderly way is to enable cache in bios. | ||
631 | */ | 633 | */ |
632 | save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP; | 634 | svm->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; |
635 | kvm_set_cr0(&svm->vcpu, svm->vcpu.arch.cr0); | ||
636 | |||
633 | save->cr4 = X86_CR4_PAE; | 637 | save->cr4 = X86_CR4_PAE; |
634 | /* rdx = ?? */ | 638 | /* rdx = ?? */ |
635 | 639 | ||
@@ -644,8 +648,6 @@ static void init_vmcb(struct vcpu_svm *svm) | |||
644 | control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK| | 648 | control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK| |
645 | INTERCEPT_CR3_MASK); | 649 | INTERCEPT_CR3_MASK); |
646 | save->g_pat = 0x0007040600070406ULL; | 650 | save->g_pat = 0x0007040600070406ULL; |
647 | /* enable caching because the QEMU Bios doesn't enable it */ | ||
648 | save->cr0 = X86_CR0_ET; | ||
649 | save->cr3 = 0; | 651 | save->cr3 = 0; |
650 | save->cr4 = 0; | 652 | save->cr4 = 0; |
651 | } | 653 | } |
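The guest-visible CR0 now starts from the architectural reset value (CD, NW and ET set) and is fed through kvm_set_cr0(), which applies the PG/WP adjustments to the VMCB copy. A quick check of the bit arithmetic, illustrative only:

#include <stdio.h>

#define X86_CR0_ET (1UL << 4)
#define X86_CR0_NW (1UL << 29)
#define X86_CR0_CD (1UL << 30)

int main(void)
{
    /* Architectural reset value of CR0 is 0x60000010: CD | NW | ET. */
    unsigned long cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
    printf("reset cr0 = 0x%08lx\n", cr0);   /* prints 0x60000010 */
    return 0;
}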
@@ -654,6 +656,11 @@ static void init_vmcb(struct vcpu_svm *svm) | |||
654 | svm->nested.vmcb = 0; | 656 | svm->nested.vmcb = 0; |
655 | svm->vcpu.arch.hflags = 0; | 657 | svm->vcpu.arch.hflags = 0; |
656 | 658 | ||
659 | if (svm_has(SVM_FEATURE_PAUSE_FILTER)) { | ||
660 | control->pause_filter_count = 3000; | ||
661 | control->intercept |= (1ULL << INTERCEPT_PAUSE); | ||
662 | } | ||
663 | |||
657 | enable_gif(svm); | 664 | enable_gif(svm); |
658 | } | 665 | } |
659 | 666 | ||
@@ -758,14 +765,13 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
758 | int i; | 765 | int i; |
759 | 766 | ||
760 | if (unlikely(cpu != vcpu->cpu)) { | 767 | if (unlikely(cpu != vcpu->cpu)) { |
761 | u64 tsc_this, delta; | 768 | u64 delta; |
762 | 769 | ||
763 | /* | 770 | /* |
764 | * Make sure that the guest sees a monotonically | 771 | * Make sure that the guest sees a monotonically |
765 | * increasing TSC. | 772 | * increasing TSC. |
766 | */ | 773 | */ |
767 | rdtscll(tsc_this); | 774 | delta = vcpu->arch.host_tsc - native_read_tsc(); |
768 | delta = vcpu->arch.host_tsc - tsc_this; | ||
769 | svm->vmcb->control.tsc_offset += delta; | 775 | svm->vmcb->control.tsc_offset += delta; |
770 | if (is_nested(svm)) | 776 | if (is_nested(svm)) |
771 | svm->nested.hsave->control.tsc_offset += delta; | 777 | svm->nested.hsave->control.tsc_offset += delta; |
@@ -787,7 +793,7 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu) | |||
787 | for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) | 793 | for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) |
788 | wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); | 794 | wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); |
789 | 795 | ||
790 | rdtscll(vcpu->arch.host_tsc); | 796 | vcpu->arch.host_tsc = native_read_tsc(); |
791 | } | 797 | } |
792 | 798 | ||
793 | static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu) | 799 | static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu) |
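When a vcpu migrates between CPUs whose TSCs are not synchronized, the guest TSC (host TSC plus tsc_offset) could appear to jump backwards; folding the difference between the saved and current host TSC into the offset keeps it monotonic. A small sketch of the arithmetic, assuming guest_tsc = host_tsc + tsc_offset:

#include <stdint.h>

/* Sketch: adjust the offset so that guest_tsc = host_tsc + tsc_offset does
 * not move backwards when the vcpu lands on a CPU whose TSC is behind. */
static uint64_t migrate_tsc_offset(uint64_t tsc_offset,
                                   uint64_t old_cpu_tsc_at_save,
                                   uint64_t new_cpu_tsc_now)
{
    uint64_t delta = old_cpu_tsc_at_save - new_cpu_tsc_now;
    return tsc_offset + delta;     /* wraps naturally on unsigned overflow */
}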
@@ -1045,7 +1051,7 @@ static void update_db_intercept(struct kvm_vcpu *vcpu) | |||
1045 | svm->vmcb->control.intercept_exceptions &= | 1051 | svm->vmcb->control.intercept_exceptions &= |
1046 | ~((1 << DB_VECTOR) | (1 << BP_VECTOR)); | 1052 | ~((1 << DB_VECTOR) | (1 << BP_VECTOR)); |
1047 | 1053 | ||
1048 | if (vcpu->arch.singlestep) | 1054 | if (svm->nmi_singlestep) |
1049 | svm->vmcb->control.intercept_exceptions |= (1 << DB_VECTOR); | 1055 | svm->vmcb->control.intercept_exceptions |= (1 << DB_VECTOR); |
1050 | 1056 | ||
1051 | if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { | 1057 | if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { |
@@ -1060,26 +1066,16 @@ static void update_db_intercept(struct kvm_vcpu *vcpu) | |||
1060 | vcpu->guest_debug = 0; | 1066 | vcpu->guest_debug = 0; |
1061 | } | 1067 | } |
1062 | 1068 | ||
1063 | static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) | 1069 | static void svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) |
1064 | { | 1070 | { |
1065 | int old_debug = vcpu->guest_debug; | ||
1066 | struct vcpu_svm *svm = to_svm(vcpu); | 1071 | struct vcpu_svm *svm = to_svm(vcpu); |
1067 | 1072 | ||
1068 | vcpu->guest_debug = dbg->control; | ||
1069 | |||
1070 | update_db_intercept(vcpu); | ||
1071 | |||
1072 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) | 1073 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) |
1073 | svm->vmcb->save.dr7 = dbg->arch.debugreg[7]; | 1074 | svm->vmcb->save.dr7 = dbg->arch.debugreg[7]; |
1074 | else | 1075 | else |
1075 | svm->vmcb->save.dr7 = vcpu->arch.dr7; | 1076 | svm->vmcb->save.dr7 = vcpu->arch.dr7; |
1076 | 1077 | ||
1077 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) | 1078 | update_db_intercept(vcpu); |
1078 | svm->vmcb->save.rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF; | ||
1079 | else if (old_debug & KVM_GUESTDBG_SINGLESTEP) | ||
1080 | svm->vmcb->save.rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF); | ||
1081 | |||
1082 | return 0; | ||
1083 | } | 1079 | } |
1084 | 1080 | ||
1085 | static void load_host_msrs(struct kvm_vcpu *vcpu) | 1081 | static void load_host_msrs(struct kvm_vcpu *vcpu) |
@@ -1180,7 +1176,7 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value, | |||
1180 | } | 1176 | } |
1181 | } | 1177 | } |
1182 | 1178 | ||
1183 | static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 1179 | static int pf_interception(struct vcpu_svm *svm) |
1184 | { | 1180 | { |
1185 | u64 fault_address; | 1181 | u64 fault_address; |
1186 | u32 error_code; | 1182 | u32 error_code; |
@@ -1194,17 +1190,19 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | |||
1194 | return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code); | 1190 | return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code); |
1195 | } | 1191 | } |
1196 | 1192 | ||
1197 | static int db_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 1193 | static int db_interception(struct vcpu_svm *svm) |
1198 | { | 1194 | { |
1195 | struct kvm_run *kvm_run = svm->vcpu.run; | ||
1196 | |||
1199 | if (!(svm->vcpu.guest_debug & | 1197 | if (!(svm->vcpu.guest_debug & |
1200 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) && | 1198 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) && |
1201 | !svm->vcpu.arch.singlestep) { | 1199 | !svm->nmi_singlestep) { |
1202 | kvm_queue_exception(&svm->vcpu, DB_VECTOR); | 1200 | kvm_queue_exception(&svm->vcpu, DB_VECTOR); |
1203 | return 1; | 1201 | return 1; |
1204 | } | 1202 | } |
1205 | 1203 | ||
1206 | if (svm->vcpu.arch.singlestep) { | 1204 | if (svm->nmi_singlestep) { |
1207 | svm->vcpu.arch.singlestep = false; | 1205 | svm->nmi_singlestep = false; |
1208 | if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) | 1206 | if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) |
1209 | svm->vmcb->save.rflags &= | 1207 | svm->vmcb->save.rflags &= |
1210 | ~(X86_EFLAGS_TF | X86_EFLAGS_RF); | 1208 | ~(X86_EFLAGS_TF | X86_EFLAGS_RF); |
@@ -1223,25 +1221,27 @@ static int db_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | |||
1223 | return 1; | 1221 | return 1; |
1224 | } | 1222 | } |
1225 | 1223 | ||
1226 | static int bp_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 1224 | static int bp_interception(struct vcpu_svm *svm) |
1227 | { | 1225 | { |
1226 | struct kvm_run *kvm_run = svm->vcpu.run; | ||
1227 | |||
1228 | kvm_run->exit_reason = KVM_EXIT_DEBUG; | 1228 | kvm_run->exit_reason = KVM_EXIT_DEBUG; |
1229 | kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; | 1229 | kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; |
1230 | kvm_run->debug.arch.exception = BP_VECTOR; | 1230 | kvm_run->debug.arch.exception = BP_VECTOR; |
1231 | return 0; | 1231 | return 0; |
1232 | } | 1232 | } |
1233 | 1233 | ||
1234 | static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 1234 | static int ud_interception(struct vcpu_svm *svm) |
1235 | { | 1235 | { |
1236 | int er; | 1236 | int er; |
1237 | 1237 | ||
1238 | er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD); | 1238 | er = emulate_instruction(&svm->vcpu, 0, 0, EMULTYPE_TRAP_UD); |
1239 | if (er != EMULATE_DONE) | 1239 | if (er != EMULATE_DONE) |
1240 | kvm_queue_exception(&svm->vcpu, UD_VECTOR); | 1240 | kvm_queue_exception(&svm->vcpu, UD_VECTOR); |
1241 | return 1; | 1241 | return 1; |
1242 | } | 1242 | } |
1243 | 1243 | ||
1244 | static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 1244 | static int nm_interception(struct vcpu_svm *svm) |
1245 | { | 1245 | { |
1246 | svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR); | 1246 | svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR); |
1247 | if (!(svm->vcpu.arch.cr0 & X86_CR0_TS)) | 1247 | if (!(svm->vcpu.arch.cr0 & X86_CR0_TS)) |
@@ -1251,7 +1251,7 @@ static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | |||
1251 | return 1; | 1251 | return 1; |
1252 | } | 1252 | } |
1253 | 1253 | ||
1254 | static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 1254 | static int mc_interception(struct vcpu_svm *svm) |
1255 | { | 1255 | { |
1256 | /* | 1256 | /* |
1257 | * On an #MC intercept the MCE handler is not called automatically in | 1257 | * On an #MC intercept the MCE handler is not called automatically in |
@@ -1264,8 +1264,10 @@ static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | |||
1264 | return 1; | 1264 | return 1; |
1265 | } | 1265 | } |
1266 | 1266 | ||
1267 | static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 1267 | static int shutdown_interception(struct vcpu_svm *svm) |
1268 | { | 1268 | { |
1269 | struct kvm_run *kvm_run = svm->vcpu.run; | ||
1270 | |||
1269 | /* | 1271 | /* |
1270 | * VMCB is undefined after a SHUTDOWN intercept | 1272 | * VMCB is undefined after a SHUTDOWN intercept |
1271 | * so reinitialize it. | 1273 | * so reinitialize it. |
@@ -1277,7 +1279,7 @@ static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | |||
1277 | return 0; | 1279 | return 0; |
1278 | } | 1280 | } |
1279 | 1281 | ||
1280 | static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 1282 | static int io_interception(struct vcpu_svm *svm) |
1281 | { | 1283 | { |
1282 | u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */ | 1284 | u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */ |
1283 | int size, in, string; | 1285 | int size, in, string; |
@@ -1291,7 +1293,7 @@ static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | |||
1291 | 1293 | ||
1292 | if (string) { | 1294 | if (string) { |
1293 | if (emulate_instruction(&svm->vcpu, | 1295 | if (emulate_instruction(&svm->vcpu, |
1294 | kvm_run, 0, 0, 0) == EMULATE_DO_MMIO) | 1296 | 0, 0, 0) == EMULATE_DO_MMIO) |
1295 | return 0; | 1297 | return 0; |
1296 | return 1; | 1298 | return 1; |
1297 | } | 1299 | } |
@@ -1301,33 +1303,33 @@ static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | |||
1301 | size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; | 1303 | size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; |
1302 | 1304 | ||
1303 | skip_emulated_instruction(&svm->vcpu); | 1305 | skip_emulated_instruction(&svm->vcpu); |
1304 | return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port); | 1306 | return kvm_emulate_pio(&svm->vcpu, in, size, port); |
1305 | } | 1307 | } |
1306 | 1308 | ||
1307 | static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 1309 | static int nmi_interception(struct vcpu_svm *svm) |
1308 | { | 1310 | { |
1309 | return 1; | 1311 | return 1; |
1310 | } | 1312 | } |
1311 | 1313 | ||
1312 | static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 1314 | static int intr_interception(struct vcpu_svm *svm) |
1313 | { | 1315 | { |
1314 | ++svm->vcpu.stat.irq_exits; | 1316 | ++svm->vcpu.stat.irq_exits; |
1315 | return 1; | 1317 | return 1; |
1316 | } | 1318 | } |
1317 | 1319 | ||
1318 | static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 1320 | static int nop_on_interception(struct vcpu_svm *svm) |
1319 | { | 1321 | { |
1320 | return 1; | 1322 | return 1; |
1321 | } | 1323 | } |
1322 | 1324 | ||
1323 | static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 1325 | static int halt_interception(struct vcpu_svm *svm) |
1324 | { | 1326 | { |
1325 | svm->next_rip = kvm_rip_read(&svm->vcpu) + 1; | 1327 | svm->next_rip = kvm_rip_read(&svm->vcpu) + 1; |
1326 | skip_emulated_instruction(&svm->vcpu); | 1328 | skip_emulated_instruction(&svm->vcpu); |
1327 | return kvm_emulate_halt(&svm->vcpu); | 1329 | return kvm_emulate_halt(&svm->vcpu); |
1328 | } | 1330 | } |
1329 | 1331 | ||
1330 | static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 1332 | static int vmmcall_interception(struct vcpu_svm *svm) |
1331 | { | 1333 | { |
1332 | svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; | 1334 | svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; |
1333 | skip_emulated_instruction(&svm->vcpu); | 1335 | skip_emulated_instruction(&svm->vcpu); |
@@ -1378,8 +1380,15 @@ static inline int nested_svm_intr(struct vcpu_svm *svm) | |||
1378 | 1380 | ||
1379 | svm->vmcb->control.exit_code = SVM_EXIT_INTR; | 1381 | svm->vmcb->control.exit_code = SVM_EXIT_INTR; |
1380 | 1382 | ||
1381 | if (nested_svm_exit_handled(svm)) { | 1383 | if (svm->nested.intercept & 1ULL) { |
1382 | nsvm_printk("VMexit -> INTR\n"); | 1384 | /* |
1385 | * The #vmexit can't be emulated here directly because this | ||
1386 | * code path runs with irqs and preemption disabled. A | ||
1387 | * #vmexit emulation might sleep. Only signal a request for | ||
1388 | * the #vmexit here. | ||
1389 | */ | ||
1390 | svm->nested.exit_required = true; | ||
1391 | trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip); | ||
1383 | return 1; | 1392 | return 1; |
1384 | } | 1393 | } |
1385 | 1394 | ||
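nested_svm_intr() runs with interrupts disabled, while emulating a #vmexit may sleep (it maps the nested VMCB via gfn_to_page()), so the patch only records that an exit is required and lets handle_exit() emulate it later in a sleepable context. A generic sketch of this defer-to-safe-context pattern, with hypothetical names:

#include <stdbool.h>

struct nested_ctx {
    bool exit_required;     /* set in atomic context, consumed later */
};

/* Atomic context: may not sleep, so only flag the work. */
static void request_nested_exit(struct nested_ctx *n)
{
    n->exit_required = true;
}

/* Process context: safe to sleep, perform the deferred emulation here. */
static void run_exit_handler(struct nested_ctx *n, void (*emulate_vmexit)(void))
{
    if (n->exit_required) {
        emulate_vmexit();
        n->exit_required = false;
    }
}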
@@ -1390,10 +1399,7 @@ static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx) | |||
1390 | { | 1399 | { |
1391 | struct page *page; | 1400 | struct page *page; |
1392 | 1401 | ||
1393 | down_read(¤t->mm->mmap_sem); | ||
1394 | page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT); | 1402 | page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT); |
1395 | up_read(¤t->mm->mmap_sem); | ||
1396 | |||
1397 | if (is_error_page(page)) | 1403 | if (is_error_page(page)) |
1398 | goto error; | 1404 | goto error; |
1399 | 1405 | ||
@@ -1532,14 +1538,12 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm) | |||
1532 | } | 1538 | } |
1533 | default: { | 1539 | default: { |
1534 | u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR); | 1540 | u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR); |
1535 | nsvm_printk("exit code: 0x%x\n", exit_code); | ||
1536 | if (svm->nested.intercept & exit_bits) | 1541 | if (svm->nested.intercept & exit_bits) |
1537 | vmexit = NESTED_EXIT_DONE; | 1542 | vmexit = NESTED_EXIT_DONE; |
1538 | } | 1543 | } |
1539 | } | 1544 | } |
1540 | 1545 | ||
1541 | if (vmexit == NESTED_EXIT_DONE) { | 1546 | if (vmexit == NESTED_EXIT_DONE) { |
1542 | nsvm_printk("#VMEXIT reason=%04x\n", exit_code); | ||
1543 | nested_svm_vmexit(svm); | 1547 | nested_svm_vmexit(svm); |
1544 | } | 1548 | } |
1545 | 1549 | ||
@@ -1584,6 +1588,12 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) | |||
1584 | struct vmcb *hsave = svm->nested.hsave; | 1588 | struct vmcb *hsave = svm->nested.hsave; |
1585 | struct vmcb *vmcb = svm->vmcb; | 1589 | struct vmcb *vmcb = svm->vmcb; |
1586 | 1590 | ||
1591 | trace_kvm_nested_vmexit_inject(vmcb->control.exit_code, | ||
1592 | vmcb->control.exit_info_1, | ||
1593 | vmcb->control.exit_info_2, | ||
1594 | vmcb->control.exit_int_info, | ||
1595 | vmcb->control.exit_int_info_err); | ||
1596 | |||
1587 | nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, KM_USER0); | 1597 | nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, KM_USER0); |
1588 | if (!nested_vmcb) | 1598 | if (!nested_vmcb) |
1589 | return 1; | 1599 | return 1; |
@@ -1617,6 +1627,22 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) | |||
1617 | nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2; | 1627 | nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2; |
1618 | nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info; | 1628 | nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info; |
1619 | nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err; | 1629 | nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err; |
1630 | |||
1631 | /* | ||
1632 | * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle, we have | ||
1633 | * to make sure that we do not lose injected events. So check event_inj | ||
1634 | * here and copy it to exit_int_info if it is valid. | ||
1635 | * Exit_int_info and event_inj can't both be valid because the case | ||
1636 | * below only happens on a VMRUN instruction intercept which has | ||
1637 | * no valid exit_int_info set. | ||
1638 | */ | ||
1639 | if (vmcb->control.event_inj & SVM_EVTINJ_VALID) { | ||
1640 | struct vmcb_control_area *nc = &nested_vmcb->control; | ||
1641 | |||
1642 | nc->exit_int_info = vmcb->control.event_inj; | ||
1643 | nc->exit_int_info_err = vmcb->control.event_inj_err; | ||
1644 | } | ||
1645 | |||
1620 | nested_vmcb->control.tlb_ctl = 0; | 1646 | nested_vmcb->control.tlb_ctl = 0; |
1621 | nested_vmcb->control.event_inj = 0; | 1647 | nested_vmcb->control.event_inj = 0; |
1622 | nested_vmcb->control.event_inj_err = 0; | 1648 | nested_vmcb->control.event_inj_err = 0; |
@@ -1628,10 +1654,6 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) | |||
1628 | /* Restore the original control entries */ | 1654 | /* Restore the original control entries */ |
1629 | copy_vmcb_control_area(vmcb, hsave); | 1655 | copy_vmcb_control_area(vmcb, hsave); |
1630 | 1656 | ||
1631 | /* Kill any pending exceptions */ | ||
1632 | if (svm->vcpu.arch.exception.pending == true) | ||
1633 | nsvm_printk("WARNING: Pending Exception\n"); | ||
1634 | |||
1635 | kvm_clear_exception_queue(&svm->vcpu); | 1657 | kvm_clear_exception_queue(&svm->vcpu); |
1636 | kvm_clear_interrupt_queue(&svm->vcpu); | 1658 | kvm_clear_interrupt_queue(&svm->vcpu); |
1637 | 1659 | ||
@@ -1702,6 +1724,12 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm) | |||
1702 | /* nested_vmcb is our indicator if nested SVM is activated */ | 1724 | /* nested_vmcb is our indicator if nested SVM is activated */ |
1703 | svm->nested.vmcb = svm->vmcb->save.rax; | 1725 | svm->nested.vmcb = svm->vmcb->save.rax; |
1704 | 1726 | ||
1727 | trace_kvm_nested_vmrun(svm->vmcb->save.rip - 3, svm->nested.vmcb, | ||
1728 | nested_vmcb->save.rip, | ||
1729 | nested_vmcb->control.int_ctl, | ||
1730 | nested_vmcb->control.event_inj, | ||
1731 | nested_vmcb->control.nested_ctl); | ||
1732 | |||
1705 | /* Clear internal status */ | 1733 | /* Clear internal status */ |
1706 | kvm_clear_exception_queue(&svm->vcpu); | 1734 | kvm_clear_exception_queue(&svm->vcpu); |
1707 | kvm_clear_interrupt_queue(&svm->vcpu); | 1735 | kvm_clear_interrupt_queue(&svm->vcpu); |
@@ -1789,28 +1817,15 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm) | |||
1789 | svm->nested.intercept = nested_vmcb->control.intercept; | 1817 | svm->nested.intercept = nested_vmcb->control.intercept; |
1790 | 1818 | ||
1791 | force_new_asid(&svm->vcpu); | 1819 | force_new_asid(&svm->vcpu); |
1792 | svm->vmcb->control.exit_int_info = nested_vmcb->control.exit_int_info; | ||
1793 | svm->vmcb->control.exit_int_info_err = nested_vmcb->control.exit_int_info_err; | ||
1794 | svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK; | 1820 | svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK; |
1795 | if (nested_vmcb->control.int_ctl & V_IRQ_MASK) { | ||
1796 | nsvm_printk("nSVM Injecting Interrupt: 0x%x\n", | ||
1797 | nested_vmcb->control.int_ctl); | ||
1798 | } | ||
1799 | if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK) | 1821 | if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK) |
1800 | svm->vcpu.arch.hflags |= HF_VINTR_MASK; | 1822 | svm->vcpu.arch.hflags |= HF_VINTR_MASK; |
1801 | else | 1823 | else |
1802 | svm->vcpu.arch.hflags &= ~HF_VINTR_MASK; | 1824 | svm->vcpu.arch.hflags &= ~HF_VINTR_MASK; |
1803 | 1825 | ||
1804 | nsvm_printk("nSVM exit_int_info: 0x%x | int_state: 0x%x\n", | ||
1805 | nested_vmcb->control.exit_int_info, | ||
1806 | nested_vmcb->control.int_state); | ||
1807 | |||
1808 | svm->vmcb->control.int_vector = nested_vmcb->control.int_vector; | 1826 | svm->vmcb->control.int_vector = nested_vmcb->control.int_vector; |
1809 | svm->vmcb->control.int_state = nested_vmcb->control.int_state; | 1827 | svm->vmcb->control.int_state = nested_vmcb->control.int_state; |
1810 | svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset; | 1828 | svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset; |
1811 | if (nested_vmcb->control.event_inj & SVM_EVTINJ_VALID) | ||
1812 | nsvm_printk("Injecting Event: 0x%x\n", | ||
1813 | nested_vmcb->control.event_inj); | ||
1814 | svm->vmcb->control.event_inj = nested_vmcb->control.event_inj; | 1829 | svm->vmcb->control.event_inj = nested_vmcb->control.event_inj; |
1815 | svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err; | 1830 | svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err; |
1816 | 1831 | ||
@@ -1837,7 +1852,7 @@ static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb) | |||
1837 | to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip; | 1852 | to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip; |
1838 | } | 1853 | } |
1839 | 1854 | ||
1840 | static int vmload_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 1855 | static int vmload_interception(struct vcpu_svm *svm) |
1841 | { | 1856 | { |
1842 | struct vmcb *nested_vmcb; | 1857 | struct vmcb *nested_vmcb; |
1843 | 1858 | ||
@@ -1857,7 +1872,7 @@ static int vmload_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | |||
1857 | return 1; | 1872 | return 1; |
1858 | } | 1873 | } |
1859 | 1874 | ||
1860 | static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 1875 | static int vmsave_interception(struct vcpu_svm *svm) |
1861 | { | 1876 | { |
1862 | struct vmcb *nested_vmcb; | 1877 | struct vmcb *nested_vmcb; |
1863 | 1878 | ||
@@ -1877,10 +1892,8 @@ static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | |||
1877 | return 1; | 1892 | return 1; |
1878 | } | 1893 | } |
1879 | 1894 | ||
1880 | static int vmrun_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 1895 | static int vmrun_interception(struct vcpu_svm *svm) |
1881 | { | 1896 | { |
1882 | nsvm_printk("VMrun\n"); | ||
1883 | |||
1884 | if (nested_svm_check_permissions(svm)) | 1897 | if (nested_svm_check_permissions(svm)) |
1885 | return 1; | 1898 | return 1; |
1886 | 1899 | ||
@@ -1907,7 +1920,7 @@ failed: | |||
1907 | return 1; | 1920 | return 1; |
1908 | } | 1921 | } |
1909 | 1922 | ||
1910 | static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 1923 | static int stgi_interception(struct vcpu_svm *svm) |
1911 | { | 1924 | { |
1912 | if (nested_svm_check_permissions(svm)) | 1925 | if (nested_svm_check_permissions(svm)) |
1913 | return 1; | 1926 | return 1; |
@@ -1920,7 +1933,7 @@ static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | |||
1920 | return 1; | 1933 | return 1; |
1921 | } | 1934 | } |
1922 | 1935 | ||
1923 | static int clgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 1936 | static int clgi_interception(struct vcpu_svm *svm) |
1924 | { | 1937 | { |
1925 | if (nested_svm_check_permissions(svm)) | 1938 | if (nested_svm_check_permissions(svm)) |
1926 | return 1; | 1939 | return 1; |
@@ -1937,10 +1950,12 @@ static int clgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | |||
1937 | return 1; | 1950 | return 1; |
1938 | } | 1951 | } |
1939 | 1952 | ||
1940 | static int invlpga_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 1953 | static int invlpga_interception(struct vcpu_svm *svm) |
1941 | { | 1954 | { |
1942 | struct kvm_vcpu *vcpu = &svm->vcpu; | 1955 | struct kvm_vcpu *vcpu = &svm->vcpu; |
1943 | nsvm_printk("INVLPGA\n"); | 1956 | |
1957 | trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX], | ||
1958 | vcpu->arch.regs[VCPU_REGS_RAX]); | ||
1944 | 1959 | ||
1945 | /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */ | 1960 | /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */ |
1946 | kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]); | 1961 | kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]); |
@@ -1950,15 +1965,21 @@ static int invlpga_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | |||
1950 | return 1; | 1965 | return 1; |
1951 | } | 1966 | } |
1952 | 1967 | ||
1953 | static int invalid_op_interception(struct vcpu_svm *svm, | 1968 | static int skinit_interception(struct vcpu_svm *svm) |
1954 | struct kvm_run *kvm_run) | ||
1955 | { | 1969 | { |
1970 | trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]); | ||
1971 | |||
1956 | kvm_queue_exception(&svm->vcpu, UD_VECTOR); | 1972 | kvm_queue_exception(&svm->vcpu, UD_VECTOR); |
1957 | return 1; | 1973 | return 1; |
1958 | } | 1974 | } |
1959 | 1975 | ||
1960 | static int task_switch_interception(struct vcpu_svm *svm, | 1976 | static int invalid_op_interception(struct vcpu_svm *svm) |
1961 | struct kvm_run *kvm_run) | 1977 | { |
1978 | kvm_queue_exception(&svm->vcpu, UD_VECTOR); | ||
1979 | return 1; | ||
1980 | } | ||
1981 | |||
1982 | static int task_switch_interception(struct vcpu_svm *svm) | ||
1962 | { | 1983 | { |
1963 | u16 tss_selector; | 1984 | u16 tss_selector; |
1964 | int reason; | 1985 | int reason; |
@@ -2008,14 +2029,14 @@ static int task_switch_interception(struct vcpu_svm *svm, | |||
2008 | return kvm_task_switch(&svm->vcpu, tss_selector, reason); | 2029 | return kvm_task_switch(&svm->vcpu, tss_selector, reason); |
2009 | } | 2030 | } |
2010 | 2031 | ||
2011 | static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 2032 | static int cpuid_interception(struct vcpu_svm *svm) |
2012 | { | 2033 | { |
2013 | svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; | 2034 | svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; |
2014 | kvm_emulate_cpuid(&svm->vcpu); | 2035 | kvm_emulate_cpuid(&svm->vcpu); |
2015 | return 1; | 2036 | return 1; |
2016 | } | 2037 | } |
2017 | 2038 | ||
2018 | static int iret_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 2039 | static int iret_interception(struct vcpu_svm *svm) |
2019 | { | 2040 | { |
2020 | ++svm->vcpu.stat.nmi_window_exits; | 2041 | ++svm->vcpu.stat.nmi_window_exits; |
2021 | svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET); | 2042 | svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET); |
@@ -2023,26 +2044,27 @@ static int iret_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | |||
2023 | return 1; | 2044 | return 1; |
2024 | } | 2045 | } |
2025 | 2046 | ||
2026 | static int invlpg_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 2047 | static int invlpg_interception(struct vcpu_svm *svm) |
2027 | { | 2048 | { |
2028 | if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0) != EMULATE_DONE) | 2049 | if (emulate_instruction(&svm->vcpu, 0, 0, 0) != EMULATE_DONE) |
2029 | pr_unimpl(&svm->vcpu, "%s: failed\n", __func__); | 2050 | pr_unimpl(&svm->vcpu, "%s: failed\n", __func__); |
2030 | return 1; | 2051 | return 1; |
2031 | } | 2052 | } |
2032 | 2053 | ||
2033 | static int emulate_on_interception(struct vcpu_svm *svm, | 2054 | static int emulate_on_interception(struct vcpu_svm *svm) |
2034 | struct kvm_run *kvm_run) | ||
2035 | { | 2055 | { |
2036 | if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE) | 2056 | if (emulate_instruction(&svm->vcpu, 0, 0, 0) != EMULATE_DONE) |
2037 | pr_unimpl(&svm->vcpu, "%s: failed\n", __func__); | 2057 | pr_unimpl(&svm->vcpu, "%s: failed\n", __func__); |
2038 | return 1; | 2058 | return 1; |
2039 | } | 2059 | } |
2040 | 2060 | ||
2041 | static int cr8_write_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 2061 | static int cr8_write_interception(struct vcpu_svm *svm) |
2042 | { | 2062 | { |
2063 | struct kvm_run *kvm_run = svm->vcpu.run; | ||
2064 | |||
2043 | u8 cr8_prev = kvm_get_cr8(&svm->vcpu); | 2065 | u8 cr8_prev = kvm_get_cr8(&svm->vcpu); |
2044 | /* instruction emulation calls kvm_set_cr8() */ | 2066 | /* instruction emulation calls kvm_set_cr8() */ |
2045 | emulate_instruction(&svm->vcpu, NULL, 0, 0, 0); | 2067 | emulate_instruction(&svm->vcpu, 0, 0, 0); |
2046 | if (irqchip_in_kernel(svm->vcpu.kvm)) { | 2068 | if (irqchip_in_kernel(svm->vcpu.kvm)) { |
2047 | svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK; | 2069 | svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK; |
2048 | return 1; | 2070 | return 1; |
@@ -2128,7 +2150,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data) | |||
2128 | return 0; | 2150 | return 0; |
2129 | } | 2151 | } |
2130 | 2152 | ||
2131 | static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 2153 | static int rdmsr_interception(struct vcpu_svm *svm) |
2132 | { | 2154 | { |
2133 | u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; | 2155 | u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; |
2134 | u64 data; | 2156 | u64 data; |
@@ -2221,7 +2243,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) | |||
2221 | return 0; | 2243 | return 0; |
2222 | } | 2244 | } |
2223 | 2245 | ||
2224 | static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 2246 | static int wrmsr_interception(struct vcpu_svm *svm) |
2225 | { | 2247 | { |
2226 | u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; | 2248 | u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; |
2227 | u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u) | 2249 | u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u) |
@@ -2237,17 +2259,18 @@ static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | |||
2237 | return 1; | 2259 | return 1; |
2238 | } | 2260 | } |
2239 | 2261 | ||
2240 | static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 2262 | static int msr_interception(struct vcpu_svm *svm) |
2241 | { | 2263 | { |
2242 | if (svm->vmcb->control.exit_info_1) | 2264 | if (svm->vmcb->control.exit_info_1) |
2243 | return wrmsr_interception(svm, kvm_run); | 2265 | return wrmsr_interception(svm); |
2244 | else | 2266 | else |
2245 | return rdmsr_interception(svm, kvm_run); | 2267 | return rdmsr_interception(svm); |
2246 | } | 2268 | } |
2247 | 2269 | ||
2248 | static int interrupt_window_interception(struct vcpu_svm *svm, | 2270 | static int interrupt_window_interception(struct vcpu_svm *svm) |
2249 | struct kvm_run *kvm_run) | ||
2250 | { | 2271 | { |
2272 | struct kvm_run *kvm_run = svm->vcpu.run; | ||
2273 | |||
2251 | svm_clear_vintr(svm); | 2274 | svm_clear_vintr(svm); |
2252 | svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; | 2275 | svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; |
2253 | /* | 2276 | /* |
@@ -2265,8 +2288,13 @@ static int interrupt_window_interception(struct vcpu_svm *svm, | |||
2265 | return 1; | 2288 | return 1; |
2266 | } | 2289 | } |
2267 | 2290 | ||
2268 | static int (*svm_exit_handlers[])(struct vcpu_svm *svm, | 2291 | static int pause_interception(struct vcpu_svm *svm) |
2269 | struct kvm_run *kvm_run) = { | 2292 | { |
2293 | kvm_vcpu_on_spin(&(svm->vcpu)); | ||
2294 | return 1; | ||
2295 | } | ||
2296 | |||
2297 | static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = { | ||
2270 | [SVM_EXIT_READ_CR0] = emulate_on_interception, | 2298 | [SVM_EXIT_READ_CR0] = emulate_on_interception, |
2271 | [SVM_EXIT_READ_CR3] = emulate_on_interception, | 2299 | [SVM_EXIT_READ_CR3] = emulate_on_interception, |
2272 | [SVM_EXIT_READ_CR4] = emulate_on_interception, | 2300 | [SVM_EXIT_READ_CR4] = emulate_on_interception, |
@@ -2301,6 +2329,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm, | |||
2301 | [SVM_EXIT_CPUID] = cpuid_interception, | 2329 | [SVM_EXIT_CPUID] = cpuid_interception, |
2302 | [SVM_EXIT_IRET] = iret_interception, | 2330 | [SVM_EXIT_IRET] = iret_interception, |
2303 | [SVM_EXIT_INVD] = emulate_on_interception, | 2331 | [SVM_EXIT_INVD] = emulate_on_interception, |
2332 | [SVM_EXIT_PAUSE] = pause_interception, | ||
2304 | [SVM_EXIT_HLT] = halt_interception, | 2333 | [SVM_EXIT_HLT] = halt_interception, |
2305 | [SVM_EXIT_INVLPG] = invlpg_interception, | 2334 | [SVM_EXIT_INVLPG] = invlpg_interception, |
2306 | [SVM_EXIT_INVLPGA] = invlpga_interception, | 2335 | [SVM_EXIT_INVLPGA] = invlpga_interception, |
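With pause filtering, the CPU counts PAUSE executions in guest mode and raises an intercept only after the programmed threshold (3000 here), and pause_interception() then calls kvm_vcpu_on_spin() to yield to a vcpu that may hold the contended lock. A rough, simplified model of the counter behaviour (the exact hardware reload semantics are not spelled out in this patch):

/* Sketch: the hardware decrements a filter count on each PAUSE executed in
 * guest mode and triggers an intercept when it reaches zero; the yield
 * callback approximates kvm_vcpu_on_spin(). */
struct pause_filter {
    int count, reload;
};

static void model_guest_pause(struct pause_filter *pf, void (*yield)(void))
{
    if (--pf->count <= 0) {
        pf->count = pf->reload;   /* e.g. 3000 as programmed by init_vmcb() */
        yield();
    }
}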
@@ -2314,26 +2343,36 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm, | |||
2314 | [SVM_EXIT_VMSAVE] = vmsave_interception, | 2343 | [SVM_EXIT_VMSAVE] = vmsave_interception, |
2315 | [SVM_EXIT_STGI] = stgi_interception, | 2344 | [SVM_EXIT_STGI] = stgi_interception, |
2316 | [SVM_EXIT_CLGI] = clgi_interception, | 2345 | [SVM_EXIT_CLGI] = clgi_interception, |
2317 | [SVM_EXIT_SKINIT] = invalid_op_interception, | 2346 | [SVM_EXIT_SKINIT] = skinit_interception, |
2318 | [SVM_EXIT_WBINVD] = emulate_on_interception, | 2347 | [SVM_EXIT_WBINVD] = emulate_on_interception, |
2319 | [SVM_EXIT_MONITOR] = invalid_op_interception, | 2348 | [SVM_EXIT_MONITOR] = invalid_op_interception, |
2320 | [SVM_EXIT_MWAIT] = invalid_op_interception, | 2349 | [SVM_EXIT_MWAIT] = invalid_op_interception, |
2321 | [SVM_EXIT_NPF] = pf_interception, | 2350 | [SVM_EXIT_NPF] = pf_interception, |
2322 | }; | 2351 | }; |
2323 | 2352 | ||
2324 | static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | 2353 | static int handle_exit(struct kvm_vcpu *vcpu) |
2325 | { | 2354 | { |
2326 | struct vcpu_svm *svm = to_svm(vcpu); | 2355 | struct vcpu_svm *svm = to_svm(vcpu); |
2356 | struct kvm_run *kvm_run = vcpu->run; | ||
2327 | u32 exit_code = svm->vmcb->control.exit_code; | 2357 | u32 exit_code = svm->vmcb->control.exit_code; |
2328 | 2358 | ||
2329 | trace_kvm_exit(exit_code, svm->vmcb->save.rip); | 2359 | trace_kvm_exit(exit_code, svm->vmcb->save.rip); |
2330 | 2360 | ||
2361 | if (unlikely(svm->nested.exit_required)) { | ||
2362 | nested_svm_vmexit(svm); | ||
2363 | svm->nested.exit_required = false; | ||
2364 | |||
2365 | return 1; | ||
2366 | } | ||
2367 | |||
2331 | if (is_nested(svm)) { | 2368 | if (is_nested(svm)) { |
2332 | int vmexit; | 2369 | int vmexit; |
2333 | 2370 | ||
2334 | nsvm_printk("nested handle_exit: 0x%x | 0x%lx | 0x%lx | 0x%lx\n", | 2371 | trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code, |
2335 | exit_code, svm->vmcb->control.exit_info_1, | 2372 | svm->vmcb->control.exit_info_1, |
2336 | svm->vmcb->control.exit_info_2, svm->vmcb->save.rip); | 2373 | svm->vmcb->control.exit_info_2, |
2374 | svm->vmcb->control.exit_int_info, | ||
2375 | svm->vmcb->control.exit_int_info_err); | ||
2337 | 2376 | ||
2338 | vmexit = nested_svm_exit_special(svm); | 2377 | vmexit = nested_svm_exit_special(svm); |
2339 | 2378 | ||
@@ -2383,7 +2422,7 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
2383 | return 0; | 2422 | return 0; |
2384 | } | 2423 | } |
2385 | 2424 | ||
2386 | return svm_exit_handlers[exit_code](svm, kvm_run); | 2425 | return svm_exit_handlers[exit_code](svm); |
2387 | } | 2426 | } |
2388 | 2427 | ||
2389 | static void reload_tss(struct kvm_vcpu *vcpu) | 2428 | static void reload_tss(struct kvm_vcpu *vcpu) |
@@ -2460,20 +2499,47 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu) | |||
2460 | !(svm->vcpu.arch.hflags & HF_NMI_MASK); | 2499 | !(svm->vcpu.arch.hflags & HF_NMI_MASK); |
2461 | } | 2500 | } |
2462 | 2501 | ||
2502 | static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu) | ||
2503 | { | ||
2504 | struct vcpu_svm *svm = to_svm(vcpu); | ||
2505 | |||
2506 | return !!(svm->vcpu.arch.hflags & HF_NMI_MASK); | ||
2507 | } | ||
2508 | |||
2509 | static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) | ||
2510 | { | ||
2511 | struct vcpu_svm *svm = to_svm(vcpu); | ||
2512 | |||
2513 | if (masked) { | ||
2514 | svm->vcpu.arch.hflags |= HF_NMI_MASK; | ||
2515 | svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET); | ||
2516 | } else { | ||
2517 | svm->vcpu.arch.hflags &= ~HF_NMI_MASK; | ||
2518 | svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET); | ||
2519 | } | ||
2520 | } | ||
2521 | |||
2463 | static int svm_interrupt_allowed(struct kvm_vcpu *vcpu) | 2522 | static int svm_interrupt_allowed(struct kvm_vcpu *vcpu) |
2464 | { | 2523 | { |
2465 | struct vcpu_svm *svm = to_svm(vcpu); | 2524 | struct vcpu_svm *svm = to_svm(vcpu); |
2466 | struct vmcb *vmcb = svm->vmcb; | 2525 | struct vmcb *vmcb = svm->vmcb; |
2467 | return (vmcb->save.rflags & X86_EFLAGS_IF) && | 2526 | int ret; |
2468 | !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) && | 2527 | |
2469 | gif_set(svm) && | 2528 | if (!gif_set(svm) || |
2470 | !(is_nested(svm) && (svm->vcpu.arch.hflags & HF_VINTR_MASK)); | 2529 | (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)) |
2530 | return 0; | ||
2531 | |||
2532 | ret = !!(vmcb->save.rflags & X86_EFLAGS_IF); | ||
2533 | |||
2534 | if (is_nested(svm)) | ||
2535 | return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK); | ||
2536 | |||
2537 | return ret; | ||
2471 | } | 2538 | } |
2472 | 2539 | ||
2473 | static void enable_irq_window(struct kvm_vcpu *vcpu) | 2540 | static void enable_irq_window(struct kvm_vcpu *vcpu) |
2474 | { | 2541 | { |
2475 | struct vcpu_svm *svm = to_svm(vcpu); | 2542 | struct vcpu_svm *svm = to_svm(vcpu); |
2476 | nsvm_printk("Trying to open IRQ window\n"); | ||
2477 | 2543 | ||
2478 | nested_svm_intr(svm); | 2544 | nested_svm_intr(svm); |
2479 | 2545 | ||
@@ -2498,7 +2564,7 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu) | |||
2498 | /* Something prevents NMI from being injected. Single step over | 2564 | /* Something prevents NMI from being injected. Single step over |
2499 | possible problem (IRET or exception injection or interrupt | 2565 | possible problem (IRET or exception injection or interrupt |
2500 | shadow) */ | 2566 | shadow) */ |
2501 | vcpu->arch.singlestep = true; | 2567 | svm->nmi_singlestep = true; |
2502 | svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); | 2568 | svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); |
2503 | update_db_intercept(vcpu); | 2569 | update_db_intercept(vcpu); |
2504 | } | 2570 | } |
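For readers unfamiliar with this trick, here is a minimal user-space sketch of the single-step-over-NMI-blocker approach used above, now tracked in the SVM-private nmi_singlestep flag rather than vcpu->arch.singlestep. The RFLAGS bit values are the architectural ones; db_intercept() below is a stand-in for the #DB handler side (update_db_intercept()/db_interception()), which is not shown in this hunk.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define X86_EFLAGS_TF (1u << 8)   /* trap flag: fault after one instruction */
#define X86_EFLAGS_RF (1u << 16)  /* resume flag: avoid re-triggering #DB */

struct fake_svm {
	bool nmi_singlestep;      /* replaces vcpu->arch.singlestep in the hunk above */
	uint32_t rflags;
};

/* Something blocks NMI injection: arm a single step over the blocker. */
static void enable_nmi_window(struct fake_svm *svm)
{
	svm->nmi_singlestep = true;
	svm->rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
}

/* On the resulting #DB, disarm the single step so NMI injection can be retried. */
static void db_intercept(struct fake_svm *svm)
{
	if (svm->nmi_singlestep) {
		svm->nmi_singlestep = false;
		svm->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
	}
}

int main(void)
{
	struct fake_svm svm = { .rflags = 0x2 };

	enable_nmi_window(&svm);
	printf("armed:    rflags=%#x\n", (unsigned)svm.rflags);
	db_intercept(&svm);
	printf("disarmed: rflags=%#x\n", (unsigned)svm.rflags);
	return 0;
}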
@@ -2588,13 +2654,20 @@ static void svm_complete_interrupts(struct vcpu_svm *svm) | |||
2588 | #define R "e" | 2654 | #define R "e" |
2589 | #endif | 2655 | #endif |
2590 | 2656 | ||
2591 | static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 2657 | static void svm_vcpu_run(struct kvm_vcpu *vcpu) |
2592 | { | 2658 | { |
2593 | struct vcpu_svm *svm = to_svm(vcpu); | 2659 | struct vcpu_svm *svm = to_svm(vcpu); |
2594 | u16 fs_selector; | 2660 | u16 fs_selector; |
2595 | u16 gs_selector; | 2661 | u16 gs_selector; |
2596 | u16 ldt_selector; | 2662 | u16 ldt_selector; |
2597 | 2663 | ||
2664 | /* | ||
2665 | * A vmexit emulation is required before the vcpu can be executed | ||
2666 | * again. | ||
2667 | */ | ||
2668 | if (unlikely(svm->nested.exit_required)) | ||
2669 | return; | ||
2670 | |||
2598 | svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; | 2671 | svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; |
2599 | svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; | 2672 | svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; |
2600 | svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; | 2673 | svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; |
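A condensed sketch of the control flow introduced by the two svm.c hunks above, assuming nested.exit_required is set elsewhere when an emulated #VMEXIT to L1 must be delivered before the vcpu may run again: handle_exit() performs the deferred #VMEXIT, and svm_vcpu_run() refuses to enter the guest while it is still pending.

#include <stdbool.h>
#include <stdio.h>

struct fake_svm {
	bool exit_required;          /* models svm->nested.exit_required */
};

static void nested_svm_vmexit(struct fake_svm *svm)
{
	printf("emulating #VMEXIT to L1\n");
}

static int handle_exit(struct fake_svm *svm)
{
	if (svm->exit_required) {
		/* the pending work is the exit itself; do it now */
		nested_svm_vmexit(svm);
		svm->exit_required = false;
		return 1;
	}
	printf("normal exit handling\n");
	return 1;
}

static void vcpu_run(struct fake_svm *svm)
{
	/* a vmexit emulation is required before the vcpu can run again */
	if (svm->exit_required)
		return;
	printf("entering guest\n");
}

int main(void)
{
	struct fake_svm svm = { .exit_required = true };

	vcpu_run(&svm);     /* skipped: emulated #VMEXIT still pending */
	handle_exit(&svm);  /* performs the deferred #VMEXIT */
	vcpu_run(&svm);     /* now enters the guest */
	return 0;
}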
@@ -2893,6 +2966,8 @@ static struct kvm_x86_ops svm_x86_ops = { | |||
2893 | .queue_exception = svm_queue_exception, | 2966 | .queue_exception = svm_queue_exception, |
2894 | .interrupt_allowed = svm_interrupt_allowed, | 2967 | .interrupt_allowed = svm_interrupt_allowed, |
2895 | .nmi_allowed = svm_nmi_allowed, | 2968 | .nmi_allowed = svm_nmi_allowed, |
2969 | .get_nmi_mask = svm_get_nmi_mask, | ||
2970 | .set_nmi_mask = svm_set_nmi_mask, | ||
2896 | .enable_nmi_window = enable_nmi_window, | 2971 | .enable_nmi_window = enable_nmi_window, |
2897 | .enable_irq_window = enable_irq_window, | 2972 | .enable_irq_window = enable_irq_window, |
2898 | .update_cr8_intercept = update_cr8_intercept, | 2973 | .update_cr8_intercept = update_cr8_intercept, |
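The two new kvm_x86_ops callbacks let architecture-independent code query and force NMI masking without knowing about HF_NMI_MASK or the IRET intercept. The sketch below models a plausible consumer such as save/restore of vcpu interrupt state; that generic-side wiring is an assumption here, since it is not part of this section.

#include <stdbool.h>
#include <stdio.h>

struct vcpu { unsigned hflags; };
#define HF_NMI_MASK 0x1u

/* Mirrors the kvm_x86_ops additions above. */
struct x86_ops {
	bool (*get_nmi_mask)(struct vcpu *v);
	void (*set_nmi_mask)(struct vcpu *v, bool masked);
};

static bool svm_get_nmi_mask(struct vcpu *v)
{
	return v->hflags & HF_NMI_MASK;
}

static void svm_set_nmi_mask(struct vcpu *v, bool masked)
{
	if (masked)
		v->hflags |= HF_NMI_MASK;   /* the real code also sets the IRET intercept */
	else
		v->hflags &= ~HF_NMI_MASK;
}

static const struct x86_ops ops = {
	.get_nmi_mask = svm_get_nmi_mask,
	.set_nmi_mask = svm_set_nmi_mask,
};

int main(void)
{
	struct vcpu v = { 0 };

	/* hypothetical save/restore of the NMI-masked bit through the ops */
	ops.set_nmi_mask(&v, true);
	bool saved = ops.get_nmi_mask(&v);
	printf("nmi masked: %d\n", saved);
	return 0;
}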
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 0d480e77eacf..816e0449db0b 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h | |||
@@ -349,6 +349,171 @@ TRACE_EVENT(kvm_apic_accept_irq, | |||
349 | __entry->coalesced ? " (coalesced)" : "") | 349 | __entry->coalesced ? " (coalesced)" : "") |
350 | ); | 350 | ); |
351 | 351 | ||
352 | /* | ||
353 | * Tracepoint for nested VMRUN | ||
354 | */ | ||
355 | TRACE_EVENT(kvm_nested_vmrun, | ||
356 | TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl, | ||
357 | __u32 event_inj, bool npt), | ||
358 | TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt), | ||
359 | |||
360 | TP_STRUCT__entry( | ||
361 | __field( __u64, rip ) | ||
362 | __field( __u64, vmcb ) | ||
363 | __field( __u64, nested_rip ) | ||
364 | __field( __u32, int_ctl ) | ||
365 | __field( __u32, event_inj ) | ||
366 | __field( bool, npt ) | ||
367 | ), | ||
368 | |||
369 | TP_fast_assign( | ||
370 | __entry->rip = rip; | ||
371 | __entry->vmcb = vmcb; | ||
372 | __entry->nested_rip = nested_rip; | ||
373 | __entry->int_ctl = int_ctl; | ||
374 | __entry->event_inj = event_inj; | ||
375 | __entry->npt = npt; | ||
376 | ), | ||
377 | |||
378 | TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x " | ||
379 | "event_inj: 0x%08x npt: %s\n", | ||
380 | __entry->rip, __entry->vmcb, __entry->nested_rip, | ||
381 | __entry->int_ctl, __entry->event_inj, | ||
382 | __entry->npt ? "on" : "off") | ||
383 | ); | ||
384 | |||
385 | /* | ||
386 | * Tracepoint for #VMEXIT while nested | ||
387 | */ | ||
388 | TRACE_EVENT(kvm_nested_vmexit, | ||
389 | TP_PROTO(__u64 rip, __u32 exit_code, | ||
390 | __u64 exit_info1, __u64 exit_info2, | ||
391 | __u32 exit_int_info, __u32 exit_int_info_err), | ||
392 | TP_ARGS(rip, exit_code, exit_info1, exit_info2, | ||
393 | exit_int_info, exit_int_info_err), | ||
394 | |||
395 | TP_STRUCT__entry( | ||
396 | __field( __u64, rip ) | ||
397 | __field( __u32, exit_code ) | ||
398 | __field( __u64, exit_info1 ) | ||
399 | __field( __u64, exit_info2 ) | ||
400 | __field( __u32, exit_int_info ) | ||
401 | __field( __u32, exit_int_info_err ) | ||
402 | ), | ||
403 | |||
404 | TP_fast_assign( | ||
405 | __entry->rip = rip; | ||
406 | __entry->exit_code = exit_code; | ||
407 | __entry->exit_info1 = exit_info1; | ||
408 | __entry->exit_info2 = exit_info2; | ||
409 | __entry->exit_int_info = exit_int_info; | ||
410 | __entry->exit_int_info_err = exit_int_info_err; | ||
411 | ), | ||
412 | TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx " | ||
413 | "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x\n", | ||
414 | __entry->rip, | ||
415 | ftrace_print_symbols_seq(p, __entry->exit_code, | ||
416 | kvm_x86_ops->exit_reasons_str), | ||
417 | __entry->exit_info1, __entry->exit_info2, | ||
418 | __entry->exit_int_info, __entry->exit_int_info_err) | ||
419 | ); | ||
420 | |||
421 | /* | ||
422 | * Tracepoint for #VMEXIT reinjected to the guest | ||
423 | */ | ||
424 | TRACE_EVENT(kvm_nested_vmexit_inject, | ||
425 | TP_PROTO(__u32 exit_code, | ||
426 | __u64 exit_info1, __u64 exit_info2, | ||
427 | __u32 exit_int_info, __u32 exit_int_info_err), | ||
428 | TP_ARGS(exit_code, exit_info1, exit_info2, | ||
429 | exit_int_info, exit_int_info_err), | ||
430 | |||
431 | TP_STRUCT__entry( | ||
432 | __field( __u32, exit_code ) | ||
433 | __field( __u64, exit_info1 ) | ||
434 | __field( __u64, exit_info2 ) | ||
435 | __field( __u32, exit_int_info ) | ||
436 | __field( __u32, exit_int_info_err ) | ||
437 | ), | ||
438 | |||
439 | TP_fast_assign( | ||
440 | __entry->exit_code = exit_code; | ||
441 | __entry->exit_info1 = exit_info1; | ||
442 | __entry->exit_info2 = exit_info2; | ||
443 | __entry->exit_int_info = exit_int_info; | ||
444 | __entry->exit_int_info_err = exit_int_info_err; | ||
445 | ), | ||
446 | |||
447 | TP_printk("reason: %s ext_inf1: 0x%016llx " | ||
448 | "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x\n", | ||
449 | ftrace_print_symbols_seq(p, __entry->exit_code, | ||
450 | kvm_x86_ops->exit_reasons_str), | ||
451 | __entry->exit_info1, __entry->exit_info2, | ||
452 | __entry->exit_int_info, __entry->exit_int_info_err) | ||
453 | ); | ||
454 | |||
455 | /* | ||
456 | * Tracepoint for nested #vmexit because of interrupt pending | ||
457 | */ | ||
458 | TRACE_EVENT(kvm_nested_intr_vmexit, | ||
459 | TP_PROTO(__u64 rip), | ||
460 | TP_ARGS(rip), | ||
461 | |||
462 | TP_STRUCT__entry( | ||
463 | __field( __u64, rip ) | ||
464 | ), | ||
465 | |||
466 | TP_fast_assign( | ||
467 | __entry->rip = rip | ||
468 | ), | ||
469 | |||
470 | TP_printk("rip: 0x%016llx\n", __entry->rip) | ||
471 | ); | ||
472 | |||
473 | /* | ||
474 | * Tracepoint for the INVLPGA instruction | ||
475 | */ | ||
476 | TRACE_EVENT(kvm_invlpga, | ||
477 | TP_PROTO(__u64 rip, int asid, u64 address), | ||
478 | TP_ARGS(rip, asid, address), | ||
479 | |||
480 | TP_STRUCT__entry( | ||
481 | __field( __u64, rip ) | ||
482 | __field( int, asid ) | ||
483 | __field( __u64, address ) | ||
484 | ), | ||
485 | |||
486 | TP_fast_assign( | ||
487 | __entry->rip = rip; | ||
488 | __entry->asid = asid; | ||
489 | __entry->address = address; | ||
490 | ), | ||
491 | |||
492 | TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx\n", | ||
493 | __entry->rip, __entry->asid, __entry->address) | ||
494 | ); | ||
495 | |||
496 | /* | ||
497 | * Tracepoint for the SKINIT instruction | ||
498 | */ | ||
499 | TRACE_EVENT(kvm_skinit, | ||
500 | TP_PROTO(__u64 rip, __u32 slb), | ||
501 | TP_ARGS(rip, slb), | ||
502 | |||
503 | TP_STRUCT__entry( | ||
504 | __field( __u64, rip ) | ||
505 | __field( __u32, slb ) | ||
506 | ), | ||
507 | |||
508 | TP_fast_assign( | ||
509 | __entry->rip = rip; | ||
510 | __entry->slb = slb; | ||
511 | ), | ||
512 | |||
513 | TP_printk("rip: 0x%016llx slb: 0x%08x\n", | ||
514 | __entry->rip, __entry->slb) | ||
515 | ); | ||
516 | |||
352 | #endif /* _TRACE_KVM_H */ | 517 | #endif /* _TRACE_KVM_H */ |
353 | 518 | ||
354 | /* This part must be outside protection */ | 519 | /* This part must be outside protection */ |
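The TRACE_EVENT definitions above only declare the events; the SVM code is expected to fire them at the matching interception points (the trace_kvm_nested_vmexit() call is visible in the handle_exit() hunk earlier in this diff). Below is a stand-alone sketch of the resulting output format, with printf standing in for TP_printk; the argument sources (guest rip, rcx/rax for INVLPGA, eax for SKINIT) are assumptions read off the TP_PROTOs.

#include <stdint.h>
#include <stdio.h>

/* User-space stand-ins that format roughly like the TP_printk strings above. */
static void trace_kvm_invlpga(uint64_t rip, int asid, uint64_t address)
{
	printf("kvm_invlpga: rip: 0x%016llx asid: %d address: 0x%016llx\n",
	       (unsigned long long)rip, asid, (unsigned long long)address);
}

static void trace_kvm_skinit(uint64_t rip, uint32_t slb)
{
	printf("kvm_skinit: rip: 0x%016llx slb: 0x%08x\n",
	       (unsigned long long)rip, slb);
}

int main(void)
{
	/* e.g. from invlpga_interception(): rip, rcx (ASID), rax (address) */
	trace_kvm_invlpga(0xffffffff81000010ULL, 1, 0x7f0000001000ULL);
	/* e.g. from skinit_interception(): rip, eax (SLB address) */
	trace_kvm_skinit(0xffffffff81000020ULL, 0x100000u);
	return 0;
}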
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index ed53b42caba1..d4918d6fc924 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -61,12 +61,37 @@ module_param_named(unrestricted_guest, | |||
61 | static int __read_mostly emulate_invalid_guest_state = 0; | 61 | static int __read_mostly emulate_invalid_guest_state = 0; |
62 | module_param(emulate_invalid_guest_state, bool, S_IRUGO); | 62 | module_param(emulate_invalid_guest_state, bool, S_IRUGO); |
63 | 63 | ||
64 | /* | ||
65 | * These two parameters are used to configure the controls for Pause-Loop Exiting: | ||
66 | * ple_gap: upper bound on the amount of time between two successive | ||
67 | * executions of PAUSE in a loop. It also indicates whether PLE is enabled. | ||
68 | * According to tests, this time is usually smaller than 41 cycles. | ||
69 | * ple_window: upper bound on the amount of time a guest is allowed to execute | ||
70 | * in a PAUSE loop. Tests indicate that most spinlocks are held for | ||
71 | * less than 2^12 cycles. | ||
72 | * Time is measured based on a counter that runs at the same rate as the TSC, | ||
73 | * refer to SDM volume 3B, sections 21.6.13 and 22.1.3. | ||
74 | */ | ||
75 | #define KVM_VMX_DEFAULT_PLE_GAP 41 | ||
76 | #define KVM_VMX_DEFAULT_PLE_WINDOW 4096 | ||
77 | static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP; | ||
78 | module_param(ple_gap, int, S_IRUGO); | ||
79 | |||
80 | static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW; | ||
81 | module_param(ple_window, int, S_IRUGO); | ||
82 | |||
64 | struct vmcs { | 83 | struct vmcs { |
65 | u32 revision_id; | 84 | u32 revision_id; |
66 | u32 abort; | 85 | u32 abort; |
67 | char data[0]; | 86 | char data[0]; |
68 | }; | 87 | }; |
69 | 88 | ||
89 | struct shared_msr_entry { | ||
90 | unsigned index; | ||
91 | u64 data; | ||
92 | u64 mask; | ||
93 | }; | ||
94 | |||
70 | struct vcpu_vmx { | 95 | struct vcpu_vmx { |
71 | struct kvm_vcpu vcpu; | 96 | struct kvm_vcpu vcpu; |
72 | struct list_head local_vcpus_link; | 97 | struct list_head local_vcpus_link; |
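A compact, user-space model of how the two module parameters above reach the VMCS; the cpu_has_vmx_ple() fallback and the PLE_GAP/PLE_WINDOW writes appear in later hunks of this file, and the stubs below merely stand in for the real vmcs accessors.

#include <stdint.h>
#include <stdio.h>

#define KVM_VMX_DEFAULT_PLE_GAP    41
#define KVM_VMX_DEFAULT_PLE_WINDOW 4096

static int ple_gap    = KVM_VMX_DEFAULT_PLE_GAP;    /* 0 disables PLE */
static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;

/* Stand-in: whether the CPU advertises SECONDARY_EXEC_PAUSE_LOOP_EXITING. */
static int cpu_has_vmx_ple(void) { return 1; }

static void vmcs_write32(const char *field, uint32_t val)
{
	printf("vmcs_write32(%s, %u)\n", field, val);
}

int main(void)
{
	if (!cpu_has_vmx_ple())
		ple_gap = 0;            /* hardware_setup(): silently disable PLE */

	if (ple_gap) {                  /* vmx_vcpu_setup(): program the controls */
		vmcs_write32("PLE_GAP", ple_gap);
		vmcs_write32("PLE_WINDOW", ple_window);
	}
	return 0;
}

Loading the module with ple_gap=0 keeps Pause-Loop Exiting disabled entirely, which matches the later hunk that clears SECONDARY_EXEC_PAUSE_LOOP_EXITING when ple_gap is zero.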
@@ -74,13 +99,12 @@ struct vcpu_vmx { | |||
74 | int launched; | 99 | int launched; |
75 | u8 fail; | 100 | u8 fail; |
76 | u32 idt_vectoring_info; | 101 | u32 idt_vectoring_info; |
77 | struct kvm_msr_entry *guest_msrs; | 102 | struct shared_msr_entry *guest_msrs; |
78 | struct kvm_msr_entry *host_msrs; | ||
79 | int nmsrs; | 103 | int nmsrs; |
80 | int save_nmsrs; | 104 | int save_nmsrs; |
81 | int msr_offset_efer; | ||
82 | #ifdef CONFIG_X86_64 | 105 | #ifdef CONFIG_X86_64 |
83 | int msr_offset_kernel_gs_base; | 106 | u64 msr_host_kernel_gs_base; |
107 | u64 msr_guest_kernel_gs_base; | ||
84 | #endif | 108 | #endif |
85 | struct vmcs *vmcs; | 109 | struct vmcs *vmcs; |
86 | struct { | 110 | struct { |
@@ -88,7 +112,6 @@ struct vcpu_vmx { | |||
88 | u16 fs_sel, gs_sel, ldt_sel; | 112 | u16 fs_sel, gs_sel, ldt_sel; |
89 | int gs_ldt_reload_needed; | 113 | int gs_ldt_reload_needed; |
90 | int fs_reload_needed; | 114 | int fs_reload_needed; |
91 | int guest_efer_loaded; | ||
92 | } host_state; | 115 | } host_state; |
93 | struct { | 116 | struct { |
94 | int vm86_active; | 117 | int vm86_active; |
@@ -107,7 +130,6 @@ struct vcpu_vmx { | |||
107 | } rmode; | 130 | } rmode; |
108 | int vpid; | 131 | int vpid; |
109 | bool emulation_required; | 132 | bool emulation_required; |
110 | enum emulation_result invalid_state_emulation_result; | ||
111 | 133 | ||
112 | /* Support for vnmi-less CPUs */ | 134 | /* Support for vnmi-less CPUs */ |
113 | int soft_vnmi_blocked; | 135 | int soft_vnmi_blocked; |
@@ -176,6 +198,8 @@ static struct kvm_vmx_segment_field { | |||
176 | VMX_SEGMENT_FIELD(LDTR), | 198 | VMX_SEGMENT_FIELD(LDTR), |
177 | }; | 199 | }; |
178 | 200 | ||
201 | static u64 host_efer; | ||
202 | |||
179 | static void ept_save_pdptrs(struct kvm_vcpu *vcpu); | 203 | static void ept_save_pdptrs(struct kvm_vcpu *vcpu); |
180 | 204 | ||
181 | /* | 205 | /* |
@@ -184,28 +208,12 @@ static void ept_save_pdptrs(struct kvm_vcpu *vcpu); | |||
184 | */ | 208 | */ |
185 | static const u32 vmx_msr_index[] = { | 209 | static const u32 vmx_msr_index[] = { |
186 | #ifdef CONFIG_X86_64 | 210 | #ifdef CONFIG_X86_64 |
187 | MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE, | 211 | MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, |
188 | #endif | 212 | #endif |
189 | MSR_EFER, MSR_K6_STAR, | 213 | MSR_EFER, MSR_K6_STAR, |
190 | }; | 214 | }; |
191 | #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index) | 215 | #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index) |
192 | 216 | ||
193 | static void load_msrs(struct kvm_msr_entry *e, int n) | ||
194 | { | ||
195 | int i; | ||
196 | |||
197 | for (i = 0; i < n; ++i) | ||
198 | wrmsrl(e[i].index, e[i].data); | ||
199 | } | ||
200 | |||
201 | static void save_msrs(struct kvm_msr_entry *e, int n) | ||
202 | { | ||
203 | int i; | ||
204 | |||
205 | for (i = 0; i < n; ++i) | ||
206 | rdmsrl(e[i].index, e[i].data); | ||
207 | } | ||
208 | |||
209 | static inline int is_page_fault(u32 intr_info) | 217 | static inline int is_page_fault(u32 intr_info) |
210 | { | 218 | { |
211 | return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | | 219 | return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | |
@@ -320,6 +328,12 @@ static inline int cpu_has_vmx_unrestricted_guest(void) | |||
320 | SECONDARY_EXEC_UNRESTRICTED_GUEST; | 328 | SECONDARY_EXEC_UNRESTRICTED_GUEST; |
321 | } | 329 | } |
322 | 330 | ||
331 | static inline int cpu_has_vmx_ple(void) | ||
332 | { | ||
333 | return vmcs_config.cpu_based_2nd_exec_ctrl & | ||
334 | SECONDARY_EXEC_PAUSE_LOOP_EXITING; | ||
335 | } | ||
336 | |||
323 | static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm) | 337 | static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm) |
324 | { | 338 | { |
325 | return flexpriority_enabled && | 339 | return flexpriority_enabled && |
@@ -348,7 +362,7 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) | |||
348 | int i; | 362 | int i; |
349 | 363 | ||
350 | for (i = 0; i < vmx->nmsrs; ++i) | 364 | for (i = 0; i < vmx->nmsrs; ++i) |
351 | if (vmx->guest_msrs[i].index == msr) | 365 | if (vmx_msr_index[vmx->guest_msrs[i].index] == msr) |
352 | return i; | 366 | return i; |
353 | return -1; | 367 | return -1; |
354 | } | 368 | } |
@@ -379,7 +393,7 @@ static inline void __invept(int ext, u64 eptp, gpa_t gpa) | |||
379 | : : "a" (&operand), "c" (ext) : "cc", "memory"); | 393 | : : "a" (&operand), "c" (ext) : "cc", "memory"); |
380 | } | 394 | } |
381 | 395 | ||
382 | static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) | 396 | static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) |
383 | { | 397 | { |
384 | int i; | 398 | int i; |
385 | 399 | ||
@@ -570,17 +584,12 @@ static void reload_tss(void) | |||
570 | load_TR_desc(); | 584 | load_TR_desc(); |
571 | } | 585 | } |
572 | 586 | ||
573 | static void load_transition_efer(struct vcpu_vmx *vmx) | 587 | static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) |
574 | { | 588 | { |
575 | int efer_offset = vmx->msr_offset_efer; | ||
576 | u64 host_efer; | ||
577 | u64 guest_efer; | 589 | u64 guest_efer; |
578 | u64 ignore_bits; | 590 | u64 ignore_bits; |
579 | 591 | ||
580 | if (efer_offset < 0) | 592 | guest_efer = vmx->vcpu.arch.shadow_efer; |
581 | return; | ||
582 | host_efer = vmx->host_msrs[efer_offset].data; | ||
583 | guest_efer = vmx->guest_msrs[efer_offset].data; | ||
584 | 593 | ||
585 | /* | 594 | /* |
586 | * NX is emulated; LMA and LME handled by hardware; SCE meaningless | 595 | * NX is emulated; LMA and LME handled by hardware; SCE meaningless |
@@ -593,27 +602,17 @@ static void load_transition_efer(struct vcpu_vmx *vmx) | |||
593 | if (guest_efer & EFER_LMA) | 602 | if (guest_efer & EFER_LMA) |
594 | ignore_bits &= ~(u64)EFER_SCE; | 603 | ignore_bits &= ~(u64)EFER_SCE; |
595 | #endif | 604 | #endif |
596 | if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits)) | ||
597 | return; | ||
598 | |||
599 | vmx->host_state.guest_efer_loaded = 1; | ||
600 | guest_efer &= ~ignore_bits; | 605 | guest_efer &= ~ignore_bits; |
601 | guest_efer |= host_efer & ignore_bits; | 606 | guest_efer |= host_efer & ignore_bits; |
602 | wrmsrl(MSR_EFER, guest_efer); | 607 | vmx->guest_msrs[efer_offset].data = guest_efer; |
603 | vmx->vcpu.stat.efer_reload++; | 608 | vmx->guest_msrs[efer_offset].mask = ~ignore_bits; |
604 | } | 609 | return true; |
605 | |||
606 | static void reload_host_efer(struct vcpu_vmx *vmx) | ||
607 | { | ||
608 | if (vmx->host_state.guest_efer_loaded) { | ||
609 | vmx->host_state.guest_efer_loaded = 0; | ||
610 | load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1); | ||
611 | } | ||
612 | } | 610 | } |
613 | 611 | ||
614 | static void vmx_save_host_state(struct kvm_vcpu *vcpu) | 612 | static void vmx_save_host_state(struct kvm_vcpu *vcpu) |
615 | { | 613 | { |
616 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 614 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
615 | int i; | ||
617 | 616 | ||
618 | if (vmx->host_state.loaded) | 617 | if (vmx->host_state.loaded) |
619 | return; | 618 | return; |
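The ignore_bits logic in update_transition_efer() above decides which EFER bits must really be switched between host and guest. A worked stand-alone example, using the architectural EFER bit positions and made-up values for illustration:

#include <stdint.h>
#include <stdio.h>

#define EFER_SCE (1ULL << 0)   /* syscall enable */
#define EFER_LME (1ULL << 8)   /* long mode enable */
#define EFER_LMA (1ULL << 10)  /* long mode active */
#define EFER_NX  (1ULL << 11)  /* no-execute enable */

int main(void)
{
	uint64_t host_efer  = EFER_SCE | EFER_LME | EFER_LMA | EFER_NX;
	uint64_t guest_efer = EFER_LME | EFER_LMA;      /* guest has SCE and NX clear */

	/* NX is emulated and LMA/LME are handled by hardware, so ignore them... */
	uint64_t ignore_bits = EFER_NX | EFER_SCE | EFER_LMA | EFER_LME;
	/* ...but SCE does matter once the guest runs in long mode. */
	if (guest_efer & EFER_LMA)
		ignore_bits &= ~EFER_SCE;

	/* Value actually loaded while the guest runs, and the bits to switch. */
	uint64_t loaded = (guest_efer & ~ignore_bits) | (host_efer & ignore_bits);
	uint64_t mask   = ~ignore_bits;

	printf("loaded EFER = 0x%llx, switch mask = 0x%llx\n",
	       (unsigned long long)loaded, (unsigned long long)mask);
	return 0;
}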
@@ -650,13 +649,15 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) | |||
650 | #endif | 649 | #endif |
651 | 650 | ||
652 | #ifdef CONFIG_X86_64 | 651 | #ifdef CONFIG_X86_64 |
653 | if (is_long_mode(&vmx->vcpu)) | 652 | if (is_long_mode(&vmx->vcpu)) { |
654 | save_msrs(vmx->host_msrs + | 653 | rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); |
655 | vmx->msr_offset_kernel_gs_base, 1); | 654 | wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); |
656 | 655 | } | |
657 | #endif | 656 | #endif |
658 | load_msrs(vmx->guest_msrs, vmx->save_nmsrs); | 657 | for (i = 0; i < vmx->save_nmsrs; ++i) |
659 | load_transition_efer(vmx); | 658 | kvm_set_shared_msr(vmx->guest_msrs[i].index, |
659 | vmx->guest_msrs[i].data, | ||
660 | vmx->guest_msrs[i].mask); | ||
660 | } | 661 | } |
661 | 662 | ||
662 | static void __vmx_load_host_state(struct vcpu_vmx *vmx) | 663 | static void __vmx_load_host_state(struct vcpu_vmx *vmx) |
@@ -684,9 +685,12 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx) | |||
684 | local_irq_restore(flags); | 685 | local_irq_restore(flags); |
685 | } | 686 | } |
686 | reload_tss(); | 687 | reload_tss(); |
687 | save_msrs(vmx->guest_msrs, vmx->save_nmsrs); | 688 | #ifdef CONFIG_X86_64 |
688 | load_msrs(vmx->host_msrs, vmx->save_nmsrs); | 689 | if (is_long_mode(&vmx->vcpu)) { |
689 | reload_host_efer(vmx); | 690 | rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); |
691 | wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); | ||
692 | } | ||
693 | #endif | ||
690 | } | 694 | } |
691 | 695 | ||
692 | static void vmx_load_host_state(struct vcpu_vmx *vmx) | 696 | static void vmx_load_host_state(struct vcpu_vmx *vmx) |
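A stand-alone model of the MSR_KERNEL_GS_BASE handling introduced above: instead of going through the old guest/host MSR arrays, the value is swapped directly around guest execution. The remaining guest MSRs are handed to kvm_set_shared_msr(index, data, mask), whose deferred-restore details live outside this diff, so only the kernel_gs_base swap is modelled here.

#include <stdint.h>
#include <stdio.h>

/* One fake register standing in for the hardware MSR_KERNEL_GS_BASE. */
static uint64_t hw_kernel_gs_base;

static void wrmsrl(uint64_t *msr, uint64_t val)  { *msr = val; }
static void rdmsrl(uint64_t *msr, uint64_t *val) { *val = *msr; }

struct fake_vmx {
	uint64_t msr_host_kernel_gs_base;
	uint64_t msr_guest_kernel_gs_base;
};

static void save_host_state(struct fake_vmx *vmx)   /* before guest entry */
{
	rdmsrl(&hw_kernel_gs_base, &vmx->msr_host_kernel_gs_base);
	wrmsrl(&hw_kernel_gs_base, vmx->msr_guest_kernel_gs_base);
}

static void load_host_state(struct fake_vmx *vmx)   /* after guest exit */
{
	rdmsrl(&hw_kernel_gs_base, &vmx->msr_guest_kernel_gs_base);
	wrmsrl(&hw_kernel_gs_base, vmx->msr_host_kernel_gs_base);
}

int main(void)
{
	struct fake_vmx vmx = { .msr_guest_kernel_gs_base = 0x1000 };

	hw_kernel_gs_base = 0xffff880000000000ULL;   /* pretend host value */
	save_host_state(&vmx);
	printf("in guest:  MSR = 0x%llx\n", (unsigned long long)hw_kernel_gs_base);
	load_host_state(&vmx);
	printf("back host: MSR = 0x%llx\n", (unsigned long long)hw_kernel_gs_base);
	return 0;
}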
@@ -877,19 +881,14 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, | |||
877 | /* | 881 | /* |
878 | * Swap MSR entry in host/guest MSR entry array. | 882 | * Swap MSR entry in host/guest MSR entry array. |
879 | */ | 883 | */ |
880 | #ifdef CONFIG_X86_64 | ||
881 | static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) | 884 | static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) |
882 | { | 885 | { |
883 | struct kvm_msr_entry tmp; | 886 | struct shared_msr_entry tmp; |
884 | 887 | ||
885 | tmp = vmx->guest_msrs[to]; | 888 | tmp = vmx->guest_msrs[to]; |
886 | vmx->guest_msrs[to] = vmx->guest_msrs[from]; | 889 | vmx->guest_msrs[to] = vmx->guest_msrs[from]; |
887 | vmx->guest_msrs[from] = tmp; | 890 | vmx->guest_msrs[from] = tmp; |
888 | tmp = vmx->host_msrs[to]; | ||
889 | vmx->host_msrs[to] = vmx->host_msrs[from]; | ||
890 | vmx->host_msrs[from] = tmp; | ||
891 | } | 891 | } |
892 | #endif | ||
893 | 892 | ||
894 | /* | 893 | /* |
895 | * Set up the vmcs to automatically save and restore system | 894 | * Set up the vmcs to automatically save and restore system |
@@ -898,15 +897,13 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) | |||
898 | */ | 897 | */ |
899 | static void setup_msrs(struct vcpu_vmx *vmx) | 898 | static void setup_msrs(struct vcpu_vmx *vmx) |
900 | { | 899 | { |
901 | int save_nmsrs; | 900 | int save_nmsrs, index; |
902 | unsigned long *msr_bitmap; | 901 | unsigned long *msr_bitmap; |
903 | 902 | ||
904 | vmx_load_host_state(vmx); | 903 | vmx_load_host_state(vmx); |
905 | save_nmsrs = 0; | 904 | save_nmsrs = 0; |
906 | #ifdef CONFIG_X86_64 | 905 | #ifdef CONFIG_X86_64 |
907 | if (is_long_mode(&vmx->vcpu)) { | 906 | if (is_long_mode(&vmx->vcpu)) { |
908 | int index; | ||
909 | |||
910 | index = __find_msr_index(vmx, MSR_SYSCALL_MASK); | 907 | index = __find_msr_index(vmx, MSR_SYSCALL_MASK); |
911 | if (index >= 0) | 908 | if (index >= 0) |
912 | move_msr_up(vmx, index, save_nmsrs++); | 909 | move_msr_up(vmx, index, save_nmsrs++); |
@@ -916,9 +913,6 @@ static void setup_msrs(struct vcpu_vmx *vmx) | |||
916 | index = __find_msr_index(vmx, MSR_CSTAR); | 913 | index = __find_msr_index(vmx, MSR_CSTAR); |
917 | if (index >= 0) | 914 | if (index >= 0) |
918 | move_msr_up(vmx, index, save_nmsrs++); | 915 | move_msr_up(vmx, index, save_nmsrs++); |
919 | index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE); | ||
920 | if (index >= 0) | ||
921 | move_msr_up(vmx, index, save_nmsrs++); | ||
922 | /* | 916 | /* |
923 | * MSR_K6_STAR is only needed on long mode guests, and only | 917 | * MSR_K6_STAR is only needed on long mode guests, and only |
924 | * if efer.sce is enabled. | 918 | * if efer.sce is enabled. |
@@ -928,13 +922,11 @@ static void setup_msrs(struct vcpu_vmx *vmx) | |||
928 | move_msr_up(vmx, index, save_nmsrs++); | 922 | move_msr_up(vmx, index, save_nmsrs++); |
929 | } | 923 | } |
930 | #endif | 924 | #endif |
931 | vmx->save_nmsrs = save_nmsrs; | 925 | index = __find_msr_index(vmx, MSR_EFER); |
926 | if (index >= 0 && update_transition_efer(vmx, index)) | ||
927 | move_msr_up(vmx, index, save_nmsrs++); | ||
932 | 928 | ||
933 | #ifdef CONFIG_X86_64 | 929 | vmx->save_nmsrs = save_nmsrs; |
934 | vmx->msr_offset_kernel_gs_base = | ||
935 | __find_msr_index(vmx, MSR_KERNEL_GS_BASE); | ||
936 | #endif | ||
937 | vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER); | ||
938 | 930 | ||
939 | if (cpu_has_vmx_msr_bitmap()) { | 931 | if (cpu_has_vmx_msr_bitmap()) { |
940 | if (is_long_mode(&vmx->vcpu)) | 932 | if (is_long_mode(&vmx->vcpu)) |
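Note that after this change guest_msrs[].index holds an index into vmx_msr_index[] rather than the raw MSR number, which is why __find_msr_index() earlier in this file now compares vmx_msr_index[vmx->guest_msrs[i].index]. A small stand-alone model of the move-to-front bookkeeping in setup_msrs():

#include <stdint.h>
#include <stdio.h>

#define MSR_EFER    0xc0000080u
#define MSR_K6_STAR 0xc0000081u
#define MSR_LSTAR   0xc0000082u

/* Mirrors vmx_msr_index[]: the table of MSRs KVM may switch for the guest. */
static const uint32_t msr_index[] = { MSR_LSTAR, MSR_EFER, MSR_K6_STAR };

struct shared_msr_entry { unsigned index; uint64_t data, mask; };

static struct shared_msr_entry guest_msrs[] = {
	{ .index = 0 }, { .index = 1 }, { .index = 2 },
};
static const int nmsrs = 3;

static int find_msr_index(uint32_t msr)
{
	for (int i = 0; i < nmsrs; i++)
		if (msr_index[guest_msrs[i].index] == msr)  /* note the indirection */
			return i;
	return -1;
}

static void move_msr_up(int from, int to)
{
	struct shared_msr_entry tmp = guest_msrs[to];
	guest_msrs[to] = guest_msrs[from];
	guest_msrs[from] = tmp;
}

int main(void)
{
	int save_nmsrs = 0;

	/* Compact the MSRs that actually need switching to the front of the array. */
	int index = find_msr_index(MSR_EFER);
	if (index >= 0)
		move_msr_up(index, save_nmsrs++);

	printf("save_nmsrs=%d, first entry is MSR %#x\n",
	       save_nmsrs, (unsigned)msr_index[guest_msrs[0].index]);
	return 0;
}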
@@ -976,7 +968,7 @@ static void guest_write_tsc(u64 guest_tsc, u64 host_tsc) | |||
976 | static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) | 968 | static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) |
977 | { | 969 | { |
978 | u64 data; | 970 | u64 data; |
979 | struct kvm_msr_entry *msr; | 971 | struct shared_msr_entry *msr; |
980 | 972 | ||
981 | if (!pdata) { | 973 | if (!pdata) { |
982 | printk(KERN_ERR "BUG: get_msr called with NULL pdata\n"); | 974 | printk(KERN_ERR "BUG: get_msr called with NULL pdata\n"); |
@@ -991,9 +983,13 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) | |||
991 | case MSR_GS_BASE: | 983 | case MSR_GS_BASE: |
992 | data = vmcs_readl(GUEST_GS_BASE); | 984 | data = vmcs_readl(GUEST_GS_BASE); |
993 | break; | 985 | break; |
986 | case MSR_KERNEL_GS_BASE: | ||
987 | vmx_load_host_state(to_vmx(vcpu)); | ||
988 | data = to_vmx(vcpu)->msr_guest_kernel_gs_base; | ||
989 | break; | ||
990 | #endif | ||
994 | case MSR_EFER: | 991 | case MSR_EFER: |
995 | return kvm_get_msr_common(vcpu, msr_index, pdata); | 992 | return kvm_get_msr_common(vcpu, msr_index, pdata); |
996 | #endif | ||
997 | case MSR_IA32_TSC: | 993 | case MSR_IA32_TSC: |
998 | data = guest_read_tsc(); | 994 | data = guest_read_tsc(); |
999 | break; | 995 | break; |
@@ -1007,6 +1003,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) | |||
1007 | data = vmcs_readl(GUEST_SYSENTER_ESP); | 1003 | data = vmcs_readl(GUEST_SYSENTER_ESP); |
1008 | break; | 1004 | break; |
1009 | default: | 1005 | default: |
1006 | vmx_load_host_state(to_vmx(vcpu)); | ||
1010 | msr = find_msr_entry(to_vmx(vcpu), msr_index); | 1007 | msr = find_msr_entry(to_vmx(vcpu), msr_index); |
1011 | if (msr) { | 1008 | if (msr) { |
1012 | vmx_load_host_state(to_vmx(vcpu)); | 1009 | vmx_load_host_state(to_vmx(vcpu)); |
@@ -1028,7 +1025,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) | |||
1028 | static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) | 1025 | static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) |
1029 | { | 1026 | { |
1030 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 1027 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
1031 | struct kvm_msr_entry *msr; | 1028 | struct shared_msr_entry *msr; |
1032 | u64 host_tsc; | 1029 | u64 host_tsc; |
1033 | int ret = 0; | 1030 | int ret = 0; |
1034 | 1031 | ||
@@ -1044,6 +1041,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) | |||
1044 | case MSR_GS_BASE: | 1041 | case MSR_GS_BASE: |
1045 | vmcs_writel(GUEST_GS_BASE, data); | 1042 | vmcs_writel(GUEST_GS_BASE, data); |
1046 | break; | 1043 | break; |
1044 | case MSR_KERNEL_GS_BASE: | ||
1045 | vmx_load_host_state(vmx); | ||
1046 | vmx->msr_guest_kernel_gs_base = data; | ||
1047 | break; | ||
1047 | #endif | 1048 | #endif |
1048 | case MSR_IA32_SYSENTER_CS: | 1049 | case MSR_IA32_SYSENTER_CS: |
1049 | vmcs_write32(GUEST_SYSENTER_CS, data); | 1050 | vmcs_write32(GUEST_SYSENTER_CS, data); |
@@ -1097,30 +1098,14 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) | |||
1097 | } | 1098 | } |
1098 | } | 1099 | } |
1099 | 1100 | ||
1100 | static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) | 1101 | static void set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) |
1101 | { | 1102 | { |
1102 | int old_debug = vcpu->guest_debug; | ||
1103 | unsigned long flags; | ||
1104 | |||
1105 | vcpu->guest_debug = dbg->control; | ||
1106 | if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) | ||
1107 | vcpu->guest_debug = 0; | ||
1108 | |||
1109 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) | 1103 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) |
1110 | vmcs_writel(GUEST_DR7, dbg->arch.debugreg[7]); | 1104 | vmcs_writel(GUEST_DR7, dbg->arch.debugreg[7]); |
1111 | else | 1105 | else |
1112 | vmcs_writel(GUEST_DR7, vcpu->arch.dr7); | 1106 | vmcs_writel(GUEST_DR7, vcpu->arch.dr7); |
1113 | 1107 | ||
1114 | flags = vmcs_readl(GUEST_RFLAGS); | ||
1115 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) | ||
1116 | flags |= X86_EFLAGS_TF | X86_EFLAGS_RF; | ||
1117 | else if (old_debug & KVM_GUESTDBG_SINGLESTEP) | ||
1118 | flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF); | ||
1119 | vmcs_writel(GUEST_RFLAGS, flags); | ||
1120 | |||
1121 | update_exception_bitmap(vcpu); | 1108 | update_exception_bitmap(vcpu); |
1122 | |||
1123 | return 0; | ||
1124 | } | 1109 | } |
1125 | 1110 | ||
1126 | static __init int cpu_has_kvm_support(void) | 1111 | static __init int cpu_has_kvm_support(void) |
@@ -1139,12 +1124,15 @@ static __init int vmx_disabled_by_bios(void) | |||
1139 | /* locked but not enabled */ | 1124 | /* locked but not enabled */ |
1140 | } | 1125 | } |
1141 | 1126 | ||
1142 | static void hardware_enable(void *garbage) | 1127 | static int hardware_enable(void *garbage) |
1143 | { | 1128 | { |
1144 | int cpu = raw_smp_processor_id(); | 1129 | int cpu = raw_smp_processor_id(); |
1145 | u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); | 1130 | u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); |
1146 | u64 old; | 1131 | u64 old; |
1147 | 1132 | ||
1133 | if (read_cr4() & X86_CR4_VMXE) | ||
1134 | return -EBUSY; | ||
1135 | |||
1148 | INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu)); | 1136 | INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu)); |
1149 | rdmsrl(MSR_IA32_FEATURE_CONTROL, old); | 1137 | rdmsrl(MSR_IA32_FEATURE_CONTROL, old); |
1150 | if ((old & (FEATURE_CONTROL_LOCKED | | 1138 | if ((old & (FEATURE_CONTROL_LOCKED | |
@@ -1159,6 +1147,10 @@ static void hardware_enable(void *garbage) | |||
1159 | asm volatile (ASM_VMX_VMXON_RAX | 1147 | asm volatile (ASM_VMX_VMXON_RAX |
1160 | : : "a"(&phys_addr), "m"(phys_addr) | 1148 | : : "a"(&phys_addr), "m"(phys_addr) |
1161 | : "memory", "cc"); | 1149 | : "memory", "cc"); |
1150 | |||
1151 | ept_sync_global(); | ||
1152 | |||
1153 | return 0; | ||
1162 | } | 1154 | } |
1163 | 1155 | ||
1164 | static void vmclear_local_vcpus(void) | 1156 | static void vmclear_local_vcpus(void) |
@@ -1250,7 +1242,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) | |||
1250 | SECONDARY_EXEC_WBINVD_EXITING | | 1242 | SECONDARY_EXEC_WBINVD_EXITING | |
1251 | SECONDARY_EXEC_ENABLE_VPID | | 1243 | SECONDARY_EXEC_ENABLE_VPID | |
1252 | SECONDARY_EXEC_ENABLE_EPT | | 1244 | SECONDARY_EXEC_ENABLE_EPT | |
1253 | SECONDARY_EXEC_UNRESTRICTED_GUEST; | 1245 | SECONDARY_EXEC_UNRESTRICTED_GUEST | |
1246 | SECONDARY_EXEC_PAUSE_LOOP_EXITING; | ||
1254 | if (adjust_vmx_controls(min2, opt2, | 1247 | if (adjust_vmx_controls(min2, opt2, |
1255 | MSR_IA32_VMX_PROCBASED_CTLS2, | 1248 | MSR_IA32_VMX_PROCBASED_CTLS2, |
1256 | &_cpu_based_2nd_exec_control) < 0) | 1249 | &_cpu_based_2nd_exec_control) < 0) |
@@ -1344,15 +1337,17 @@ static void free_kvm_area(void) | |||
1344 | { | 1337 | { |
1345 | int cpu; | 1338 | int cpu; |
1346 | 1339 | ||
1347 | for_each_online_cpu(cpu) | 1340 | for_each_possible_cpu(cpu) { |
1348 | free_vmcs(per_cpu(vmxarea, cpu)); | 1341 | free_vmcs(per_cpu(vmxarea, cpu)); |
1342 | per_cpu(vmxarea, cpu) = NULL; | ||
1343 | } | ||
1349 | } | 1344 | } |
1350 | 1345 | ||
1351 | static __init int alloc_kvm_area(void) | 1346 | static __init int alloc_kvm_area(void) |
1352 | { | 1347 | { |
1353 | int cpu; | 1348 | int cpu; |
1354 | 1349 | ||
1355 | for_each_online_cpu(cpu) { | 1350 | for_each_possible_cpu(cpu) { |
1356 | struct vmcs *vmcs; | 1351 | struct vmcs *vmcs; |
1357 | 1352 | ||
1358 | vmcs = alloc_vmcs_cpu(cpu); | 1353 | vmcs = alloc_vmcs_cpu(cpu); |
@@ -1394,6 +1389,9 @@ static __init int hardware_setup(void) | |||
1394 | if (enable_ept && !cpu_has_vmx_ept_2m_page()) | 1389 | if (enable_ept && !cpu_has_vmx_ept_2m_page()) |
1395 | kvm_disable_largepages(); | 1390 | kvm_disable_largepages(); |
1396 | 1391 | ||
1392 | if (!cpu_has_vmx_ple()) | ||
1393 | ple_gap = 0; | ||
1394 | |||
1397 | return alloc_kvm_area(); | 1395 | return alloc_kvm_area(); |
1398 | } | 1396 | } |
1399 | 1397 | ||
@@ -1536,8 +1534,16 @@ continue_rmode: | |||
1536 | static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) | 1534 | static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) |
1537 | { | 1535 | { |
1538 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 1536 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
1539 | struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); | 1537 | struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); |
1538 | |||
1539 | if (!msr) | ||
1540 | return; | ||
1540 | 1541 | ||
1542 | /* | ||
1543 | * Force kernel_gs_base reloading before EFER changes, as control | ||
1544 | * of this msr depends on is_long_mode(). | ||
1545 | */ | ||
1546 | vmx_load_host_state(to_vmx(vcpu)); | ||
1541 | vcpu->arch.shadow_efer = efer; | 1547 | vcpu->arch.shadow_efer = efer; |
1542 | if (!msr) | 1548 | if (!msr) |
1543 | return; | 1549 | return; |
@@ -1727,6 +1733,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) | |||
1727 | vmcs_write64(EPT_POINTER, eptp); | 1733 | vmcs_write64(EPT_POINTER, eptp); |
1728 | guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 : | 1734 | guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 : |
1729 | vcpu->kvm->arch.ept_identity_map_addr; | 1735 | vcpu->kvm->arch.ept_identity_map_addr; |
1736 | ept_load_pdptrs(vcpu); | ||
1730 | } | 1737 | } |
1731 | 1738 | ||
1732 | vmx_flush_tlb(vcpu); | 1739 | vmx_flush_tlb(vcpu); |
@@ -2302,13 +2309,22 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) | |||
2302 | ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; | 2309 | ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; |
2303 | if (vmx->vpid == 0) | 2310 | if (vmx->vpid == 0) |
2304 | exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; | 2311 | exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; |
2305 | if (!enable_ept) | 2312 | if (!enable_ept) { |
2306 | exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; | 2313 | exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; |
2314 | enable_unrestricted_guest = 0; | ||
2315 | } | ||
2307 | if (!enable_unrestricted_guest) | 2316 | if (!enable_unrestricted_guest) |
2308 | exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; | 2317 | exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; |
2318 | if (!ple_gap) | ||
2319 | exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; | ||
2309 | vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); | 2320 | vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); |
2310 | } | 2321 | } |
2311 | 2322 | ||
2323 | if (ple_gap) { | ||
2324 | vmcs_write32(PLE_GAP, ple_gap); | ||
2325 | vmcs_write32(PLE_WINDOW, ple_window); | ||
2326 | } | ||
2327 | |||
2312 | vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf); | 2328 | vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf); |
2313 | vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf); | 2329 | vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf); |
2314 | vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ | 2330 | vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ |
@@ -2376,10 +2392,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) | |||
2376 | if (wrmsr_safe(index, data_low, data_high) < 0) | 2392 | if (wrmsr_safe(index, data_low, data_high) < 0) |
2377 | continue; | 2393 | continue; |
2378 | data = data_low | ((u64)data_high << 32); | 2394 | data = data_low | ((u64)data_high << 32); |
2379 | vmx->host_msrs[j].index = index; | 2395 | vmx->guest_msrs[j].index = i; |
2380 | vmx->host_msrs[j].reserved = 0; | 2396 | vmx->guest_msrs[j].data = 0; |
2381 | vmx->host_msrs[j].data = data; | 2397 | vmx->guest_msrs[j].mask = -1ull; |
2382 | vmx->guest_msrs[j] = vmx->host_msrs[j]; | ||
2383 | ++vmx->nmsrs; | 2398 | ++vmx->nmsrs; |
2384 | } | 2399 | } |
2385 | 2400 | ||
@@ -2510,7 +2525,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) | |||
2510 | if (vmx->vpid != 0) | 2525 | if (vmx->vpid != 0) |
2511 | vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); | 2526 | vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); |
2512 | 2527 | ||
2513 | vmx->vcpu.arch.cr0 = 0x60000010; | 2528 | vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; |
2514 | vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */ | 2529 | vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */ |
2515 | vmx_set_cr4(&vmx->vcpu, 0); | 2530 | vmx_set_cr4(&vmx->vcpu, 0); |
2516 | vmx_set_efer(&vmx->vcpu, 0); | 2531 | vmx_set_efer(&vmx->vcpu, 0); |
@@ -2627,6 +2642,34 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) | |||
2627 | GUEST_INTR_STATE_NMI)); | 2642 | GUEST_INTR_STATE_NMI)); |
2628 | } | 2643 | } |
2629 | 2644 | ||
2645 | static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) | ||
2646 | { | ||
2647 | if (!cpu_has_virtual_nmis()) | ||
2648 | return to_vmx(vcpu)->soft_vnmi_blocked; | ||
2649 | else | ||
2650 | return !!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & | ||
2651 | GUEST_INTR_STATE_NMI); | ||
2652 | } | ||
2653 | |||
2654 | static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) | ||
2655 | { | ||
2656 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
2657 | |||
2658 | if (!cpu_has_virtual_nmis()) { | ||
2659 | if (vmx->soft_vnmi_blocked != masked) { | ||
2660 | vmx->soft_vnmi_blocked = masked; | ||
2661 | vmx->vnmi_blocked_time = 0; | ||
2662 | } | ||
2663 | } else { | ||
2664 | if (masked) | ||
2665 | vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, | ||
2666 | GUEST_INTR_STATE_NMI); | ||
2667 | else | ||
2668 | vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, | ||
2669 | GUEST_INTR_STATE_NMI); | ||
2670 | } | ||
2671 | } | ||
2672 | |||
2630 | static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) | 2673 | static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) |
2631 | { | 2674 | { |
2632 | return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && | 2675 | return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && |
@@ -2659,7 +2702,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu, | |||
2659 | * Cause the #SS fault with 0 error code in VM86 mode. | 2702 | * Cause the #SS fault with 0 error code in VM86 mode. |
2660 | */ | 2703 | */ |
2661 | if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) | 2704 | if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) |
2662 | if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE) | 2705 | if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE) |
2663 | return 1; | 2706 | return 1; |
2664 | /* | 2707 | /* |
2665 | * Forward all other exceptions that are valid in real mode. | 2708 | * Forward all other exceptions that are valid in real mode. |
@@ -2710,15 +2753,16 @@ static void kvm_machine_check(void) | |||
2710 | #endif | 2753 | #endif |
2711 | } | 2754 | } |
2712 | 2755 | ||
2713 | static int handle_machine_check(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 2756 | static int handle_machine_check(struct kvm_vcpu *vcpu) |
2714 | { | 2757 | { |
2715 | /* already handled by vcpu_run */ | 2758 | /* already handled by vcpu_run */ |
2716 | return 1; | 2759 | return 1; |
2717 | } | 2760 | } |
2718 | 2761 | ||
2719 | static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 2762 | static int handle_exception(struct kvm_vcpu *vcpu) |
2720 | { | 2763 | { |
2721 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 2764 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
2765 | struct kvm_run *kvm_run = vcpu->run; | ||
2722 | u32 intr_info, ex_no, error_code; | 2766 | u32 intr_info, ex_no, error_code; |
2723 | unsigned long cr2, rip, dr6; | 2767 | unsigned long cr2, rip, dr6; |
2724 | u32 vect_info; | 2768 | u32 vect_info; |
@@ -2728,12 +2772,17 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2728 | intr_info = vmcs_read32(VM_EXIT_INTR_INFO); | 2772 | intr_info = vmcs_read32(VM_EXIT_INTR_INFO); |
2729 | 2773 | ||
2730 | if (is_machine_check(intr_info)) | 2774 | if (is_machine_check(intr_info)) |
2731 | return handle_machine_check(vcpu, kvm_run); | 2775 | return handle_machine_check(vcpu); |
2732 | 2776 | ||
2733 | if ((vect_info & VECTORING_INFO_VALID_MASK) && | 2777 | if ((vect_info & VECTORING_INFO_VALID_MASK) && |
2734 | !is_page_fault(intr_info)) | 2778 | !is_page_fault(intr_info)) { |
2735 | printk(KERN_ERR "%s: unexpected, vectoring info 0x%x " | 2779 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
2736 | "intr info 0x%x\n", __func__, vect_info, intr_info); | 2780 | vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; |
2781 | vcpu->run->internal.ndata = 2; | ||
2782 | vcpu->run->internal.data[0] = vect_info; | ||
2783 | vcpu->run->internal.data[1] = intr_info; | ||
2784 | return 0; | ||
2785 | } | ||
2737 | 2786 | ||
2738 | if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR) | 2787 | if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR) |
2739 | return 1; /* already handled by vmx_vcpu_run() */ | 2788 | return 1; /* already handled by vmx_vcpu_run() */ |
@@ -2744,7 +2793,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2744 | } | 2793 | } |
2745 | 2794 | ||
2746 | if (is_invalid_opcode(intr_info)) { | 2795 | if (is_invalid_opcode(intr_info)) { |
2747 | er = emulate_instruction(vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD); | 2796 | er = emulate_instruction(vcpu, 0, 0, EMULTYPE_TRAP_UD); |
2748 | if (er != EMULATE_DONE) | 2797 | if (er != EMULATE_DONE) |
2749 | kvm_queue_exception(vcpu, UD_VECTOR); | 2798 | kvm_queue_exception(vcpu, UD_VECTOR); |
2750 | return 1; | 2799 | return 1; |
@@ -2803,20 +2852,19 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2803 | return 0; | 2852 | return 0; |
2804 | } | 2853 | } |
2805 | 2854 | ||
2806 | static int handle_external_interrupt(struct kvm_vcpu *vcpu, | 2855 | static int handle_external_interrupt(struct kvm_vcpu *vcpu) |
2807 | struct kvm_run *kvm_run) | ||
2808 | { | 2856 | { |
2809 | ++vcpu->stat.irq_exits; | 2857 | ++vcpu->stat.irq_exits; |
2810 | return 1; | 2858 | return 1; |
2811 | } | 2859 | } |
2812 | 2860 | ||
2813 | static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 2861 | static int handle_triple_fault(struct kvm_vcpu *vcpu) |
2814 | { | 2862 | { |
2815 | kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; | 2863 | vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; |
2816 | return 0; | 2864 | return 0; |
2817 | } | 2865 | } |
2818 | 2866 | ||
2819 | static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 2867 | static int handle_io(struct kvm_vcpu *vcpu) |
2820 | { | 2868 | { |
2821 | unsigned long exit_qualification; | 2869 | unsigned long exit_qualification; |
2822 | int size, in, string; | 2870 | int size, in, string; |
@@ -2827,8 +2875,7 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2827 | string = (exit_qualification & 16) != 0; | 2875 | string = (exit_qualification & 16) != 0; |
2828 | 2876 | ||
2829 | if (string) { | 2877 | if (string) { |
2830 | if (emulate_instruction(vcpu, | 2878 | if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO) |
2831 | kvm_run, 0, 0, 0) == EMULATE_DO_MMIO) | ||
2832 | return 0; | 2879 | return 0; |
2833 | return 1; | 2880 | return 1; |
2834 | } | 2881 | } |
@@ -2838,7 +2885,7 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2838 | port = exit_qualification >> 16; | 2885 | port = exit_qualification >> 16; |
2839 | 2886 | ||
2840 | skip_emulated_instruction(vcpu); | 2887 | skip_emulated_instruction(vcpu); |
2841 | return kvm_emulate_pio(vcpu, kvm_run, in, size, port); | 2888 | return kvm_emulate_pio(vcpu, in, size, port); |
2842 | } | 2889 | } |
2843 | 2890 | ||
2844 | static void | 2891 | static void |
@@ -2852,7 +2899,7 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) | |||
2852 | hypercall[2] = 0xc1; | 2899 | hypercall[2] = 0xc1; |
2853 | } | 2900 | } |
2854 | 2901 | ||
2855 | static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 2902 | static int handle_cr(struct kvm_vcpu *vcpu) |
2856 | { | 2903 | { |
2857 | unsigned long exit_qualification, val; | 2904 | unsigned long exit_qualification, val; |
2858 | int cr; | 2905 | int cr; |
@@ -2887,7 +2934,7 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2887 | return 1; | 2934 | return 1; |
2888 | if (cr8_prev <= cr8) | 2935 | if (cr8_prev <= cr8) |
2889 | return 1; | 2936 | return 1; |
2890 | kvm_run->exit_reason = KVM_EXIT_SET_TPR; | 2937 | vcpu->run->exit_reason = KVM_EXIT_SET_TPR; |
2891 | return 0; | 2938 | return 0; |
2892 | } | 2939 | } |
2893 | }; | 2940 | }; |
@@ -2922,13 +2969,13 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2922 | default: | 2969 | default: |
2923 | break; | 2970 | break; |
2924 | } | 2971 | } |
2925 | kvm_run->exit_reason = 0; | 2972 | vcpu->run->exit_reason = 0; |
2926 | pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n", | 2973 | pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n", |
2927 | (int)(exit_qualification >> 4) & 3, cr); | 2974 | (int)(exit_qualification >> 4) & 3, cr); |
2928 | return 0; | 2975 | return 0; |
2929 | } | 2976 | } |
2930 | 2977 | ||
2931 | static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 2978 | static int handle_dr(struct kvm_vcpu *vcpu) |
2932 | { | 2979 | { |
2933 | unsigned long exit_qualification; | 2980 | unsigned long exit_qualification; |
2934 | unsigned long val; | 2981 | unsigned long val; |
@@ -2944,13 +2991,13 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2944 | * guest debugging itself. | 2991 | * guest debugging itself. |
2945 | */ | 2992 | */ |
2946 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { | 2993 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { |
2947 | kvm_run->debug.arch.dr6 = vcpu->arch.dr6; | 2994 | vcpu->run->debug.arch.dr6 = vcpu->arch.dr6; |
2948 | kvm_run->debug.arch.dr7 = dr; | 2995 | vcpu->run->debug.arch.dr7 = dr; |
2949 | kvm_run->debug.arch.pc = | 2996 | vcpu->run->debug.arch.pc = |
2950 | vmcs_readl(GUEST_CS_BASE) + | 2997 | vmcs_readl(GUEST_CS_BASE) + |
2951 | vmcs_readl(GUEST_RIP); | 2998 | vmcs_readl(GUEST_RIP); |
2952 | kvm_run->debug.arch.exception = DB_VECTOR; | 2999 | vcpu->run->debug.arch.exception = DB_VECTOR; |
2953 | kvm_run->exit_reason = KVM_EXIT_DEBUG; | 3000 | vcpu->run->exit_reason = KVM_EXIT_DEBUG; |
2954 | return 0; | 3001 | return 0; |
2955 | } else { | 3002 | } else { |
2956 | vcpu->arch.dr7 &= ~DR7_GD; | 3003 | vcpu->arch.dr7 &= ~DR7_GD; |
@@ -3016,13 +3063,13 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3016 | return 1; | 3063 | return 1; |
3017 | } | 3064 | } |
3018 | 3065 | ||
3019 | static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3066 | static int handle_cpuid(struct kvm_vcpu *vcpu) |
3020 | { | 3067 | { |
3021 | kvm_emulate_cpuid(vcpu); | 3068 | kvm_emulate_cpuid(vcpu); |
3022 | return 1; | 3069 | return 1; |
3023 | } | 3070 | } |
3024 | 3071 | ||
3025 | static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3072 | static int handle_rdmsr(struct kvm_vcpu *vcpu) |
3026 | { | 3073 | { |
3027 | u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; | 3074 | u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; |
3028 | u64 data; | 3075 | u64 data; |
@@ -3041,7 +3088,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3041 | return 1; | 3088 | return 1; |
3042 | } | 3089 | } |
3043 | 3090 | ||
3044 | static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3091 | static int handle_wrmsr(struct kvm_vcpu *vcpu) |
3045 | { | 3092 | { |
3046 | u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; | 3093 | u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; |
3047 | u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) | 3094 | u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) |
@@ -3058,14 +3105,12 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3058 | return 1; | 3105 | return 1; |
3059 | } | 3106 | } |
3060 | 3107 | ||
3061 | static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu, | 3108 | static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) |
3062 | struct kvm_run *kvm_run) | ||
3063 | { | 3109 | { |
3064 | return 1; | 3110 | return 1; |
3065 | } | 3111 | } |
3066 | 3112 | ||
3067 | static int handle_interrupt_window(struct kvm_vcpu *vcpu, | 3113 | static int handle_interrupt_window(struct kvm_vcpu *vcpu) |
3068 | struct kvm_run *kvm_run) | ||
3069 | { | 3114 | { |
3070 | u32 cpu_based_vm_exec_control; | 3115 | u32 cpu_based_vm_exec_control; |
3071 | 3116 | ||
@@ -3081,34 +3126,34 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu, | |||
3081 | * possible | 3126 | * possible |
3082 | */ | 3127 | */ |
3083 | if (!irqchip_in_kernel(vcpu->kvm) && | 3128 | if (!irqchip_in_kernel(vcpu->kvm) && |
3084 | kvm_run->request_interrupt_window && | 3129 | vcpu->run->request_interrupt_window && |
3085 | !kvm_cpu_has_interrupt(vcpu)) { | 3130 | !kvm_cpu_has_interrupt(vcpu)) { |
3086 | kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; | 3131 | vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; |
3087 | return 0; | 3132 | return 0; |
3088 | } | 3133 | } |
3089 | return 1; | 3134 | return 1; |
3090 | } | 3135 | } |
3091 | 3136 | ||
3092 | static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3137 | static int handle_halt(struct kvm_vcpu *vcpu) |
3093 | { | 3138 | { |
3094 | skip_emulated_instruction(vcpu); | 3139 | skip_emulated_instruction(vcpu); |
3095 | return kvm_emulate_halt(vcpu); | 3140 | return kvm_emulate_halt(vcpu); |
3096 | } | 3141 | } |
3097 | 3142 | ||
3098 | static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3143 | static int handle_vmcall(struct kvm_vcpu *vcpu) |
3099 | { | 3144 | { |
3100 | skip_emulated_instruction(vcpu); | 3145 | skip_emulated_instruction(vcpu); |
3101 | kvm_emulate_hypercall(vcpu); | 3146 | kvm_emulate_hypercall(vcpu); |
3102 | return 1; | 3147 | return 1; |
3103 | } | 3148 | } |
3104 | 3149 | ||
3105 | static int handle_vmx_insn(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3150 | static int handle_vmx_insn(struct kvm_vcpu *vcpu) |
3106 | { | 3151 | { |
3107 | kvm_queue_exception(vcpu, UD_VECTOR); | 3152 | kvm_queue_exception(vcpu, UD_VECTOR); |
3108 | return 1; | 3153 | return 1; |
3109 | } | 3154 | } |
3110 | 3155 | ||
3111 | static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3156 | static int handle_invlpg(struct kvm_vcpu *vcpu) |
3112 | { | 3157 | { |
3113 | unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | 3158 | unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
3114 | 3159 | ||
@@ -3117,14 +3162,14 @@ static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3117 | return 1; | 3162 | return 1; |
3118 | } | 3163 | } |
3119 | 3164 | ||
3120 | static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3165 | static int handle_wbinvd(struct kvm_vcpu *vcpu) |
3121 | { | 3166 | { |
3122 | skip_emulated_instruction(vcpu); | 3167 | skip_emulated_instruction(vcpu); |
3123 | /* TODO: Add support for VT-d/pass-through device */ | 3168 | /* TODO: Add support for VT-d/pass-through device */ |
3124 | return 1; | 3169 | return 1; |
3125 | } | 3170 | } |
3126 | 3171 | ||
3127 | static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3172 | static int handle_apic_access(struct kvm_vcpu *vcpu) |
3128 | { | 3173 | { |
3129 | unsigned long exit_qualification; | 3174 | unsigned long exit_qualification; |
3130 | enum emulation_result er; | 3175 | enum emulation_result er; |
@@ -3133,7 +3178,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3133 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | 3178 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
3134 | offset = exit_qualification & 0xffful; | 3179 | offset = exit_qualification & 0xffful; |
3135 | 3180 | ||
3136 | er = emulate_instruction(vcpu, kvm_run, 0, 0, 0); | 3181 | er = emulate_instruction(vcpu, 0, 0, 0); |
3137 | 3182 | ||
3138 | if (er != EMULATE_DONE) { | 3183 | if (er != EMULATE_DONE) { |
3139 | printk(KERN_ERR | 3184 | printk(KERN_ERR |
@@ -3144,7 +3189,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3144 | return 1; | 3189 | return 1; |
3145 | } | 3190 | } |
3146 | 3191 | ||
3147 | static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3192 | static int handle_task_switch(struct kvm_vcpu *vcpu) |
3148 | { | 3193 | { |
3149 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 3194 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
3150 | unsigned long exit_qualification; | 3195 | unsigned long exit_qualification; |
@@ -3198,7 +3243,7 @@ static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3198 | return 1; | 3243 | return 1; |
3199 | } | 3244 | } |
3200 | 3245 | ||
3201 | static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3246 | static int handle_ept_violation(struct kvm_vcpu *vcpu) |
3202 | { | 3247 | { |
3203 | unsigned long exit_qualification; | 3248 | unsigned long exit_qualification; |
3204 | gpa_t gpa; | 3249 | gpa_t gpa; |
@@ -3219,8 +3264,8 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3219 | vmcs_readl(GUEST_LINEAR_ADDRESS)); | 3264 | vmcs_readl(GUEST_LINEAR_ADDRESS)); |
3220 | printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n", | 3265 | printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n", |
3221 | (long unsigned int)exit_qualification); | 3266 | (long unsigned int)exit_qualification); |
3222 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | 3267 | vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; |
3223 | kvm_run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION; | 3268 | vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION; |
3224 | return 0; | 3269 | return 0; |
3225 | } | 3270 | } |
3226 | 3271 | ||
@@ -3290,7 +3335,7 @@ static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte, | |||
3290 | } | 3335 | } |
3291 | } | 3336 | } |
3292 | 3337 | ||
3293 | static int handle_ept_misconfig(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3338 | static int handle_ept_misconfig(struct kvm_vcpu *vcpu) |
3294 | { | 3339 | { |
3295 | u64 sptes[4]; | 3340 | u64 sptes[4]; |
3296 | int nr_sptes, i; | 3341 | int nr_sptes, i; |
@@ -3306,13 +3351,13 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3306 | for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i) | 3351 | for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i) |
3307 | ept_misconfig_inspect_spte(vcpu, sptes[i-1], i); | 3352 | ept_misconfig_inspect_spte(vcpu, sptes[i-1], i); |
3308 | 3353 | ||
3309 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | 3354 | vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; |
3310 | kvm_run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG; | 3355 | vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG; |
3311 | 3356 | ||
3312 | return 0; | 3357 | return 0; |
3313 | } | 3358 | } |
3314 | 3359 | ||
3315 | static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3360 | static int handle_nmi_window(struct kvm_vcpu *vcpu) |
3316 | { | 3361 | { |
3317 | u32 cpu_based_vm_exec_control; | 3362 | u32 cpu_based_vm_exec_control; |
3318 | 3363 | ||
@@ -3325,36 +3370,50 @@ static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3325 | return 1; | 3370 | return 1; |
3326 | } | 3371 | } |
3327 | 3372 | ||
3328 | static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, | 3373 | static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) |
3329 | struct kvm_run *kvm_run) | ||
3330 | { | 3374 | { |
3331 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 3375 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
3332 | enum emulation_result err = EMULATE_DONE; | 3376 | enum emulation_result err = EMULATE_DONE; |
3333 | 3377 | int ret = 1; | |
3334 | local_irq_enable(); | ||
3335 | preempt_enable(); | ||
3336 | 3378 | ||
3337 | while (!guest_state_valid(vcpu)) { | 3379 | while (!guest_state_valid(vcpu)) { |
3338 | err = emulate_instruction(vcpu, kvm_run, 0, 0, 0); | 3380 | err = emulate_instruction(vcpu, 0, 0, 0); |
3339 | 3381 | ||
3340 | if (err == EMULATE_DO_MMIO) | 3382 | if (err == EMULATE_DO_MMIO) { |
3341 | break; | 3383 | ret = 0; |
3384 | goto out; | ||
3385 | } | ||
3342 | 3386 | ||
3343 | if (err != EMULATE_DONE) { | 3387 | if (err != EMULATE_DONE) { |
3344 | kvm_report_emulation_failure(vcpu, "emulation failure"); | 3388 | kvm_report_emulation_failure(vcpu, "emulation failure"); |
3345 | break; | 3389 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
3390 | vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; | ||
3391 | vcpu->run->internal.ndata = 0; | ||
3392 | ret = 0; | ||
3393 | goto out; | ||
3346 | } | 3394 | } |
3347 | 3395 | ||
3348 | if (signal_pending(current)) | 3396 | if (signal_pending(current)) |
3349 | break; | 3397 | goto out; |
3350 | if (need_resched()) | 3398 | if (need_resched()) |
3351 | schedule(); | 3399 | schedule(); |
3352 | } | 3400 | } |
3353 | 3401 | ||
3354 | preempt_disable(); | 3402 | vmx->emulation_required = 0; |
3355 | local_irq_disable(); | 3403 | out: |
3404 | return ret; | ||
3405 | } | ||
3356 | 3406 | ||
3357 | vmx->invalid_state_emulation_result = err; | 3407 | /* |
3408 | * Indicate that a vcpu is busy-waiting on a spinlock. Plain PAUSE exiting is | ||
3409 | * not enabled, so we only get here on cpus with PAUSE-loop exiting. | ||
3410 | */ | ||
3411 | static int handle_pause(struct kvm_vcpu *vcpu) | ||
3412 | { | ||
3413 | skip_emulated_instruction(vcpu); | ||
3414 | kvm_vcpu_on_spin(vcpu); | ||
3415 | |||
3416 | return 1; | ||
3358 | } | 3417 | } |
3359 | 3418 | ||
3360 | /* | 3419 | /* |
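
The new handle_pause() callback backs the PAUSE-loop-exiting entry added to the exit handler table below: when the hardware reports that a vcpu has been executing PAUSE in a tight loop, KVM skips the instruction and calls kvm_vcpu_on_spin() to give up the physical CPU rather than letting the vcpu burn its timeslice spinning on a lock whose holder may not be running. A rough userspace analogy of the same idea, not KVM code (all names invented for illustration): spin briefly with PAUSE, then yield once the spin has gone on too long.

#include <sched.h>
#include <stdatomic.h>

#define SPIN_THRESHOLD 4096   /* arbitrary: how long to spin before yielding */

struct demo_spinlock {
    atomic_flag locked;       /* initialise with ATOMIC_FLAG_INIT */
};

static void demo_lock(struct demo_spinlock *lock)
{
    unsigned spins = 0;

    while (atomic_flag_test_and_set_explicit(&lock->locked,
                                             memory_order_acquire)) {
        __builtin_ia32_pause();          /* the PAUSE the CPU would trap on (x86/GCC) */
        if (++spins == SPIN_THRESHOLD) { /* give the lock holder a chance to run */
            sched_yield();               /* analogous to kvm_vcpu_on_spin() */
            spins = 0;
        }
    }
}

static void demo_unlock(struct demo_spinlock *lock)
{
    atomic_flag_clear_explicit(&lock->locked, memory_order_release);
}
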
@@ -3362,8 +3421,7 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, | |||
3362 | * may resume. Otherwise they set the kvm_run parameter to indicate what needs | 3421 | * may resume. Otherwise they set the kvm_run parameter to indicate what needs |
3363 | * to be done to userspace and return 0. | 3422 | * to be done to userspace and return 0. |
3364 | */ | 3423 | */ |
3365 | static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu, | 3424 | static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { |
3366 | struct kvm_run *kvm_run) = { | ||
3367 | [EXIT_REASON_EXCEPTION_NMI] = handle_exception, | 3425 | [EXIT_REASON_EXCEPTION_NMI] = handle_exception, |
3368 | [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, | 3426 | [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, |
3369 | [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, | 3427 | [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, |
@@ -3394,6 +3452,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu, | |||
3394 | [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, | 3452 | [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, |
3395 | [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, | 3453 | [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, |
3396 | [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, | 3454 | [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, |
3455 | [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, | ||
3397 | }; | 3456 | }; |
3398 | 3457 | ||
3399 | static const int kvm_vmx_max_exit_handlers = | 3458 | static const int kvm_vmx_max_exit_handlers = |
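
As the table shows, every exit handler now takes only the vcpu; per the convention spelled out in the comment continued in the next hunk, a handler returns 1 when the guest can simply be resumed and 0 when vcpu->run has been filled in and the exit must be forwarded to userspace. A minimal, self-contained sketch of that dispatch pattern (types and names invented for illustration):

#include <stdio.h>

enum { EXIT_A, EXIT_B, NR_EXITS };

struct vcpu { int run_exit_reason; };

/* Handlers return 1 to resume the guest, 0 to bounce to userspace. */
static int handle_a(struct vcpu *v) { (void)v; return 1; }
static int handle_b(struct vcpu *v) { v->run_exit_reason = 42; return 0; }

static int (*exit_handlers[])(struct vcpu *) = {
    [EXIT_A] = handle_a,
    [EXIT_B] = handle_b,
};

static const int max_exit_handlers =
    sizeof(exit_handlers) / sizeof(exit_handlers[0]);

static int handle_exit(struct vcpu *v, unsigned reason)
{
    if (reason < (unsigned)max_exit_handlers && exit_handlers[reason])
        return exit_handlers[reason](v);
    v->run_exit_reason = -1;   /* unknown exit: let userspace decide */
    return 0;
}

int main(void)
{
    struct vcpu v = { 0 };
    printf("resume=%d\n", handle_exit(&v, EXIT_A));
    printf("resume=%d reason=%d\n", handle_exit(&v, EXIT_B), v.run_exit_reason);
    return 0;
}
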
@@ -3403,7 +3462,7 @@ static const int kvm_vmx_max_exit_handlers = | |||
3403 | * The guest has exited. See if we can fix it or if we need userspace | 3462 | * The guest has exited. See if we can fix it or if we need userspace |
3404 | * assistance. | 3463 | * assistance. |
3405 | */ | 3464 | */ |
3406 | static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | 3465 | static int vmx_handle_exit(struct kvm_vcpu *vcpu) |
3407 | { | 3466 | { |
3408 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 3467 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
3409 | u32 exit_reason = vmx->exit_reason; | 3468 | u32 exit_reason = vmx->exit_reason; |
@@ -3411,13 +3470,9 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
3411 | 3470 | ||
3412 | trace_kvm_exit(exit_reason, kvm_rip_read(vcpu)); | 3471 | trace_kvm_exit(exit_reason, kvm_rip_read(vcpu)); |
3413 | 3472 | ||
3414 | /* If we need to emulate an MMIO from handle_invalid_guest_state | 3473 | /* If guest state is invalid, start emulating */ |
3415 | * we just return 0 */ | 3474 | if (vmx->emulation_required && emulate_invalid_guest_state) |
3416 | if (vmx->emulation_required && emulate_invalid_guest_state) { | 3475 | return handle_invalid_guest_state(vcpu); |
3417 | if (guest_state_valid(vcpu)) | ||
3418 | vmx->emulation_required = 0; | ||
3419 | return vmx->invalid_state_emulation_result != EMULATE_DO_MMIO; | ||
3420 | } | ||
3421 | 3476 | ||
3422 | /* Access CR3 don't cause VMExit in paging mode, so we need | 3477 | /* Access CR3 don't cause VMExit in paging mode, so we need |
3423 | * to sync with guest real CR3. */ | 3478 | * to sync with guest real CR3. */ |
@@ -3425,8 +3480,8 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
3425 | vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); | 3480 | vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); |
3426 | 3481 | ||
3427 | if (unlikely(vmx->fail)) { | 3482 | if (unlikely(vmx->fail)) { |
3428 | kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; | 3483 | vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; |
3429 | kvm_run->fail_entry.hardware_entry_failure_reason | 3484 | vcpu->run->fail_entry.hardware_entry_failure_reason |
3430 | = vmcs_read32(VM_INSTRUCTION_ERROR); | 3485 | = vmcs_read32(VM_INSTRUCTION_ERROR); |
3431 | return 0; | 3486 | return 0; |
3432 | } | 3487 | } |
@@ -3459,10 +3514,10 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
3459 | 3514 | ||
3460 | if (exit_reason < kvm_vmx_max_exit_handlers | 3515 | if (exit_reason < kvm_vmx_max_exit_handlers |
3461 | && kvm_vmx_exit_handlers[exit_reason]) | 3516 | && kvm_vmx_exit_handlers[exit_reason]) |
3462 | return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run); | 3517 | return kvm_vmx_exit_handlers[exit_reason](vcpu); |
3463 | else { | 3518 | else { |
3464 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | 3519 | vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; |
3465 | kvm_run->hw.hardware_exit_reason = exit_reason; | 3520 | vcpu->run->hw.hardware_exit_reason = exit_reason; |
3466 | } | 3521 | } |
3467 | return 0; | 3522 | return 0; |
3468 | } | 3523 | } |
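
Much of this series is the mechanical change visible in vmx_handle_exit() above: kvm_run used to be threaded as an extra argument through vmx_vcpu_run(), every exit handler and the x86.c helpers, even though it is always the run area of the vcpu being handled. With the run area reachable as vcpu->run, callees fetch it themselves and all these signatures shrink. A toy before/after sketch of the pattern (names invented for illustration):

struct run_state { int exit_reason; };

struct vcpu {
    int id;
    struct run_state *run;   /* set once when the vcpu is created */
};

/* Before: the caller must pass the run area alongside the vcpu. */
static int handle_exit_old(struct vcpu *v, struct run_state *run)
{
    (void)v;
    run->exit_reason = 0;
    return 1;
}

/* After: the run area is reachable from the vcpu itself. */
static int handle_exit_new(struct vcpu *v)
{
    v->run->exit_reason = 0;
    return 1;
}

Besides shorter signatures, this makes it impossible to hand a handler a vcpu and a run area that do not belong together.
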
@@ -3600,23 +3655,18 @@ static void fixup_rmode_irq(struct vcpu_vmx *vmx) | |||
3600 | #define Q "l" | 3655 | #define Q "l" |
3601 | #endif | 3656 | #endif |
3602 | 3657 | ||
3603 | static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3658 | static void vmx_vcpu_run(struct kvm_vcpu *vcpu) |
3604 | { | 3659 | { |
3605 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 3660 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
3606 | 3661 | ||
3607 | if (enable_ept && is_paging(vcpu)) { | ||
3608 | vmcs_writel(GUEST_CR3, vcpu->arch.cr3); | ||
3609 | ept_load_pdptrs(vcpu); | ||
3610 | } | ||
3611 | /* Record the guest's net vcpu time for enforced NMI injections. */ | 3662 | /* Record the guest's net vcpu time for enforced NMI injections. */ |
3612 | if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) | 3663 | if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) |
3613 | vmx->entry_time = ktime_get(); | 3664 | vmx->entry_time = ktime_get(); |
3614 | 3665 | ||
3615 | /* Handle invalid guest state instead of entering VMX */ | 3666 | /* Don't enter VMX if guest state is invalid; let the exit handler |
3616 | if (vmx->emulation_required && emulate_invalid_guest_state) { | 3667 | start emulation until we arrive back at a valid state */ |
3617 | handle_invalid_guest_state(vcpu, kvm_run); | 3668 | if (vmx->emulation_required && emulate_invalid_guest_state) |
3618 | return; | 3669 | return; |
3619 | } | ||
3620 | 3670 | ||
3621 | if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) | 3671 | if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) |
3622 | vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); | 3672 | vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); |
@@ -3775,7 +3825,6 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu) | |||
3775 | __clear_bit(vmx->vpid, vmx_vpid_bitmap); | 3825 | __clear_bit(vmx->vpid, vmx_vpid_bitmap); |
3776 | spin_unlock(&vmx_vpid_lock); | 3826 | spin_unlock(&vmx_vpid_lock); |
3777 | vmx_free_vmcs(vcpu); | 3827 | vmx_free_vmcs(vcpu); |
3778 | kfree(vmx->host_msrs); | ||
3779 | kfree(vmx->guest_msrs); | 3828 | kfree(vmx->guest_msrs); |
3780 | kvm_vcpu_uninit(vcpu); | 3829 | kvm_vcpu_uninit(vcpu); |
3781 | kmem_cache_free(kvm_vcpu_cache, vmx); | 3830 | kmem_cache_free(kvm_vcpu_cache, vmx); |
@@ -3802,10 +3851,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) | |||
3802 | goto uninit_vcpu; | 3851 | goto uninit_vcpu; |
3803 | } | 3852 | } |
3804 | 3853 | ||
3805 | vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
3806 | if (!vmx->host_msrs) | ||
3807 | goto free_guest_msrs; | ||
3808 | |||
3809 | vmx->vmcs = alloc_vmcs(); | 3854 | vmx->vmcs = alloc_vmcs(); |
3810 | if (!vmx->vmcs) | 3855 | if (!vmx->vmcs) |
3811 | goto free_msrs; | 3856 | goto free_msrs; |
@@ -3836,8 +3881,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) | |||
3836 | free_vmcs: | 3881 | free_vmcs: |
3837 | free_vmcs(vmx->vmcs); | 3882 | free_vmcs(vmx->vmcs); |
3838 | free_msrs: | 3883 | free_msrs: |
3839 | kfree(vmx->host_msrs); | ||
3840 | free_guest_msrs: | ||
3841 | kfree(vmx->guest_msrs); | 3884 | kfree(vmx->guest_msrs); |
3842 | uninit_vcpu: | 3885 | uninit_vcpu: |
3843 | kvm_vcpu_uninit(&vmx->vcpu); | 3886 | kvm_vcpu_uninit(&vmx->vcpu); |
@@ -3973,6 +4016,8 @@ static struct kvm_x86_ops vmx_x86_ops = { | |||
3973 | .queue_exception = vmx_queue_exception, | 4016 | .queue_exception = vmx_queue_exception, |
3974 | .interrupt_allowed = vmx_interrupt_allowed, | 4017 | .interrupt_allowed = vmx_interrupt_allowed, |
3975 | .nmi_allowed = vmx_nmi_allowed, | 4018 | .nmi_allowed = vmx_nmi_allowed, |
4019 | .get_nmi_mask = vmx_get_nmi_mask, | ||
4020 | .set_nmi_mask = vmx_set_nmi_mask, | ||
3976 | .enable_nmi_window = enable_nmi_window, | 4021 | .enable_nmi_window = enable_nmi_window, |
3977 | .enable_irq_window = enable_irq_window, | 4022 | .enable_irq_window = enable_irq_window, |
3978 | .update_cr8_intercept = update_cr8_intercept, | 4023 | .update_cr8_intercept = update_cr8_intercept, |
@@ -3987,7 +4032,12 @@ static struct kvm_x86_ops vmx_x86_ops = { | |||
3987 | 4032 | ||
3988 | static int __init vmx_init(void) | 4033 | static int __init vmx_init(void) |
3989 | { | 4034 | { |
3990 | int r; | 4035 | int r, i; |
4036 | |||
4037 | rdmsrl_safe(MSR_EFER, &host_efer); | ||
4038 | |||
4039 | for (i = 0; i < NR_VMX_MSR; ++i) | ||
4040 | kvm_define_shared_msr(i, vmx_msr_index[i]); | ||
3991 | 4041 | ||
3992 | vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL); | 4042 | vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL); |
3993 | if (!vmx_io_bitmap_a) | 4043 | if (!vmx_io_bitmap_a) |
@@ -4049,8 +4099,6 @@ static int __init vmx_init(void) | |||
4049 | if (bypass_guest_pf) | 4099 | if (bypass_guest_pf) |
4050 | kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull); | 4100 | kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull); |
4051 | 4101 | ||
4052 | ept_sync_global(); | ||
4053 | |||
4054 | return 0; | 4102 | return 0; |
4055 | 4103 | ||
4056 | out3: | 4104 | out3: |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4fc80174191c..9d068966fb2a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/iommu.h> | 37 | #include <linux/iommu.h> |
38 | #include <linux/intel-iommu.h> | 38 | #include <linux/intel-iommu.h> |
39 | #include <linux/cpufreq.h> | 39 | #include <linux/cpufreq.h> |
40 | #include <linux/user-return-notifier.h> | ||
40 | #include <trace/events/kvm.h> | 41 | #include <trace/events/kvm.h> |
41 | #undef TRACE_INCLUDE_FILE | 42 | #undef TRACE_INCLUDE_FILE |
42 | #define CREATE_TRACE_POINTS | 43 | #define CREATE_TRACE_POINTS |
@@ -88,6 +89,25 @@ EXPORT_SYMBOL_GPL(kvm_x86_ops); | |||
88 | int ignore_msrs = 0; | 89 | int ignore_msrs = 0; |
89 | module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR); | 90 | module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR); |
90 | 91 | ||
92 | #define KVM_NR_SHARED_MSRS 16 | ||
93 | |||
94 | struct kvm_shared_msrs_global { | ||
95 | int nr; | ||
96 | struct kvm_shared_msr { | ||
97 | u32 msr; | ||
98 | u64 value; | ||
99 | } msrs[KVM_NR_SHARED_MSRS]; | ||
100 | }; | ||
101 | |||
102 | struct kvm_shared_msrs { | ||
103 | struct user_return_notifier urn; | ||
104 | bool registered; | ||
105 | u64 current_value[KVM_NR_SHARED_MSRS]; | ||
106 | }; | ||
107 | |||
108 | static struct kvm_shared_msrs_global __read_mostly shared_msrs_global; | ||
109 | static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs); | ||
110 | |||
91 | struct kvm_stats_debugfs_item debugfs_entries[] = { | 111 | struct kvm_stats_debugfs_item debugfs_entries[] = { |
92 | { "pf_fixed", VCPU_STAT(pf_fixed) }, | 112 | { "pf_fixed", VCPU_STAT(pf_fixed) }, |
93 | { "pf_guest", VCPU_STAT(pf_guest) }, | 113 | { "pf_guest", VCPU_STAT(pf_guest) }, |
@@ -124,6 +144,72 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
124 | { NULL } | 144 | { NULL } |
125 | }; | 145 | }; |
126 | 146 | ||
147 | static void kvm_on_user_return(struct user_return_notifier *urn) | ||
148 | { | ||
149 | unsigned slot; | ||
150 | struct kvm_shared_msr *global; | ||
151 | struct kvm_shared_msrs *locals | ||
152 | = container_of(urn, struct kvm_shared_msrs, urn); | ||
153 | |||
154 | for (slot = 0; slot < shared_msrs_global.nr; ++slot) { | ||
155 | global = &shared_msrs_global.msrs[slot]; | ||
156 | if (global->value != locals->current_value[slot]) { | ||
157 | wrmsrl(global->msr, global->value); | ||
158 | locals->current_value[slot] = global->value; | ||
159 | } | ||
160 | } | ||
161 | locals->registered = false; | ||
162 | user_return_notifier_unregister(urn); | ||
163 | } | ||
164 | |||
165 | void kvm_define_shared_msr(unsigned slot, u32 msr) | ||
166 | { | ||
167 | int cpu; | ||
168 | u64 value; | ||
169 | |||
170 | if (slot >= shared_msrs_global.nr) | ||
171 | shared_msrs_global.nr = slot + 1; | ||
172 | shared_msrs_global.msrs[slot].msr = msr; | ||
173 | rdmsrl_safe(msr, &value); | ||
174 | shared_msrs_global.msrs[slot].value = value; | ||
175 | for_each_online_cpu(cpu) | ||
176 | per_cpu(shared_msrs, cpu).current_value[slot] = value; | ||
177 | } | ||
178 | EXPORT_SYMBOL_GPL(kvm_define_shared_msr); | ||
179 | |||
180 | static void kvm_shared_msr_cpu_online(void) | ||
181 | { | ||
182 | unsigned i; | ||
183 | struct kvm_shared_msrs *locals = &__get_cpu_var(shared_msrs); | ||
184 | |||
185 | for (i = 0; i < shared_msrs_global.nr; ++i) | ||
186 | locals->current_value[i] = shared_msrs_global.msrs[i].value; | ||
187 | } | ||
188 | |||
189 | void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) | ||
190 | { | ||
191 | struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs); | ||
192 | |||
193 | if (((value ^ smsr->current_value[slot]) & mask) == 0) | ||
194 | return; | ||
195 | smsr->current_value[slot] = value; | ||
196 | wrmsrl(shared_msrs_global.msrs[slot].msr, value); | ||
197 | if (!smsr->registered) { | ||
198 | smsr->urn.on_user_return = kvm_on_user_return; | ||
199 | user_return_notifier_register(&smsr->urn); | ||
200 | smsr->registered = true; | ||
201 | } | ||
202 | } | ||
203 | EXPORT_SYMBOL_GPL(kvm_set_shared_msr); | ||
204 | |||
205 | static void drop_user_return_notifiers(void *ignore) | ||
206 | { | ||
207 | struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs); | ||
208 | |||
209 | if (smsr->registered) | ||
210 | kvm_on_user_return(&smsr->urn); | ||
211 | } | ||
212 | |||
127 | unsigned long segment_base(u16 selector) | 213 | unsigned long segment_base(u16 selector) |
128 | { | 214 | { |
129 | struct descriptor_table gdt; | 215 | struct descriptor_table gdt; |
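
The shared-MSR machinery above is the core of the series' MSR-switching optimisation: guest values of MSRs such as MSR_K6_STAR are written with kvm_set_shared_msr() only when they differ from what is already loaded, and the host values are restored not on every vmexit but from a user-return notifier, i.e. only when the CPU is actually about to return to userspace. A compact userspace simulation of that caching and deferral logic (invented names, plain flags instead of the notifier infrastructure, a printf instead of a real wrmsr):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_SHARED 2

static uint64_t host_value[NR_SHARED];     /* value the host wants restored */
static uint64_t loaded_value[NR_SHARED];   /* what is currently "in the MSR" */
static bool notifier_registered;

static void fake_wrmsr(unsigned slot, uint64_t value)
{
    loaded_value[slot] = value;
    printf("wrmsr slot %u <- %#llx\n", slot, (unsigned long long)value);
}

/* Called when the thread is about to return to user mode. */
static void on_user_return(void)
{
    for (unsigned slot = 0; slot < NR_SHARED; slot++)
        if (loaded_value[slot] != host_value[slot])
            fake_wrmsr(slot, host_value[slot]);
    notifier_registered = false;
}

/* Called on guest entry whenever a guest MSR value is needed. */
static void set_shared_msr(unsigned slot, uint64_t guest_value)
{
    if (loaded_value[slot] == guest_value)
        return;                            /* already loaded: no wrmsr */
    fake_wrmsr(slot, guest_value);
    if (!notifier_registered)              /* arm the deferred restore once */
        notifier_registered = true;
}

int main(void)
{
    host_value[0] = 0x1111; loaded_value[0] = 0x1111;
    set_shared_msr(0, 0x2222);             /* first entry: one wrmsr */
    set_shared_msr(0, 0x2222);             /* re-entry: cached, no wrmsr */
    on_user_return();                      /* host value restored lazily */
    return 0;
}

The payoff is that a KVM thread bouncing between guest and kernel keeps the guest MSR values loaded and pays the restore cost only when it genuinely heads back to userspace.
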
@@ -485,16 +571,19 @@ static inline u32 bit(int bitno) | |||
485 | * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. | 571 | * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. |
486 | * | 572 | * |
487 | * This list is modified at module load time to reflect the | 573 | * This list is modified at module load time to reflect the |
488 | * capabilities of the host cpu. | 574 | * capabilities of the host cpu. This capabilities test skips MSRs that are |
575 | * kvm-specific. Those are put at the beginning of the list. | ||
489 | */ | 576 | */ |
577 | |||
578 | #define KVM_SAVE_MSRS_BEGIN 2 | ||
490 | static u32 msrs_to_save[] = { | 579 | static u32 msrs_to_save[] = { |
580 | MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, | ||
491 | MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, | 581 | MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, |
492 | MSR_K6_STAR, | 582 | MSR_K6_STAR, |
493 | #ifdef CONFIG_X86_64 | 583 | #ifdef CONFIG_X86_64 |
494 | MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, | 584 | MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, |
495 | #endif | 585 | #endif |
496 | MSR_IA32_TSC, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, | 586 | MSR_IA32_TSC, MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA |
497 | MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA | ||
498 | }; | 587 | }; |
499 | 588 | ||
500 | static unsigned num_msrs_to_save; | 589 | static unsigned num_msrs_to_save; |
@@ -678,7 +767,8 @@ static void kvm_write_guest_time(struct kvm_vcpu *v) | |||
678 | /* With all the info we got, fill in the values */ | 767 | /* With all the info we got, fill in the values */ |
679 | 768 | ||
680 | vcpu->hv_clock.system_time = ts.tv_nsec + | 769 | vcpu->hv_clock.system_time = ts.tv_nsec + |
681 | (NSEC_PER_SEC * (u64)ts.tv_sec); | 770 | (NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset; |
771 | |||
682 | /* | 772 | /* |
683 | * The interface expects us to write an even number signaling that the | 773 | * The interface expects us to write an even number signaling that the |
684 | * update is finished. Since the guest won't see the intermediate | 774 | * update is finished. Since the guest won't see the intermediate |
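
kvm_write_guest_time() now adds a per-VM kvmclock_offset to the host-derived time, so userspace can shift the clock the guest sees (via the KVM_SET_CLOCK ioctl added later in this patch) without touching the host clock. The arithmetic is simply nanoseconds plus a signed per-VM offset; a small illustrative sketch using a POSIX clock, not the kernel's timekeeping:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Guest-visible time = host monotonic time + per-VM offset (may be negative). */
static uint64_t guest_system_time_ns(int64_t kvmclock_offset_ns)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec
           + (uint64_t)kvmclock_offset_ns;
}

int main(void)
{
    /* Pretend userspace asked for the guest clock to run 5 s ahead. */
    printf("guest ns: %llu\n",
           (unsigned long long)guest_system_time_ns(5000000000ll));
    return 0;
}
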
@@ -836,6 +926,38 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data) | |||
836 | return 0; | 926 | return 0; |
837 | } | 927 | } |
838 | 928 | ||
929 | static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) | ||
930 | { | ||
931 | struct kvm *kvm = vcpu->kvm; | ||
932 | int lm = is_long_mode(vcpu); | ||
933 | u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 | ||
934 | : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; | ||
935 | u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 | ||
936 | : kvm->arch.xen_hvm_config.blob_size_32; | ||
937 | u32 page_num = data & ~PAGE_MASK; | ||
938 | u64 page_addr = data & PAGE_MASK; | ||
939 | u8 *page; | ||
940 | int r; | ||
941 | |||
942 | r = -E2BIG; | ||
943 | if (page_num >= blob_size) | ||
944 | goto out; | ||
945 | r = -ENOMEM; | ||
946 | page = kzalloc(PAGE_SIZE, GFP_KERNEL); | ||
947 | if (!page) | ||
948 | goto out; | ||
949 | r = -EFAULT; | ||
950 | if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE)) | ||
951 | goto out_free; | ||
952 | if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE)) | ||
953 | goto out_free; | ||
954 | r = 0; | ||
955 | out_free: | ||
956 | kfree(page); | ||
957 | out: | ||
958 | return r; | ||
959 | } | ||
960 | |||
839 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) | 961 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) |
840 | { | 962 | { |
841 | switch (msr) { | 963 | switch (msr) { |
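
xen_hvm_config() implements the Xen HVM hypercall-page protocol on top of KVM: userspace registers, via the KVM_XEN_HVM_CONFIG ioctl added further down in kvm_arch_vm_ioctl(), an MSR number plus the address and size (in pages) of a hypercall blob; when the guest writes a page number and target address to that MSR, the host copies the corresponding blob page into guest memory. A hedged userspace sketch of the registration side, assuming the struct kvm_xen_hvm_config layout introduced by this series; the MSR number and the absence of error handling are illustrative only:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* vm_fd: a KVM VM file descriptor; blob32/blob64: hypercall code pages
 * prepared by userspace; sizes are in pages.  All values illustrative. */
static int register_xen_hvm_msr(int vm_fd, void *blob32, __u8 pages32,
                                void *blob64, __u8 pages64)
{
    struct kvm_xen_hvm_config cfg;

    memset(&cfg, 0, sizeof(cfg));
    cfg.msr          = 0x40000000;          /* MSR the guest will write (example) */
    cfg.blob_addr_32 = (__u64)(unsigned long)blob32;
    cfg.blob_size_32 = pages32;
    cfg.blob_addr_64 = (__u64)(unsigned long)blob64;
    cfg.blob_size_64 = pages64;

    return ioctl(vm_fd, KVM_XEN_HVM_CONFIG, &cfg);
}
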
@@ -951,6 +1073,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) | |||
951 | "0x%x data 0x%llx\n", msr, data); | 1073 | "0x%x data 0x%llx\n", msr, data); |
952 | break; | 1074 | break; |
953 | default: | 1075 | default: |
1076 | if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) | ||
1077 | return xen_hvm_config(vcpu, data); | ||
954 | if (!ignore_msrs) { | 1078 | if (!ignore_msrs) { |
955 | pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", | 1079 | pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", |
956 | msr, data); | 1080 | msr, data); |
@@ -1225,6 +1349,9 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
1225 | case KVM_CAP_PIT2: | 1349 | case KVM_CAP_PIT2: |
1226 | case KVM_CAP_PIT_STATE2: | 1350 | case KVM_CAP_PIT_STATE2: |
1227 | case KVM_CAP_SET_IDENTITY_MAP_ADDR: | 1351 | case KVM_CAP_SET_IDENTITY_MAP_ADDR: |
1352 | case KVM_CAP_XEN_HVM: | ||
1353 | case KVM_CAP_ADJUST_CLOCK: | ||
1354 | case KVM_CAP_VCPU_EVENTS: | ||
1228 | r = 1; | 1355 | r = 1; |
1229 | break; | 1356 | break; |
1230 | case KVM_CAP_COALESCED_MMIO: | 1357 | case KVM_CAP_COALESCED_MMIO: |
@@ -1239,8 +1366,8 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
1239 | case KVM_CAP_NR_MEMSLOTS: | 1366 | case KVM_CAP_NR_MEMSLOTS: |
1240 | r = KVM_MEMORY_SLOTS; | 1367 | r = KVM_MEMORY_SLOTS; |
1241 | break; | 1368 | break; |
1242 | case KVM_CAP_PV_MMU: | 1369 | case KVM_CAP_PV_MMU: /* obsolete */ |
1243 | r = !tdp_enabled; | 1370 | r = 0; |
1244 | break; | 1371 | break; |
1245 | case KVM_CAP_IOMMU: | 1372 | case KVM_CAP_IOMMU: |
1246 | r = iommu_found(); | 1373 | r = iommu_found(); |
@@ -1327,6 +1454,12 @@ out: | |||
1327 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 1454 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
1328 | { | 1455 | { |
1329 | kvm_x86_ops->vcpu_load(vcpu, cpu); | 1456 | kvm_x86_ops->vcpu_load(vcpu, cpu); |
1457 | if (unlikely(per_cpu(cpu_tsc_khz, cpu) == 0)) { | ||
1458 | unsigned long khz = cpufreq_quick_get(cpu); | ||
1459 | if (!khz) | ||
1460 | khz = tsc_khz; | ||
1461 | per_cpu(cpu_tsc_khz, cpu) = khz; | ||
1462 | } | ||
1330 | kvm_request_guest_time_update(vcpu); | 1463 | kvm_request_guest_time_update(vcpu); |
1331 | } | 1464 | } |
1332 | 1465 | ||
@@ -1760,6 +1893,61 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, | |||
1760 | return 0; | 1893 | return 0; |
1761 | } | 1894 | } |
1762 | 1895 | ||
1896 | static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, | ||
1897 | struct kvm_vcpu_events *events) | ||
1898 | { | ||
1899 | vcpu_load(vcpu); | ||
1900 | |||
1901 | events->exception.injected = vcpu->arch.exception.pending; | ||
1902 | events->exception.nr = vcpu->arch.exception.nr; | ||
1903 | events->exception.has_error_code = vcpu->arch.exception.has_error_code; | ||
1904 | events->exception.error_code = vcpu->arch.exception.error_code; | ||
1905 | |||
1906 | events->interrupt.injected = vcpu->arch.interrupt.pending; | ||
1907 | events->interrupt.nr = vcpu->arch.interrupt.nr; | ||
1908 | events->interrupt.soft = vcpu->arch.interrupt.soft; | ||
1909 | |||
1910 | events->nmi.injected = vcpu->arch.nmi_injected; | ||
1911 | events->nmi.pending = vcpu->arch.nmi_pending; | ||
1912 | events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); | ||
1913 | |||
1914 | events->sipi_vector = vcpu->arch.sipi_vector; | ||
1915 | |||
1916 | events->flags = 0; | ||
1917 | |||
1918 | vcpu_put(vcpu); | ||
1919 | } | ||
1920 | |||
1921 | static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, | ||
1922 | struct kvm_vcpu_events *events) | ||
1923 | { | ||
1924 | if (events->flags) | ||
1925 | return -EINVAL; | ||
1926 | |||
1927 | vcpu_load(vcpu); | ||
1928 | |||
1929 | vcpu->arch.exception.pending = events->exception.injected; | ||
1930 | vcpu->arch.exception.nr = events->exception.nr; | ||
1931 | vcpu->arch.exception.has_error_code = events->exception.has_error_code; | ||
1932 | vcpu->arch.exception.error_code = events->exception.error_code; | ||
1933 | |||
1934 | vcpu->arch.interrupt.pending = events->interrupt.injected; | ||
1935 | vcpu->arch.interrupt.nr = events->interrupt.nr; | ||
1936 | vcpu->arch.interrupt.soft = events->interrupt.soft; | ||
1937 | if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm)) | ||
1938 | kvm_pic_clear_isr_ack(vcpu->kvm); | ||
1939 | |||
1940 | vcpu->arch.nmi_injected = events->nmi.injected; | ||
1941 | vcpu->arch.nmi_pending = events->nmi.pending; | ||
1942 | kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); | ||
1943 | |||
1944 | vcpu->arch.sipi_vector = events->sipi_vector; | ||
1945 | |||
1946 | vcpu_put(vcpu); | ||
1947 | |||
1948 | return 0; | ||
1949 | } | ||
1950 | |||
1763 | long kvm_arch_vcpu_ioctl(struct file *filp, | 1951 | long kvm_arch_vcpu_ioctl(struct file *filp, |
1764 | unsigned int ioctl, unsigned long arg) | 1952 | unsigned int ioctl, unsigned long arg) |
1765 | { | 1953 | { |
@@ -1770,6 +1958,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
1770 | 1958 | ||
1771 | switch (ioctl) { | 1959 | switch (ioctl) { |
1772 | case KVM_GET_LAPIC: { | 1960 | case KVM_GET_LAPIC: { |
1961 | r = -EINVAL; | ||
1962 | if (!vcpu->arch.apic) | ||
1963 | goto out; | ||
1773 | lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL); | 1964 | lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL); |
1774 | 1965 | ||
1775 | r = -ENOMEM; | 1966 | r = -ENOMEM; |
@@ -1785,6 +1976,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
1785 | break; | 1976 | break; |
1786 | } | 1977 | } |
1787 | case KVM_SET_LAPIC: { | 1978 | case KVM_SET_LAPIC: { |
1979 | r = -EINVAL; | ||
1980 | if (!vcpu->arch.apic) | ||
1981 | goto out; | ||
1788 | lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL); | 1982 | lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL); |
1789 | r = -ENOMEM; | 1983 | r = -ENOMEM; |
1790 | if (!lapic) | 1984 | if (!lapic) |
@@ -1911,6 +2105,27 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
1911 | r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); | 2105 | r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); |
1912 | break; | 2106 | break; |
1913 | } | 2107 | } |
2108 | case KVM_GET_VCPU_EVENTS: { | ||
2109 | struct kvm_vcpu_events events; | ||
2110 | |||
2111 | kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); | ||
2112 | |||
2113 | r = -EFAULT; | ||
2114 | if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events))) | ||
2115 | break; | ||
2116 | r = 0; | ||
2117 | break; | ||
2118 | } | ||
2119 | case KVM_SET_VCPU_EVENTS: { | ||
2120 | struct kvm_vcpu_events events; | ||
2121 | |||
2122 | r = -EFAULT; | ||
2123 | if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events))) | ||
2124 | break; | ||
2125 | |||
2126 | r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); | ||
2127 | break; | ||
2128 | } | ||
1914 | default: | 2129 | default: |
1915 | r = -EINVAL; | 2130 | r = -EINVAL; |
1916 | } | 2131 | } |
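
KVM_GET_VCPU_EVENTS and KVM_SET_VCPU_EVENTS give userspace, and therefore live migration, access to pending exception, interrupt and NMI state that previously could not be saved or restored. A hedged sketch of how a userspace client might read and re-inject that state around a checkpoint, assuming the struct kvm_vcpu_events layout added by this series and a vcpu_fd obtained from KVM_CREATE_VCPU:

#include <linux/kvm.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

static int save_and_restore_events(int vcpu_fd)
{
    struct kvm_vcpu_events events;

    memset(&events, 0, sizeof(events));
    if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
        return -1;

    printf("NMI pending=%u masked=%u, exception injected=%u\n",
           events.nmi.pending, events.nmi.masked,
           events.exception.injected);

    /* ... migrate or checkpoint the structure here ... */

    /* events.flags must stay zero; the handler above rejects unknown flags. */
    return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}
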
@@ -2039,9 +2254,7 @@ static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) | |||
2039 | sizeof(struct kvm_pic_state)); | 2254 | sizeof(struct kvm_pic_state)); |
2040 | break; | 2255 | break; |
2041 | case KVM_IRQCHIP_IOAPIC: | 2256 | case KVM_IRQCHIP_IOAPIC: |
2042 | memcpy(&chip->chip.ioapic, | 2257 | r = kvm_get_ioapic(kvm, &chip->chip.ioapic); |
2043 | ioapic_irqchip(kvm), | ||
2044 | sizeof(struct kvm_ioapic_state)); | ||
2045 | break; | 2258 | break; |
2046 | default: | 2259 | default: |
2047 | r = -EINVAL; | 2260 | r = -EINVAL; |
@@ -2071,11 +2284,7 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) | |||
2071 | spin_unlock(&pic_irqchip(kvm)->lock); | 2284 | spin_unlock(&pic_irqchip(kvm)->lock); |
2072 | break; | 2285 | break; |
2073 | case KVM_IRQCHIP_IOAPIC: | 2286 | case KVM_IRQCHIP_IOAPIC: |
2074 | mutex_lock(&kvm->irq_lock); | 2287 | r = kvm_set_ioapic(kvm, &chip->chip.ioapic); |
2075 | memcpy(ioapic_irqchip(kvm), | ||
2076 | &chip->chip.ioapic, | ||
2077 | sizeof(struct kvm_ioapic_state)); | ||
2078 | mutex_unlock(&kvm->irq_lock); | ||
2079 | break; | 2288 | break; |
2080 | default: | 2289 | default: |
2081 | r = -EINVAL; | 2290 | r = -EINVAL; |
@@ -2183,7 +2392,7 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
2183 | { | 2392 | { |
2184 | struct kvm *kvm = filp->private_data; | 2393 | struct kvm *kvm = filp->private_data; |
2185 | void __user *argp = (void __user *)arg; | 2394 | void __user *argp = (void __user *)arg; |
2186 | int r = -EINVAL; | 2395 | int r = -ENOTTY; |
2187 | /* | 2396 | /* |
2188 | * This union makes it completely explicit to gcc-3.x | 2397 | * This union makes it completely explicit to gcc-3.x |
2189 | * that these two variables' stack usage should be | 2398 | * that these two variables' stack usage should be |
@@ -2245,25 +2454,39 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
2245 | if (r) | 2454 | if (r) |
2246 | goto out; | 2455 | goto out; |
2247 | break; | 2456 | break; |
2248 | case KVM_CREATE_IRQCHIP: | 2457 | case KVM_CREATE_IRQCHIP: { |
2458 | struct kvm_pic *vpic; | ||
2459 | |||
2460 | mutex_lock(&kvm->lock); | ||
2461 | r = -EEXIST; | ||
2462 | if (kvm->arch.vpic) | ||
2463 | goto create_irqchip_unlock; | ||
2249 | r = -ENOMEM; | 2464 | r = -ENOMEM; |
2250 | kvm->arch.vpic = kvm_create_pic(kvm); | 2465 | vpic = kvm_create_pic(kvm); |
2251 | if (kvm->arch.vpic) { | 2466 | if (vpic) { |
2252 | r = kvm_ioapic_init(kvm); | 2467 | r = kvm_ioapic_init(kvm); |
2253 | if (r) { | 2468 | if (r) { |
2254 | kfree(kvm->arch.vpic); | 2469 | kfree(vpic); |
2255 | kvm->arch.vpic = NULL; | 2470 | goto create_irqchip_unlock; |
2256 | goto out; | ||
2257 | } | 2471 | } |
2258 | } else | 2472 | } else |
2259 | goto out; | 2473 | goto create_irqchip_unlock; |
2474 | smp_wmb(); | ||
2475 | kvm->arch.vpic = vpic; | ||
2476 | smp_wmb(); | ||
2260 | r = kvm_setup_default_irq_routing(kvm); | 2477 | r = kvm_setup_default_irq_routing(kvm); |
2261 | if (r) { | 2478 | if (r) { |
2479 | mutex_lock(&kvm->irq_lock); | ||
2262 | kfree(kvm->arch.vpic); | 2480 | kfree(kvm->arch.vpic); |
2263 | kfree(kvm->arch.vioapic); | 2481 | kfree(kvm->arch.vioapic); |
2264 | goto out; | 2482 | kvm->arch.vpic = NULL; |
2483 | kvm->arch.vioapic = NULL; | ||
2484 | mutex_unlock(&kvm->irq_lock); | ||
2265 | } | 2485 | } |
2486 | create_irqchip_unlock: | ||
2487 | mutex_unlock(&kvm->lock); | ||
2266 | break; | 2488 | break; |
2489 | } | ||
2267 | case KVM_CREATE_PIT: | 2490 | case KVM_CREATE_PIT: |
2268 | u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY; | 2491 | u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY; |
2269 | goto create_pit; | 2492 | goto create_pit; |
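
The reworked KVM_CREATE_IRQCHIP path creates the PIC and IOAPIC under kvm->lock and only publishes the vpic pointer, fenced by write barriers, once the IOAPIC has been initialised, so a concurrent irqchip_in_kernel() check never sees a half-constructed irqchip; on failure the pointers are cleared again under irq_lock. The general "initialise fully, then publish with release semantics" pattern can be sketched in portable C11 (an analogy using pthreads and C11 atomics, not the kernel's locking and barrier primitives):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct irqchip { int ready; /* ... emulated state ... */ };

static pthread_mutex_t create_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic(struct irqchip *) chip;   /* NULL until fully constructed */

static int create_irqchip(void)
{
    struct irqchip *c;

    pthread_mutex_lock(&create_lock);
    if (atomic_load_explicit(&chip, memory_order_acquire)) {
        pthread_mutex_unlock(&create_lock);
        return -1;                        /* -EEXIST in the kernel version */
    }
    c = calloc(1, sizeof(*c));
    if (c)
        c->ready = 1;                     /* finish all initialisation first */
    /* Publish only after init; readers pair this with an acquire load. */
    atomic_store_explicit(&chip, c, memory_order_release);
    pthread_mutex_unlock(&create_lock);
    return c ? 0 : -1;
}

static int irqchip_in_kernel(void)
{
    return atomic_load_explicit(&chip, memory_order_acquire) != NULL;
}
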
@@ -2293,10 +2516,8 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
2293 | goto out; | 2516 | goto out; |
2294 | if (irqchip_in_kernel(kvm)) { | 2517 | if (irqchip_in_kernel(kvm)) { |
2295 | __s32 status; | 2518 | __s32 status; |
2296 | mutex_lock(&kvm->irq_lock); | ||
2297 | status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, | 2519 | status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, |
2298 | irq_event.irq, irq_event.level); | 2520 | irq_event.irq, irq_event.level); |
2299 | mutex_unlock(&kvm->irq_lock); | ||
2300 | if (ioctl == KVM_IRQ_LINE_STATUS) { | 2521 | if (ioctl == KVM_IRQ_LINE_STATUS) { |
2301 | irq_event.status = status; | 2522 | irq_event.status = status; |
2302 | if (copy_to_user(argp, &irq_event, | 2523 | if (copy_to_user(argp, &irq_event, |
@@ -2422,6 +2643,55 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
2422 | r = 0; | 2643 | r = 0; |
2423 | break; | 2644 | break; |
2424 | } | 2645 | } |
2646 | case KVM_XEN_HVM_CONFIG: { | ||
2647 | r = -EFAULT; | ||
2648 | if (copy_from_user(&kvm->arch.xen_hvm_config, argp, | ||
2649 | sizeof(struct kvm_xen_hvm_config))) | ||
2650 | goto out; | ||
2651 | r = -EINVAL; | ||
2652 | if (kvm->arch.xen_hvm_config.flags) | ||
2653 | goto out; | ||
2654 | r = 0; | ||
2655 | break; | ||
2656 | } | ||
2657 | case KVM_SET_CLOCK: { | ||
2658 | struct timespec now; | ||
2659 | struct kvm_clock_data user_ns; | ||
2660 | u64 now_ns; | ||
2661 | s64 delta; | ||
2662 | |||
2663 | r = -EFAULT; | ||
2664 | if (copy_from_user(&user_ns, argp, sizeof(user_ns))) | ||
2665 | goto out; | ||
2666 | |||
2667 | r = -EINVAL; | ||
2668 | if (user_ns.flags) | ||
2669 | goto out; | ||
2670 | |||
2671 | r = 0; | ||
2672 | ktime_get_ts(&now); | ||
2673 | now_ns = timespec_to_ns(&now); | ||
2674 | delta = user_ns.clock - now_ns; | ||
2675 | kvm->arch.kvmclock_offset = delta; | ||
2676 | break; | ||
2677 | } | ||
2678 | case KVM_GET_CLOCK: { | ||
2679 | struct timespec now; | ||
2680 | struct kvm_clock_data user_ns; | ||
2681 | u64 now_ns; | ||
2682 | |||
2683 | ktime_get_ts(&now); | ||
2684 | now_ns = timespec_to_ns(&now); | ||
2685 | user_ns.clock = kvm->arch.kvmclock_offset + now_ns; | ||
2686 | user_ns.flags = 0; | ||
2687 | |||
2688 | r = -EFAULT; | ||
2689 | if (copy_to_user(argp, &user_ns, sizeof(user_ns))) | ||
2690 | goto out; | ||
2691 | r = 0; | ||
2692 | break; | ||
2693 | } | ||
2694 | |||
2425 | default: | 2695 | default: |
2426 | ; | 2696 | ; |
2427 | } | 2697 | } |
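
KVM_SET_CLOCK and KVM_GET_CLOCK let a management tool read the guest-visible kvmclock and move it, for example to keep it consistent across save/restore: SET stores the requested value minus the current host time as kvmclock_offset, and GET returns host time plus that offset. A hedged userspace sketch, assuming the struct kvm_clock_data introduced together with these ioctls and a VM file descriptor vm_fd:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Advance the guest-visible clock by delta_ns nanoseconds. */
static int bump_guest_clock(int vm_fd, __s64 delta_ns)
{
    struct kvm_clock_data data;

    memset(&data, 0, sizeof(data));
    if (ioctl(vm_fd, KVM_GET_CLOCK, &data) < 0)
        return -1;

    data.clock += delta_ns;      /* nanoseconds, per the handlers above */
    data.flags = 0;              /* non-zero flags are rejected */
    return ioctl(vm_fd, KVM_SET_CLOCK, &data);
}
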
@@ -2434,7 +2704,8 @@ static void kvm_init_msr_list(void) | |||
2434 | u32 dummy[2]; | 2704 | u32 dummy[2]; |
2435 | unsigned i, j; | 2705 | unsigned i, j; |
2436 | 2706 | ||
2437 | for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) { | 2707 | /* Skip the first MSRs in the list; they are KVM-specific. */ |
2708 | for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) { | ||
2438 | if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0) | 2709 | if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0) |
2439 | continue; | 2710 | continue; |
2440 | if (j < i) | 2711 | if (j < i) |
@@ -2758,13 +3029,13 @@ static void cache_all_regs(struct kvm_vcpu *vcpu) | |||
2758 | } | 3029 | } |
2759 | 3030 | ||
2760 | int emulate_instruction(struct kvm_vcpu *vcpu, | 3031 | int emulate_instruction(struct kvm_vcpu *vcpu, |
2761 | struct kvm_run *run, | ||
2762 | unsigned long cr2, | 3032 | unsigned long cr2, |
2763 | u16 error_code, | 3033 | u16 error_code, |
2764 | int emulation_type) | 3034 | int emulation_type) |
2765 | { | 3035 | { |
2766 | int r, shadow_mask; | 3036 | int r, shadow_mask; |
2767 | struct decode_cache *c; | 3037 | struct decode_cache *c; |
3038 | struct kvm_run *run = vcpu->run; | ||
2768 | 3039 | ||
2769 | kvm_clear_exception_queue(vcpu); | 3040 | kvm_clear_exception_queue(vcpu); |
2770 | vcpu->arch.mmio_fault_cr2 = cr2; | 3041 | vcpu->arch.mmio_fault_cr2 = cr2; |
@@ -2784,7 +3055,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu, | |||
2784 | kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); | 3055 | kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); |
2785 | 3056 | ||
2786 | vcpu->arch.emulate_ctxt.vcpu = vcpu; | 3057 | vcpu->arch.emulate_ctxt.vcpu = vcpu; |
2787 | vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu); | 3058 | vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu); |
2788 | vcpu->arch.emulate_ctxt.mode = | 3059 | vcpu->arch.emulate_ctxt.mode = |
2789 | (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM) | 3060 | (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM) |
2790 | ? X86EMUL_MODE_REAL : cs_l | 3061 | ? X86EMUL_MODE_REAL : cs_l |
@@ -2862,7 +3133,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu, | |||
2862 | return EMULATE_DO_MMIO; | 3133 | return EMULATE_DO_MMIO; |
2863 | } | 3134 | } |
2864 | 3135 | ||
2865 | kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags); | 3136 | kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags); |
2866 | 3137 | ||
2867 | if (vcpu->mmio_is_write) { | 3138 | if (vcpu->mmio_is_write) { |
2868 | vcpu->mmio_needed = 0; | 3139 | vcpu->mmio_needed = 0; |
@@ -2970,8 +3241,7 @@ static int pio_string_write(struct kvm_vcpu *vcpu) | |||
2970 | return r; | 3241 | return r; |
2971 | } | 3242 | } |
2972 | 3243 | ||
2973 | int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, | 3244 | int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port) |
2974 | int size, unsigned port) | ||
2975 | { | 3245 | { |
2976 | unsigned long val; | 3246 | unsigned long val; |
2977 | 3247 | ||
@@ -3000,7 +3270,7 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, | |||
3000 | } | 3270 | } |
3001 | EXPORT_SYMBOL_GPL(kvm_emulate_pio); | 3271 | EXPORT_SYMBOL_GPL(kvm_emulate_pio); |
3002 | 3272 | ||
3003 | int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, | 3273 | int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in, |
3004 | int size, unsigned long count, int down, | 3274 | int size, unsigned long count, int down, |
3005 | gva_t address, int rep, unsigned port) | 3275 | gva_t address, int rep, unsigned port) |
3006 | { | 3276 | { |
@@ -3073,9 +3343,6 @@ static void bounce_off(void *info) | |||
3073 | /* nothing */ | 3343 | /* nothing */ |
3074 | } | 3344 | } |
3075 | 3345 | ||
3076 | static unsigned int ref_freq; | ||
3077 | static unsigned long tsc_khz_ref; | ||
3078 | |||
3079 | static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, | 3346 | static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, |
3080 | void *data) | 3347 | void *data) |
3081 | { | 3348 | { |
@@ -3084,14 +3351,11 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va | |||
3084 | struct kvm_vcpu *vcpu; | 3351 | struct kvm_vcpu *vcpu; |
3085 | int i, send_ipi = 0; | 3352 | int i, send_ipi = 0; |
3086 | 3353 | ||
3087 | if (!ref_freq) | ||
3088 | ref_freq = freq->old; | ||
3089 | |||
3090 | if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) | 3354 | if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) |
3091 | return 0; | 3355 | return 0; |
3092 | if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) | 3356 | if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) |
3093 | return 0; | 3357 | return 0; |
3094 | per_cpu(cpu_tsc_khz, freq->cpu) = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new); | 3358 | per_cpu(cpu_tsc_khz, freq->cpu) = freq->new; |
3095 | 3359 | ||
3096 | spin_lock(&kvm_lock); | 3360 | spin_lock(&kvm_lock); |
3097 | list_for_each_entry(kvm, &vm_list, vm_list) { | 3361 | list_for_each_entry(kvm, &vm_list, vm_list) { |
@@ -3128,9 +3392,28 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = { | |||
3128 | .notifier_call = kvmclock_cpufreq_notifier | 3392 | .notifier_call = kvmclock_cpufreq_notifier |
3129 | }; | 3393 | }; |
3130 | 3394 | ||
3395 | static void kvm_timer_init(void) | ||
3396 | { | ||
3397 | int cpu; | ||
3398 | |||
3399 | if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { | ||
3400 | cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, | ||
3401 | CPUFREQ_TRANSITION_NOTIFIER); | ||
3402 | for_each_online_cpu(cpu) { | ||
3403 | unsigned long khz = cpufreq_get(cpu); | ||
3404 | if (!khz) | ||
3405 | khz = tsc_khz; | ||
3406 | per_cpu(cpu_tsc_khz, cpu) = khz; | ||
3407 | } | ||
3408 | } else { | ||
3409 | for_each_possible_cpu(cpu) | ||
3410 | per_cpu(cpu_tsc_khz, cpu) = tsc_khz; | ||
3411 | } | ||
3412 | } | ||
3413 | |||
3131 | int kvm_arch_init(void *opaque) | 3414 | int kvm_arch_init(void *opaque) |
3132 | { | 3415 | { |
3133 | int r, cpu; | 3416 | int r; |
3134 | struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque; | 3417 | struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque; |
3135 | 3418 | ||
3136 | if (kvm_x86_ops) { | 3419 | if (kvm_x86_ops) { |
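
kvm_timer_init() replaces the old open-coded setup in kvm_arch_init(): with a constant TSC every CPU simply gets tsc_khz, otherwise the current cpufreq frequency is recorded per CPU (falling back to tsc_khz when cpufreq has no answer, as vcpu_load now also does) and the notifier keeps it updated with freq->new directly instead of rescaling against a cached reference frequency. That per-CPU kHz value is what turns TSC ticks into kvmclock nanoseconds; the conversion itself is straightforward, as in this hedged sketch (plain 128-bit arithmetic, not the kernel's scaled-math helpers):

#include <stdint.h>
#include <stdio.h>

/* ns = ticks * 1e6 / tsc_khz; use a 128-bit intermediate to avoid overflow. */
static uint64_t tsc_ticks_to_ns(uint64_t ticks, uint32_t tsc_khz)
{
    return (uint64_t)(((unsigned __int128)ticks * 1000000u) / tsc_khz);
}

int main(void)
{
    /* A 2.4 GHz TSC is 2400000 kHz, so 2400 ticks are one microsecond. */
    printf("%llu ns\n",
           (unsigned long long)tsc_ticks_to_ns(2400, 2400000));
    return 0;
}
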
@@ -3162,13 +3445,7 @@ int kvm_arch_init(void *opaque) | |||
3162 | kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, | 3445 | kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, |
3163 | PT_DIRTY_MASK, PT64_NX_MASK, 0); | 3446 | PT_DIRTY_MASK, PT64_NX_MASK, 0); |
3164 | 3447 | ||
3165 | for_each_possible_cpu(cpu) | 3448 | kvm_timer_init(); |
3166 | per_cpu(cpu_tsc_khz, cpu) = tsc_khz; | ||
3167 | if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { | ||
3168 | tsc_khz_ref = tsc_khz; | ||
3169 | cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, | ||
3170 | CPUFREQ_TRANSITION_NOTIFIER); | ||
3171 | } | ||
3172 | 3449 | ||
3173 | return 0; | 3450 | return 0; |
3174 | 3451 | ||
@@ -3296,7 +3573,7 @@ void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw, | |||
3296 | unsigned long *rflags) | 3573 | unsigned long *rflags) |
3297 | { | 3574 | { |
3298 | kvm_lmsw(vcpu, msw); | 3575 | kvm_lmsw(vcpu, msw); |
3299 | *rflags = kvm_x86_ops->get_rflags(vcpu); | 3576 | *rflags = kvm_get_rflags(vcpu); |
3300 | } | 3577 | } |
3301 | 3578 | ||
3302 | unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr) | 3579 | unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr) |
@@ -3334,7 +3611,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val, | |||
3334 | switch (cr) { | 3611 | switch (cr) { |
3335 | case 0: | 3612 | case 0: |
3336 | kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val)); | 3613 | kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val)); |
3337 | *rflags = kvm_x86_ops->get_rflags(vcpu); | 3614 | *rflags = kvm_get_rflags(vcpu); |
3338 | break; | 3615 | break; |
3339 | case 2: | 3616 | case 2: |
3340 | vcpu->arch.cr2 = val; | 3617 | vcpu->arch.cr2 = val; |
@@ -3454,18 +3731,18 @@ EXPORT_SYMBOL_GPL(kvm_emulate_cpuid); | |||
3454 | * | 3731 | * |
3455 | * No need to exit to userspace if we already have an interrupt queued. | 3732 | * No need to exit to userspace if we already have an interrupt queued. |
3456 | */ | 3733 | */ |
3457 | static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu, | 3734 | static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) |
3458 | struct kvm_run *kvm_run) | ||
3459 | { | 3735 | { |
3460 | return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) && | 3736 | return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) && |
3461 | kvm_run->request_interrupt_window && | 3737 | vcpu->run->request_interrupt_window && |
3462 | kvm_arch_interrupt_allowed(vcpu)); | 3738 | kvm_arch_interrupt_allowed(vcpu)); |
3463 | } | 3739 | } |
3464 | 3740 | ||
3465 | static void post_kvm_run_save(struct kvm_vcpu *vcpu, | 3741 | static void post_kvm_run_save(struct kvm_vcpu *vcpu) |
3466 | struct kvm_run *kvm_run) | ||
3467 | { | 3742 | { |
3468 | kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0; | 3743 | struct kvm_run *kvm_run = vcpu->run; |
3744 | |||
3745 | kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; | ||
3469 | kvm_run->cr8 = kvm_get_cr8(vcpu); | 3746 | kvm_run->cr8 = kvm_get_cr8(vcpu); |
3470 | kvm_run->apic_base = kvm_get_apic_base(vcpu); | 3747 | kvm_run->apic_base = kvm_get_apic_base(vcpu); |
3471 | if (irqchip_in_kernel(vcpu->kvm)) | 3748 | if (irqchip_in_kernel(vcpu->kvm)) |
@@ -3526,7 +3803,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu) | |||
3526 | kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); | 3803 | kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); |
3527 | } | 3804 | } |
3528 | 3805 | ||
3529 | static void inject_pending_event(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3806 | static void inject_pending_event(struct kvm_vcpu *vcpu) |
3530 | { | 3807 | { |
3531 | /* try to reinject previous events if any */ | 3808 | /* try to reinject previous events if any */ |
3532 | if (vcpu->arch.exception.pending) { | 3809 | if (vcpu->arch.exception.pending) { |
@@ -3562,11 +3839,11 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3562 | } | 3839 | } |
3563 | } | 3840 | } |
3564 | 3841 | ||
3565 | static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3842 | static int vcpu_enter_guest(struct kvm_vcpu *vcpu) |
3566 | { | 3843 | { |
3567 | int r; | 3844 | int r; |
3568 | bool req_int_win = !irqchip_in_kernel(vcpu->kvm) && | 3845 | bool req_int_win = !irqchip_in_kernel(vcpu->kvm) && |
3569 | kvm_run->request_interrupt_window; | 3846 | vcpu->run->request_interrupt_window; |
3570 | 3847 | ||
3571 | if (vcpu->requests) | 3848 | if (vcpu->requests) |
3572 | if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) | 3849 | if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) |
@@ -3587,12 +3864,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3587 | kvm_x86_ops->tlb_flush(vcpu); | 3864 | kvm_x86_ops->tlb_flush(vcpu); |
3588 | if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS, | 3865 | if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS, |
3589 | &vcpu->requests)) { | 3866 | &vcpu->requests)) { |
3590 | kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS; | 3867 | vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; |
3591 | r = 0; | 3868 | r = 0; |
3592 | goto out; | 3869 | goto out; |
3593 | } | 3870 | } |
3594 | if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) { | 3871 | if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) { |
3595 | kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; | 3872 | vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; |
3596 | r = 0; | 3873 | r = 0; |
3597 | goto out; | 3874 | goto out; |
3598 | } | 3875 | } |
@@ -3616,7 +3893,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3616 | goto out; | 3893 | goto out; |
3617 | } | 3894 | } |
3618 | 3895 | ||
3619 | inject_pending_event(vcpu, kvm_run); | 3896 | inject_pending_event(vcpu); |
3620 | 3897 | ||
3621 | /* enable NMI/IRQ window open exits if needed */ | 3898 | /* enable NMI/IRQ window open exits if needed */ |
3622 | if (vcpu->arch.nmi_pending) | 3899 | if (vcpu->arch.nmi_pending) |
@@ -3642,7 +3919,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3642 | } | 3919 | } |
3643 | 3920 | ||
3644 | trace_kvm_entry(vcpu->vcpu_id); | 3921 | trace_kvm_entry(vcpu->vcpu_id); |
3645 | kvm_x86_ops->run(vcpu, kvm_run); | 3922 | kvm_x86_ops->run(vcpu); |
3646 | 3923 | ||
3647 | /* | 3924 | /* |
3648 | * If the guest has used debug registers, at least dr7 | 3925 | * If the guest has used debug registers, at least dr7 |
@@ -3684,13 +3961,13 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3684 | 3961 | ||
3685 | kvm_lapic_sync_from_vapic(vcpu); | 3962 | kvm_lapic_sync_from_vapic(vcpu); |
3686 | 3963 | ||
3687 | r = kvm_x86_ops->handle_exit(kvm_run, vcpu); | 3964 | r = kvm_x86_ops->handle_exit(vcpu); |
3688 | out: | 3965 | out: |
3689 | return r; | 3966 | return r; |
3690 | } | 3967 | } |
3691 | 3968 | ||
3692 | 3969 | ||
3693 | static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3970 | static int __vcpu_run(struct kvm_vcpu *vcpu) |
3694 | { | 3971 | { |
3695 | int r; | 3972 | int r; |
3696 | 3973 | ||
@@ -3710,7 +3987,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3710 | r = 1; | 3987 | r = 1; |
3711 | while (r > 0) { | 3988 | while (r > 0) { |
3712 | if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) | 3989 | if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) |
3713 | r = vcpu_enter_guest(vcpu, kvm_run); | 3990 | r = vcpu_enter_guest(vcpu); |
3714 | else { | 3991 | else { |
3715 | up_read(&vcpu->kvm->slots_lock); | 3992 | up_read(&vcpu->kvm->slots_lock); |
3716 | kvm_vcpu_block(vcpu); | 3993 | kvm_vcpu_block(vcpu); |
@@ -3738,14 +4015,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3738 | if (kvm_cpu_has_pending_timer(vcpu)) | 4015 | if (kvm_cpu_has_pending_timer(vcpu)) |
3739 | kvm_inject_pending_timer_irqs(vcpu); | 4016 | kvm_inject_pending_timer_irqs(vcpu); |
3740 | 4017 | ||
3741 | if (dm_request_for_irq_injection(vcpu, kvm_run)) { | 4018 | if (dm_request_for_irq_injection(vcpu)) { |
3742 | r = -EINTR; | 4019 | r = -EINTR; |
3743 | kvm_run->exit_reason = KVM_EXIT_INTR; | 4020 | vcpu->run->exit_reason = KVM_EXIT_INTR; |
3744 | ++vcpu->stat.request_irq_exits; | 4021 | ++vcpu->stat.request_irq_exits; |
3745 | } | 4022 | } |
3746 | if (signal_pending(current)) { | 4023 | if (signal_pending(current)) { |
3747 | r = -EINTR; | 4024 | r = -EINTR; |
3748 | kvm_run->exit_reason = KVM_EXIT_INTR; | 4025 | vcpu->run->exit_reason = KVM_EXIT_INTR; |
3749 | ++vcpu->stat.signal_exits; | 4026 | ++vcpu->stat.signal_exits; |
3750 | } | 4027 | } |
3751 | if (need_resched()) { | 4028 | if (need_resched()) { |
@@ -3756,7 +4033,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3756 | } | 4033 | } |
3757 | 4034 | ||
3758 | up_read(&vcpu->kvm->slots_lock); | 4035 | up_read(&vcpu->kvm->slots_lock); |
3759 | post_kvm_run_save(vcpu, kvm_run); | 4036 | post_kvm_run_save(vcpu); |
3760 | 4037 | ||
3761 | vapic_exit(vcpu); | 4038 | vapic_exit(vcpu); |
3762 | 4039 | ||
@@ -3789,15 +4066,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3789 | if (r) | 4066 | if (r) |
3790 | goto out; | 4067 | goto out; |
3791 | } | 4068 | } |
3792 | #if CONFIG_HAS_IOMEM | ||
3793 | if (vcpu->mmio_needed) { | 4069 | if (vcpu->mmio_needed) { |
3794 | memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); | 4070 | memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); |
3795 | vcpu->mmio_read_completed = 1; | 4071 | vcpu->mmio_read_completed = 1; |
3796 | vcpu->mmio_needed = 0; | 4072 | vcpu->mmio_needed = 0; |
3797 | 4073 | ||
3798 | down_read(&vcpu->kvm->slots_lock); | 4074 | down_read(&vcpu->kvm->slots_lock); |
3799 | r = emulate_instruction(vcpu, kvm_run, | 4075 | r = emulate_instruction(vcpu, vcpu->arch.mmio_fault_cr2, 0, |
3800 | vcpu->arch.mmio_fault_cr2, 0, | ||
3801 | EMULTYPE_NO_DECODE); | 4076 | EMULTYPE_NO_DECODE); |
3802 | up_read(&vcpu->kvm->slots_lock); | 4077 | up_read(&vcpu->kvm->slots_lock); |
3803 | if (r == EMULATE_DO_MMIO) { | 4078 | if (r == EMULATE_DO_MMIO) { |
@@ -3808,12 +4083,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3808 | goto out; | 4083 | goto out; |
3809 | } | 4084 | } |
3810 | } | 4085 | } |
3811 | #endif | ||
3812 | if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) | 4086 | if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) |
3813 | kvm_register_write(vcpu, VCPU_REGS_RAX, | 4087 | kvm_register_write(vcpu, VCPU_REGS_RAX, |
3814 | kvm_run->hypercall.ret); | 4088 | kvm_run->hypercall.ret); |
3815 | 4089 | ||
3816 | r = __vcpu_run(vcpu, kvm_run); | 4090 | r = __vcpu_run(vcpu); |
3817 | 4091 | ||
3818 | out: | 4092 | out: |
3819 | if (vcpu->sigset_active) | 4093 | if (vcpu->sigset_active) |
@@ -3847,13 +4121,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
3847 | #endif | 4121 | #endif |
3848 | 4122 | ||
3849 | regs->rip = kvm_rip_read(vcpu); | 4123 | regs->rip = kvm_rip_read(vcpu); |
3850 | regs->rflags = kvm_x86_ops->get_rflags(vcpu); | 4124 | regs->rflags = kvm_get_rflags(vcpu); |
3851 | |||
3852 | /* | ||
3853 | * Don't leak debug flags in case they were set for guest debugging | ||
3854 | */ | ||
3855 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) | ||
3856 | regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF); | ||
3857 | 4125 | ||
3858 | vcpu_put(vcpu); | 4126 | vcpu_put(vcpu); |
3859 | 4127 | ||
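
The open-coded filtering of TF/RF that kvm_arch_vcpu_ioctl_get_regs() used to perform is gone because the series routes every rflags access through kvm_get_rflags()/kvm_set_rflags() wrappers, so flags injected on behalf of single-step guest debugging are hidden from all readers consistently rather than only from this one ioctl. Based on the logic removed above, the wrappers plausibly look like the following self-contained sketch; the constants and types are stand-ins, not the exact kernel definitions:

#include <stdint.h>

#define X86_EFLAGS_TF (1u << 8)          /* trap flag */
#define X86_EFLAGS_RF (1u << 16)         /* resume flag */
#define GUESTDBG_SINGLESTEP 0x2u         /* stand-in for KVM_GUESTDBG_SINGLESTEP */

struct vcpu {
    uint32_t guest_debug;
    unsigned long hw_rflags;             /* stands in for the VMCS/VMCB field */
};

static unsigned long kvm_get_rflags(struct vcpu *vcpu)
{
    unsigned long rflags = vcpu->hw_rflags;

    /* Don't leak debug flags set on behalf of the host-side debugger. */
    if (vcpu->guest_debug & GUESTDBG_SINGLESTEP)
        rflags &= ~(unsigned long)(X86_EFLAGS_TF | X86_EFLAGS_RF);
    return rflags;
}

static void kvm_set_rflags(struct vcpu *vcpu, unsigned long rflags)
{
    /* Re-apply the single-step flags the debugger asked for. */
    if (vcpu->guest_debug & GUESTDBG_SINGLESTEP)
        rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
    vcpu->hw_rflags = rflags;
}
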
@@ -3881,12 +4149,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
3881 | kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13); | 4149 | kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13); |
3882 | kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14); | 4150 | kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14); |
3883 | kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15); | 4151 | kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15); |
3884 | |||
3885 | #endif | 4152 | #endif |
3886 | 4153 | ||
3887 | kvm_rip_write(vcpu, regs->rip); | 4154 | kvm_rip_write(vcpu, regs->rip); |
3888 | kvm_x86_ops->set_rflags(vcpu, regs->rflags); | 4155 | kvm_set_rflags(vcpu, regs->rflags); |
3889 | |||
3890 | 4156 | ||
3891 | vcpu->arch.exception.pending = false; | 4157 | vcpu->arch.exception.pending = false; |
3892 | 4158 | ||
@@ -4105,7 +4371,7 @@ static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg) | |||
4105 | { | 4371 | { |
4106 | return (seg != VCPU_SREG_LDTR) && | 4372 | return (seg != VCPU_SREG_LDTR) && |
4107 | (seg != VCPU_SREG_TR) && | 4373 | (seg != VCPU_SREG_TR) && |
4108 | (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_VM); | 4374 | (kvm_get_rflags(vcpu) & X86_EFLAGS_VM); |
4109 | } | 4375 | } |
4110 | 4376 | ||
4111 | int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, | 4377 | int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, |
@@ -4133,7 +4399,7 @@ static void save_state_to_tss32(struct kvm_vcpu *vcpu, | |||
4133 | { | 4399 | { |
4134 | tss->cr3 = vcpu->arch.cr3; | 4400 | tss->cr3 = vcpu->arch.cr3; |
4135 | tss->eip = kvm_rip_read(vcpu); | 4401 | tss->eip = kvm_rip_read(vcpu); |
4136 | tss->eflags = kvm_x86_ops->get_rflags(vcpu); | 4402 | tss->eflags = kvm_get_rflags(vcpu); |
4137 | tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX); | 4403 | tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX); |
4138 | tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX); | 4404 | tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX); |
4139 | tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX); | 4405 | tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX); |
@@ -4157,7 +4423,7 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu, | |||
4157 | kvm_set_cr3(vcpu, tss->cr3); | 4423 | kvm_set_cr3(vcpu, tss->cr3); |
4158 | 4424 | ||
4159 | kvm_rip_write(vcpu, tss->eip); | 4425 | kvm_rip_write(vcpu, tss->eip); |
4160 | kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2); | 4426 | kvm_set_rflags(vcpu, tss->eflags | 2); |
4161 | 4427 | ||
4162 | kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax); | 4428 | kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax); |
4163 | kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx); | 4429 | kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx); |
@@ -4195,7 +4461,7 @@ static void save_state_to_tss16(struct kvm_vcpu *vcpu, | |||
4195 | struct tss_segment_16 *tss) | 4461 | struct tss_segment_16 *tss) |
4196 | { | 4462 | { |
4197 | tss->ip = kvm_rip_read(vcpu); | 4463 | tss->ip = kvm_rip_read(vcpu); |
4198 | tss->flag = kvm_x86_ops->get_rflags(vcpu); | 4464 | tss->flag = kvm_get_rflags(vcpu); |
4199 | tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX); | 4465 | tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX); |
4200 | tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX); | 4466 | tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX); |
4201 | tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX); | 4467 | tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX); |
@@ -4210,14 +4476,13 @@ static void save_state_to_tss16(struct kvm_vcpu *vcpu, | |||
4210 | tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS); | 4476 | tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS); |
4211 | tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS); | 4477 | tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS); |
4212 | tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR); | 4478 | tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR); |
4213 | tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR); | ||
4214 | } | 4479 | } |
4215 | 4480 | ||
4216 | static int load_state_from_tss16(struct kvm_vcpu *vcpu, | 4481 | static int load_state_from_tss16(struct kvm_vcpu *vcpu, |
4217 | struct tss_segment_16 *tss) | 4482 | struct tss_segment_16 *tss) |
4218 | { | 4483 | { |
4219 | kvm_rip_write(vcpu, tss->ip); | 4484 | kvm_rip_write(vcpu, tss->ip); |
4220 | kvm_x86_ops->set_rflags(vcpu, tss->flag | 2); | 4485 | kvm_set_rflags(vcpu, tss->flag | 2); |
4221 | kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax); | 4486 | kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax); |
4222 | kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx); | 4487 | kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx); |
4223 | kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx); | 4488 | kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx); |
@@ -4363,8 +4628,8 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) | |||
4363 | } | 4628 | } |
4364 | 4629 | ||
4365 | if (reason == TASK_SWITCH_IRET) { | 4630 | if (reason == TASK_SWITCH_IRET) { |
4366 | u32 eflags = kvm_x86_ops->get_rflags(vcpu); | 4631 | u32 eflags = kvm_get_rflags(vcpu); |
4367 | kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT); | 4632 | kvm_set_rflags(vcpu, eflags & ~X86_EFLAGS_NT); |
4368 | } | 4633 | } |
4369 | 4634 | ||
4370 | /* set back link to prev task only if NT bit is set in eflags | 4635 | /* set back link to prev task only if NT bit is set in eflags |
@@ -4372,11 +4637,6 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) | |||
4372 | if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE) | 4637 | if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE) |
4373 | old_tss_sel = 0xffff; | 4638 | old_tss_sel = 0xffff; |
4374 | 4639 | ||
4375 | /* set back link to prev task only if NT bit is set in eflags | ||
4376 | note that old_tss_sel is not used afetr this point */ | ||
4377 | if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE) | ||
4378 | old_tss_sel = 0xffff; | ||
4379 | |||
4380 | if (nseg_desc.type & 8) | 4640 | if (nseg_desc.type & 8) |
4381 | ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_sel, | 4641 | ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_sel, |
4382 | old_tss_base, &nseg_desc); | 4642 | old_tss_base, &nseg_desc); |
@@ -4385,8 +4645,8 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) | |||
4385 | old_tss_base, &nseg_desc); | 4645 | old_tss_base, &nseg_desc); |
4386 | 4646 | ||
4387 | if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) { | 4647 | if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) { |
4388 | u32 eflags = kvm_x86_ops->get_rflags(vcpu); | 4648 | u32 eflags = kvm_get_rflags(vcpu); |
4389 | kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT); | 4649 | kvm_set_rflags(vcpu, eflags | X86_EFLAGS_NT); |
4390 | } | 4650 | } |
4391 | 4651 | ||
4392 | if (reason != TASK_SWITCH_IRET) { | 4652 | if (reason != TASK_SWITCH_IRET) { |
@@ -4438,8 +4698,10 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | |||
4438 | 4698 | ||
4439 | mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4; | 4699 | mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4; |
4440 | kvm_x86_ops->set_cr4(vcpu, sregs->cr4); | 4700 | kvm_x86_ops->set_cr4(vcpu, sregs->cr4); |
4441 | if (!is_long_mode(vcpu) && is_pae(vcpu)) | 4701 | if (!is_long_mode(vcpu) && is_pae(vcpu)) { |
4442 | load_pdptrs(vcpu, vcpu->arch.cr3); | 4702 | load_pdptrs(vcpu, vcpu->arch.cr3); |
4703 | mmu_reset_needed = 1; | ||
4704 | } | ||
4443 | 4705 | ||
4444 | if (mmu_reset_needed) | 4706 | if (mmu_reset_needed) |
4445 | kvm_mmu_reset_context(vcpu); | 4707 | kvm_mmu_reset_context(vcpu); |
@@ -4480,12 +4742,32 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | |||
4480 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | 4742 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
4481 | struct kvm_guest_debug *dbg) | 4743 | struct kvm_guest_debug *dbg) |
4482 | { | 4744 | { |
4745 | unsigned long rflags; | ||
4483 | int i, r; | 4746 | int i, r; |
4484 | 4747 | ||
4485 | vcpu_load(vcpu); | 4748 | vcpu_load(vcpu); |
4486 | 4749 | ||
4487 | if ((dbg->control & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) == | 4750 | if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { |
4488 | (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) { | 4751 | r = -EBUSY; |
4752 | if (vcpu->arch.exception.pending) | ||
4753 | goto unlock_out; | ||
4754 | if (dbg->control & KVM_GUESTDBG_INJECT_DB) | ||
4755 | kvm_queue_exception(vcpu, DB_VECTOR); | ||
4756 | else | ||
4757 | kvm_queue_exception(vcpu, BP_VECTOR); | ||
4758 | } | ||
4759 | |||
4760 | /* | ||
4761 | * Read rflags as long as potentially injected trace flags are still | ||
4762 | * filtered out. | ||
4763 | */ | ||
4764 | rflags = kvm_get_rflags(vcpu); | ||
4765 | |||
4766 | vcpu->guest_debug = dbg->control; | ||
4767 | if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) | ||
4768 | vcpu->guest_debug = 0; | ||
4769 | |||
4770 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { | ||
4489 | for (i = 0; i < KVM_NR_DB_REGS; ++i) | 4771 | for (i = 0; i < KVM_NR_DB_REGS; ++i) |
4490 | vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; | 4772 | vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; |
4491 | vcpu->arch.switch_db_regs = | 4773 | vcpu->arch.switch_db_regs = |
@@ -4496,13 +4778,23 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | |||
4496 | vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK); | 4778 | vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK); |
4497 | } | 4779 | } |
4498 | 4780 | ||
4499 | r = kvm_x86_ops->set_guest_debug(vcpu, dbg); | 4781 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { |
4782 | vcpu->arch.singlestep_cs = | ||
4783 | get_segment_selector(vcpu, VCPU_SREG_CS); | ||
4784 | vcpu->arch.singlestep_rip = kvm_rip_read(vcpu); | ||
4785 | } | ||
4786 | |||
4787 | /* | ||
4788 | * Trigger an rflags update that will inject or remove the trace | ||
4789 | * flags. | ||
4790 | */ | ||
4791 | kvm_set_rflags(vcpu, rflags); | ||
4792 | |||
4793 | kvm_x86_ops->set_guest_debug(vcpu, dbg); | ||
4500 | 4794 | ||
4501 | if (dbg->control & KVM_GUESTDBG_INJECT_DB) | 4795 | r = 0; |
4502 | kvm_queue_exception(vcpu, DB_VECTOR); | ||
4503 | else if (dbg->control & KVM_GUESTDBG_INJECT_BP) | ||
4504 | kvm_queue_exception(vcpu, BP_VECTOR); | ||
4505 | 4796 | ||
4797 | unlock_out: | ||
4506 | vcpu_put(vcpu); | 4798 | vcpu_put(vcpu); |
4507 | 4799 | ||
4508 | return r; | 4800 | return r; |
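The reworked kvm_arch_vcpu_ioctl_set_guest_debug() is driven from userspace through the KVM_SET_GUEST_DEBUG vcpu ioctl; a minimal sketch of that call site, assuming a vcpu fd obtained elsewhere and omitting error handling:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Hypothetical userspace helper: enable guest single-stepping on a vcpu fd. */
static int enable_singlestep(int vcpu_fd)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}

A later call without KVM_GUESTDBG_ENABLE takes the same path and, via the kvm_set_rflags(vcpu, rflags) call in the hunk above, strips any injected trace flags again.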
@@ -4703,14 +4995,26 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu) | |||
4703 | return kvm_x86_ops->vcpu_reset(vcpu); | 4995 | return kvm_x86_ops->vcpu_reset(vcpu); |
4704 | } | 4996 | } |
4705 | 4997 | ||
4706 | void kvm_arch_hardware_enable(void *garbage) | 4998 | int kvm_arch_hardware_enable(void *garbage) |
4707 | { | 4999 | { |
4708 | kvm_x86_ops->hardware_enable(garbage); | 5000 | /* |
5001 | * Since this may be called from a hotplug notifcation, | ||
5002 | * we can't get the CPU frequency directly. | ||
5003 | */ | ||
5004 | if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { | ||
5005 | int cpu = raw_smp_processor_id(); | ||
5006 | per_cpu(cpu_tsc_khz, cpu) = 0; | ||
5007 | } | ||
5008 | |||
5009 | kvm_shared_msr_cpu_online(); | ||
5010 | |||
5011 | return kvm_x86_ops->hardware_enable(garbage); | ||
4709 | } | 5012 | } |
4710 | 5013 | ||
4711 | void kvm_arch_hardware_disable(void *garbage) | 5014 | void kvm_arch_hardware_disable(void *garbage) |
4712 | { | 5015 | { |
4713 | kvm_x86_ops->hardware_disable(garbage); | 5016 | kvm_x86_ops->hardware_disable(garbage); |
5017 | drop_user_return_notifiers(garbage); | ||
4714 | } | 5018 | } |
4715 | 5019 | ||
4716 | int kvm_arch_hardware_setup(void) | 5020 | int kvm_arch_hardware_setup(void) |
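kvm_arch_hardware_disable() now also calls drop_user_return_notifiers(), undoing any notifier a VCPU left armed on that CPU. The registration side follows the pattern sketched below, assuming the register/unregister helpers from <linux/user-return-notifier.h> introduced alongside this series:

#include <linux/user-return-notifier.h>

/* Sketch: run a callback just before this CPU returns to user mode,
 * e.g. to restore user-visible MSRs. One-shot: it unregisters itself. */
static void example_on_user_return(struct user_return_notifier *urn)
{
	user_return_notifier_unregister(urn);
	/* ... restore per-CPU state here ... */
}

static struct user_return_notifier example_urn = {
	.on_user_return = example_on_user_return,
};

static void arm_user_return_notifier(void)
{
	/* Must run on the CPU whose return-to-user path should fire the hook. */
	user_return_notifier_register(&example_urn);
}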
@@ -4948,8 +5252,36 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) | |||
4948 | return kvm_x86_ops->interrupt_allowed(vcpu); | 5252 | return kvm_x86_ops->interrupt_allowed(vcpu); |
4949 | } | 5253 | } |
4950 | 5254 | ||
5255 | unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) | ||
5256 | { | ||
5257 | unsigned long rflags; | ||
5258 | |||
5259 | rflags = kvm_x86_ops->get_rflags(vcpu); | ||
5260 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) | ||
5261 | rflags &= ~(unsigned long)(X86_EFLAGS_TF | X86_EFLAGS_RF); | ||
5262 | return rflags; | ||
5263 | } | ||
5264 | EXPORT_SYMBOL_GPL(kvm_get_rflags); | ||
5265 | |||
5266 | void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) | ||
5267 | { | ||
5268 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && | ||
5269 | vcpu->arch.singlestep_cs == | ||
5270 | get_segment_selector(vcpu, VCPU_SREG_CS) && | ||
5271 | vcpu->arch.singlestep_rip == kvm_rip_read(vcpu)) | ||
5272 | rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF; | ||
5273 | kvm_x86_ops->set_rflags(vcpu, rflags); | ||
5274 | } | ||
5275 | EXPORT_SYMBOL_GPL(kvm_set_rflags); | ||
5276 | |||
4951 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit); | 5277 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit); |
4952 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq); | 5278 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq); |
4953 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault); | 5279 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault); |
4954 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr); | 5280 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr); |
4955 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr); | 5281 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr); |
5282 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun); | ||
5283 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit); | ||
5284 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject); | ||
5285 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit); | ||
5286 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga); | ||
5287 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit); | ||
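With kvm_get_rflags()/kvm_set_rflags() exported, callers are expected to go through the wrappers instead of kvm_x86_ops->get_rflags()/set_rflags(), so the TF/RF bits injected for guest single-stepping never leak into or out of the guest-visible flags. An illustrative caller (the helper name is invented for the example):

/* Clear EFLAGS.NT, mirroring the TASK_SWITCH_IRET hunk above, without
 * disturbing debug-injected TF/RF. */
static void clear_guest_nt(struct kvm_vcpu *vcpu)
{
	unsigned long rflags = kvm_get_rflags(vcpu);	/* injected TF/RF filtered out */

	kvm_set_rflags(vcpu, rflags & ~X86_EFLAGS_NT);	/* re-injected if still single-stepping */
}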
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c index 58bc00f68b12..02b442e92007 100644 --- a/arch/x86/vdso/vdso32-setup.c +++ b/arch/x86/vdso/vdso32-setup.c | |||
@@ -393,7 +393,6 @@ static ctl_table abi_table2[] = { | |||
393 | 393 | ||
394 | static ctl_table abi_root_table2[] = { | 394 | static ctl_table abi_root_table2[] = { |
395 | { | 395 | { |
396 | .ctl_name = CTL_ABI, | ||
397 | .procname = "abi", | 396 | .procname = "abi", |
398 | .mode = 0555, | 397 | .mode = 0555, |
399 | .child = abi_table2 | 398 | .child = abi_table2 |
diff --git a/arch/xtensa/include/asm/socket.h b/arch/xtensa/include/asm/socket.h index beb3a6bdb61d..cbdf2ffaacff 100644 --- a/arch/xtensa/include/asm/socket.h +++ b/arch/xtensa/include/asm/socket.h | |||
@@ -71,4 +71,6 @@ | |||
71 | #define SO_PROTOCOL 38 | 71 | #define SO_PROTOCOL 38 |
72 | #define SO_DOMAIN 39 | 72 | #define SO_DOMAIN 39 |
73 | 73 | ||
74 | #define SO_RXQ_OVFL 40 | ||
75 | |||
74 | #endif /* _XTENSA_SOCKET_H */ | 76 | #endif /* _XTENSA_SOCKET_H */ |
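SO_RXQ_OVFL (40 on the architectures touched here) lets a datagram receiver request a drop counter as SOL_SOCKET ancillary data on received packets. A hedged userspace sketch, assuming the cmsg payload is a 32-bit counter:

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>

/* Opt in to the overflow counter on an existing socket. */
static int enable_rxq_ovfl(int sock)
{
	int one = 1;

	return setsockopt(sock, SOL_SOCKET, SO_RXQ_OVFL, &one, sizeof(one));
}

/* Pull the drop count out of the control messages of a received msghdr. */
static uint32_t rxq_ovfl_from_cmsg(struct msghdr *msg)
{
	struct cmsghdr *cmsg;
	uint32_t dropped = 0;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg))
		if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SO_RXQ_OVFL)
			memcpy(&dropped, CMSG_DATA(cmsg), sizeof(dropped));
	return dropped;
}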
diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h index c092c8fbb2cf..4e55dc763021 100644 --- a/arch/xtensa/include/asm/unistd.h +++ b/arch/xtensa/include/asm/unistd.h | |||
@@ -681,8 +681,10 @@ __SYSCALL(304, sys_signalfd, 3) | |||
681 | __SYSCALL(305, sys_ni_syscall, 0) | 681 | __SYSCALL(305, sys_ni_syscall, 0) |
682 | #define __NR_eventfd 306 | 682 | #define __NR_eventfd 306 |
683 | __SYSCALL(306, sys_eventfd, 1) | 683 | __SYSCALL(306, sys_eventfd, 1) |
684 | #define __NR_recvmmsg 307 | ||
685 | __SYSCALL(307, sys_recvmmsg, 5) | ||
684 | 686 | ||
685 | #define __NR_syscall_count 307 | 687 | #define __NR_syscall_count 308 |
686 | 688 | ||
687 | /* | 689 | /* |
688 | * sysxtensa syscall handler | 690 | * sysxtensa syscall handler |
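The new __NR_recvmmsg entries (alpha and xtensa above) wire up the batched-receive syscall. A sketch of its use from userspace, assuming a libc wrapper (or an equivalent syscall(__NR_recvmmsg, ...) shim) and a <sys/socket.h> that defines struct mmsghdr:

#include <string.h>
#include <sys/socket.h>

#define VLEN 8

/* Receive up to VLEN datagrams in one call; returns how many were filled. */
static int drain_socket(int sock)
{
	struct mmsghdr msgs[VLEN];
	struct iovec iovecs[VLEN];
	char bufs[VLEN][1500];
	int i;

	memset(msgs, 0, sizeof(msgs));
	for (i = 0; i < VLEN; i++) {
		iovecs[i].iov_base = bufs[i];
		iovecs[i].iov_len = sizeof(bufs[i]);
		msgs[i].msg_hdr.msg_iov = &iovecs[i];
		msgs[i].msg_hdr.msg_iovlen = 1;
	}
	/* On return, msgs[i].msg_len holds each datagram's length. */
	return recvmmsg(sock, msgs, VLEN, 0, NULL);
}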