author     Paul Mackerras <paulus@samba.org>   2008-06-16 00:53:25 -0400
committer  Paul Mackerras <paulus@samba.org>   2008-06-16 00:53:25 -0400
commit     a9653cf540d407fb75deb3db65a1be6c81d53ee0 (patch)
tree       075fb79746f1d74443c9a9062e73c26a6266b05c /arch
parent     e80ac32767d0f781ac195c472d500a7451d3729a (diff)
parent     066519068ad2fbe98c7f45552b1f592903a9c8c8 (diff)
Merge branch 'linux-2.6' into merge
Diffstat (limited to 'arch')
50 files changed, 216 insertions, 176 deletions
diff --git a/arch/arm/mach-pxa/ssp.c b/arch/arm/mach-pxa/ssp.c
index 00af7f2fed66..0bb31982fb6f 100644
--- a/arch/arm/mach-pxa/ssp.c
+++ b/arch/arm/mach-pxa/ssp.c
@@ -330,7 +330,7 @@ struct ssp_device *ssp_request(int port, const char *label) | |||
330 | 330 | ||
331 | mutex_unlock(&ssp_lock); | 331 | mutex_unlock(&ssp_lock); |
332 | 332 | ||
333 | if (ssp->port_id != port) | 333 | if (&ssp->node == &ssp_list) |
334 | return NULL; | 334 | return NULL; |
335 | 335 | ||
336 | return ssp; | 336 | return ssp; |
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 853d1f11be00..43687cc60dfb 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -465,7 +465,6 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit) | |||
465 | printk(KERN_ERR | 465 | printk(KERN_ERR |
466 | "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n", | 466 | "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n", |
467 | len, slit->header.length); | 467 | len, slit->header.length); |
468 | memset(numa_slit, 10, sizeof(numa_slit)); | ||
469 | return; | 468 | return; |
470 | } | 469 | } |
471 | slit_table = slit; | 470 | slit_table = slit; |
@@ -574,8 +573,14 @@ void __init acpi_numa_arch_fixup(void) | |||
574 | printk(KERN_INFO "Number of memory chunks in system = %d\n", | 573 | printk(KERN_INFO "Number of memory chunks in system = %d\n", |
575 | num_node_memblks); | 574 | num_node_memblks); |
576 | 575 | ||
577 | if (!slit_table) | 576 | if (!slit_table) { |
577 | for (i = 0; i < MAX_NUMNODES; i++) | ||
578 | for (j = 0; j < MAX_NUMNODES; j++) | ||
579 | node_distance(i, j) = i == j ? LOCAL_DISTANCE : | ||
580 | REMOTE_DISTANCE; | ||
578 | return; | 581 | return; |
582 | } | ||
583 | |||
579 | memset(numa_slit, -1, sizeof(numa_slit)); | 584 | memset(numa_slit, -1, sizeof(numa_slit)); |
580 | for (i = 0; i < slit_table->locality_count; i++) { | 585 | for (i = 0; i < slit_table->locality_count; i++) { |
581 | if (!pxm_bit_test(i)) | 586 | if (!pxm_bit_test(i)) |
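A note on the acpi_numa_arch_fixup() hunk above: when no SLIT table was parsed, the distance matrix is now filled with generic defaults (LOCAL_DISTANCE on the diagonal, REMOTE_DISTANCE elsewhere) instead of being left with stale contents. A minimal standalone sketch of that fill, assuming 4 nodes and the usual values (10 local, 20 remote); a 2-D array stands in here for the kernel's flat numa_slit[] that node_distance() indexes:

#include <stdio.h>

#define MAX_NUMNODES    4      /* assumed node count for the sketch */
#define LOCAL_DISTANCE  10     /* SLIT convention: distance to self */
#define REMOTE_DISTANCE 20     /* SLIT convention: default remote distance */

static unsigned char numa_slit[MAX_NUMNODES][MAX_NUMNODES];

int main(void)
{
	int i, j;

	/* same idea as the fallback added above: self = 10, everything else = 20 */
	for (i = 0; i < MAX_NUMNODES; i++)
		for (j = 0; j < MAX_NUMNODES; j++)
			numa_slit[i][j] = (i == j) ? LOCAL_DISTANCE : REMOTE_DISTANCE;

	for (i = 0; i < MAX_NUMNODES; i++) {
		for (j = 0; j < MAX_NUMNODES; j++)
			printf("%3u", numa_slit[i][j]);
		printf("\n");
	}
	return 0;
}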
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
index 351bf70da463..7f1a858bc69f 100644
--- a/arch/ia64/kvm/mmio.c
+++ b/arch/ia64/kvm/mmio.c
@@ -159,7 +159,8 @@ static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest, | |||
159 | 159 | ||
160 | if (p->u.ioreq.state == STATE_IORESP_READY) { | 160 | if (p->u.ioreq.state == STATE_IORESP_READY) { |
161 | if (dir == IOREQ_READ) | 161 | if (dir == IOREQ_READ) |
162 | *dest = p->u.ioreq.data; | 162 | /* it's necessary to ensure zero extending */ |
163 | *dest = p->u.ioreq.data & (~0UL >> (64-(s*8))); | ||
163 | } else | 164 | } else |
164 | panic_vm(vcpu); | 165 | panic_vm(vcpu); |
165 | out: | 166 | out: |
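The ia64 KVM mmio fix above masks the ioreq payload with ~0UL >> (64 - s*8) so that a read of s bytes is zero-extended into the 64-bit destination instead of keeping stale high bits. A quick standalone check of what that mask does, assuming a 64-bit unsigned long (as on ia64) and a made-up payload value:

#include <stdio.h>

int main(void)
{
	unsigned long data = 0x1122334455667788UL;   /* pretend ioreq payload */
	int s;

	/* ~0UL >> (64 - s*8) is a mask of the low s*8 bits, for s = 1..8 */
	for (s = 1; s <= 8; s++)
		printf("s=%d -> %#018lx\n", s, data & (~0UL >> (64 - s * 8)));
	return 0;
}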
diff --git a/arch/m68knommu/platform/coldfire/timers.c b/arch/m68knommu/platform/coldfire/timers.c
index ba5a9f32ebd4..454f25493491 100644
--- a/arch/m68knommu/platform/coldfire/timers.c
+++ b/arch/m68knommu/platform/coldfire/timers.c
@@ -111,7 +111,13 @@ void hw_timer_init(void) | |||
111 | 111 | ||
112 | __raw_writew(MCFTIMER_TMR_DISABLE, TA(MCFTIMER_TMR)); | 112 | __raw_writew(MCFTIMER_TMR_DISABLE, TA(MCFTIMER_TMR)); |
113 | mcftmr_cycles_per_jiffy = FREQ / HZ; | 113 | mcftmr_cycles_per_jiffy = FREQ / HZ; |
114 | __raw_writetrr(mcftmr_cycles_per_jiffy, TA(MCFTIMER_TRR)); | 114 | /* |
115 | * The coldfire timer runs from 0 to TRR included, then 0 | ||
116 | * again and so on. It counts thus actually TRR + 1 steps | ||
117 | * for 1 tick, not TRR. So if you want n cycles, | ||
118 | * initialize TRR with n - 1. | ||
119 | */ | ||
120 | __raw_writetrr(mcftmr_cycles_per_jiffy - 1, TA(MCFTIMER_TRR)); | ||
115 | __raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 | | 121 | __raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 | |
116 | MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, TA(MCFTIMER_TMR)); | 122 | MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, TA(MCFTIMER_TMR)); |
117 | 123 | ||
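The comment added in the ColdFire hunk carries the reasoning: the timer counts 0 through TRR inclusive, i.e. TRR + 1 steps per period, so TRR must be loaded with the desired cycle count minus one. A minimal arithmetic check of that off-by-one, with an assumed 1 MHz post-prescale clock and HZ = 100 (the real FREQ comes from the board configuration):

#include <stdio.h>

#define FREQ (16000000 / 16)   /* assumed timer input clock after the /16 prescaler */
#define HZ   100               /* assumed tick rate */

int main(void)
{
	unsigned int cycles_per_jiffy = FREQ / HZ;   /* 10000 cycles wanted per tick */
	unsigned int trr = cycles_per_jiffy - 1;     /* counter runs 0..TRR inclusive */

	printf("want %u cycles/tick; TRR=%u gives %u steps\n",
	       cycles_per_jiffy, trr, trr + 1);
	/* the old code loaded TRR with the cycle count itself, one step too many */
	printf("old code (TRR=%u) gave %u steps per tick\n",
	       cycles_per_jiffy, cycles_per_jiffy + 1);
	return 0;
}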
diff --git a/arch/parisc/hpux/gate.S b/arch/parisc/hpux/gate.S
index f0b18ce89842..38a1c1b8d4e8 100644
--- a/arch/parisc/hpux/gate.S
+++ b/arch/parisc/hpux/gate.S
@@ -13,10 +13,9 @@ | |||
13 | #include <asm/unistd.h> | 13 | #include <asm/unistd.h> |
14 | #include <asm/errno.h> | 14 | #include <asm/errno.h> |
15 | #include <linux/linkage.h> | 15 | #include <linux/linkage.h> |
16 | #include <linux/init.h> | ||
17 | 16 | ||
18 | .level LEVEL | 17 | .level LEVEL |
19 | __HEAD | 18 | .text |
20 | 19 | ||
21 | .import hpux_call_table | 20 | .import hpux_call_table |
22 | .import hpux_syscall_exit,code | 21 | .import hpux_syscall_exit,code |
diff --git a/arch/parisc/hpux/wrappers.S b/arch/parisc/hpux/wrappers.S
index ccd3a50c0995..58c53c879c02 100644
--- a/arch/parisc/hpux/wrappers.S
+++ b/arch/parisc/hpux/wrappers.S
@@ -28,10 +28,9 @@ | |||
28 | #include <asm/assembly.h> | 28 | #include <asm/assembly.h> |
29 | #include <asm/signal.h> | 29 | #include <asm/signal.h> |
30 | #include <linux/linkage.h> | 30 | #include <linux/linkage.h> |
31 | #include <linux/init.h> | ||
32 | 31 | ||
33 | .level LEVEL | 32 | .level LEVEL |
34 | __HEAD | 33 | .text |
35 | 34 | ||
36 | /* These should probably go in a header file somewhere. | 35 | /* These should probably go in a header file somewhere. |
37 | * They are duplicated in kernel/wrappers.S | 36 | * They are duplicated in kernel/wrappers.S |
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 5d0837458c19..d1fa4edd2d80 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -38,7 +38,6 @@ | |||
38 | #include <asm/thread_info.h> | 38 | #include <asm/thread_info.h> |
39 | 39 | ||
40 | #include <linux/linkage.h> | 40 | #include <linux/linkage.h> |
41 | #include <linux/init.h> | ||
42 | 41 | ||
43 | #ifdef CONFIG_64BIT | 42 | #ifdef CONFIG_64BIT |
44 | .level 2.0w | 43 | .level 2.0w |
@@ -622,7 +621,7 @@ | |||
622 | * the static part of the kernel address space. | 621 | * the static part of the kernel address space. |
623 | */ | 622 | */ |
624 | 623 | ||
625 | __HEAD | 624 | .text |
626 | 625 | ||
627 | .align PAGE_SIZE | 626 | .align PAGE_SIZE |
628 | 627 | ||
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 5680a2c3b13d..a84e31e82876 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -33,6 +33,7 @@ ENTRY(boot_args) | |||
33 | END(boot_args) | 33 | END(boot_args) |
34 | 34 | ||
35 | __HEAD | 35 | __HEAD |
36 | |||
36 | .align 4 | 37 | .align 4 |
37 | .import init_thread_union,data | 38 | .import init_thread_union,data |
38 | .import fault_vector_20,code /* IVA parisc 2.0 32 bit */ | 39 | .import fault_vector_20,code /* IVA parisc 2.0 32 bit */ |
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
index 068322eb8c9b..2cbf13b3ef11 100644
--- a/arch/parisc/kernel/hpmc.S
+++ b/arch/parisc/kernel/hpmc.S
@@ -47,7 +47,6 @@ | |||
47 | #include <asm/pdc.h> | 47 | #include <asm/pdc.h> |
48 | 48 | ||
49 | #include <linux/linkage.h> | 49 | #include <linux/linkage.h> |
50 | #include <linux/init.h> | ||
51 | 50 | ||
52 | /* | 51 | /* |
53 | * stack for os_hpmc, the HPMC handler. | 52 | * stack for os_hpmc, the HPMC handler. |
@@ -77,7 +76,7 @@ ENTRY(hpmc_pim_data) | |||
77 | .block HPMC_PIM_DATA_SIZE | 76 | .block HPMC_PIM_DATA_SIZE |
78 | END(hpmc_pim_data) | 77 | END(hpmc_pim_data) |
79 | 78 | ||
80 | __HEAD | 79 | .text |
81 | 80 | ||
82 | .import intr_save, code | 81 | .import intr_save, code |
83 | ENTRY(os_hpmc) | 82 | ENTRY(os_hpmc) |
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index e3246a5ca74f..09b77b2553c6 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -37,9 +37,8 @@ | |||
37 | #include <asm/pgtable.h> | 37 | #include <asm/pgtable.h> |
38 | #include <asm/cache.h> | 38 | #include <asm/cache.h> |
39 | #include <linux/linkage.h> | 39 | #include <linux/linkage.h> |
40 | #include <linux/init.h> | ||
41 | 40 | ||
42 | __HEAD | 41 | .text |
43 | .align 128 | 42 | .align 128 |
44 | 43 | ||
45 | ENTRY(flush_tlb_all_local) | 44 | ENTRY(flush_tlb_all_local) |
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 5b7fc4aa044d..0eecfbbc59cd 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -152,3 +152,6 @@ EXPORT_SYMBOL($$dyncall); | |||
152 | EXPORT_SYMBOL(node_data); | 152 | EXPORT_SYMBOL(node_data); |
153 | EXPORT_SYMBOL(pfnnid_map); | 153 | EXPORT_SYMBOL(pfnnid_map); |
154 | #endif | 154 | #endif |
155 | |||
156 | /* from pacache.S -- needed for copy_page */ | ||
157 | EXPORT_SYMBOL(copy_user_page_asm); | ||
diff --git a/arch/parisc/kernel/perf_asm.S b/arch/parisc/kernel/perf_asm.S
index d411dfb5b6d1..fa6ea99bb324 100644
--- a/arch/parisc/kernel/perf_asm.S
+++ b/arch/parisc/kernel/perf_asm.S
@@ -43,7 +43,7 @@ | |||
43 | ; The coprocessor only needs to be enabled when | 43 | ; The coprocessor only needs to be enabled when |
44 | ; starting/stopping the coprocessor with the pmenb/pmdis. | 44 | ; starting/stopping the coprocessor with the pmenb/pmdis. |
45 | ; | 45 | ; |
46 | __HEAD | 46 | .text |
47 | 47 | ||
48 | ENTRY(perf_intrigue_enable_perf_counters) | 48 | ENTRY(perf_intrigue_enable_perf_counters) |
49 | .proc | 49 | .proc |
diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S
index 47fbdae6efd5..7a92695d95a6 100644
--- a/arch/parisc/kernel/real2.S
+++ b/arch/parisc/kernel/real2.S
@@ -12,7 +12,6 @@ | |||
12 | #include <asm/assembly.h> | 12 | #include <asm/assembly.h> |
13 | 13 | ||
14 | #include <linux/linkage.h> | 14 | #include <linux/linkage.h> |
15 | #include <linux/init.h> | ||
16 | 15 | ||
17 | .section .bss | 16 | .section .bss |
18 | .export real_stack | 17 | .export real_stack |
@@ -40,7 +39,7 @@ save_cr_end: | |||
40 | /************************ 32-bit real-mode calls ***********************/ | 39 | /************************ 32-bit real-mode calls ***********************/ |
41 | /* This can be called in both narrow and wide kernels */ | 40 | /* This can be called in both narrow and wide kernels */ |
42 | 41 | ||
43 | __HEAD | 42 | .text |
44 | 43 | ||
45 | /* unsigned long real32_call_asm(unsigned int *sp, | 44 | /* unsigned long real32_call_asm(unsigned int *sp, |
46 | * unsigned int *arg0p, | 45 | * unsigned int *arg0p, |
@@ -114,7 +113,7 @@ ENDPROC(real32_call_asm) | |||
114 | # define PUSH_CR(r, where) mfctl r, %r1 ! STREG,ma %r1, REG_SZ(where) | 113 | # define PUSH_CR(r, where) mfctl r, %r1 ! STREG,ma %r1, REG_SZ(where) |
115 | # define POP_CR(r, where) LDREG,mb -REG_SZ(where), %r1 ! mtctl %r1, r | 114 | # define POP_CR(r, where) LDREG,mb -REG_SZ(where), %r1 ! mtctl %r1, r |
116 | 115 | ||
117 | __HEAD | 116 | .text |
118 | save_control_regs: | 117 | save_control_regs: |
119 | load32 PA(save_cr_space), %r28 | 118 | load32 PA(save_cr_space), %r28 |
120 | PUSH_CR(%cr24, %r28) | 119 | PUSH_CR(%cr24, %r28) |
@@ -146,7 +145,7 @@ restore_control_regs: | |||
146 | /* rfi_virt2real() and rfi_real2virt() could perhaps be adapted for | 145 | /* rfi_virt2real() and rfi_real2virt() could perhaps be adapted for |
147 | * more general-purpose use by the several places which need RFIs | 146 | * more general-purpose use by the several places which need RFIs |
148 | */ | 147 | */ |
149 | __HEAD | 148 | .text |
150 | .align 128 | 149 | .align 128 |
151 | rfi_virt2real: | 150 | rfi_virt2real: |
152 | /* switch to real mode... */ | 151 | /* switch to real mode... */ |
@@ -181,7 +180,7 @@ rfi_v2r_1: | |||
181 | bv 0(%r2) | 180 | bv 0(%r2) |
182 | nop | 181 | nop |
183 | 182 | ||
184 | __HEAD | 183 | .text |
185 | .align 128 | 184 | .align 128 |
186 | rfi_real2virt: | 185 | rfi_real2virt: |
187 | rsm PSW_SM_I,%r0 | 186 | rsm PSW_SM_I,%r0 |
@@ -219,7 +218,7 @@ rfi_r2v_1: | |||
219 | 218 | ||
220 | /************************ 64-bit real-mode calls ***********************/ | 219 | /************************ 64-bit real-mode calls ***********************/ |
221 | /* This is only usable in wide kernels right now and will probably stay so */ | 220 | /* This is only usable in wide kernels right now and will probably stay so */ |
222 | __HEAD | 221 | .text |
223 | /* unsigned long real64_call_asm(unsigned long *sp, | 222 | /* unsigned long real64_call_asm(unsigned long *sp, |
224 | * unsigned long *arg0p, | 223 | * unsigned long *arg0p, |
225 | * unsigned long fn) | 224 | * unsigned long fn) |
@@ -277,7 +276,7 @@ ENDPROC(real64_call_asm) | |||
277 | 276 | ||
278 | #endif | 277 | #endif |
279 | 278 | ||
280 | __HEAD | 279 | .text |
281 | /* http://lists.parisc-linux.org/hypermail/parisc-linux/10916.html | 280 | /* http://lists.parisc-linux.org/hypermail/parisc-linux/10916.html |
282 | ** GCC 3.3 and later has a new function in libgcc.a for | 281 | ** GCC 3.3 and later has a new function in libgcc.a for |
283 | ** comparing function pointers. | 282 | ** comparing function pointers. |
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
index db94affe5c71..fb59852006de 100644
--- a/arch/parisc/kernel/signal32.c
+++ b/arch/parisc/kernel/signal32.c
@@ -289,7 +289,7 @@ setup_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __ | |||
289 | &sc->sc_iaoq[0], compat_reg); | 289 | &sc->sc_iaoq[0], compat_reg); |
290 | 290 | ||
291 | /* Store upper half */ | 291 | /* Store upper half */ |
292 | compat_reg = (compat_uint_t)(regs->gr[32] >> 32); | 292 | compat_reg = (compat_uint_t)(regs->gr[31] >> 32); |
293 | err |= __put_user(compat_reg, &rf->rf_iaoq[0]); | 293 | err |= __put_user(compat_reg, &rf->rf_iaoq[0]); |
294 | DBG(2,"setup_sigcontext32: upper half iaoq[0] = %#x\n", compat_reg); | 294 | DBG(2,"setup_sigcontext32: upper half iaoq[0] = %#x\n", compat_reg); |
295 | 295 | ||
@@ -299,7 +299,7 @@ setup_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __ | |||
299 | DBG(2,"setup_sigcontext32: sc->sc_iaoq[1] = %p <= %#x\n", | 299 | DBG(2,"setup_sigcontext32: sc->sc_iaoq[1] = %p <= %#x\n", |
300 | &sc->sc_iaoq[1], compat_reg); | 300 | &sc->sc_iaoq[1], compat_reg); |
301 | /* Store upper half */ | 301 | /* Store upper half */ |
302 | compat_reg = (compat_uint_t)((regs->gr[32]+4) >> 32); | 302 | compat_reg = (compat_uint_t)((regs->gr[31]+4) >> 32); |
303 | err |= __put_user(compat_reg, &rf->rf_iaoq[1]); | 303 | err |= __put_user(compat_reg, &rf->rf_iaoq[1]); |
304 | DBG(2,"setup_sigcontext32: upper half iaoq[1] = %#x\n", compat_reg); | 304 | DBG(2,"setup_sigcontext32: upper half iaoq[1] = %#x\n", compat_reg); |
305 | 305 | ||
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index ae509d8cd03f..69b6eebc466e 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -17,7 +17,6 @@ | |||
17 | #include <asm/processor.h> | 17 | #include <asm/processor.h> |
18 | 18 | ||
19 | #include <linux/linkage.h> | 19 | #include <linux/linkage.h> |
20 | #include <linux/init.h> | ||
21 | 20 | ||
22 | /* We fill the empty parts of the gateway page with | 21 | /* We fill the empty parts of the gateway page with |
23 | * something that will kill the kernel or a | 22 | * something that will kill the kernel or a |
@@ -27,7 +26,7 @@ | |||
27 | 26 | ||
28 | .level LEVEL | 27 | .level LEVEL |
29 | 28 | ||
30 | __HEAD | 29 | .text |
31 | 30 | ||
32 | .import syscall_exit,code | 31 | .import syscall_exit,code |
33 | .import syscall_exit_rfi,code | 32 | .import syscall_exit_rfi,code |
@@ -637,7 +636,7 @@ END(sys_call_table64) | |||
637 | All light-weight-syscall atomic operations | 636 | All light-weight-syscall atomic operations |
638 | will use this set of locks | 637 | will use this set of locks |
639 | */ | 638 | */ |
640 | .section .data, "aw" | 639 | .section .data |
641 | .align PAGE_SIZE | 640 | .align PAGE_SIZE |
642 | ENTRY(lws_lock_start) | 641 | ENTRY(lws_lock_start) |
643 | /* lws locks */ | 642 | /* lws locks */ |
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 50b4a3a25d0a..2e516b871752 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -50,6 +50,7 @@ SECTIONS | |||
50 | 50 | ||
51 | _text = .; /* Text and read-only data */ | 51 | _text = .; /* Text and read-only data */ |
52 | .text ALIGN(16) : { | 52 | .text ALIGN(16) : { |
53 | HEAD_TEXT | ||
53 | TEXT_TEXT | 54 | TEXT_TEXT |
54 | SCHED_TEXT | 55 | SCHED_TEXT |
55 | LOCK_TEXT | 56 | LOCK_TEXT |
diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
index 4821ad6d5269..d172d4245cdc 100644
--- a/arch/parisc/lib/fixup.S
+++ b/arch/parisc/lib/fixup.S
@@ -23,7 +23,6 @@ | |||
23 | #include <asm/assembly.h> | 23 | #include <asm/assembly.h> |
24 | #include <asm/errno.h> | 24 | #include <asm/errno.h> |
25 | #include <linux/linkage.h> | 25 | #include <linux/linkage.h> |
26 | #include <linux/init.h> | ||
27 | 26 | ||
28 | #ifdef CONFIG_SMP | 27 | #ifdef CONFIG_SMP |
29 | .macro get_fault_ip t1 t2 | 28 | .macro get_fault_ip t1 t2 |
@@ -56,7 +55,7 @@ | |||
56 | 55 | ||
57 | .level LEVEL | 56 | .level LEVEL |
58 | 57 | ||
59 | __HEAD | 58 | .text |
60 | .section .fixup, "ax" | 59 | .section .fixup, "ax" |
61 | 60 | ||
62 | /* get_user() fixups, store -EFAULT in r8, and 0 in r9 */ | 61 | /* get_user() fixups, store -EFAULT in r8, and 0 in r9 */ |
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index b0d885350846..1bd23ccec17b 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -33,12 +33,11 @@ | |||
33 | */ | 33 | */ |
34 | 34 | ||
35 | 35 | ||
36 | .text | ||
37 | |||
36 | #include <asm/assembly.h> | 38 | #include <asm/assembly.h> |
37 | #include <asm/errno.h> | 39 | #include <asm/errno.h> |
38 | #include <linux/linkage.h> | 40 | #include <linux/linkage.h> |
39 | #include <linux/init.h> | ||
40 | |||
41 | __HEAD | ||
42 | 41 | ||
43 | /* | 42 | /* |
44 | * get_sr gets the appropriate space value into | 43 | * get_sr gets the appropriate space value into |
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 78fe252b92c3..ce0da689a89d 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -547,6 +547,7 @@ void __init mem_init(void) | |||
547 | } | 547 | } |
548 | 548 | ||
549 | unsigned long *empty_zero_page __read_mostly; | 549 | unsigned long *empty_zero_page __read_mostly; |
550 | EXPORT_SYMBOL(empty_zero_page); | ||
550 | 551 | ||
551 | void show_mem(void) | 552 | void show_mem(void) |
552 | { | 553 | { |
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index f5d7a5eab96e..75dff7cfa814 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -116,8 +116,6 @@ static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu, | |||
116 | struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index]; | 116 | struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index]; |
117 | struct page *page = vcpu->arch.shadow_pages[index]; | 117 | struct page *page = vcpu->arch.shadow_pages[index]; |
118 | 118 | ||
119 | kunmap(vcpu->arch.shadow_pages[index]); | ||
120 | |||
121 | if (get_tlb_v(stlbe)) { | 119 | if (get_tlb_v(stlbe)) { |
122 | if (kvmppc_44x_tlbe_is_writable(stlbe)) | 120 | if (kvmppc_44x_tlbe_is_writable(stlbe)) |
123 | kvm_release_page_dirty(page); | 121 | kvm_release_page_dirty(page); |
@@ -144,18 +142,19 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid, | |||
144 | stlbe = &vcpu->arch.shadow_tlb[victim]; | 142 | stlbe = &vcpu->arch.shadow_tlb[victim]; |
145 | 143 | ||
146 | /* Get reference to new page. */ | 144 | /* Get reference to new page. */ |
147 | down_write(¤t->mm->mmap_sem); | 145 | down_read(¤t->mm->mmap_sem); |
148 | new_page = gfn_to_page(vcpu->kvm, gfn); | 146 | new_page = gfn_to_page(vcpu->kvm, gfn); |
149 | if (is_error_page(new_page)) { | 147 | if (is_error_page(new_page)) { |
150 | printk(KERN_ERR "Couldn't get guest page!\n"); | 148 | printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn); |
151 | kvm_release_page_clean(new_page); | 149 | kvm_release_page_clean(new_page); |
150 | up_read(¤t->mm->mmap_sem); | ||
152 | return; | 151 | return; |
153 | } | 152 | } |
154 | hpaddr = page_to_phys(new_page); | 153 | hpaddr = page_to_phys(new_page); |
155 | 154 | ||
156 | /* Drop reference to old page. */ | 155 | /* Drop reference to old page. */ |
157 | kvmppc_44x_shadow_release(vcpu, victim); | 156 | kvmppc_44x_shadow_release(vcpu, victim); |
158 | up_write(¤t->mm->mmap_sem); | 157 | up_read(¤t->mm->mmap_sem); |
159 | 158 | ||
160 | vcpu->arch.shadow_pages[victim] = new_page; | 159 | vcpu->arch.shadow_pages[victim] = new_page; |
161 | 160 | ||
diff --git a/arch/powerpc/kvm/booke_guest.c b/arch/powerpc/kvm/booke_guest.c
index 712d89a28c46..9c8ad850c6e3 100644
--- a/arch/powerpc/kvm/booke_guest.c
+++ b/arch/powerpc/kvm/booke_guest.c
@@ -227,39 +227,6 @@ void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu) | |||
227 | } | 227 | } |
228 | } | 228 | } |
229 | 229 | ||
230 | static int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) | ||
231 | { | ||
232 | enum emulation_result er; | ||
233 | int r; | ||
234 | |||
235 | er = kvmppc_emulate_instruction(run, vcpu); | ||
236 | switch (er) { | ||
237 | case EMULATE_DONE: | ||
238 | /* Future optimization: only reload non-volatiles if they were | ||
239 | * actually modified. */ | ||
240 | r = RESUME_GUEST_NV; | ||
241 | break; | ||
242 | case EMULATE_DO_MMIO: | ||
243 | run->exit_reason = KVM_EXIT_MMIO; | ||
244 | /* We must reload nonvolatiles because "update" load/store | ||
245 | * instructions modify register state. */ | ||
246 | /* Future optimization: only reload non-volatiles if they were | ||
247 | * actually modified. */ | ||
248 | r = RESUME_HOST_NV; | ||
249 | break; | ||
250 | case EMULATE_FAIL: | ||
251 | /* XXX Deliver Program interrupt to guest. */ | ||
252 | printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__, | ||
253 | vcpu->arch.last_inst); | ||
254 | r = RESUME_HOST; | ||
255 | break; | ||
256 | default: | ||
257 | BUG(); | ||
258 | } | ||
259 | |||
260 | return r; | ||
261 | } | ||
262 | |||
263 | /** | 230 | /** |
264 | * kvmppc_handle_exit | 231 | * kvmppc_handle_exit |
265 | * | 232 | * |
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index a03fe0c80698..000097461283 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -246,6 +246,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
246 | case 31: | 246 | case 31: |
247 | switch (get_xop(inst)) { | 247 | switch (get_xop(inst)) { |
248 | 248 | ||
249 | case 23: /* lwzx */ | ||
250 | rt = get_rt(inst); | ||
251 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | ||
252 | break; | ||
253 | |||
249 | case 83: /* mfmsr */ | 254 | case 83: /* mfmsr */ |
250 | rt = get_rt(inst); | 255 | rt = get_rt(inst); |
251 | vcpu->arch.gpr[rt] = vcpu->arch.msr; | 256 | vcpu->arch.gpr[rt] = vcpu->arch.msr; |
@@ -267,6 +272,13 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
267 | kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]); | 272 | kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]); |
268 | break; | 273 | break; |
269 | 274 | ||
275 | case 151: /* stwx */ | ||
276 | rs = get_rs(inst); | ||
277 | emulated = kvmppc_handle_store(run, vcpu, | ||
278 | vcpu->arch.gpr[rs], | ||
279 | 4, 1); | ||
280 | break; | ||
281 | |||
270 | case 163: /* wrteei */ | 282 | case 163: /* wrteei */ |
271 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) | 283 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) |
272 | | (inst & MSR_EE); | 284 | | (inst & MSR_EE); |
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index f639a152869f..a0775e1f08df 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -20,7 +20,7 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu) | |||
20 | VCPU_EVENT(vcpu, 5, "%s", "diag time slice end"); | 20 | VCPU_EVENT(vcpu, 5, "%s", "diag time slice end"); |
21 | vcpu->stat.diagnose_44++; | 21 | vcpu->stat.diagnose_44++; |
22 | vcpu_put(vcpu); | 22 | vcpu_put(vcpu); |
23 | schedule(); | 23 | yield(); |
24 | vcpu_load(vcpu); | 24 | vcpu_load(vcpu); |
25 | return 0; | 25 | return 0; |
26 | } | 26 | } |
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index fcd1ed8015c1..84a7fed4cd4e 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -339,6 +339,11 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) | |||
339 | if (kvm_cpu_has_interrupt(vcpu)) | 339 | if (kvm_cpu_has_interrupt(vcpu)) |
340 | return 0; | 340 | return 0; |
341 | 341 | ||
342 | __set_cpu_idle(vcpu); | ||
343 | spin_lock_bh(&vcpu->arch.local_int.lock); | ||
344 | vcpu->arch.local_int.timer_due = 0; | ||
345 | spin_unlock_bh(&vcpu->arch.local_int.lock); | ||
346 | |||
342 | if (psw_interrupts_disabled(vcpu)) { | 347 | if (psw_interrupts_disabled(vcpu)) { |
343 | VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); | 348 | VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); |
344 | __unset_cpu_idle(vcpu); | 349 | __unset_cpu_idle(vcpu); |
@@ -366,8 +371,6 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) | |||
366 | no_timer: | 371 | no_timer: |
367 | spin_lock_bh(&vcpu->arch.local_int.float_int->lock); | 372 | spin_lock_bh(&vcpu->arch.local_int.float_int->lock); |
368 | spin_lock_bh(&vcpu->arch.local_int.lock); | 373 | spin_lock_bh(&vcpu->arch.local_int.lock); |
369 | __set_cpu_idle(vcpu); | ||
370 | vcpu->arch.local_int.timer_due = 0; | ||
371 | add_wait_queue(&vcpu->arch.local_int.wq, &wait); | 374 | add_wait_queue(&vcpu->arch.local_int.wq, &wait); |
372 | while (list_empty(&vcpu->arch.local_int.list) && | 375 | while (list_empty(&vcpu->arch.local_int.list) && |
373 | list_empty(&vcpu->arch.local_int.float_int->list) && | 376 | list_empty(&vcpu->arch.local_int.float_int->list) && |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0ac36a649eba..6558b09ff579 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -423,6 +423,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | |||
423 | return -EINVAL; /* not implemented yet */ | 423 | return -EINVAL; /* not implemented yet */ |
424 | } | 424 | } |
425 | 425 | ||
426 | extern void s390_handle_mcck(void); | ||
427 | |||
426 | static void __vcpu_run(struct kvm_vcpu *vcpu) | 428 | static void __vcpu_run(struct kvm_vcpu *vcpu) |
427 | { | 429 | { |
428 | memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16); | 430 | memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16); |
@@ -430,13 +432,21 @@ static void __vcpu_run(struct kvm_vcpu *vcpu) | |||
430 | if (need_resched()) | 432 | if (need_resched()) |
431 | schedule(); | 433 | schedule(); |
432 | 434 | ||
435 | if (test_thread_flag(TIF_MCCK_PENDING)) | ||
436 | s390_handle_mcck(); | ||
437 | |||
438 | kvm_s390_deliver_pending_interrupts(vcpu); | ||
439 | |||
433 | vcpu->arch.sie_block->icptcode = 0; | 440 | vcpu->arch.sie_block->icptcode = 0; |
434 | local_irq_disable(); | 441 | local_irq_disable(); |
435 | kvm_guest_enter(); | 442 | kvm_guest_enter(); |
436 | local_irq_enable(); | 443 | local_irq_enable(); |
437 | VCPU_EVENT(vcpu, 6, "entering sie flags %x", | 444 | VCPU_EVENT(vcpu, 6, "entering sie flags %x", |
438 | atomic_read(&vcpu->arch.sie_block->cpuflags)); | 445 | atomic_read(&vcpu->arch.sie_block->cpuflags)); |
439 | sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs); | 446 | if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) { |
447 | VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); | ||
448 | kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | ||
449 | } | ||
440 | VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", | 450 | VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", |
441 | vcpu->arch.sie_block->icptcode); | 451 | vcpu->arch.sie_block->icptcode); |
442 | local_irq_disable(); | 452 | local_irq_disable(); |
@@ -475,7 +485,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
475 | might_sleep(); | 485 | might_sleep(); |
476 | 486 | ||
477 | do { | 487 | do { |
478 | kvm_s390_deliver_pending_interrupts(vcpu); | ||
479 | __vcpu_run(vcpu); | 488 | __vcpu_run(vcpu); |
480 | rc = kvm_handle_sie_intercept(vcpu); | 489 | rc = kvm_handle_sie_intercept(vcpu); |
481 | } while (!signal_pending(current) && !rc); | 490 | } while (!signal_pending(current) && !rc); |
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 5c1aea97cd12..3d98ba82ea67 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -254,36 +254,46 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk) | |||
254 | int s390_enable_sie(void) | 254 | int s390_enable_sie(void) |
255 | { | 255 | { |
256 | struct task_struct *tsk = current; | 256 | struct task_struct *tsk = current; |
257 | struct mm_struct *mm; | 257 | struct mm_struct *mm, *old_mm; |
258 | int rc; | ||
259 | 258 | ||
260 | task_lock(tsk); | 259 | /* Do we have pgstes? if yes, we are done */ |
261 | |||
262 | rc = 0; | ||
263 | if (tsk->mm->context.pgstes) | 260 | if (tsk->mm->context.pgstes) |
264 | goto unlock; | 261 | return 0; |
265 | 262 | ||
266 | rc = -EINVAL; | 263 | /* lets check if we are allowed to replace the mm */ |
264 | task_lock(tsk); | ||
267 | if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 || | 265 | if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 || |
268 | tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) | 266 | tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) { |
269 | goto unlock; | 267 | task_unlock(tsk); |
268 | return -EINVAL; | ||
269 | } | ||
270 | task_unlock(tsk); | ||
270 | 271 | ||
271 | tsk->mm->context.pgstes = 1; /* dirty little tricks .. */ | 272 | /* we copy the mm with pgstes enabled */ |
273 | tsk->mm->context.pgstes = 1; | ||
272 | mm = dup_mm(tsk); | 274 | mm = dup_mm(tsk); |
273 | tsk->mm->context.pgstes = 0; | 275 | tsk->mm->context.pgstes = 0; |
274 | |||
275 | rc = -ENOMEM; | ||
276 | if (!mm) | 276 | if (!mm) |
277 | goto unlock; | 277 | return -ENOMEM; |
278 | mmput(tsk->mm); | 278 | |
279 | /* Now lets check again if somebody attached ptrace etc */ | ||
280 | task_lock(tsk); | ||
281 | if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 || | ||
282 | tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) { | ||
283 | mmput(mm); | ||
284 | task_unlock(tsk); | ||
285 | return -EINVAL; | ||
286 | } | ||
287 | |||
288 | /* ok, we are alone. No ptrace, no threads, etc. */ | ||
289 | old_mm = tsk->mm; | ||
279 | tsk->mm = tsk->active_mm = mm; | 290 | tsk->mm = tsk->active_mm = mm; |
280 | preempt_disable(); | 291 | preempt_disable(); |
281 | update_mm(mm, tsk); | 292 | update_mm(mm, tsk); |
282 | cpu_set(smp_processor_id(), mm->cpu_vm_mask); | 293 | cpu_set(smp_processor_id(), mm->cpu_vm_mask); |
283 | preempt_enable(); | 294 | preempt_enable(); |
284 | rc = 0; | ||
285 | unlock: | ||
286 | task_unlock(tsk); | 295 | task_unlock(tsk); |
287 | return rc; | 296 | mmput(old_mm); |
297 | return 0; | ||
288 | } | 298 | } |
289 | EXPORT_SYMBOL_GPL(s390_enable_sie); | 299 | EXPORT_SYMBOL_GPL(s390_enable_sie); |
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index b4b36e0f2e89..183db26d01bf 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -121,8 +121,10 @@ static int stop_ptraced_child(int pid, int exitcode, int mustexit) | |||
121 | { | 121 | { |
122 | int status, n, ret = 0; | 122 | int status, n, ret = 0; |
123 | 123 | ||
124 | if (ptrace(PTRACE_CONT, pid, 0, 0) < 0) | 124 | if (ptrace(PTRACE_CONT, pid, 0, 0) < 0) { |
125 | fatal_perror("stop_ptraced_child : ptrace failed"); | 125 | perror("stop_ptraced_child : ptrace failed"); |
126 | return -1; | ||
127 | } | ||
126 | CATCH_EINTR(n = waitpid(pid, &status, 0)); | 128 | CATCH_EINTR(n = waitpid(pid, &status, 0)); |
127 | if (!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) { | 129 | if (!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) { |
128 | int exit_with = WEXITSTATUS(status); | 130 | int exit_with = WEXITSTATUS(status); |
@@ -212,7 +214,7 @@ static void __init check_sysemu(void) | |||
212 | if (n < 0) | 214 | if (n < 0) |
213 | fatal_perror("check_sysemu : wait failed"); | 215 | fatal_perror("check_sysemu : wait failed"); |
214 | if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGTRAP)) | 216 | if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGTRAP)) |
215 | fatal("check_sysemu : expected SIGTRAP, got status = %d", | 217 | fatal("check_sysemu : expected SIGTRAP, got status = %d\n", |
216 | status); | 218 | status); |
217 | 219 | ||
218 | if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0) | 220 | if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0) |
@@ -254,9 +256,11 @@ static void __init check_sysemu(void) | |||
254 | 256 | ||
255 | if (WIFSTOPPED(status) && | 257 | if (WIFSTOPPED(status) && |
256 | (WSTOPSIG(status) == (SIGTRAP|0x80))) { | 258 | (WSTOPSIG(status) == (SIGTRAP|0x80))) { |
257 | if (!count) | 259 | if (!count) { |
258 | fatal("check_ptrace : SYSEMU_SINGLESTEP " | 260 | non_fatal("check_ptrace : SYSEMU_SINGLESTEP " |
259 | "doesn't singlestep"); | 261 | "doesn't singlestep"); |
262 | goto fail; | ||
263 | } | ||
260 | n = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_RET_OFFSET, | 264 | n = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_RET_OFFSET, |
261 | os_getpid()); | 265 | os_getpid()); |
262 | if (n < 0) | 266 | if (n < 0) |
@@ -266,9 +270,12 @@ static void __init check_sysemu(void) | |||
266 | } | 270 | } |
267 | else if (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGTRAP)) | 271 | else if (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGTRAP)) |
268 | count++; | 272 | count++; |
269 | else | 273 | else { |
270 | fatal("check_ptrace : expected SIGTRAP or " | 274 | non_fatal("check_ptrace : expected SIGTRAP or " |
271 | "(SIGTRAP | 0x80), got status = %d", status); | 275 | "(SIGTRAP | 0x80), got status = %d\n", |
276 | status); | ||
277 | goto fail; | ||
278 | } | ||
272 | } | 279 | } |
273 | if (stop_ptraced_child(pid, 0, 0) < 0) | 280 | if (stop_ptraced_child(pid, 0, 0) < 0) |
274 | goto fail_stopped; | 281 | goto fail_stopped; |
diff --git a/arch/um/os-Linux/sys-i386/registers.c b/arch/um/os-Linux/sys-i386/registers.c
index b487cbead1bd..229f7a53d8da 100644
--- a/arch/um/os-Linux/sys-i386/registers.c
+++ b/arch/um/os-Linux/sys-i386/registers.c
@@ -6,7 +6,7 @@ | |||
6 | 6 | ||
7 | #include <errno.h> | 7 | #include <errno.h> |
8 | #include <sys/ptrace.h> | 8 | #include <sys/ptrace.h> |
9 | #include <asm/user.h> | 9 | #include <sys/user.h> |
10 | #include "kern_constants.h" | 10 | #include "kern_constants.h" |
11 | #include "longjmp.h" | 11 | #include "longjmp.h" |
12 | #include "user.h" | 12 | #include "user.h" |
@@ -76,7 +76,7 @@ int put_fp_registers(int pid, unsigned long *regs) | |||
76 | 76 | ||
77 | void arch_init_registers(int pid) | 77 | void arch_init_registers(int pid) |
78 | { | 78 | { |
79 | struct user_fxsr_struct fpx_regs; | 79 | struct user_fpxregs_struct fpx_regs; |
80 | int err; | 80 | int err; |
81 | 81 | ||
82 | err = ptrace(PTRACE_GETFPXREGS, pid, 0, &fpx_regs); | 82 | err = ptrace(PTRACE_GETFPXREGS, pid, 0, &fpx_regs); |
diff --git a/arch/x86/boot/a20.c b/arch/x86/boot/a20.c
index 90943f83e84d..e01aafd03bde 100644
--- a/arch/x86/boot/a20.c
+++ b/arch/x86/boot/a20.c
@@ -115,8 +115,6 @@ static void enable_a20_fast(void) | |||
115 | 115 | ||
116 | int enable_a20(void) | 116 | int enable_a20(void) |
117 | { | 117 | { |
118 | int loops = A20_ENABLE_LOOPS; | ||
119 | |||
120 | #if defined(CONFIG_X86_ELAN) | 118 | #if defined(CONFIG_X86_ELAN) |
121 | /* Elan croaks if we try to touch the KBC */ | 119 | /* Elan croaks if we try to touch the KBC */ |
122 | enable_a20_fast(); | 120 | enable_a20_fast(); |
@@ -128,6 +126,7 @@ int enable_a20(void) | |||
128 | enable_a20_kbc(); | 126 | enable_a20_kbc(); |
129 | return 0; | 127 | return 0; |
130 | #else | 128 | #else |
129 | int loops = A20_ENABLE_LOOPS; | ||
131 | while (loops--) { | 130 | while (loops--) { |
132 | /* First, check to see if A20 is already enabled | 131 | /* First, check to see if A20 is already enabled |
133 | (legacy free, etc.) */ | 132 | (legacy free, etc.) */ |
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 2a609dc3271c..c778e4fa55a2 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -248,6 +248,7 @@ ENTRY(resume_userspace) | |||
248 | DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt | 248 | DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt |
249 | # setting need_resched or sigpending | 249 | # setting need_resched or sigpending |
250 | # between sampling and the iret | 250 | # between sampling and the iret |
251 | TRACE_IRQS_OFF | ||
251 | movl TI_flags(%ebp), %ecx | 252 | movl TI_flags(%ebp), %ecx |
252 | andl $_TIF_WORK_MASK, %ecx # is there any work to be done on | 253 | andl $_TIF_WORK_MASK, %ecx # is there any work to be done on |
253 | # int/exception return? | 254 | # int/exception return? |
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index b2cc73768a9d..f7357cc0162c 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -189,7 +189,7 @@ default_entry: | |||
189 | * this stage. | 189 | * this stage. |
190 | */ | 190 | */ |
191 | 191 | ||
192 | #define KPMDS ((0x100000000-__PAGE_OFFSET) >> 30) /* Number of kernel PMDs */ | 192 | #define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */ |
193 | 193 | ||
194 | xorl %ebx,%ebx /* %ebx is kept at zero */ | 194 | xorl %ebx,%ebx /* %ebx is kept at zero */ |
195 | 195 | ||
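On the head_32.S hunk above: the new KPMDS expression avoids the 33-bit constant 0x100000000 by relying on 32-bit two's-complement arithmetic, where -__PAGE_OFFSET equals 0x100000000 - __PAGE_OFFSET modulo 2^32, and masking with 3 keeps the count within the four PMDs of a PAE page directory. A standalone check that the old and new forms agree for the common PAGE_OFFSET choices (values assumed for illustration):

#include <stdio.h>

int main(void)
{
	/* common CONFIG_PAGE_OFFSET choices on 32-bit x86, assumed for the check */
	unsigned int offsets[] = { 0x40000000u, 0x80000000u, 0xC0000000u };
	unsigned int i;

	for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
		unsigned int po = offsets[i];
		/* each PMD maps 1 GiB (1 << 30) under PAE paging */
		unsigned int kpmds_old = (unsigned int)((0x100000000ULL - po) >> 30);
		unsigned int kpmds_new = ((-po) >> 30) & 3;
		printf("PAGE_OFFSET=%#010x  old=%u  new=%u\n", po, kpmds_old, kpmds_new);
	}
	return 0;
}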
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index a40d54fc1fdd..4dc8600d9d20 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -2130,14 +2130,10 @@ static inline void __init check_timer(void) | |||
2130 | { | 2130 | { |
2131 | int apic1, pin1, apic2, pin2; | 2131 | int apic1, pin1, apic2, pin2; |
2132 | int vector; | 2132 | int vector; |
2133 | unsigned int ver; | ||
2134 | unsigned long flags; | 2133 | unsigned long flags; |
2135 | 2134 | ||
2136 | local_irq_save(flags); | 2135 | local_irq_save(flags); |
2137 | 2136 | ||
2138 | ver = apic_read(APIC_LVR); | ||
2139 | ver = GET_APIC_VERSION(ver); | ||
2140 | |||
2141 | /* | 2137 | /* |
2142 | * get/set the timer IRQ vector: | 2138 | * get/set the timer IRQ vector: |
2143 | */ | 2139 | */ |
@@ -2150,15 +2146,11 @@ static inline void __init check_timer(void) | |||
2150 | * mode for the 8259A whenever interrupts are routed | 2146 | * mode for the 8259A whenever interrupts are routed |
2151 | * through I/O APICs. Also IRQ0 has to be enabled in | 2147 | * through I/O APICs. Also IRQ0 has to be enabled in |
2152 | * the 8259A which implies the virtual wire has to be | 2148 | * the 8259A which implies the virtual wire has to be |
2153 | * disabled in the local APIC. Finally timer interrupts | 2149 | * disabled in the local APIC. |
2154 | * need to be acknowledged manually in the 8259A for | ||
2155 | * timer_interrupt() and for the i82489DX when using | ||
2156 | * the NMI watchdog. | ||
2157 | */ | 2150 | */ |
2158 | apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); | 2151 | apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); |
2159 | init_8259A(1); | 2152 | init_8259A(1); |
2160 | timer_ack = !cpu_has_tsc; | 2153 | timer_ack = 1; |
2161 | timer_ack |= (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver)); | ||
2162 | if (timer_over_8254 > 0) | 2154 | if (timer_over_8254 > 0) |
2163 | enable_8259A_irq(0); | 2155 | enable_8259A_irq(0); |
2164 | 2156 | ||
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
index 3cad17fe026b..07c0f828f488 100644
--- a/arch/x86/kernel/mfgpt_32.c
+++ b/arch/x86/kernel/mfgpt_32.c
@@ -155,6 +155,7 @@ int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable) | |||
155 | wrmsr(msr, value, dummy); | 155 | wrmsr(msr, value, dummy); |
156 | return 0; | 156 | return 0; |
157 | } | 157 | } |
158 | EXPORT_SYMBOL_GPL(geode_mfgpt_toggle_event); | ||
158 | 159 | ||
159 | int geode_mfgpt_set_irq(int timer, int cmp, int irq, int enable) | 160 | int geode_mfgpt_set_irq(int timer, int cmp, int irq, int enable) |
160 | { | 161 | { |
@@ -222,6 +223,7 @@ int geode_mfgpt_alloc_timer(int timer, int domain) | |||
222 | /* No timers available - too bad */ | 223 | /* No timers available - too bad */ |
223 | return -1; | 224 | return -1; |
224 | } | 225 | } |
226 | EXPORT_SYMBOL_GPL(geode_mfgpt_alloc_timer); | ||
225 | 227 | ||
226 | 228 | ||
227 | #ifdef CONFIG_GEODE_MFGPT_TIMER | 229 | #ifdef CONFIG_GEODE_MFGPT_TIMER |
diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c
index 11b14bbaa61e..84160f74eeb0 100644
--- a/arch/x86/kernel/nmi_32.c
+++ b/arch/x86/kernel/nmi_32.c
@@ -26,7 +26,6 @@ | |||
26 | 26 | ||
27 | #include <asm/smp.h> | 27 | #include <asm/smp.h> |
28 | #include <asm/nmi.h> | 28 | #include <asm/nmi.h> |
29 | #include <asm/timer.h> | ||
30 | 29 | ||
31 | #include "mach_traps.h" | 30 | #include "mach_traps.h" |
32 | 31 | ||
@@ -82,7 +81,7 @@ int __init check_nmi_watchdog(void) | |||
82 | 81 | ||
83 | prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL); | 82 | prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL); |
84 | if (!prev_nmi_count) | 83 | if (!prev_nmi_count) |
85 | goto error; | 84 | return -1; |
86 | 85 | ||
87 | printk(KERN_INFO "Testing NMI watchdog ... "); | 86 | printk(KERN_INFO "Testing NMI watchdog ... "); |
88 | 87 | ||
@@ -119,7 +118,7 @@ int __init check_nmi_watchdog(void) | |||
119 | if (!atomic_read(&nmi_active)) { | 118 | if (!atomic_read(&nmi_active)) { |
120 | kfree(prev_nmi_count); | 119 | kfree(prev_nmi_count); |
121 | atomic_set(&nmi_active, -1); | 120 | atomic_set(&nmi_active, -1); |
122 | goto error; | 121 | return -1; |
123 | } | 122 | } |
124 | printk("OK.\n"); | 123 | printk("OK.\n"); |
125 | 124 | ||
@@ -130,10 +129,6 @@ int __init check_nmi_watchdog(void) | |||
130 | 129 | ||
131 | kfree(prev_nmi_count); | 130 | kfree(prev_nmi_count); |
132 | return 0; | 131 | return 0; |
133 | error: | ||
134 | timer_ack = !cpu_has_tsc; | ||
135 | |||
136 | return -1; | ||
137 | } | 132 | } |
138 | 133 | ||
139 | static int __init setup_nmi_watchdog(char *str) | 134 | static int __init setup_nmi_watchdog(char *str) |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index c5ef1af8e79d..dc00a1331ace 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -378,6 +378,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | |||
378 | struct page *page; | 378 | struct page *page; |
379 | unsigned long dma_mask = 0; | 379 | unsigned long dma_mask = 0; |
380 | dma_addr_t bus; | 380 | dma_addr_t bus; |
381 | int noretry = 0; | ||
381 | 382 | ||
382 | /* ignore region specifiers */ | 383 | /* ignore region specifiers */ |
383 | gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); | 384 | gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); |
@@ -397,20 +398,25 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | |||
397 | if (dev->dma_mask == NULL) | 398 | if (dev->dma_mask == NULL) |
398 | return NULL; | 399 | return NULL; |
399 | 400 | ||
400 | /* Don't invoke OOM killer */ | 401 | /* Don't invoke OOM killer or retry in lower 16MB DMA zone */ |
401 | gfp |= __GFP_NORETRY; | 402 | if (gfp & __GFP_DMA) |
403 | noretry = 1; | ||
402 | 404 | ||
403 | #ifdef CONFIG_X86_64 | 405 | #ifdef CONFIG_X86_64 |
404 | /* Why <=? Even when the mask is smaller than 4GB it is often | 406 | /* Why <=? Even when the mask is smaller than 4GB it is often |
405 | larger than 16MB and in this case we have a chance of | 407 | larger than 16MB and in this case we have a chance of |
406 | finding fitting memory in the next higher zone first. If | 408 | finding fitting memory in the next higher zone first. If |
407 | not retry with true GFP_DMA. -AK */ | 409 | not retry with true GFP_DMA. -AK */ |
408 | if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) | 410 | if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) { |
409 | gfp |= GFP_DMA32; | 411 | gfp |= GFP_DMA32; |
412 | if (dma_mask < DMA_32BIT_MASK) | ||
413 | noretry = 1; | ||
414 | } | ||
410 | #endif | 415 | #endif |
411 | 416 | ||
412 | again: | 417 | again: |
413 | page = dma_alloc_pages(dev, gfp, get_order(size)); | 418 | page = dma_alloc_pages(dev, |
419 | noretry ? gfp | __GFP_NORETRY : gfp, get_order(size)); | ||
414 | if (page == NULL) | 420 | if (page == NULL) |
415 | return NULL; | 421 | return NULL; |
416 | 422 | ||
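The pci-dma.c change above stops forcing __GFP_NORETRY on every coherent allocation and instead applies it only where retrying cannot help: explicit GFP_DMA requests for the 16 MB zone, and sub-32-bit masks once GFP_DMA32 has been added. A compact standalone sketch of that flag selection; the flag bits and DMA_32BIT_MASK below are stand-ins for the kernel definitions, not the real values:

#include <stdio.h>
#include <stdint.h>

#define __GFP_DMA      0x01u       /* stand-in flag bits */
#define __GFP_DMA32    0x02u
#define __GFP_NORETRY  0x1000u
#define DMA_32BIT_MASK 0xffffffffULL

static unsigned int pick_gfp(unsigned int gfp, uint64_t dma_mask)
{
	int noretry = 0;

	/* lower 16 MB DMA zone: never OOM-kill or retry for it */
	if (gfp & __GFP_DMA)
		noretry = 1;

	/* 64-bit path: try the 4 GB zone first; masks below 4 GB also skip retries */
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & __GFP_DMA)) {
		gfp |= __GFP_DMA32;
		if (dma_mask < DMA_32BIT_MASK)
			noretry = 1;
	}
	return noretry ? gfp | __GFP_NORETRY : gfp;
}

int main(void)
{
	printf("%#x\n", pick_gfp(0, DMA_32BIT_MASK));   /* 32-bit mask: DMA32, retries allowed */
	printf("%#x\n", pick_gfp(0, 0x00ffffffULL));    /* 24-bit mask: DMA32 + NORETRY */
	printf("%#x\n", pick_gfp(__GFP_DMA, ~0ULL));    /* explicit GFP_DMA: NORETRY */
	return 0;
}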
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 9615eee9b775..05191bbc68b8 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -4,6 +4,8 @@ | |||
4 | #include <linux/acpi.h> | 4 | #include <linux/acpi.h> |
5 | #include <linux/bcd.h> | 5 | #include <linux/bcd.h> |
6 | #include <linux/mc146818rtc.h> | 6 | #include <linux/mc146818rtc.h> |
7 | #include <linux/platform_device.h> | ||
8 | #include <linux/pnp.h> | ||
7 | 9 | ||
8 | #include <asm/time.h> | 10 | #include <asm/time.h> |
9 | #include <asm/vsyscall.h> | 11 | #include <asm/vsyscall.h> |
@@ -197,3 +199,35 @@ unsigned long long native_read_tsc(void) | |||
197 | } | 199 | } |
198 | EXPORT_SYMBOL(native_read_tsc); | 200 | EXPORT_SYMBOL(native_read_tsc); |
199 | 201 | ||
202 | |||
203 | static struct resource rtc_resources[] = { | ||
204 | [0] = { | ||
205 | .start = RTC_PORT(0), | ||
206 | .end = RTC_PORT(1), | ||
207 | .flags = IORESOURCE_IO, | ||
208 | }, | ||
209 | [1] = { | ||
210 | .start = RTC_IRQ, | ||
211 | .end = RTC_IRQ, | ||
212 | .flags = IORESOURCE_IRQ, | ||
213 | } | ||
214 | }; | ||
215 | |||
216 | static struct platform_device rtc_device = { | ||
217 | .name = "rtc_cmos", | ||
218 | .id = -1, | ||
219 | .resource = rtc_resources, | ||
220 | .num_resources = ARRAY_SIZE(rtc_resources), | ||
221 | }; | ||
222 | |||
223 | static __init int add_rtc_cmos(void) | ||
224 | { | ||
225 | #ifdef CONFIG_PNP | ||
226 | if (!pnp_platform_devices) | ||
227 | platform_device_register(&rtc_device); | ||
228 | #else | ||
229 | platform_device_register(&rtc_device); | ||
230 | #endif /* CONFIG_PNP */ | ||
231 | return 0; | ||
232 | } | ||
233 | device_initcall(add_rtc_cmos); | ||
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index bde6f63e15d5..08d752de4eee 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -544,6 +544,7 @@ vm86_trap: | |||
544 | #define DO_ERROR(trapnr, signr, str, name) \ | 544 | #define DO_ERROR(trapnr, signr, str, name) \ |
545 | void do_##name(struct pt_regs *regs, long error_code) \ | 545 | void do_##name(struct pt_regs *regs, long error_code) \ |
546 | { \ | 546 | { \ |
547 | trace_hardirqs_fixup(); \ | ||
547 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ | 548 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ |
548 | == NOTIFY_STOP) \ | 549 | == NOTIFY_STOP) \ |
549 | return; \ | 550 | return; \ |
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 7c077a9d9777..f2f5d260874e 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -200,7 +200,6 @@ int __pit_timer_fn(struct kvm_kpit_state *ps) | |||
200 | 200 | ||
201 | atomic_inc(&pt->pending); | 201 | atomic_inc(&pt->pending); |
202 | smp_mb__after_atomic_inc(); | 202 | smp_mb__after_atomic_inc(); |
203 | /* FIXME: handle case where the guest is in guest mode */ | ||
204 | if (vcpu0 && waitqueue_active(&vcpu0->wq)) { | 203 | if (vcpu0 && waitqueue_active(&vcpu0->wq)) { |
205 | vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE; | 204 | vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE; |
206 | wake_up_interruptible(&vcpu0->wq); | 205 | wake_up_interruptible(&vcpu0->wq); |
@@ -237,6 +236,19 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data) | |||
237 | return HRTIMER_NORESTART; | 236 | return HRTIMER_NORESTART; |
238 | } | 237 | } |
239 | 238 | ||
239 | void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu) | ||
240 | { | ||
241 | struct kvm_pit *pit = vcpu->kvm->arch.vpit; | ||
242 | struct hrtimer *timer; | ||
243 | |||
244 | if (vcpu->vcpu_id != 0 || !pit) | ||
245 | return; | ||
246 | |||
247 | timer = &pit->pit_state.pit_timer.timer; | ||
248 | if (hrtimer_cancel(timer)) | ||
249 | hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS); | ||
250 | } | ||
251 | |||
240 | static void destroy_pit_timer(struct kvm_kpit_timer *pt) | 252 | static void destroy_pit_timer(struct kvm_kpit_timer *pt) |
241 | { | 253 | { |
242 | pr_debug("pit: execute del timer!\n"); | 254 | pr_debug("pit: execute del timer!\n"); |
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index ce1f583459b1..76d736b5f664 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -94,3 +94,9 @@ void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec) | |||
94 | /* TODO: PIT, RTC etc. */ | 94 | /* TODO: PIT, RTC etc. */ |
95 | } | 95 | } |
96 | EXPORT_SYMBOL_GPL(kvm_timer_intr_post); | 96 | EXPORT_SYMBOL_GPL(kvm_timer_intr_post); |
97 | |||
98 | void __kvm_migrate_timers(struct kvm_vcpu *vcpu) | ||
99 | { | ||
100 | __kvm_migrate_apic_timer(vcpu); | ||
101 | __kvm_migrate_pit_timer(vcpu); | ||
102 | } | ||
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 1802134b836f..2a15be2275c0 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -84,6 +84,8 @@ void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec); | |||
84 | void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu); | 84 | void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu); |
85 | void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu); | 85 | void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu); |
86 | void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu); | 86 | void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu); |
87 | void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu); | ||
88 | void __kvm_migrate_timers(struct kvm_vcpu *vcpu); | ||
87 | 89 | ||
88 | int pit_has_pending_timer(struct kvm_vcpu *vcpu); | 90 | int pit_has_pending_timer(struct kvm_vcpu *vcpu); |
89 | int apic_has_pending_timer(struct kvm_vcpu *vcpu); | 91 | int apic_has_pending_timer(struct kvm_vcpu *vcpu); |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7246b60afb96..ee3f53098f0c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -658,7 +658,7 @@ static int is_empty_shadow_page(u64 *spt) | |||
658 | u64 *end; | 658 | u64 *end; |
659 | 659 | ||
660 | for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++) | 660 | for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++) |
661 | if (*pos != shadow_trap_nonpresent_pte) { | 661 | if (is_shadow_present_pte(*pos)) { |
662 | printk(KERN_ERR "%s: %p %llx\n", __func__, | 662 | printk(KERN_ERR "%s: %p %llx\n", __func__, |
663 | pos, *pos); | 663 | pos, *pos); |
664 | return 0; | 664 | return 0; |
@@ -1858,6 +1858,7 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu) | |||
1858 | sp = container_of(vcpu->kvm->arch.active_mmu_pages.next, | 1858 | sp = container_of(vcpu->kvm->arch.active_mmu_pages.next, |
1859 | struct kvm_mmu_page, link); | 1859 | struct kvm_mmu_page, link); |
1860 | kvm_mmu_zap_page(vcpu->kvm, sp); | 1860 | kvm_mmu_zap_page(vcpu->kvm, sp); |
1861 | cond_resched(); | ||
1861 | } | 1862 | } |
1862 | free_page((unsigned long)vcpu->arch.mmu.pae_root); | 1863 | free_page((unsigned long)vcpu->arch.mmu.pae_root); |
1863 | } | 1864 | } |
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 156fe10288ae..934c7b619396 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -418,7 +418,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, | |||
418 | 418 | ||
419 | /* mmio */ | 419 | /* mmio */ |
420 | if (is_error_pfn(pfn)) { | 420 | if (is_error_pfn(pfn)) { |
421 | pgprintk("gfn %x is mmio\n", walker.gfn); | 421 | pgprintk("gfn %lx is mmio\n", walker.gfn); |
422 | kvm_release_pfn_clean(pfn); | 422 | kvm_release_pfn_clean(pfn); |
423 | return 1; | 423 | return 1; |
424 | } | 424 | } |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ab22615eee89..6b0d5fa5bab3 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -688,7 +688,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
688 | delta = vcpu->arch.host_tsc - tsc_this; | 688 | delta = vcpu->arch.host_tsc - tsc_this; |
689 | svm->vmcb->control.tsc_offset += delta; | 689 | svm->vmcb->control.tsc_offset += delta; |
690 | vcpu->cpu = cpu; | 690 | vcpu->cpu = cpu; |
691 | kvm_migrate_apic_timer(vcpu); | 691 | kvm_migrate_timers(vcpu); |
692 | } | 692 | } |
693 | 693 | ||
694 | for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) | 694 | for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bfe4db11989c..02efbe75f317 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -608,7 +608,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
608 | 608 | ||
609 | if (vcpu->cpu != cpu) { | 609 | if (vcpu->cpu != cpu) { |
610 | vcpu_clear(vmx); | 610 | vcpu_clear(vmx); |
611 | kvm_migrate_apic_timer(vcpu); | 611 | kvm_migrate_timers(vcpu); |
612 | vpid_sync_vcpu_all(vmx); | 612 | vpid_sync_vcpu_all(vmx); |
613 | } | 613 | } |
614 | 614 | ||
@@ -1036,6 +1036,7 @@ static void hardware_enable(void *garbage) | |||
1036 | static void hardware_disable(void *garbage) | 1036 | static void hardware_disable(void *garbage) |
1037 | { | 1037 | { |
1038 | asm volatile (ASM_VMX_VMXOFF : : : "cc"); | 1038 | asm volatile (ASM_VMX_VMXOFF : : : "cc"); |
1039 | write_cr4(read_cr4() & ~X86_CR4_VMXE); | ||
1039 | } | 1040 | } |
1040 | 1041 | ||
1041 | static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, | 1042 | static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 21338bdb28ff..00acf1301a15 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2758,7 +2758,7 @@ again: | |||
2758 | 2758 | ||
2759 | if (vcpu->requests) { | 2759 | if (vcpu->requests) { |
2760 | if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests)) | 2760 | if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests)) |
2761 | __kvm_migrate_apic_timer(vcpu); | 2761 | __kvm_migrate_timers(vcpu); |
2762 | if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS, | 2762 | if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS, |
2763 | &vcpu->requests)) { | 2763 | &vcpu->requests)) { |
2764 | kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS; | 2764 | kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS; |
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
index 8a96320ab071..932f216d890c 100644
--- a/arch/x86/kvm/x86_emulate.c
+++ b/arch/x86/kvm/x86_emulate.c
@@ -1727,7 +1727,8 @@ twobyte_insn: | |||
1727 | if (rc) | 1727 | if (rc) |
1728 | goto done; | 1728 | goto done; |
1729 | 1729 | ||
1730 | kvm_emulate_hypercall(ctxt->vcpu); | 1730 | /* Let the processor re-execute the fixed hypercall */ |
1731 | c->eip = ctxt->vcpu->arch.rip; | ||
1731 | /* Disable writeback. */ | 1732 | /* Disable writeback. */ |
1732 | c->dst.type = OP_NONE; | 1733 | c->dst.type = OP_NONE; |
1733 | break; | 1734 | break; |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index fd7e1798c75a..8bcb6f40ccb6 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -497,6 +497,11 @@ static int vmalloc_fault(unsigned long address) | |||
497 | unsigned long pgd_paddr; | 497 | unsigned long pgd_paddr; |
498 | pmd_t *pmd_k; | 498 | pmd_t *pmd_k; |
499 | pte_t *pte_k; | 499 | pte_t *pte_k; |
500 | |||
501 | /* Make sure we are in vmalloc area */ | ||
502 | if (!(address >= VMALLOC_START && address < VMALLOC_END)) | ||
503 | return -1; | ||
504 | |||
500 | /* | 505 | /* |
501 | * Synchronize this task's top level page-table | 506 | * Synchronize this task's top level page-table |
502 | * with the 'reference' page table. | 507 | * with the 'reference' page table. |
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 3890234e5b26..99649dccad28 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -97,36 +97,9 @@ static __init inline int srat_disabled(void) | |||
97 | return numa_off || acpi_numa < 0; | 97 | return numa_off || acpi_numa < 0; |
98 | } | 98 | } |
99 | 99 | ||
100 | /* | ||
101 | * A lot of BIOS fill in 10 (= no distance) everywhere. This messes | ||
102 | * up the NUMA heuristics which wants the local node to have a smaller | ||
103 | * distance than the others. | ||
104 | * Do some quick checks here and only use the SLIT if it passes. | ||
105 | */ | ||
106 | static __init int slit_valid(struct acpi_table_slit *slit) | ||
107 | { | ||
108 | int i, j; | ||
109 | int d = slit->locality_count; | ||
110 | for (i = 0; i < d; i++) { | ||
111 | for (j = 0; j < d; j++) { | ||
112 | u8 val = slit->entry[d*i + j]; | ||
113 | if (i == j) { | ||
114 | if (val != LOCAL_DISTANCE) | ||
115 | return 0; | ||
116 | } else if (val <= LOCAL_DISTANCE) | ||
117 | return 0; | ||
118 | } | ||
119 | } | ||
120 | return 1; | ||
121 | } | ||
122 | |||
123 | /* Callback for SLIT parsing */ | 100 | /* Callback for SLIT parsing */ |
124 | void __init acpi_numa_slit_init(struct acpi_table_slit *slit) | 101 | void __init acpi_numa_slit_init(struct acpi_table_slit *slit) |
125 | { | 102 | { |
126 | if (!slit_valid(slit)) { | ||
127 | printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n"); | ||
128 | return; | ||
129 | } | ||
130 | acpi_slit = slit; | 103 | acpi_slit = slit; |
131 | } | 104 | } |
132 | 105 | ||
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 6e64aaf00d1d..940185ecaeda 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -328,18 +328,18 @@ static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = { | |||
328 | #endif | 328 | #endif |
329 | { | 329 | { |
330 | .callback = set_bf_sort, | 330 | .callback = set_bf_sort, |
331 | .ident = "HP ProLiant DL385 G2", | 331 | .ident = "HP ProLiant DL360", |
332 | .matches = { | 332 | .matches = { |
333 | DMI_MATCH(DMI_SYS_VENDOR, "HP"), | 333 | DMI_MATCH(DMI_SYS_VENDOR, "HP"), |
334 | DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL385 G2"), | 334 | DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL360"), |
335 | }, | 335 | }, |
336 | }, | 336 | }, |
337 | { | 337 | { |
338 | .callback = set_bf_sort, | 338 | .callback = set_bf_sort, |
339 | .ident = "HP ProLiant DL585 G2", | 339 | .ident = "HP ProLiant DL380", |
340 | .matches = { | 340 | .matches = { |
341 | DMI_MATCH(DMI_SYS_VENDOR, "HP"), | 341 | DMI_MATCH(DMI_SYS_VENDOR, "HP"), |
342 | DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"), | 342 | DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL380"), |
343 | }, | 343 | }, |
344 | }, | 344 | }, |
345 | {} | 345 | {} |
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index c39e1a5aa241..52b2e3856980 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/clocksource.h> | 12 | #include <linux/clocksource.h> |
13 | #include <linux/clockchips.h> | 13 | #include <linux/clockchips.h> |
14 | #include <linux/kernel_stat.h> | 14 | #include <linux/kernel_stat.h> |
15 | #include <linux/math64.h> | ||
15 | 16 | ||
16 | #include <asm/xen/hypervisor.h> | 17 | #include <asm/xen/hypervisor.h> |
17 | #include <asm/xen/hypercall.h> | 18 | #include <asm/xen/hypercall.h> |
@@ -150,11 +151,7 @@ static void do_stolen_accounting(void) | |||
150 | if (stolen < 0) | 151 | if (stolen < 0) |
151 | stolen = 0; | 152 | stolen = 0; |
152 | 153 | ||
153 | ticks = 0; | 154 | ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen); |
154 | while (stolen >= NS_PER_TICK) { | ||
155 | ticks++; | ||
156 | stolen -= NS_PER_TICK; | ||
157 | } | ||
158 | __get_cpu_var(residual_stolen) = stolen; | 155 | __get_cpu_var(residual_stolen) = stolen; |
159 | account_steal_time(NULL, ticks); | 156 | account_steal_time(NULL, ticks); |
160 | 157 | ||
@@ -166,11 +163,7 @@ static void do_stolen_accounting(void) | |||
166 | if (blocked < 0) | 163 | if (blocked < 0) |
167 | blocked = 0; | 164 | blocked = 0; |
168 | 165 | ||
169 | ticks = 0; | 166 | ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked); |
170 | while (blocked >= NS_PER_TICK) { | ||
171 | ticks++; | ||
172 | blocked -= NS_PER_TICK; | ||
173 | } | ||
174 | __get_cpu_var(residual_blocked) = blocked; | 167 | __get_cpu_var(residual_blocked) = blocked; |
175 | account_steal_time(idle_task(smp_processor_id()), ticks); | 168 | account_steal_time(idle_task(smp_processor_id()), ticks); |
176 | } | 169 | } |
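The two xen/time.c hunks above replace open-coded subtract-and-count loops with iter_div_u64_rem(), which yields the whole ticks and leaves the sub-tick remainder behind for the next accounting pass. A standalone sketch of the equivalence; div_ticks() below is a local stand-in for the kernel helper, and the numbers are made up:

#include <stdio.h>
#include <stdint.h>

#define NS_PER_TICK (1000000000ULL / 100)    /* assumed HZ = 100 */

/* local stand-in for iter_div_u64_rem(): quotient returned, remainder written back */
static uint64_t div_ticks(uint64_t ns, uint64_t per_tick, uint64_t *rem)
{
	*rem = ns % per_tick;
	return ns / per_tick;
}

int main(void)
{
	uint64_t stolen = 123456789;             /* made-up stolen time in ns */
	uint64_t s = stolen, loop_ticks = 0;
	uint64_t ticks, residual;

	/* the loop the patch removes */
	while (s >= NS_PER_TICK) {
		loop_ticks++;
		s -= NS_PER_TICK;
	}

	/* the division that replaces it */
	ticks = div_ticks(stolen, NS_PER_TICK, &residual);

	printf("loop: ticks=%llu residual=%llu\n",
	       (unsigned long long)loop_ticks, (unsigned long long)s);
	printf("div : ticks=%llu residual=%llu\n",
	       (unsigned long long)ticks, (unsigned long long)residual);
	return 0;
}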