author | Thomas Gleixner <tglx@linutronix.de> | 2013-07-04 17:11:22 -0400
committer | Thomas Gleixner <tglx@linutronix.de> | 2013-07-04 17:11:22 -0400
commit | 2b0f89317e99735bbf32eaede81f707f98ab1b5e (patch)
tree | 16daa236e21876b11f1c0b9256cd4046aadba020 /arch/x86
parent | 07bd1172902e782f288e4d44b1fde7dec0f08b6f (diff)
parent | fa18f7bde3ad4568d1d343b60d963bfbd8dc3991 (diff)
Merge branch 'timers/posix-cpu-timers-for-tglx' of
git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into timers/core
Frederic said: "Most of these patches have been hanging around for
several months now, in -mmotm for a significant chunk. They already
missed a few releases."
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86')
34 files changed, 258 insertions, 271 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 685692c94f05..fe120da25625 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -2265,6 +2265,7 @@ source "fs/Kconfig.binfmt" | |||
2265 | config IA32_EMULATION | 2265 | config IA32_EMULATION |
2266 | bool "IA32 Emulation" | 2266 | bool "IA32 Emulation" |
2267 | depends on X86_64 | 2267 | depends on X86_64 |
2268 | select BINFMT_ELF | ||
2268 | select COMPAT_BINFMT_ELF | 2269 | select COMPAT_BINFMT_ELF |
2269 | select HAVE_UID16 | 2270 | select HAVE_UID16 |
2270 | ---help--- | 2271 | ---help--- |
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 35ee62fccf98..c205035a6b96 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c | |||
@@ -251,51 +251,6 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size) | |||
251 | *size = len; | 251 | *size = len; |
252 | } | 252 | } |
253 | 253 | ||
254 | static efi_status_t setup_efi_vars(struct boot_params *params) | ||
255 | { | ||
256 | struct setup_data *data; | ||
257 | struct efi_var_bootdata *efidata; | ||
258 | u64 store_size, remaining_size, var_size; | ||
259 | efi_status_t status; | ||
260 | |||
261 | if (sys_table->runtime->hdr.revision < EFI_2_00_SYSTEM_TABLE_REVISION) | ||
262 | return EFI_UNSUPPORTED; | ||
263 | |||
264 | data = (struct setup_data *)(unsigned long)params->hdr.setup_data; | ||
265 | |||
266 | while (data && data->next) | ||
267 | data = (struct setup_data *)(unsigned long)data->next; | ||
268 | |||
269 | status = efi_call_phys4((void *)sys_table->runtime->query_variable_info, | ||
270 | EFI_VARIABLE_NON_VOLATILE | | ||
271 | EFI_VARIABLE_BOOTSERVICE_ACCESS | | ||
272 | EFI_VARIABLE_RUNTIME_ACCESS, &store_size, | ||
273 | &remaining_size, &var_size); | ||
274 | |||
275 | if (status != EFI_SUCCESS) | ||
276 | return status; | ||
277 | |||
278 | status = efi_call_phys3(sys_table->boottime->allocate_pool, | ||
279 | EFI_LOADER_DATA, sizeof(*efidata), &efidata); | ||
280 | |||
281 | if (status != EFI_SUCCESS) | ||
282 | return status; | ||
283 | |||
284 | efidata->data.type = SETUP_EFI_VARS; | ||
285 | efidata->data.len = sizeof(struct efi_var_bootdata) - | ||
286 | sizeof(struct setup_data); | ||
287 | efidata->data.next = 0; | ||
288 | efidata->store_size = store_size; | ||
289 | efidata->remaining_size = remaining_size; | ||
290 | efidata->max_var_size = var_size; | ||
291 | |||
292 | if (data) | ||
293 | data->next = (unsigned long)efidata; | ||
294 | else | ||
295 | params->hdr.setup_data = (unsigned long)efidata; | ||
296 | |||
297 | } | ||
298 | |||
299 | static efi_status_t setup_efi_pci(struct boot_params *params) | 254 | static efi_status_t setup_efi_pci(struct boot_params *params) |
300 | { | 255 | { |
301 | efi_pci_io_protocol *pci; | 256 | efi_pci_io_protocol *pci; |
@@ -1202,8 +1157,6 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table, | |||
1202 | 1157 | ||
1203 | setup_graphics(boot_params); | 1158 | setup_graphics(boot_params); |
1204 | 1159 | ||
1205 | setup_efi_vars(boot_params); | ||
1206 | |||
1207 | setup_efi_pci(boot_params); | 1160 | setup_efi_pci(boot_params); |
1208 | 1161 | ||
1209 | status = efi_call_phys3(sys_table->boottime->allocate_pool, | 1162 | status = efi_call_phys3(sys_table->boottime->allocate_pool, |
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 62fe22cd4cba..477e9d75149b 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S | |||
@@ -2681,56 +2681,68 @@ ENTRY(aesni_xts_crypt8) | |||
2681 | addq %rcx, KEYP | 2681 | addq %rcx, KEYP |
2682 | 2682 | ||
2683 | movdqa IV, STATE1 | 2683 | movdqa IV, STATE1 |
2684 | pxor 0x00(INP), STATE1 | 2684 | movdqu 0x00(INP), INC |
2685 | pxor INC, STATE1 | ||
2685 | movdqu IV, 0x00(OUTP) | 2686 | movdqu IV, 0x00(OUTP) |
2686 | 2687 | ||
2687 | _aesni_gf128mul_x_ble() | 2688 | _aesni_gf128mul_x_ble() |
2688 | movdqa IV, STATE2 | 2689 | movdqa IV, STATE2 |
2689 | pxor 0x10(INP), STATE2 | 2690 | movdqu 0x10(INP), INC |
2691 | pxor INC, STATE2 | ||
2690 | movdqu IV, 0x10(OUTP) | 2692 | movdqu IV, 0x10(OUTP) |
2691 | 2693 | ||
2692 | _aesni_gf128mul_x_ble() | 2694 | _aesni_gf128mul_x_ble() |
2693 | movdqa IV, STATE3 | 2695 | movdqa IV, STATE3 |
2694 | pxor 0x20(INP), STATE3 | 2696 | movdqu 0x20(INP), INC |
2697 | pxor INC, STATE3 | ||
2695 | movdqu IV, 0x20(OUTP) | 2698 | movdqu IV, 0x20(OUTP) |
2696 | 2699 | ||
2697 | _aesni_gf128mul_x_ble() | 2700 | _aesni_gf128mul_x_ble() |
2698 | movdqa IV, STATE4 | 2701 | movdqa IV, STATE4 |
2699 | pxor 0x30(INP), STATE4 | 2702 | movdqu 0x30(INP), INC |
2703 | pxor INC, STATE4 | ||
2700 | movdqu IV, 0x30(OUTP) | 2704 | movdqu IV, 0x30(OUTP) |
2701 | 2705 | ||
2702 | call *%r11 | 2706 | call *%r11 |
2703 | 2707 | ||
2704 | pxor 0x00(OUTP), STATE1 | 2708 | movdqu 0x00(OUTP), INC |
2709 | pxor INC, STATE1 | ||
2705 | movdqu STATE1, 0x00(OUTP) | 2710 | movdqu STATE1, 0x00(OUTP) |
2706 | 2711 | ||
2707 | _aesni_gf128mul_x_ble() | 2712 | _aesni_gf128mul_x_ble() |
2708 | movdqa IV, STATE1 | 2713 | movdqa IV, STATE1 |
2709 | pxor 0x40(INP), STATE1 | 2714 | movdqu 0x40(INP), INC |
2715 | pxor INC, STATE1 | ||
2710 | movdqu IV, 0x40(OUTP) | 2716 | movdqu IV, 0x40(OUTP) |
2711 | 2717 | ||
2712 | pxor 0x10(OUTP), STATE2 | 2718 | movdqu 0x10(OUTP), INC |
2719 | pxor INC, STATE2 | ||
2713 | movdqu STATE2, 0x10(OUTP) | 2720 | movdqu STATE2, 0x10(OUTP) |
2714 | 2721 | ||
2715 | _aesni_gf128mul_x_ble() | 2722 | _aesni_gf128mul_x_ble() |
2716 | movdqa IV, STATE2 | 2723 | movdqa IV, STATE2 |
2717 | pxor 0x50(INP), STATE2 | 2724 | movdqu 0x50(INP), INC |
2725 | pxor INC, STATE2 | ||
2718 | movdqu IV, 0x50(OUTP) | 2726 | movdqu IV, 0x50(OUTP) |
2719 | 2727 | ||
2720 | pxor 0x20(OUTP), STATE3 | 2728 | movdqu 0x20(OUTP), INC |
2729 | pxor INC, STATE3 | ||
2721 | movdqu STATE3, 0x20(OUTP) | 2730 | movdqu STATE3, 0x20(OUTP) |
2722 | 2731 | ||
2723 | _aesni_gf128mul_x_ble() | 2732 | _aesni_gf128mul_x_ble() |
2724 | movdqa IV, STATE3 | 2733 | movdqa IV, STATE3 |
2725 | pxor 0x60(INP), STATE3 | 2734 | movdqu 0x60(INP), INC |
2735 | pxor INC, STATE3 | ||
2726 | movdqu IV, 0x60(OUTP) | 2736 | movdqu IV, 0x60(OUTP) |
2727 | 2737 | ||
2728 | pxor 0x30(OUTP), STATE4 | 2738 | movdqu 0x30(OUTP), INC |
2739 | pxor INC, STATE4 | ||
2729 | movdqu STATE4, 0x30(OUTP) | 2740 | movdqu STATE4, 0x30(OUTP) |
2730 | 2741 | ||
2731 | _aesni_gf128mul_x_ble() | 2742 | _aesni_gf128mul_x_ble() |
2732 | movdqa IV, STATE4 | 2743 | movdqa IV, STATE4 |
2733 | pxor 0x70(INP), STATE4 | 2744 | movdqu 0x70(INP), INC |
2745 | pxor INC, STATE4 | ||
2734 | movdqu IV, 0x70(OUTP) | 2746 | movdqu IV, 0x70(OUTP) |
2735 | 2747 | ||
2736 | _aesni_gf128mul_x_ble() | 2748 | _aesni_gf128mul_x_ble() |
@@ -2738,16 +2750,20 @@ ENTRY(aesni_xts_crypt8) | |||
2738 | 2750 | ||
2739 | call *%r11 | 2751 | call *%r11 |
2740 | 2752 | ||
2741 | pxor 0x40(OUTP), STATE1 | 2753 | movdqu 0x40(OUTP), INC |
2754 | pxor INC, STATE1 | ||
2742 | movdqu STATE1, 0x40(OUTP) | 2755 | movdqu STATE1, 0x40(OUTP) |
2743 | 2756 | ||
2744 | pxor 0x50(OUTP), STATE2 | 2757 | movdqu 0x50(OUTP), INC |
2758 | pxor INC, STATE2 | ||
2745 | movdqu STATE2, 0x50(OUTP) | 2759 | movdqu STATE2, 0x50(OUTP) |
2746 | 2760 | ||
2747 | pxor 0x60(OUTP), STATE3 | 2761 | movdqu 0x60(OUTP), INC |
2762 | pxor INC, STATE3 | ||
2748 | movdqu STATE3, 0x60(OUTP) | 2763 | movdqu STATE3, 0x60(OUTP) |
2749 | 2764 | ||
2750 | pxor 0x70(OUTP), STATE4 | 2765 | movdqu 0x70(OUTP), INC |
2766 | pxor INC, STATE4 | ||
2751 | movdqu STATE4, 0x70(OUTP) | 2767 | movdqu STATE4, 0x70(OUTP) |
2752 | 2768 | ||
2753 | ret | 2769 | ret |
diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S index 94c27df8a549..f247304299a2 100644 --- a/arch/x86/crypto/crc32-pclmul_asm.S +++ b/arch/x86/crypto/crc32-pclmul_asm.S | |||
@@ -240,7 +240,7 @@ fold_64: | |||
240 | pand %xmm3, %xmm1 | 240 | pand %xmm3, %xmm1 |
241 | PCLMULQDQ 0x00, CONSTANT, %xmm1 | 241 | PCLMULQDQ 0x00, CONSTANT, %xmm1 |
242 | pxor %xmm2, %xmm1 | 242 | pxor %xmm2, %xmm1 |
243 | pextrd $0x01, %xmm1, %eax | 243 | PEXTRD 0x01, %xmm1, %eax |
244 | 244 | ||
245 | ret | 245 | ret |
246 | ENDPROC(crc32_pclmul_le_16) | 246 | ENDPROC(crc32_pclmul_le_16) |
diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S index 56610c4bf31b..642f15687a0a 100644 --- a/arch/x86/crypto/sha256-avx-asm.S +++ b/arch/x86/crypto/sha256-avx-asm.S | |||
@@ -118,7 +118,7 @@ y2 = %r15d | |||
118 | 118 | ||
119 | _INP_END_SIZE = 8 | 119 | _INP_END_SIZE = 8 |
120 | _INP_SIZE = 8 | 120 | _INP_SIZE = 8 |
121 | _XFER_SIZE = 8 | 121 | _XFER_SIZE = 16 |
122 | _XMM_SAVE_SIZE = 0 | 122 | _XMM_SAVE_SIZE = 0 |
123 | 123 | ||
124 | _INP_END = 0 | 124 | _INP_END = 0 |
diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S index 98d3c391da81..f833b74d902b 100644 --- a/arch/x86/crypto/sha256-ssse3-asm.S +++ b/arch/x86/crypto/sha256-ssse3-asm.S | |||
@@ -111,7 +111,7 @@ y2 = %r15d | |||
111 | 111 | ||
112 | _INP_END_SIZE = 8 | 112 | _INP_END_SIZE = 8 |
113 | _INP_SIZE = 8 | 113 | _INP_SIZE = 8 |
114 | _XFER_SIZE = 8 | 114 | _XFER_SIZE = 16 |
115 | _XMM_SAVE_SIZE = 0 | 115 | _XMM_SAVE_SIZE = 0 |
116 | 116 | ||
117 | _INP_END = 0 | 117 | _INP_END = 0 |
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 805078e08013..52ff81cce008 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c | |||
@@ -192,7 +192,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, | |||
192 | /* struct user */ | 192 | /* struct user */ |
193 | DUMP_WRITE(&dump, sizeof(dump)); | 193 | DUMP_WRITE(&dump, sizeof(dump)); |
194 | /* Now dump all of the user data. Include malloced stuff as well */ | 194 | /* Now dump all of the user data. Include malloced stuff as well */ |
195 | DUMP_SEEK(PAGE_SIZE); | 195 | DUMP_SEEK(PAGE_SIZE - sizeof(dump)); |
196 | /* now we start writing out the user space info */ | 196 | /* now we start writing out the user space info */ |
197 | set_fs(USER_DS); | 197 | set_fs(USER_DS); |
198 | /* Dump the data area */ | 198 | /* Dump the data area */ |
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 2fb5d5884e23..60c89f30c727 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h | |||
@@ -102,13 +102,6 @@ extern void efi_call_phys_epilog(void); | |||
102 | extern void efi_unmap_memmap(void); | 102 | extern void efi_unmap_memmap(void); |
103 | extern void efi_memory_uc(u64 addr, unsigned long size); | 103 | extern void efi_memory_uc(u64 addr, unsigned long size); |
104 | 104 | ||
105 | struct efi_var_bootdata { | ||
106 | struct setup_data data; | ||
107 | u64 store_size; | ||
108 | u64 remaining_size; | ||
109 | u64 max_var_size; | ||
110 | }; | ||
111 | |||
112 | #ifdef CONFIG_EFI | 105 | #ifdef CONFIG_EFI |
113 | 106 | ||
114 | static inline bool efi_is_native(void) | 107 | static inline bool efi_is_native(void) |
diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h index 280bf7fb6aba..3e115273ed88 100644 --- a/arch/x86/include/asm/inst.h +++ b/arch/x86/include/asm/inst.h | |||
@@ -9,12 +9,68 @@ | |||
9 | 9 | ||
10 | #define REG_NUM_INVALID 100 | 10 | #define REG_NUM_INVALID 100 |
11 | 11 | ||
12 | #define REG_TYPE_R64 0 | 12 | #define REG_TYPE_R32 0 |
13 | #define REG_TYPE_XMM 1 | 13 | #define REG_TYPE_R64 1 |
14 | #define REG_TYPE_XMM 2 | ||
14 | #define REG_TYPE_INVALID 100 | 15 | #define REG_TYPE_INVALID 100 |
15 | 16 | ||
17 | .macro R32_NUM opd r32 | ||
18 | \opd = REG_NUM_INVALID | ||
19 | .ifc \r32,%eax | ||
20 | \opd = 0 | ||
21 | .endif | ||
22 | .ifc \r32,%ecx | ||
23 | \opd = 1 | ||
24 | .endif | ||
25 | .ifc \r32,%edx | ||
26 | \opd = 2 | ||
27 | .endif | ||
28 | .ifc \r32,%ebx | ||
29 | \opd = 3 | ||
30 | .endif | ||
31 | .ifc \r32,%esp | ||
32 | \opd = 4 | ||
33 | .endif | ||
34 | .ifc \r32,%ebp | ||
35 | \opd = 5 | ||
36 | .endif | ||
37 | .ifc \r32,%esi | ||
38 | \opd = 6 | ||
39 | .endif | ||
40 | .ifc \r32,%edi | ||
41 | \opd = 7 | ||
42 | .endif | ||
43 | #ifdef CONFIG_X86_64 | ||
44 | .ifc \r32,%r8d | ||
45 | \opd = 8 | ||
46 | .endif | ||
47 | .ifc \r32,%r9d | ||
48 | \opd = 9 | ||
49 | .endif | ||
50 | .ifc \r32,%r10d | ||
51 | \opd = 10 | ||
52 | .endif | ||
53 | .ifc \r32,%r11d | ||
54 | \opd = 11 | ||
55 | .endif | ||
56 | .ifc \r32,%r12d | ||
57 | \opd = 12 | ||
58 | .endif | ||
59 | .ifc \r32,%r13d | ||
60 | \opd = 13 | ||
61 | .endif | ||
62 | .ifc \r32,%r14d | ||
63 | \opd = 14 | ||
64 | .endif | ||
65 | .ifc \r32,%r15d | ||
66 | \opd = 15 | ||
67 | .endif | ||
68 | #endif | ||
69 | .endm | ||
70 | |||
16 | .macro R64_NUM opd r64 | 71 | .macro R64_NUM opd r64 |
17 | \opd = REG_NUM_INVALID | 72 | \opd = REG_NUM_INVALID |
73 | #ifdef CONFIG_X86_64 | ||
18 | .ifc \r64,%rax | 74 | .ifc \r64,%rax |
19 | \opd = 0 | 75 | \opd = 0 |
20 | .endif | 76 | .endif |
@@ -63,6 +119,7 @@ | |||
63 | .ifc \r64,%r15 | 119 | .ifc \r64,%r15 |
64 | \opd = 15 | 120 | \opd = 15 |
65 | .endif | 121 | .endif |
122 | #endif | ||
66 | .endm | 123 | .endm |
67 | 124 | ||
68 | .macro XMM_NUM opd xmm | 125 | .macro XMM_NUM opd xmm |
@@ -118,10 +175,13 @@ | |||
118 | .endm | 175 | .endm |
119 | 176 | ||
120 | .macro REG_TYPE type reg | 177 | .macro REG_TYPE type reg |
178 | R32_NUM reg_type_r32 \reg | ||
121 | R64_NUM reg_type_r64 \reg | 179 | R64_NUM reg_type_r64 \reg |
122 | XMM_NUM reg_type_xmm \reg | 180 | XMM_NUM reg_type_xmm \reg |
123 | .if reg_type_r64 <> REG_NUM_INVALID | 181 | .if reg_type_r64 <> REG_NUM_INVALID |
124 | \type = REG_TYPE_R64 | 182 | \type = REG_TYPE_R64 |
183 | .elseif reg_type_r32 <> REG_NUM_INVALID | ||
184 | \type = REG_TYPE_R32 | ||
125 | .elseif reg_type_xmm <> REG_NUM_INVALID | 185 | .elseif reg_type_xmm <> REG_NUM_INVALID |
126 | \type = REG_TYPE_XMM | 186 | \type = REG_TYPE_XMM |
127 | .else | 187 | .else |
@@ -162,6 +222,16 @@ | |||
162 | .byte \imm8 | 222 | .byte \imm8 |
163 | .endm | 223 | .endm |
164 | 224 | ||
225 | .macro PEXTRD imm8 xmm gpr | ||
226 | R32_NUM extrd_opd1 \gpr | ||
227 | XMM_NUM extrd_opd2 \xmm | ||
228 | PFX_OPD_SIZE | ||
229 | PFX_REX extrd_opd1 extrd_opd2 | ||
230 | .byte 0x0f, 0x3a, 0x16 | ||
231 | MODRM 0xc0 extrd_opd1 extrd_opd2 | ||
232 | .byte \imm8 | ||
233 | .endm | ||
234 | |||
165 | .macro AESKEYGENASSIST rcon xmm1 xmm2 | 235 | .macro AESKEYGENASSIST rcon xmm1 xmm2 |
166 | XMM_NUM aeskeygen_opd1 \xmm1 | 236 | XMM_NUM aeskeygen_opd1 \xmm1 |
167 | XMM_NUM aeskeygen_opd2 \xmm2 | 237 | XMM_NUM aeskeygen_opd2 \xmm2 |
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index ba870bb6dd8e..57873beb3292 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h | |||
@@ -41,4 +41,9 @@ extern int vector_used_by_percpu_irq(unsigned int vector); | |||
41 | 41 | ||
42 | extern void init_ISA_irqs(void); | 42 | extern void init_ISA_irqs(void); |
43 | 43 | ||
44 | #ifdef CONFIG_X86_LOCAL_APIC | ||
45 | void arch_trigger_all_cpu_backtrace(void); | ||
46 | #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace | ||
47 | #endif | ||
48 | |||
44 | #endif /* _ASM_X86_IRQ_H */ | 49 | #endif /* _ASM_X86_IRQ_H */ |
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 6825e2efd1b4..6bc3985ee473 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h | |||
@@ -60,11 +60,11 @@ static inline void __exit exit_amd_microcode(void) {} | |||
60 | #ifdef CONFIG_MICROCODE_EARLY | 60 | #ifdef CONFIG_MICROCODE_EARLY |
61 | #define MAX_UCODE_COUNT 128 | 61 | #define MAX_UCODE_COUNT 128 |
62 | extern void __init load_ucode_bsp(void); | 62 | extern void __init load_ucode_bsp(void); |
63 | extern __init void load_ucode_ap(void); | 63 | extern void __cpuinit load_ucode_ap(void); |
64 | extern int __init save_microcode_in_initrd(void); | 64 | extern int __init save_microcode_in_initrd(void); |
65 | #else | 65 | #else |
66 | static inline void __init load_ucode_bsp(void) {} | 66 | static inline void __init load_ucode_bsp(void) {} |
67 | static inline __init void load_ucode_ap(void) {} | 67 | static inline void __cpuinit load_ucode_ap(void) {} |
68 | static inline int __init save_microcode_in_initrd(void) | 68 | static inline int __init save_microcode_in_initrd(void) |
69 | { | 69 | { |
70 | return 0; | 70 | return 0; |
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h index c0fa356e90de..86f9301903c8 100644 --- a/arch/x86/include/asm/nmi.h +++ b/arch/x86/include/asm/nmi.h | |||
@@ -18,9 +18,7 @@ extern int proc_nmi_enabled(struct ctl_table *, int , | |||
18 | void __user *, size_t *, loff_t *); | 18 | void __user *, size_t *, loff_t *); |
19 | extern int unknown_nmi_panic; | 19 | extern int unknown_nmi_panic; |
20 | 20 | ||
21 | void arch_trigger_all_cpu_backtrace(void); | 21 | #endif /* CONFIG_X86_LOCAL_APIC */ |
22 | #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace | ||
23 | #endif | ||
24 | 22 | ||
25 | #define NMI_FLAG_FIRST 1 | 23 | #define NMI_FLAG_FIRST 1 |
26 | 24 | ||
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index 08744242b8d2..c15ddaf90710 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h | |||
@@ -6,7 +6,6 @@ | |||
6 | #define SETUP_E820_EXT 1 | 6 | #define SETUP_E820_EXT 1 |
7 | #define SETUP_DTB 2 | 7 | #define SETUP_DTB 2 |
8 | #define SETUP_PCI 3 | 8 | #define SETUP_PCI 3 |
9 | #define SETUP_EFI_VARS 4 | ||
10 | 9 | ||
11 | /* ram_size flags */ | 10 | /* ram_size flags */ |
12 | #define RAMDISK_IMAGE_START_MASK 0x07FF | 11 | #define RAMDISK_IMAGE_START_MASK 0x07FF |
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c index 31cb9ae992b7..a698d7165c96 100644 --- a/arch/x86/kernel/apic/hw_nmi.c +++ b/arch/x86/kernel/apic/hw_nmi.c | |||
@@ -9,6 +9,7 @@ | |||
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | #include <asm/apic.h> | 11 | #include <asm/apic.h> |
12 | #include <asm/nmi.h> | ||
12 | 13 | ||
13 | #include <linux/cpumask.h> | 14 | #include <linux/cpumask.h> |
14 | #include <linux/kdebug.h> | 15 | #include <linux/kdebug.h> |
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c index 35ffda5d0727..5f90b85ff22e 100644 --- a/arch/x86/kernel/cpu/mtrr/cleanup.c +++ b/arch/x86/kernel/cpu/mtrr/cleanup.c | |||
@@ -714,15 +714,15 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
714 | if (mtrr_tom2) | 714 | if (mtrr_tom2) |
715 | x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base; | 715 | x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base; |
716 | 716 | ||
717 | nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size); | ||
718 | /* | 717 | /* |
719 | * [0, 1M) should always be covered by var mtrr with WB | 718 | * [0, 1M) should always be covered by var mtrr with WB |
720 | * and fixed mtrrs should take effect before var mtrr for it: | 719 | * and fixed mtrrs should take effect before var mtrr for it: |
721 | */ | 720 | */ |
722 | nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0, | 721 | nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0, |
723 | 1ULL<<(20 - PAGE_SHIFT)); | 722 | 1ULL<<(20 - PAGE_SHIFT)); |
724 | /* Sort the ranges: */ | 723 | /* add from var mtrr at last */ |
725 | sort_range(range, nr_range); | 724 | nr_range = x86_get_mtrr_mem_range(range, nr_range, |
725 | x_remove_base, x_remove_size); | ||
726 | 726 | ||
727 | range_sums = sum_ranges(range, nr_range); | 727 | range_sums = sum_ranges(range, nr_range); |
728 | printk(KERN_INFO "total RAM covered: %ldM\n", | 728 | printk(KERN_INFO "total RAM covered: %ldM\n", |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index f60d41ff9a97..a9e22073bd56 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -165,13 +165,13 @@ static struct extra_reg intel_snb_extra_regs[] __read_mostly = { | |||
165 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), | 165 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), |
166 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), | 166 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), |
167 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), | 167 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), |
168 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), | ||
169 | EVENT_EXTRA_END | 168 | EVENT_EXTRA_END |
170 | }; | 169 | }; |
171 | 170 | ||
172 | static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { | 171 | static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { |
173 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), | 172 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), |
174 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), | 173 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), |
174 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), | ||
175 | EVENT_EXTRA_END | 175 | EVENT_EXTRA_END |
176 | }; | 176 | }; |
177 | 177 | ||
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index dab95a85f7f8..55b67614ed94 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c | |||
@@ -34,7 +34,7 @@ | |||
34 | extern pgd_t early_level4_pgt[PTRS_PER_PGD]; | 34 | extern pgd_t early_level4_pgt[PTRS_PER_PGD]; |
35 | extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD]; | 35 | extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD]; |
36 | static unsigned int __initdata next_early_pgt = 2; | 36 | static unsigned int __initdata next_early_pgt = 2; |
37 | pmdval_t __initdata early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); | 37 | pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); |
38 | 38 | ||
39 | /* Wipe all early page tables except for the kernel symbol map */ | 39 | /* Wipe all early page tables except for the kernel symbol map */ |
40 | static void __init reset_early_page_tables(void) | 40 | static void __init reset_early_page_tables(void) |
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 08f7e8039099..321d65ebaffe 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S | |||
@@ -115,8 +115,10 @@ startup_64: | |||
115 | movq %rdi, %rax | 115 | movq %rdi, %rax |
116 | shrq $PUD_SHIFT, %rax | 116 | shrq $PUD_SHIFT, %rax |
117 | andl $(PTRS_PER_PUD-1), %eax | 117 | andl $(PTRS_PER_PUD-1), %eax |
118 | movq %rdx, (4096+0)(%rbx,%rax,8) | 118 | movq %rdx, 4096(%rbx,%rax,8) |
119 | movq %rdx, (4096+8)(%rbx,%rax,8) | 119 | incl %eax |
120 | andl $(PTRS_PER_PUD-1), %eax | ||
121 | movq %rdx, 4096(%rbx,%rax,8) | ||
120 | 122 | ||
121 | addq $8192, %rbx | 123 | addq $8192, %rbx |
122 | movq %rdi, %rax | 124 | movq %rdi, %rax |
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index 245a71db401a..cb339097b9ea 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c | |||
@@ -22,23 +22,19 @@ | |||
22 | /* | 22 | /* |
23 | * Were we in an interrupt that interrupted kernel mode? | 23 | * Were we in an interrupt that interrupted kernel mode? |
24 | * | 24 | * |
25 | * For now, with eagerfpu we will return interrupted kernel FPU | ||
26 | * state as not-idle. TBD: Ideally we can change the return value | ||
27 | * to something like __thread_has_fpu(current). But we need to | ||
28 | * be careful of doing __thread_clear_has_fpu() before saving | ||
29 | * the FPU etc for supporting nested uses etc. For now, take | ||
30 | * the simple route! | ||
31 | * | ||
32 | * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that | 25 | * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that |
33 | * pair does nothing at all: the thread must not have fpu (so | 26 | * pair does nothing at all: the thread must not have fpu (so |
34 | * that we don't try to save the FPU state), and TS must | 27 | * that we don't try to save the FPU state), and TS must |
35 | * be set (so that the clts/stts pair does nothing that is | 28 | * be set (so that the clts/stts pair does nothing that is |
36 | * visible in the interrupted kernel thread). | 29 | * visible in the interrupted kernel thread). |
30 | * | ||
31 | * Except for the eagerfpu case when we return 1 unless we've already | ||
32 | * been eager and saved the state in kernel_fpu_begin(). | ||
37 | */ | 33 | */ |
38 | static inline bool interrupted_kernel_fpu_idle(void) | 34 | static inline bool interrupted_kernel_fpu_idle(void) |
39 | { | 35 | { |
40 | if (use_eager_fpu()) | 36 | if (use_eager_fpu()) |
41 | return 0; | 37 | return __thread_has_fpu(current); |
42 | 38 | ||
43 | return !__thread_has_fpu(current) && | 39 | return !__thread_has_fpu(current) && |
44 | (read_cr0() & X86_CR0_TS); | 40 | (read_cr0() & X86_CR0_TS); |
@@ -78,8 +74,8 @@ void __kernel_fpu_begin(void) | |||
78 | struct task_struct *me = current; | 74 | struct task_struct *me = current; |
79 | 75 | ||
80 | if (__thread_has_fpu(me)) { | 76 | if (__thread_has_fpu(me)) { |
81 | __save_init_fpu(me); | ||
82 | __thread_clear_has_fpu(me); | 77 | __thread_clear_has_fpu(me); |
78 | __save_init_fpu(me); | ||
83 | /* We do 'stts()' in __kernel_fpu_end() */ | 79 | /* We do 'stts()' in __kernel_fpu_end() */ |
84 | } else if (!use_eager_fpu()) { | 80 | } else if (!use_eager_fpu()) { |
85 | this_cpu_write(fpu_owner_task, NULL); | 81 | this_cpu_write(fpu_owner_task, NULL); |
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 9895a9a41380..211bce445522 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c | |||
@@ -365,10 +365,14 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src) | |||
365 | return insn.length; | 365 | return insn.length; |
366 | } | 366 | } |
367 | 367 | ||
368 | static void __kprobes arch_copy_kprobe(struct kprobe *p) | 368 | static int __kprobes arch_copy_kprobe(struct kprobe *p) |
369 | { | 369 | { |
370 | int ret; | ||
371 | |||
370 | /* Copy an instruction with recovering if other optprobe modifies it.*/ | 372 | /* Copy an instruction with recovering if other optprobe modifies it.*/ |
371 | __copy_instruction(p->ainsn.insn, p->addr); | 373 | ret = __copy_instruction(p->ainsn.insn, p->addr); |
374 | if (!ret) | ||
375 | return -EINVAL; | ||
372 | 376 | ||
373 | /* | 377 | /* |
374 | * __copy_instruction can modify the displacement of the instruction, | 378 | * __copy_instruction can modify the displacement of the instruction, |
@@ -384,6 +388,8 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p) | |||
384 | 388 | ||
385 | /* Also, displacement change doesn't affect the first byte */ | 389 | /* Also, displacement change doesn't affect the first byte */ |
386 | p->opcode = p->ainsn.insn[0]; | 390 | p->opcode = p->ainsn.insn[0]; |
391 | |||
392 | return 0; | ||
387 | } | 393 | } |
388 | 394 | ||
389 | int __kprobes arch_prepare_kprobe(struct kprobe *p) | 395 | int __kprobes arch_prepare_kprobe(struct kprobe *p) |
@@ -397,8 +403,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) | |||
397 | p->ainsn.insn = get_insn_slot(); | 403 | p->ainsn.insn = get_insn_slot(); |
398 | if (!p->ainsn.insn) | 404 | if (!p->ainsn.insn) |
399 | return -ENOMEM; | 405 | return -ENOMEM; |
400 | arch_copy_kprobe(p); | 406 | |
401 | return 0; | 407 | return arch_copy_kprobe(p); |
402 | } | 408 | } |
403 | 409 | ||
404 | void __kprobes arch_arm_kprobe(struct kprobe *p) | 410 | void __kprobes arch_arm_kprobe(struct kprobe *p) |
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 0db81ab511cc..1f354f4b602b 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c | |||
@@ -239,6 +239,7 @@ void __init kvmclock_init(void) | |||
239 | if (!mem) | 239 | if (!mem) |
240 | return; | 240 | return; |
241 | hv_clock = __va(mem); | 241 | hv_clock = __va(mem); |
242 | memset(hv_clock, 0, size); | ||
242 | 243 | ||
243 | if (kvm_register_clock("boot clock")) { | 244 | if (kvm_register_clock("boot clock")) { |
244 | hv_clock = NULL; | 245 | hv_clock = NULL; |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 4e7a37ff03ab..81a5f5e8f142 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -277,18 +277,6 @@ void exit_idle(void) | |||
277 | } | 277 | } |
278 | #endif | 278 | #endif |
279 | 279 | ||
280 | void arch_cpu_idle_prepare(void) | ||
281 | { | ||
282 | /* | ||
283 | * If we're the non-boot CPU, nothing set the stack canary up | ||
284 | * for us. CPU0 already has it initialized but no harm in | ||
285 | * doing it again. This is a good place for updating it, as | ||
286 | * we wont ever return from this function (so the invalid | ||
287 | * canaries already on the stack wont ever trigger). | ||
288 | */ | ||
289 | boot_init_stack_canary(); | ||
290 | } | ||
291 | |||
292 | void arch_cpu_idle_enter(void) | 280 | void arch_cpu_idle_enter(void) |
293 | { | 281 | { |
294 | local_touch_nmi(); | 282 | local_touch_nmi(); |
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S index 7a6f3b3be3cf..f2bb9c96720a 100644 --- a/arch/x86/kernel/relocate_kernel_64.S +++ b/arch/x86/kernel/relocate_kernel_64.S | |||
@@ -160,7 +160,7 @@ identity_mapped: | |||
160 | xorq %rbp, %rbp | 160 | xorq %rbp, %rbp |
161 | xorq %r8, %r8 | 161 | xorq %r8, %r8 |
162 | xorq %r9, %r9 | 162 | xorq %r9, %r9 |
163 | xorq %r10, %r9 | 163 | xorq %r10, %r10 |
164 | xorq %r11, %r11 | 164 | xorq %r11, %r11 |
165 | xorq %r12, %r12 | 165 | xorq %r12, %r12 |
166 | xorq %r13, %r13 | 166 | xorq %r13, %r13 |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 9c73b51817e4..bfd348e99369 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -372,15 +372,15 @@ static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | |||
372 | 372 | ||
373 | void __cpuinit set_cpu_sibling_map(int cpu) | 373 | void __cpuinit set_cpu_sibling_map(int cpu) |
374 | { | 374 | { |
375 | bool has_mc = boot_cpu_data.x86_max_cores > 1; | ||
376 | bool has_smt = smp_num_siblings > 1; | 375 | bool has_smt = smp_num_siblings > 1; |
376 | bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1; | ||
377 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 377 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
378 | struct cpuinfo_x86 *o; | 378 | struct cpuinfo_x86 *o; |
379 | int i; | 379 | int i; |
380 | 380 | ||
381 | cpumask_set_cpu(cpu, cpu_sibling_setup_mask); | 381 | cpumask_set_cpu(cpu, cpu_sibling_setup_mask); |
382 | 382 | ||
383 | if (!has_smt && !has_mc) { | 383 | if (!has_mp) { |
384 | cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); | 384 | cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); |
385 | cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu)); | 385 | cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu)); |
386 | cpumask_set_cpu(cpu, cpu_core_mask(cpu)); | 386 | cpumask_set_cpu(cpu, cpu_core_mask(cpu)); |
@@ -394,7 +394,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
394 | if ((i == cpu) || (has_smt && match_smt(c, o))) | 394 | if ((i == cpu) || (has_smt && match_smt(c, o))) |
395 | link_mask(sibling, cpu, i); | 395 | link_mask(sibling, cpu, i); |
396 | 396 | ||
397 | if ((i == cpu) || (has_mc && match_llc(c, o))) | 397 | if ((i == cpu) || (has_mp && match_llc(c, o))) |
398 | link_mask(llc_shared, cpu, i); | 398 | link_mask(llc_shared, cpu, i); |
399 | 399 | ||
400 | } | 400 | } |
@@ -406,7 +406,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
406 | for_each_cpu(i, cpu_sibling_setup_mask) { | 406 | for_each_cpu(i, cpu_sibling_setup_mask) { |
407 | o = &cpu_data(i); | 407 | o = &cpu_data(i); |
408 | 408 | ||
409 | if ((i == cpu) || (has_mc && match_mc(c, o))) { | 409 | if ((i == cpu) || (has_mp && match_mc(c, o))) { |
410 | link_mask(core, cpu, i); | 410 | link_mask(core, cpu, i); |
411 | 411 | ||
412 | /* | 412 | /* |
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 8db0010ed150..5953dcea752d 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c | |||
@@ -1240,9 +1240,12 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt, | |||
1240 | ctxt->modrm_seg = VCPU_SREG_DS; | 1240 | ctxt->modrm_seg = VCPU_SREG_DS; |
1241 | 1241 | ||
1242 | if (ctxt->modrm_mod == 3) { | 1242 | if (ctxt->modrm_mod == 3) { |
1243 | int highbyte_regs = ctxt->rex_prefix == 0; | ||
1244 | |||
1243 | op->type = OP_REG; | 1245 | op->type = OP_REG; |
1244 | op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; | 1246 | op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; |
1245 | op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, ctxt->d & ByteOp); | 1247 | op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, |
1248 | highbyte_regs && (ctxt->d & ByteOp)); | ||
1246 | if (ctxt->d & Sse) { | 1249 | if (ctxt->d & Sse) { |
1247 | op->type = OP_XMM; | 1250 | op->type = OP_XMM; |
1248 | op->bytes = 16; | 1251 | op->bytes = 16; |
@@ -3997,7 +4000,8 @@ static const struct opcode twobyte_table[256] = { | |||
3997 | DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, | 4000 | DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, |
3998 | N, D(ImplicitOps | ModRM), N, N, | 4001 | N, D(ImplicitOps | ModRM), N, N, |
3999 | /* 0x10 - 0x1F */ | 4002 | /* 0x10 - 0x1F */ |
4000 | N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N, | 4003 | N, N, N, N, N, N, N, N, |
4004 | D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM), | ||
4001 | /* 0x20 - 0x2F */ | 4005 | /* 0x20 - 0x2F */ |
4002 | DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read), | 4006 | DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read), |
4003 | DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read), | 4007 | DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read), |
@@ -4836,6 +4840,7 @@ twobyte_insn: | |||
4836 | case 0x08: /* invd */ | 4840 | case 0x08: /* invd */ |
4837 | case 0x0d: /* GrpP (prefetch) */ | 4841 | case 0x0d: /* GrpP (prefetch) */ |
4838 | case 0x18: /* Grp16 (prefetch/nop) */ | 4842 | case 0x18: /* Grp16 (prefetch/nop) */ |
4843 | case 0x1f: /* nop */ | ||
4839 | break; | 4844 | break; |
4840 | case 0x20: /* mov cr, reg */ | 4845 | case 0x20: /* mov cr, reg */ |
4841 | ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg); | 4846 | ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg); |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index e1adbb4aca75..0eee2c8b64d1 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -1861,11 +1861,14 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) | |||
1861 | { | 1861 | { |
1862 | struct kvm_lapic *apic = vcpu->arch.apic; | 1862 | struct kvm_lapic *apic = vcpu->arch.apic; |
1863 | unsigned int sipi_vector; | 1863 | unsigned int sipi_vector; |
1864 | unsigned long pe; | ||
1864 | 1865 | ||
1865 | if (!kvm_vcpu_has_lapic(vcpu)) | 1866 | if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events) |
1866 | return; | 1867 | return; |
1867 | 1868 | ||
1868 | if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) { | 1869 | pe = xchg(&apic->pending_events, 0); |
1870 | |||
1871 | if (test_bit(KVM_APIC_INIT, &pe)) { | ||
1869 | kvm_lapic_reset(vcpu); | 1872 | kvm_lapic_reset(vcpu); |
1870 | kvm_vcpu_reset(vcpu); | 1873 | kvm_vcpu_reset(vcpu); |
1871 | if (kvm_vcpu_is_bsp(apic->vcpu)) | 1874 | if (kvm_vcpu_is_bsp(apic->vcpu)) |
@@ -1873,7 +1876,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) | |||
1873 | else | 1876 | else |
1874 | vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; | 1877 | vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; |
1875 | } | 1878 | } |
1876 | if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events) && | 1879 | if (test_bit(KVM_APIC_SIPI, &pe) && |
1877 | vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { | 1880 | vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { |
1878 | /* evaluate pending_events before reading the vector */ | 1881 | /* evaluate pending_events before reading the vector */ |
1879 | smp_rmb(); | 1882 | smp_rmb(); |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 094b5d96ab14..e8ba99c34180 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -582,8 +582,6 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) | |||
582 | if (index != XCR_XFEATURE_ENABLED_MASK) | 582 | if (index != XCR_XFEATURE_ENABLED_MASK) |
583 | return 1; | 583 | return 1; |
584 | xcr0 = xcr; | 584 | xcr0 = xcr; |
585 | if (kvm_x86_ops->get_cpl(vcpu) != 0) | ||
586 | return 1; | ||
587 | if (!(xcr0 & XSTATE_FP)) | 585 | if (!(xcr0 & XSTATE_FP)) |
588 | return 1; | 586 | return 1; |
589 | if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) | 587 | if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) |
@@ -597,7 +595,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) | |||
597 | 595 | ||
598 | int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) | 596 | int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) |
599 | { | 597 | { |
600 | if (__kvm_set_xcr(vcpu, index, xcr)) { | 598 | if (kvm_x86_ops->get_cpl(vcpu) != 0 || |
599 | __kvm_set_xcr(vcpu, index, xcr)) { | ||
601 | kvm_inject_gp(vcpu, 0); | 600 | kvm_inject_gp(vcpu, 0); |
602 | return 1; | 601 | return 1; |
603 | } | 602 | } |
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index eaac1743def7..1f34e9219775 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -277,6 +277,9 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range, | |||
277 | end_pfn = limit_pfn; | 277 | end_pfn = limit_pfn; |
278 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); | 278 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); |
279 | 279 | ||
280 | if (!after_bootmem) | ||
281 | adjust_range_page_size_mask(mr, nr_range); | ||
282 | |||
280 | /* try to merge same page size and continuous */ | 283 | /* try to merge same page size and continuous */ |
281 | for (i = 0; nr_range > 1 && i < nr_range - 1; i++) { | 284 | for (i = 0; nr_range > 1 && i < nr_range - 1; i++) { |
282 | unsigned long old_start; | 285 | unsigned long old_start; |
@@ -291,9 +294,6 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range, | |||
291 | nr_range--; | 294 | nr_range--; |
292 | } | 295 | } |
293 | 296 | ||
294 | if (!after_bootmem) | ||
295 | adjust_range_page_size_mask(mr, nr_range); | ||
296 | |||
297 | for (i = 0; i < nr_range; i++) | 297 | for (i = 0; i < nr_range; i++) |
298 | printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n", | 298 | printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n", |
299 | mr[i].start, mr[i].end - 1, | 299 | mr[i].start, mr[i].end - 1, |
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 305c68b8d538..981c2dbd72cc 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c | |||
@@ -628,7 +628,9 @@ int pcibios_add_device(struct pci_dev *dev) | |||
628 | 628 | ||
629 | pa_data = boot_params.hdr.setup_data; | 629 | pa_data = boot_params.hdr.setup_data; |
630 | while (pa_data) { | 630 | while (pa_data) { |
631 | data = phys_to_virt(pa_data); | 631 | data = ioremap(pa_data, sizeof(*rom)); |
632 | if (!data) | ||
633 | return -ENOMEM; | ||
632 | 634 | ||
633 | if (data->type == SETUP_PCI) { | 635 | if (data->type == SETUP_PCI) { |
634 | rom = (struct pci_setup_rom *)data; | 636 | rom = (struct pci_setup_rom *)data; |
@@ -645,6 +647,7 @@ int pcibios_add_device(struct pci_dev *dev) | |||
645 | } | 647 | } |
646 | } | 648 | } |
647 | pa_data = data->next; | 649 | pa_data = data->next; |
650 | iounmap(data); | ||
648 | } | 651 | } |
649 | return 0; | 652 | return 0; |
650 | } | 653 | } |
diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c index 0e0fabf17342..6eb18c42a28a 100644 --- a/arch/x86/pci/mrst.c +++ b/arch/x86/pci/mrst.c | |||
@@ -141,11 +141,6 @@ static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn, | |||
141 | */ | 141 | */ |
142 | static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg) | 142 | static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg) |
143 | { | 143 | { |
144 | if (bus == 0 && (devfn == PCI_DEVFN(2, 0) | ||
145 | || devfn == PCI_DEVFN(0, 0) | ||
146 | || devfn == PCI_DEVFN(3, 0))) | ||
147 | return 1; | ||
148 | |||
149 | /* This is a workaround for A0 LNC bug where PCI status register does | 144 | /* This is a workaround for A0 LNC bug where PCI status register does |
150 | * not have new CAP bit set. can not be written by SW either. | 145 | * not have new CAP bit set. can not be written by SW either. |
151 | * | 146 | * |
@@ -155,7 +150,10 @@ static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg) | |||
155 | */ | 150 | */ |
156 | if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE) | 151 | if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE) |
157 | return 0; | 152 | return 0; |
158 | 153 | if (bus == 0 && (devfn == PCI_DEVFN(2, 0) | |
154 | || devfn == PCI_DEVFN(0, 0) | ||
155 | || devfn == PCI_DEVFN(3, 0))) | ||
156 | return 1; | ||
159 | return 0; /* langwell on others */ | 157 | return 0; /* langwell on others */ |
160 | } | 158 | } |
161 | 159 | ||
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index dd3b82530145..90f6ed127096 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include <linux/io.h> | 42 | #include <linux/io.h> |
43 | #include <linux/reboot.h> | 43 | #include <linux/reboot.h> |
44 | #include <linux/bcd.h> | 44 | #include <linux/bcd.h> |
45 | #include <linux/ucs2_string.h> | ||
46 | 45 | ||
47 | #include <asm/setup.h> | 46 | #include <asm/setup.h> |
48 | #include <asm/efi.h> | 47 | #include <asm/efi.h> |
@@ -54,12 +53,12 @@ | |||
54 | 53 | ||
55 | #define EFI_DEBUG 1 | 54 | #define EFI_DEBUG 1 |
56 | 55 | ||
57 | /* | 56 | #define EFI_MIN_RESERVE 5120 |
58 | * There's some additional metadata associated with each | 57 | |
59 | * variable. Intel's reference implementation is 60 bytes - bump that | 58 | #define EFI_DUMMY_GUID \ |
60 | * to account for potential alignment constraints | 59 | EFI_GUID(0x4424ac57, 0xbe4b, 0x47dd, 0x9e, 0x97, 0xed, 0x50, 0xf0, 0x9f, 0x92, 0xa9) |
61 | */ | 60 | |
62 | #define VAR_METADATA_SIZE 64 | 61 | static efi_char16_t efi_dummy_name[6] = { 'D', 'U', 'M', 'M', 'Y', 0 }; |
63 | 62 | ||
64 | struct efi __read_mostly efi = { | 63 | struct efi __read_mostly efi = { |
65 | .mps = EFI_INVALID_TABLE_ADDR, | 64 | .mps = EFI_INVALID_TABLE_ADDR, |
@@ -79,13 +78,6 @@ struct efi_memory_map memmap; | |||
79 | static struct efi efi_phys __initdata; | 78 | static struct efi efi_phys __initdata; |
80 | static efi_system_table_t efi_systab __initdata; | 79 | static efi_system_table_t efi_systab __initdata; |
81 | 80 | ||
82 | static u64 efi_var_store_size; | ||
83 | static u64 efi_var_remaining_size; | ||
84 | static u64 efi_var_max_var_size; | ||
85 | static u64 boot_used_size; | ||
86 | static u64 boot_var_size; | ||
87 | static u64 active_size; | ||
88 | |||
89 | unsigned long x86_efi_facility; | 81 | unsigned long x86_efi_facility; |
90 | 82 | ||
91 | /* | 83 | /* |
@@ -188,53 +180,8 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size, | |||
188 | efi_char16_t *name, | 180 | efi_char16_t *name, |
189 | efi_guid_t *vendor) | 181 | efi_guid_t *vendor) |
190 | { | 182 | { |
191 | efi_status_t status; | 183 | return efi_call_virt3(get_next_variable, |
192 | static bool finished = false; | 184 | name_size, name, vendor); |
193 | static u64 var_size; | ||
194 | |||
195 | status = efi_call_virt3(get_next_variable, | ||
196 | name_size, name, vendor); | ||
197 | |||
198 | if (status == EFI_NOT_FOUND) { | ||
199 | finished = true; | ||
200 | if (var_size < boot_used_size) { | ||
201 | boot_var_size = boot_used_size - var_size; | ||
202 | active_size += boot_var_size; | ||
203 | } else { | ||
204 | printk(KERN_WARNING FW_BUG "efi: Inconsistent initial sizes\n"); | ||
205 | } | ||
206 | } | ||
207 | |||
208 | if (boot_used_size && !finished) { | ||
209 | unsigned long size; | ||
210 | u32 attr; | ||
211 | efi_status_t s; | ||
212 | void *tmp; | ||
213 | |||
214 | s = virt_efi_get_variable(name, vendor, &attr, &size, NULL); | ||
215 | |||
216 | if (s != EFI_BUFFER_TOO_SMALL || !size) | ||
217 | return status; | ||
218 | |||
219 | tmp = kmalloc(size, GFP_ATOMIC); | ||
220 | |||
221 | if (!tmp) | ||
222 | return status; | ||
223 | |||
224 | s = virt_efi_get_variable(name, vendor, &attr, &size, tmp); | ||
225 | |||
226 | if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) { | ||
227 | var_size += size; | ||
228 | var_size += ucs2_strsize(name, 1024); | ||
229 | active_size += size; | ||
230 | active_size += VAR_METADATA_SIZE; | ||
231 | active_size += ucs2_strsize(name, 1024); | ||
232 | } | ||
233 | |||
234 | kfree(tmp); | ||
235 | } | ||
236 | |||
237 | return status; | ||
238 | } | 185 | } |
239 | 186 | ||
240 | static efi_status_t virt_efi_set_variable(efi_char16_t *name, | 187 | static efi_status_t virt_efi_set_variable(efi_char16_t *name, |
@@ -243,34 +190,9 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name, | |||
243 | unsigned long data_size, | 190 | unsigned long data_size, |
244 | void *data) | 191 | void *data) |
245 | { | 192 | { |
246 | efi_status_t status; | 193 | return efi_call_virt5(set_variable, |
247 | u32 orig_attr = 0; | 194 | name, vendor, attr, |
248 | unsigned long orig_size = 0; | 195 | data_size, data); |
249 | |||
250 | status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size, | ||
251 | NULL); | ||
252 | |||
253 | if (status != EFI_BUFFER_TOO_SMALL) | ||
254 | orig_size = 0; | ||
255 | |||
256 | status = efi_call_virt5(set_variable, | ||
257 | name, vendor, attr, | ||
258 | data_size, data); | ||
259 | |||
260 | if (status == EFI_SUCCESS) { | ||
261 | if (orig_size) { | ||
262 | active_size -= orig_size; | ||
263 | active_size -= ucs2_strsize(name, 1024); | ||
264 | active_size -= VAR_METADATA_SIZE; | ||
265 | } | ||
266 | if (data_size) { | ||
267 | active_size += data_size; | ||
268 | active_size += ucs2_strsize(name, 1024); | ||
269 | active_size += VAR_METADATA_SIZE; | ||
270 | } | ||
271 | } | ||
272 | |||
273 | return status; | ||
274 | } | 196 | } |
275 | 197 | ||
276 | static efi_status_t virt_efi_query_variable_info(u32 attr, | 198 | static efi_status_t virt_efi_query_variable_info(u32 attr, |
@@ -788,9 +710,6 @@ void __init efi_init(void) | |||
788 | char vendor[100] = "unknown"; | 710 | char vendor[100] = "unknown"; |
789 | int i = 0; | 711 | int i = 0; |
790 | void *tmp; | 712 | void *tmp; |
791 | struct setup_data *data; | ||
792 | struct efi_var_bootdata *efi_var_data; | ||
793 | u64 pa_data; | ||
794 | 713 | ||
795 | #ifdef CONFIG_X86_32 | 714 | #ifdef CONFIG_X86_32 |
796 | if (boot_params.efi_info.efi_systab_hi || | 715 | if (boot_params.efi_info.efi_systab_hi || |
@@ -808,22 +727,6 @@ void __init efi_init(void) | |||
808 | if (efi_systab_init(efi_phys.systab)) | 727 | if (efi_systab_init(efi_phys.systab)) |
809 | return; | 728 | return; |
810 | 729 | ||
811 | pa_data = boot_params.hdr.setup_data; | ||
812 | while (pa_data) { | ||
813 | data = early_ioremap(pa_data, sizeof(*efi_var_data)); | ||
814 | if (data->type == SETUP_EFI_VARS) { | ||
815 | efi_var_data = (struct efi_var_bootdata *)data; | ||
816 | |||
817 | efi_var_store_size = efi_var_data->store_size; | ||
818 | efi_var_remaining_size = efi_var_data->remaining_size; | ||
819 | efi_var_max_var_size = efi_var_data->max_var_size; | ||
820 | } | ||
821 | pa_data = data->next; | ||
822 | early_iounmap(data, sizeof(*efi_var_data)); | ||
823 | } | ||
824 | |||
825 | boot_used_size = efi_var_store_size - efi_var_remaining_size; | ||
826 | |||
827 | set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility); | 730 | set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility); |
828 | 731 | ||
829 | /* | 732 | /* |
@@ -1087,6 +990,13 @@ void __init efi_enter_virtual_mode(void) | |||
1087 | runtime_code_page_mkexec(); | 990 | runtime_code_page_mkexec(); |
1088 | 991 | ||
1089 | kfree(new_memmap); | 992 | kfree(new_memmap); |
993 | |||
994 | /* clean DUMMY object */ | ||
995 | efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID, | ||
996 | EFI_VARIABLE_NON_VOLATILE | | ||
997 | EFI_VARIABLE_BOOTSERVICE_ACCESS | | ||
998 | EFI_VARIABLE_RUNTIME_ACCESS, | ||
999 | 0, NULL); | ||
1090 | } | 1000 | } |
1091 | 1001 | ||
1092 | /* | 1002 | /* |
@@ -1138,33 +1048,70 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) | |||
1138 | efi_status_t status; | 1048 | efi_status_t status; |
1139 | u64 storage_size, remaining_size, max_size; | 1049 | u64 storage_size, remaining_size, max_size; |
1140 | 1050 | ||
1051 | if (!(attributes & EFI_VARIABLE_NON_VOLATILE)) | ||
1052 | return 0; | ||
1053 | |||
1141 | status = efi.query_variable_info(attributes, &storage_size, | 1054 | status = efi.query_variable_info(attributes, &storage_size, |
1142 | &remaining_size, &max_size); | 1055 | &remaining_size, &max_size); |
1143 | if (status != EFI_SUCCESS) | 1056 | if (status != EFI_SUCCESS) |
1144 | return status; | 1057 | return status; |
1145 | 1058 | ||
1146 | if (!max_size && remaining_size > size) | ||
1147 | printk_once(KERN_ERR FW_BUG "Broken EFI implementation" | ||
1148 | " is returning MaxVariableSize=0\n"); | ||
1149 | /* | 1059 | /* |
1150 | * Some firmware implementations refuse to boot if there's insufficient | 1060 | * Some firmware implementations refuse to boot if there's insufficient |
1151 | * space in the variable store. We account for that by refusing the | 1061 | * space in the variable store. We account for that by refusing the |
1152 | * write if permitting it would reduce the available space to under | 1062 | * write if permitting it would reduce the available space to under |
1153 | * 50%. However, some firmware won't reclaim variable space until | 1063 | * 5KB. This figure was provided by Samsung, so should be safe. |
1154 | * after the used (not merely the actively used) space drops below | ||
1155 | * a threshold. We can approximate that case with the value calculated | ||
1156 | * above. If both the firmware and our calculations indicate that the | ||
1157 | * available space would drop below 50%, refuse the write. | ||
1158 | */ | 1064 | */ |
1065 | if ((remaining_size - size < EFI_MIN_RESERVE) && | ||
1066 | !efi_no_storage_paranoia) { | ||
1067 | |||
1068 | /* | ||
1069 | * Triggering garbage collection may require that the firmware | ||
1070 | * generate a real EFI_OUT_OF_RESOURCES error. We can force | ||
1071 | * that by attempting to use more space than is available. | ||
1072 | */ | ||
1073 | unsigned long dummy_size = remaining_size + 1024; | ||
1074 | void *dummy = kzalloc(dummy_size, GFP_ATOMIC); | ||
1075 | |||
1076 | if (!dummy) | ||
1077 | return EFI_OUT_OF_RESOURCES; | ||
1078 | |||
1079 | status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID, | ||
1080 | EFI_VARIABLE_NON_VOLATILE | | ||
1081 | EFI_VARIABLE_BOOTSERVICE_ACCESS | | ||
1082 | EFI_VARIABLE_RUNTIME_ACCESS, | ||
1083 | dummy_size, dummy); | ||
1084 | |||
1085 | if (status == EFI_SUCCESS) { | ||
1086 | /* | ||
1087 | * This should have failed, so if it didn't make sure | ||
1088 | * that we delete it... | ||
1089 | */ | ||
1090 | efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID, | ||
1091 | EFI_VARIABLE_NON_VOLATILE | | ||
1092 | EFI_VARIABLE_BOOTSERVICE_ACCESS | | ||
1093 | EFI_VARIABLE_RUNTIME_ACCESS, | ||
1094 | 0, dummy); | ||
1095 | } | ||
1096 | |||
1097 | kfree(dummy); | ||
1159 | 1098 | ||
1160 | if (!storage_size || size > remaining_size || | 1099 | /* |
1161 | (max_size && size > max_size)) | 1100 | * The runtime code may now have triggered a garbage collection |
1162 | return EFI_OUT_OF_RESOURCES; | 1101 | * run, so check the variable info again |
1102 | */ | ||
1103 | status = efi.query_variable_info(attributes, &storage_size, | ||
1104 | &remaining_size, &max_size); | ||
1163 | 1105 | ||
1164 | if (!efi_no_storage_paranoia && | 1106 | if (status != EFI_SUCCESS) |
1165 | ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) && | 1107 | return status; |
1166 | (remaining_size - size < storage_size / 2))) | 1108 | |
1167 | return EFI_OUT_OF_RESOURCES; | 1109 | /* |
1110 | * There still isn't enough room, so return an error | ||
1111 | */ | ||
1112 | if (remaining_size - size < EFI_MIN_RESERVE) | ||
1113 | return EFI_OUT_OF_RESOURCES; | ||
1114 | } | ||
1168 | 1115 | ||
1169 | return EFI_SUCCESS; | 1116 | return EFI_SUCCESS; |
1170 | } | 1117 | } |
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index 590be1090892..f7bab68a4b83 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c | |||
@@ -42,9 +42,6 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = { | |||
42 | "^(xen_irq_disable_direct_reloc$|" | 42 | "^(xen_irq_disable_direct_reloc$|" |
43 | "xen_save_fl_direct_reloc$|" | 43 | "xen_save_fl_direct_reloc$|" |
44 | "VDSO|" | 44 | "VDSO|" |
45 | #if ELF_BITS == 64 | ||
46 | "__vvar_page|" | ||
47 | #endif | ||
48 | "__crc_)", | 45 | "__crc_)", |
49 | 46 | ||
50 | /* | 47 | /* |
@@ -72,6 +69,7 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = { | |||
72 | "__per_cpu_load|" | 69 | "__per_cpu_load|" |
73 | "init_per_cpu__.*|" | 70 | "init_per_cpu__.*|" |
74 | "__end_rodata_hpage_align|" | 71 | "__end_rodata_hpage_align|" |
72 | "__vvar_page|" | ||
75 | #endif | 73 | #endif |
76 | "_end)$" | 74 | "_end)$" |
77 | }; | 75 | }; |
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 8ff37995d54e..d99cae8147d1 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
19 | #include <linux/irq_work.h> | 19 | #include <linux/irq_work.h> |
20 | #include <linux/tick.h> | ||
20 | 21 | ||
21 | #include <asm/paravirt.h> | 22 | #include <asm/paravirt.h> |
22 | #include <asm/desc.h> | 23 | #include <asm/desc.h> |
@@ -447,6 +448,13 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */ | |||
447 | play_dead_common(); | 448 | play_dead_common(); |
448 | HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); | 449 | HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); |
449 | cpu_bringup(); | 450 | cpu_bringup(); |
451 | /* | ||
452 | * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down) | ||
453 | * clears certain data that the cpu_idle loop (which called us | ||
454 | * and that we return from) expects. The only way to get that | ||
455 | * data back is to call: | ||
456 | */ | ||
457 | tick_nohz_idle_enter(); | ||
450 | } | 458 | } |
451 | 459 | ||
452 | #else /* !CONFIG_HOTPLUG_CPU */ | 460 | #else /* !CONFIG_HOTPLUG_CPU */ |
@@ -576,24 +584,22 @@ void xen_send_IPI_mask_allbutself(const struct cpumask *mask, | |||
576 | { | 584 | { |
577 | unsigned cpu; | 585 | unsigned cpu; |
578 | unsigned int this_cpu = smp_processor_id(); | 586 | unsigned int this_cpu = smp_processor_id(); |
587 | int xen_vector = xen_map_vector(vector); | ||
579 | 588 | ||
580 | if (!(num_online_cpus() > 1)) | 589 | if (!(num_online_cpus() > 1) || (xen_vector < 0)) |
581 | return; | 590 | return; |
582 | 591 | ||
583 | for_each_cpu_and(cpu, mask, cpu_online_mask) { | 592 | for_each_cpu_and(cpu, mask, cpu_online_mask) { |
584 | if (this_cpu == cpu) | 593 | if (this_cpu == cpu) |
585 | continue; | 594 | continue; |
586 | 595 | ||
587 | xen_smp_send_call_function_single_ipi(cpu); | 596 | xen_send_IPI_one(cpu, xen_vector); |
588 | } | 597 | } |
589 | } | 598 | } |
590 | 599 | ||
591 | void xen_send_IPI_allbutself(int vector) | 600 | void xen_send_IPI_allbutself(int vector) |
592 | { | 601 | { |
593 | int xen_vector = xen_map_vector(vector); | 602 | xen_send_IPI_mask_allbutself(cpu_online_mask, vector); |
594 | |||
595 | if (xen_vector >= 0) | ||
596 | xen_send_IPI_mask_allbutself(cpu_online_mask, xen_vector); | ||
597 | } | 603 | } |
598 | 604 | ||
599 | static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) | 605 | static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) |
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h index 8981a76d081a..c7c2d89efd76 100644 --- a/arch/x86/xen/smp.h +++ b/arch/x86/xen/smp.h | |||
@@ -5,7 +5,6 @@ extern void xen_send_IPI_mask(const struct cpumask *mask, | |||
5 | extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask, | 5 | extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask, |
6 | int vector); | 6 | int vector); |
7 | extern void xen_send_IPI_allbutself(int vector); | 7 | extern void xen_send_IPI_allbutself(int vector); |
8 | extern void physflat_send_IPI_allbutself(int vector); | ||
9 | extern void xen_send_IPI_all(int vector); | 8 | extern void xen_send_IPI_all(int vector); |
10 | extern void xen_send_IPI_self(int vector); | 9 | extern void xen_send_IPI_self(int vector); |
11 | 10 | ||