Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                          1
-rw-r--r--  arch/x86/crypto/aesni-intel_asm.S        48
-rw-r--r--  arch/x86/include/asm/irq.h                5
-rw-r--r--  arch/x86/include/asm/microcode.h          4
-rw-r--r--  arch/x86/include/asm/nmi.h                4
-rw-r--r--  arch/x86/kernel/apic/hw_nmi.c             1
-rw-r--r--  arch/x86/kernel/cpu/mtrr/cleanup.c        8
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c    2
-rw-r--r--  arch/x86/kernel/kprobes/core.c           14
-rw-r--r--  arch/x86/kernel/kvmclock.c                1
-rw-r--r--  arch/x86/kernel/process.c                12
-rw-r--r--  arch/x86/kernel/smpboot.c                 8
-rw-r--r--  arch/x86/kvm/x86.c                        5
-rw-r--r--  arch/x86/platform/efi/efi.c               7
14 files changed, 70 insertions, 50 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 685692c94f05..fe120da25625 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2265,6 +2265,7 @@ source "fs/Kconfig.binfmt"
 config IA32_EMULATION
 	bool "IA32 Emulation"
 	depends on X86_64
+	select BINFMT_ELF
 	select COMPAT_BINFMT_ELF
 	select HAVE_UID16
 	---help---
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 62fe22cd4cba..477e9d75149b 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -2681,56 +2681,68 @@ ENTRY(aesni_xts_crypt8)
 	addq %rcx, KEYP

 	movdqa IV, STATE1
-	pxor 0x00(INP), STATE1
+	movdqu 0x00(INP), INC
+	pxor INC, STATE1
 	movdqu IV, 0x00(OUTP)

 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE2
-	pxor 0x10(INP), STATE2
+	movdqu 0x10(INP), INC
+	pxor INC, STATE2
 	movdqu IV, 0x10(OUTP)

 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE3
-	pxor 0x20(INP), STATE3
+	movdqu 0x20(INP), INC
+	pxor INC, STATE3
 	movdqu IV, 0x20(OUTP)

 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE4
-	pxor 0x30(INP), STATE4
+	movdqu 0x30(INP), INC
+	pxor INC, STATE4
 	movdqu IV, 0x30(OUTP)

 	call *%r11

-	pxor 0x00(OUTP), STATE1
+	movdqu 0x00(OUTP), INC
+	pxor INC, STATE1
 	movdqu STATE1, 0x00(OUTP)

 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE1
-	pxor 0x40(INP), STATE1
+	movdqu 0x40(INP), INC
+	pxor INC, STATE1
 	movdqu IV, 0x40(OUTP)

-	pxor 0x10(OUTP), STATE2
+	movdqu 0x10(OUTP), INC
+	pxor INC, STATE2
 	movdqu STATE2, 0x10(OUTP)

 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE2
-	pxor 0x50(INP), STATE2
+	movdqu 0x50(INP), INC
+	pxor INC, STATE2
 	movdqu IV, 0x50(OUTP)

-	pxor 0x20(OUTP), STATE3
+	movdqu 0x20(OUTP), INC
+	pxor INC, STATE3
 	movdqu STATE3, 0x20(OUTP)

 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE3
-	pxor 0x60(INP), STATE3
+	movdqu 0x60(INP), INC
+	pxor INC, STATE3
 	movdqu IV, 0x60(OUTP)

-	pxor 0x30(OUTP), STATE4
+	movdqu 0x30(OUTP), INC
+	pxor INC, STATE4
 	movdqu STATE4, 0x30(OUTP)

 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE4
-	pxor 0x70(INP), STATE4
+	movdqu 0x70(INP), INC
+	pxor INC, STATE4
 	movdqu IV, 0x70(OUTP)

 	_aesni_gf128mul_x_ble()
@@ -2738,16 +2750,20 @@ ENTRY(aesni_xts_crypt8)

 	call *%r11

-	pxor 0x40(OUTP), STATE1
+	movdqu 0x40(OUTP), INC
+	pxor INC, STATE1
 	movdqu STATE1, 0x40(OUTP)

-	pxor 0x50(OUTP), STATE2
+	movdqu 0x50(OUTP), INC
+	pxor INC, STATE2
 	movdqu STATE2, 0x50(OUTP)

-	pxor 0x60(OUTP), STATE3
+	movdqu 0x60(OUTP), INC
+	pxor INC, STATE3
 	movdqu STATE3, 0x60(OUTP)

-	pxor 0x70(OUTP), STATE4
+	movdqu 0x70(OUTP), INC
+	pxor INC, STATE4
 	movdqu STATE4, 0x70(OUTP)

 	ret
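Note on the hunks above: the XTS path previously used pxor with a memory operand (e.g. pxor 0x00(INP), STATE1), and legacy SSE instructions with a memory operand fault unless that operand is 16-byte aligned. Splitting each access into a movdqu (unaligned-tolerant load) into the scratch register INC followed by a register-to-register pxor removes the alignment requirement on the input/output buffers. A minimal userspace C sketch of the same distinction, using SSE intrinsics; the buffer layout and xor_block name are illustrative, not taken from this patch:

#include <emmintrin.h>
#include <stdio.h>
#include <string.h>

static void xor_block(unsigned char *dst, const unsigned char *src)
{
	/* _mm_loadu_si128 compiles to movdqu: safe for unaligned pointers.
	 * An aligned load (_mm_load_si128 / pxor-with-memory-operand)
	 * could fault on the same addresses. */
	__m128i a = _mm_loadu_si128((const __m128i *)src);
	__m128i b = _mm_loadu_si128((const __m128i *)dst);
	_mm_storeu_si128((__m128i *)dst, _mm_xor_si128(a, b));
}

int main(void)
{
	unsigned char buf[33];
	memset(buf, 0x5a, sizeof(buf));
	/* buf + 1 is deliberately misaligned; movdqu handles it. */
	xor_block(buf + 1, buf + 17);
	printf("%02x\n", buf[1]);	/* 0x5a ^ 0x5a == 00 */
	return 0;
}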
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index ba870bb6dd8e..57873beb3292 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -41,4 +41,9 @@ extern int vector_used_by_percpu_irq(unsigned int vector);

 extern void init_ISA_irqs(void);

+#ifdef CONFIG_X86_LOCAL_APIC
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+#endif
+
 #endif /* _ASM_X86_IRQ_H */
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 6825e2efd1b4..6bc3985ee473 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -60,11 +60,11 @@ static inline void __exit exit_amd_microcode(void) {}
 #ifdef CONFIG_MICROCODE_EARLY
 #define MAX_UCODE_COUNT 128
 extern void __init load_ucode_bsp(void);
-extern __init void load_ucode_ap(void);
+extern void __cpuinit load_ucode_ap(void);
 extern int __init save_microcode_in_initrd(void);
 #else
 static inline void __init load_ucode_bsp(void) {}
-static inline __init void load_ucode_ap(void) {}
+static inline void __cpuinit load_ucode_ap(void) {}
 static inline int __init save_microcode_in_initrd(void)
 {
 	return 0;
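The annotation change above matters because load_ucode_ap() runs every time an application processor comes online, including CPU hotplug long after boot, so it must not live in .init.text, the section the kernel discards once startup finishes. A rough userspace illustration of steering functions into named sections with GCC attributes; the section and function names here are invented for the demo:

#include <stdio.h>

#define __my_init    __attribute__((section(".myinit.text")))
#define __my_cpuinit __attribute__((section(".mycpuinit.text")))

/* Boot-only: analogous to __init, could be freed after startup. */
static __my_init void load_bsp(void) { puts("BSP: boot only"); }

/* Must survive boot: runs again on every CPU online event. */
static __my_cpuinit void load_ap(void) { puts("AP: each online"); }

int main(void)
{
	load_bsp();
	load_ap();	/* in the kernel this can run long after boot */
	return 0;
}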
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index c0fa356e90de..86f9301903c8 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -18,9 +18,7 @@ extern int proc_nmi_enabled(struct ctl_table *, int ,
 			void __user *, size_t *, loff_t *);
 extern int unknown_nmi_panic;

-void arch_trigger_all_cpu_backtrace(void);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
-#endif
+#endif /* CONFIG_X86_LOCAL_APIC */

 #define NMI_FLAG_FIRST	1

diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index 31cb9ae992b7..a698d7165c96 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -9,6 +9,7 @@
  *
  */
 #include <asm/apic.h>
+#include <asm/nmi.h>

 #include <linux/cpumask.h>
 #include <linux/kdebug.h>
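The irq.h, nmi.h, and hw_nmi.c hunks belong together: the arch_trigger_all_cpu_backtrace() declaration and its self-referential #define move from nmi.h to irq.h, and hw_nmi.c gains the <asm/nmi.h> include it had been getting only indirectly. The "#define foo foo" idiom exists so generic code can probe for an arch override with #ifdef and fall back otherwise. A small standalone sketch of the pattern, with my_arch_hook as an invented name:

#include <stdio.h>

/* The arch header that provides the hook defines a macro with the
 * same name, making the override detectable by the preprocessor. */
static void my_arch_hook(void) { puts("arch-specific hook"); }
#define my_arch_hook my_arch_hook

/* Generic code: the fallback is compiled out because the macro exists. */
#ifndef my_arch_hook
static void my_arch_hook(void) { puts("generic fallback"); }
#endif

int main(void)
{
	my_arch_hook();		/* prints "arch-specific hook" */
	return 0;
}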
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 35ffda5d0727..5f90b85ff22e 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -714,15 +714,15 @@ int __init mtrr_cleanup(unsigned address_bits)
 	if (mtrr_tom2)
 		x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base;

-	nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size);
 	/*
 	 * [0, 1M) should always be covered by var mtrr with WB
 	 * and fixed mtrrs should take effect before var mtrr for it:
 	 */
-	nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0,
+	nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0,
 					1ULL<<(20 - PAGE_SHIFT));
-	/* Sort the ranges: */
-	sort_range(range, nr_range);
+	/* add from var mtrr at last */
+	nr_range = x86_get_mtrr_mem_range(range, nr_range,
+					  x_remove_base, x_remove_size);

 	range_sums = sum_ranges(range, nr_range);
 	printk(KERN_INFO "total RAM covered: %ldM\n",
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index f60d41ff9a97..a9e22073bd56 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -165,13 +165,13 @@ static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
 	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
 	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
-	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 	EVENT_EXTRA_END
 };

 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
 	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
 	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
+	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 	EVENT_EXTRA_END
 };

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 9895a9a41380..211bce445522 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -365,10 +365,14 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
 	return insn.length;
 }

-static void __kprobes arch_copy_kprobe(struct kprobe *p)
+static int __kprobes arch_copy_kprobe(struct kprobe *p)
 {
+	int ret;
+
 	/* Copy an instruction with recovering if other optprobe modifies it.*/
-	__copy_instruction(p->ainsn.insn, p->addr);
+	ret = __copy_instruction(p->ainsn.insn, p->addr);
+	if (!ret)
+		return -EINVAL;

 	/*
 	 * __copy_instruction can modify the displacement of the instruction,
@@ -384,6 +388,8 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)

 	/* Also, displacement change doesn't affect the first byte */
 	p->opcode = p->ainsn.insn[0];
+
+	return 0;
 }

 int __kprobes arch_prepare_kprobe(struct kprobe *p)
@@ -397,8 +403,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	p->ainsn.insn = get_insn_slot();
 	if (!p->ainsn.insn)
 		return -ENOMEM;
-	arch_copy_kprobe(p);
-	return 0;
+
+	return arch_copy_kprobe(p);
 }

 void __kprobes arch_arm_kprobe(struct kprobe *p)
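The three kprobes hunks are one error-propagation fix: arch_copy_kprobe() previously ignored the result of __copy_instruction(), which returns 0 when it cannot decode or copy the instruction, so arch_prepare_kprobe() would report success even after a failed copy. A compact userspace sketch of the same shape; copy_insn and prepare are illustrative stand-ins, not kernel APIs:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int copy_insn(char *dst, const char *src)
{
	size_t len = strlen(src);	/* length 0 models a decode failure */
	if (!len)
		return -EINVAL;
	memcpy(dst, src, len + 1);
	return 0;
}

static int prepare(char *dst, const char *src)
{
	/* Forward the helper's status instead of returning 0 blindly. */
	return copy_insn(dst, src);
}

int main(void)
{
	char buf[64];
	printf("ok:  %d\n", prepare(buf, "nop"));	/* 0 */
	printf("bad: %d\n", prepare(buf, ""));		/* -EINVAL */
	return 0;
}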
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index d2c381280e3c..3dd37ebd591b 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -242,6 +242,7 @@ void __init kvmclock_init(void)
 	if (!mem)
 		return;
 	hv_clock = __va(mem);
+	memset(hv_clock, 0, size);

 	if (kvm_register_clock("boot clock")) {
 		hv_clock = NULL;
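The one-line kvmclock change zeroes the pvclock area right after mapping it: memory handed back by the early allocator is not guaranteed to be cleared, and stale bytes there could be misread as valid clock state before the hypervisor fills the structure. The same defensive pattern in plain C, with malloc standing in for the early allocator:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t size = 4096;
	unsigned char *hv_clock = malloc(size);	/* may contain garbage */

	if (!hv_clock)
		return 1;

	memset(hv_clock, 0, size);	/* the added line in the hunk */
	printf("first byte: %u\n", hv_clock[0]);	/* always 0 now */
	free(hv_clock);
	return 0;
}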
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 4e7a37ff03ab..81a5f5e8f142 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -277,18 +277,6 @@ void exit_idle(void)
 }
 #endif

-void arch_cpu_idle_prepare(void)
-{
-	/*
-	 * If we're the non-boot CPU, nothing set the stack canary up
-	 * for us. CPU0 already has it initialized but no harm in
-	 * doing it again. This is a good place for updating it, as
-	 * we wont ever return from this function (so the invalid
-	 * canaries already on the stack wont ever trigger).
-	 */
-	boot_init_stack_canary();
-}
-
 void arch_cpu_idle_enter(void)
 {
 	local_touch_nmi();
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 9c73b51817e4..bfd348e99369 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -372,15 +372,15 @@ static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)

 void __cpuinit set_cpu_sibling_map(int cpu)
 {
-	bool has_mc = boot_cpu_data.x86_max_cores > 1;
 	bool has_smt = smp_num_siblings > 1;
+	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct cpuinfo_x86 *o;
 	int i;

 	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);

-	if (!has_smt && !has_mc) {
+	if (!has_mp) {
 		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
 		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
 		cpumask_set_cpu(cpu, cpu_core_mask(cpu));
@@ -394,7 +394,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 		if ((i == cpu) || (has_smt && match_smt(c, o)))
 			link_mask(sibling, cpu, i);

-		if ((i == cpu) || (has_mc && match_llc(c, o)))
+		if ((i == cpu) || (has_mp && match_llc(c, o)))
 			link_mask(llc_shared, cpu, i);

 	}
@@ -406,7 +406,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 	for_each_cpu(i, cpu_sibling_setup_mask) {
 		o = &cpu_data(i);

-		if ((i == cpu) || (has_mc && match_mc(c, o))) {
+		if ((i == cpu) || (has_mp && match_mc(c, o))) {
 			link_mask(core, cpu, i);

 			/*
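The smpboot hunks fold the two topology flags into one predicate: a system with either SMT siblings or multiple cores per package is "multi-processor" for sibling-map purposes, so all three match sites now test a single has_mp rather than mixing has_smt and has_mc. A tiny sketch of the simplified condition; the example values are illustrative:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int smp_num_siblings = 2;	/* threads per core (example) */
	int x86_max_cores = 1;		/* cores per package (example) */

	bool has_smt = smp_num_siblings > 1;
	/* One flag now guards every sibling/LLC/core match site. */
	bool has_mp = has_smt || x86_max_cores > 1;

	printf("has_mp = %d\n", has_mp);
	return 0;
}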
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 094b5d96ab14..e8ba99c34180 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -582,8 +582,6 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 	if (index != XCR_XFEATURE_ENABLED_MASK)
 		return 1;
 	xcr0 = xcr;
-	if (kvm_x86_ops->get_cpl(vcpu) != 0)
-		return 1;
 	if (!(xcr0 & XSTATE_FP))
 		return 1;
 	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
@@ -597,7 +595,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)

 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
-	if (__kvm_set_xcr(vcpu, index, xcr)) {
+	if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
+	    __kvm_set_xcr(vcpu, index, xcr)) {
 		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
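The x86.c hunks relocate the CPL check from __kvm_set_xcr() to kvm_set_xcr(), presumably because the core validator is also reachable on host-initiated paths where testing guest CPL is wrong; only the guest-facing wrapper now requires ring 0 before validating, injecting #GP on failure. A minimal sketch of the split; validate_xcr0 and guest_set_xcr are invented stand-ins, not the KVM API:

#include <stdio.h>

/* Core validation: usable by host- and guest-initiated callers. */
static int validate_xcr0(unsigned long long xcr0)
{
	if (!(xcr0 & 0x1))	/* x87 state must remain enabled */
		return 1;
	return 0;
}

/* Guest entry point: the privilege check lives here only. */
static int guest_set_xcr(int cpl, unsigned long long xcr0)
{
	if (cpl != 0 || validate_xcr0(xcr0))
		return 1;	/* caller would inject #GP */
	return 0;
}

int main(void)
{
	printf("ring0 ok:   %d\n", guest_set_xcr(0, 0x7));	/* 0 */
	printf("ring3 trap: %d\n", guest_set_xcr(3, 0x7));	/* 1 */
	return 0;
}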
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 5ae2eb09419e..d2fbcedcf6ea 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -1069,7 +1069,10 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
 	 * that by attempting to use more space than is available.
 	 */
 	unsigned long dummy_size = remaining_size + 1024;
-	void *dummy = kmalloc(dummy_size, GFP_ATOMIC);
+	void *dummy = kzalloc(dummy_size, GFP_ATOMIC);
+
+	if (!dummy)
+		return EFI_OUT_OF_RESOURCES;

 	status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
 				  EFI_VARIABLE_NON_VOLATILE |
@@ -1089,6 +1092,8 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
 					 0, dummy);
 	}

+	kfree(dummy);
+
 	/*
 	 * The runtime code may now have triggered a garbage collection
 	 * run, so check the variable info again
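The efi.c hunks fix two defects in the dummy-variable probe: the allocation was never checked for NULL before being handed to set_variable(), and the buffer was never freed. Switching kmalloc to kzalloc additionally avoids passing uninitialized kernel memory to firmware. The same allocate/check/use/free shape in standalone C; probe and query_store are illustrative names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int probe(void *buf, size_t size)
{
	memset(buf, 0xff, size);	/* stand-in for using the buffer */
	return 0;
}

static int query_store(size_t remaining)
{
	size_t dummy_size = remaining + 1024;
	void *dummy = calloc(1, dummy_size);	/* kzalloc analogue */

	if (!dummy)
		return -1;	/* EFI_OUT_OF_RESOURCES analogue */

	int status = probe(dummy, dummy_size);

	free(dummy);		/* plug the leak on every path */
	return status;
}

int main(void)
{
	printf("status = %d\n", query_store(4096));
	return 0;
}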