author    Linus Torvalds <torvalds@linux-foundation.org>  2015-11-22 15:00:12 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-11-22 15:00:12 -0500
commit    069ec22915b0932e1c2da53255a705593743374f (patch)
tree      cfce74bd6e2f4bce7b719aa017814ced15772929
parent    3ad5d7e06a96d54a55acb5ab25938a06814605c8 (diff)
parent    581b7f158fe0383b492acd1ce3fb4e99d4e57808 (diff)
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Thomas Gleixner:
 "This update contains:

   - MPX updates for handling 32bit processes

   - A fix for a long standing bug in 32bit signal frame handling
     related to FPU/XSAVE state

   - Handle get_xsave_addr() correctly in KVM

   - Fix SMAP check under paravirtualization

   - Add a comment to the static function trace entry to avoid further
     confusion about the difference to dynamic tracing"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/cpu: Fix SMAP check in PVOPS environments
  x86/ftrace: Add comment on static function tracing
  x86/fpu: Fix get_xsave_addr() behavior under virtualization
  x86/fpu: Fix 32-bit signal frame handling
  x86/mpx: Fix 32-bit address space calculation
  x86/mpx: Do proper get_user() when running 32-bit binaries on 64-bit kernels
-rw-r--r--  arch/x86/kernel/cpu/common.c   3
-rw-r--r--  arch/x86/kernel/fpu/signal.c  11
-rw-r--r--  arch/x86/kernel/fpu/xstate.c   1
-rw-r--r--  arch/x86/kernel/mcount_64.S    6
-rw-r--r--  arch/x86/mm/mpx.c             47
5 files changed, 53 insertions, 15 deletions
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4ddd780aeac9..c2b7522cbf35 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -273,10 +273,9 @@ __setup("nosmap", setup_disable_smap);
 
 static __always_inline void setup_smap(struct cpuinfo_x86 *c)
 {
-	unsigned long eflags;
+	unsigned long eflags = native_save_fl();
 
 	/* This should have been cleared long ago */
-	raw_local_save_flags(eflags);
 	BUG_ON(eflags & X86_EFLAGS_AC);
 
 	if (cpu_has(c, X86_FEATURE_SMAP)) {
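The fix works because native_save_fl() reads the hardware EFLAGS directly with PUSHF instead of going through the paravirt-mediated raw_local_save_flags(), which may not report the real AC bit. As a rough sketch of what that helper does (paraphrased from arch/x86/include/asm/irqflags.h of this era; shown for context only, not part of this patch):

static inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/* PUSHF/POP reads the real EFLAGS, so the X86_EFLAGS_AC check in
	 * setup_smap() is not fooled by a paravirt flags implementation. */
	asm volatile("pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}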
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index ef29b742cea7..31c6a60505e6 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -385,20 +385,19 @@ fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
  */
 void fpu__init_prepare_fx_sw_frame(void)
 {
-	int fsave_header_size = sizeof(struct fregs_state);
 	int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;
 
-	if (config_enabled(CONFIG_X86_32))
-		size += fsave_header_size;
-
 	fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
 	fx_sw_reserved.extended_size = size;
 	fx_sw_reserved.xfeatures = xfeatures_mask;
 	fx_sw_reserved.xstate_size = xstate_size;
 
-	if (config_enabled(CONFIG_IA32_EMULATION)) {
+	if (config_enabled(CONFIG_IA32_EMULATION) ||
+	    config_enabled(CONFIG_X86_32)) {
+		int fsave_header_size = sizeof(struct fregs_state);
+
 		fx_sw_reserved_ia32 = fx_sw_reserved;
-		fx_sw_reserved_ia32.extended_size += fsave_header_size;
+		fx_sw_reserved_ia32.extended_size = size + fsave_header_size;
 	}
 }
 
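Net effect of the change above: the 64-bit frame reserves xstate_size + FP_XSTATE_MAGIC2_SIZE bytes, and only the ia32/32-bit frame also reserves room for the legacy fsave header. A back-of-the-envelope sketch with made-up sizes (the real values are computed at boot and from sizeof(struct fregs_state)):

#include <stdio.h>

int main(void)
{
	/* Made-up sizes purely for the arithmetic. */
	int xstate_size = 832;			/* computed at boot from XSAVE features */
	int fp_xstate_magic2_size = 4;		/* trailing FP_XSTATE_MAGIC2 word */
	int fsave_header_size = 112;		/* roughly sizeof(struct fregs_state) */

	int size = xstate_size + fp_xstate_magic2_size;

	printf("64-bit frame extended_size: %d\n", size);
	printf("32-bit frame extended_size: %d\n", size + fsave_header_size);
	return 0;
}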
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 6454f2731b56..70fc312221fc 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -694,7 +694,6 @@ void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature)
 	if (!boot_cpu_has(X86_FEATURE_XSAVE))
 		return NULL;
 
-	xsave = &current->thread.fpu.state.xsave;
 	/*
 	 * We should not ever be requesting features that we
 	 * have not enabled. Remember that pcntxt_mask is
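The deleted assignment silently redirected the lookup from the caller's buffer to current's, which is exactly what breaks KVM when it passes a guest's xsave area. A miniature user-space sketch of that bug class, using invented names (get_feature_addr and current_buf are illustrative only):

#include <stdio.h>

static int current_buf[8];			/* stands in for the current task's state */

static int *get_feature_addr(int *buf, int idx)
{
	buf = current_buf;			/* the removed line did this to 'xsave' */
	return &buf[idx];
}

int main(void)
{
	int guest_buf[8] = { 42, 42, 42, 42, 42, 42, 42, 42 };

	/* Caller expects to look inside guest_buf, but gets current_buf instead,
	 * so this prints 0 rather than 42. */
	printf("%d\n", *get_feature_addr(guest_buf, 0));
	return 0;
}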
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index 94ea120fa21f..87e1762e2bca 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -278,6 +278,12 @@ trace:
 	/* save_mcount_regs fills in first two parameters */
 	save_mcount_regs
 
+	/*
+	 * When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not
+	 * set (see include/asm/ftrace.h and include/linux/ftrace.h). Only the
+	 * ip and parent ip are used and the list function is called when
+	 * function tracing is enabled.
+	 */
 	call   *ftrace_trace_function
 
 	restore_mcount_regs
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index b0ae85f90f10..1202d5ca2fb5 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -586,6 +586,29 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
 }
 
 /*
+ * We only want to do a 4-byte get_user() on 32-bit. Otherwise,
+ * we might run off the end of the bounds table if we are on
+ * a 64-bit kernel and try to get 8 bytes.
+ */
+int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
+		long __user *bd_entry_ptr)
+{
+	u32 bd_entry_32;
+	int ret;
+
+	if (is_64bit_mm(mm))
+		return get_user(*bd_entry_ret, bd_entry_ptr);
+
+	/*
+	 * Note that get_user() uses the type of the *pointer* to
+	 * establish the size of the get, not the destination.
+	 */
+	ret = get_user(bd_entry_32, (u32 __user *)bd_entry_ptr);
+	*bd_entry_ret = bd_entry_32;
+	return ret;
+}
+
+/*
  * Get the base of bounds tables pointed by specific bounds
  * directory entry.
  */
@@ -605,7 +628,7 @@ static int get_bt_addr(struct mm_struct *mm,
 		int need_write = 0;
 
 		pagefault_disable();
-		ret = get_user(bd_entry, bd_entry_ptr);
+		ret = get_user_bd_entry(mm, &bd_entry, bd_entry_ptr);
 		pagefault_enable();
 		if (!ret)
 			break;
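The hunk above swaps the plain get_user() for the new helper, whose comment is the crux: get_user() derives the access width from the pointee type of its pointer argument. A user-space analogue of that sizing rule, with demo_get_user() as a made-up stand-in:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for get_user(): the width of the read is sizeof(*(ptr)). */
#define demo_get_user(dst, ptr)	({ (dst) = *(ptr); 0; })

int main(void)
{
	uint64_t table_entry = 0x1111111122222222ULL;
	unsigned long long ret;
	uint32_t ret32;

	/* 64-bit mm: read the full 8-byte entry. */
	demo_get_user(ret, &table_entry);
	printf("8-byte read: %#llx\n", ret);

	/* 32-bit mm: cast to a u32 pointer so only 4 bytes are read,
	 * exactly as get_user_bd_entry() does above. */
	demo_get_user(ret32, (uint32_t *)&table_entry);
	printf("4-byte read: %#x\n", ret32);
	return 0;
}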
@@ -700,11 +723,23 @@ static unsigned long mpx_get_bt_entry_offset_bytes(struct mm_struct *mm,
  */
 static inline unsigned long bd_entry_virt_space(struct mm_struct *mm)
 {
-	unsigned long long virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
-	if (is_64bit_mm(mm))
-		return virt_space / MPX_BD_NR_ENTRIES_64;
-	else
-		return virt_space / MPX_BD_NR_ENTRIES_32;
+	unsigned long long virt_space;
+	unsigned long long GB = (1ULL << 30);
+
+	/*
+	 * This covers 32-bit emulation as well as 32-bit kernels
+	 * running on 64-bit hardware.
+	 */
+	if (!is_64bit_mm(mm))
+		return (4ULL * GB) / MPX_BD_NR_ENTRIES_32;
+
+	/*
+	 * 'x86_virt_bits' returns what the hardware is capable
+	 * of, and returns the full >32-bit address space when
+	 * running 32-bit kernels on 64-bit hardware.
+	 */
+	virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
+	return virt_space / MPX_BD_NR_ENTRIES_64;
 }
 
 /*
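To make the new calculation concrete, a small worked example; the entry counts and the 48-bit figure are placeholders, since the real MPX_BD_NR_ENTRIES_32/_64 values come from arch/x86/include/asm/mpx.h and x86_virt_bits is reported by the hardware:

#include <stdio.h>

int main(void)
{
	unsigned long long GB = 1ULL << 30;

	/* Placeholder entry counts; see arch/x86/include/asm/mpx.h for the
	 * real MPX_BD_NR_ENTRIES_32 / MPX_BD_NR_ENTRIES_64. */
	unsigned long long nr_entries_32 = 1ULL << 20;
	unsigned long long nr_entries_64 = 1ULL << 28;
	unsigned int virt_bits = 48;	/* a typical x86_virt_bits on 64-bit */

	/* 32-bit mm: a fixed 4GB address space, whatever the hardware says. */
	printf("32-bit: %llu bytes of VA per BD entry\n",
	       (4ULL * GB) / nr_entries_32);

	/* 64-bit mm: divide what the hardware actually supports. */
	printf("64-bit: %llu bytes of VA per BD entry\n",
	       (1ULL << virt_bits) / nr_entries_64);
	return 0;
}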