author		H. Peter Anvin <hpa@linux.intel.com>	2012-09-21 20:18:44 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2012-09-21 20:18:44 -0400
commit		49b8c695e331c9685e6ffdbf34872509d77c8459 (patch)
tree		d4afdfae0115b2ab56687d23d6329d6ad934788f
parent		e59d1b0a24199db01978e6c1e89859eda93ce683 (diff)
parent		b1a74bf8212367be2b1d6685c11a84e056eaaaf1 (diff)
Merge branch 'x86/fpu' into x86/smap
Reason for merge:

    x86/fpu changed the structure of some of the code that x86/smap
    changes; mostly fpu-internal.h but also minor changes to the
    signal code.

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>

Resolved Conflicts:
	arch/x86/ia32/ia32_signal.c
	arch/x86/include/asm/fpu-internal.h
	arch/x86/kernel/signal.c
-rw-r--r--	Documentation/kernel-parameters.txt | 6
-rw-r--r--	arch/x86/ia32/ia32_signal.c | 21
-rw-r--r--	arch/x86/ia32/sys_ia32.c | 2
-rw-r--r--	arch/x86/include/asm/cpufeature.h | 3
-rw-r--r--	arch/x86/include/asm/fpu-internal.h | 411
-rw-r--r--	arch/x86/include/asm/i387.h | 29
-rw-r--r--	arch/x86/include/asm/iommu_table.h | 6
-rw-r--r--	arch/x86/include/asm/signal.h | 4
-rw-r--r--	arch/x86/include/asm/sys_ia32.h | 2
-rw-r--r--	arch/x86/include/asm/vdso.h | 3
-rw-r--r--	arch/x86/include/asm/xor_32.h | 56
-rw-r--r--	arch/x86/include/asm/xor_64.h | 61
-rw-r--r--	arch/x86/include/asm/xor_avx.h | 54
-rw-r--r--	arch/x86/include/asm/xsave.h | 13
-rw-r--r--	arch/x86/kernel/cpu/bugs.c | 7
-rw-r--r--	arch/x86/kernel/cpu/common.c | 2
-rw-r--r--	arch/x86/kernel/i387.c | 292
-rw-r--r--	arch/x86/kernel/probe_roms.c | 2
-rw-r--r--	arch/x86/kernel/process.c | 22
-rw-r--r--	arch/x86/kernel/process_32.c | 4
-rw-r--r--	arch/x86/kernel/process_64.c | 4
-rw-r--r--	arch/x86/kernel/ptrace.c | 3
-rw-r--r--	arch/x86/kernel/signal.c | 215
-rw-r--r--	arch/x86/kernel/traps.c | 5
-rw-r--r--	arch/x86/kernel/xsave.c | 517
-rw-r--r--	arch/x86/kvm/vmx.c | 10
-rw-r--r--	arch/x86/kvm/x86.c | 3
-rw-r--r--	drivers/lguest/x86/core.c | 10
-rw-r--r--	drivers/net/can/sja1000/sja1000_platform.c | 4
-rw-r--r--	drivers/net/can/softing/softing_fw.c | 7
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 3
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 4
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 4
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 2
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 18
-rw-r--r--	drivers/net/ethernet/cirrus/cs89x0.c | 10
-rw-r--r--	drivers/net/ethernet/emulex/benet/be_cmds.c | 6
-rw-r--r--	drivers/net/ethernet/emulex/benet/be_main.c | 2
-rw-r--r--	drivers/net/ethernet/freescale/gianfar.c | 2
-rw-r--r--	drivers/net/ethernet/intel/e1000e/e1000.h | 1
-rw-r--r--	drivers/net/ethernet/intel/e1000e/netdev.c | 48
-rw-r--r--	drivers/net/ethernet/sfc/ethtool.c | 4
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/common.h | 5
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/descs.h | 6
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/descs_com.h | 5
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/dwmac100.h | 5
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 5
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h | 5
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/mmc.h | 5
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 6
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/stmmac.h | 5
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h | 4
-rw-r--r--	drivers/net/ethernet/ti/davinci_mdio.c | 4
-rw-r--r--	drivers/net/fddi/skfp/pmf.c | 2
-rw-r--r--	drivers/net/usb/qmi_wwan.c | 4
-rw-r--r--	drivers/net/usb/usbnet.c | 2
-rw-r--r--	drivers/net/wireless/ath/ath5k/eeprom.c | 2
-rw-r--r--	drivers/net/wireless/ath/ath5k/eeprom.h | 1
-rw-r--r--	drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c | 3
-rw-r--r--	drivers/net/wireless/ipw2x00/ipw2100.c | 3
-rw-r--r--	drivers/net/wireless/iwlwifi/dvm/debugfs.c | 3
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/internal.h | 2
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/rx.c | 2
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/trans.c | 30
-rw-r--r--	drivers/net/xen-netfront.c | 39
-rw-r--r--	fs/cifs/cifssmb.c | 11
-rw-r--r--	fs/cifs/dir.c | 9
-rw-r--r--	fs/cifs/inode.c | 24
-rw-r--r--	fs/cifs/link.c | 2
-rw-r--r--	fs/cifs/smb2misc.c | 16
-rw-r--r--	fs/cifs/smb2pdu.h | 10
-rw-r--r--	fs/cifs/transport.c | 9
-rw-r--r--	include/linux/pci_ids.h | 2
-rw-r--r--	include/net/netfilter/nf_conntrack_ecache.h | 1
-rw-r--r--	net/core/netpoll.c | 10
-rw-r--r--	net/ipv4/ipmr.c | 14
-rw-r--r--	net/ipv4/netfilter/nf_nat_sip.c | 5
-rw-r--r--	net/ipv4/route.c | 6
-rw-r--r--	net/ipv4/tcp_input.c | 15
-rw-r--r--	net/ipv6/esp6.c | 6
-rw-r--r--	net/l2tp/l2tp_core.c | 3
-rw-r--r--	net/l2tp/l2tp_core.h | 1
-rw-r--r--	net/mac80211/tx.c | 38
-rw-r--r--	net/netfilter/ipvs/ip_vs_ctl.c | 4
-rw-r--r--	net/netfilter/nf_conntrack_core.c | 16
-rw-r--r--	net/netfilter/nf_conntrack_netlink.c | 3
-rw-r--r--	net/netfilter/nfnetlink_log.c | 6
-rw-r--r--	net/netlink/af_netlink.c | 4
-rw-r--r--	net/packet/af_packet.c | 2
-rw-r--r--	net/xfrm/xfrm_state.c | 4
90 files changed, 1106 insertions, 1145 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 49c5c41b07e1..c298e9d188c6 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1837,6 +1837,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			and restore using xsave. The kernel will fallback to
 			enabling legacy floating-point and sse state.
 
+	eagerfpu=	[X86]
+			on	enable eager fpu restore
+			off	disable eager fpu restore
+			auto	selects the default scheme, which automatically
+				enables eagerfpu restore for xsaveopt.
+
 	nohlt		[BUGS=ARM,SH] Tells the kernel that the sleep(SH) or
 			wfi(ARM) instruction doesn't work correctly and not to
 			use it. This is also useful when using JTAG debugger.
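
For context (not part of the patch): like every entry in this file, the new
option is consumed from the kernel command line at boot. A purely illustrative
GRUB-style invocation, with a hypothetical kernel image and root device:

	linux /boot/vmlinuz root=/dev/sda1 ro eagerfpu=on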
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 05e62a312bd9..efc6a958b71d 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -32,6 +32,7 @@
 #include <asm/sigframe.h>
 #include <asm/sighandling.h>
 #include <asm/sys_ia32.h>
+#include <asm/smap.h>
 
 #define FIX_EFLAGS	__FIX_EFLAGS
 
@@ -162,7 +163,8 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
 	}
 	seg = get_fs();
 	set_fs(KERNEL_DS);
-	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
+	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
+			     (stack_t __force __user *) &uoss, regs->sp);
 	set_fs(seg);
 	if (ret >= 0 && uoss_ptr)  {
 		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
@@ -254,7 +256,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 		get_user_ex(*pax, &sc->ax);
 	} get_user_catch(err);
 
-	err |= restore_i387_xstate_ia32(buf);
+	err |= restore_xstate_sig(buf, 1);
 
 	return err;
 }
@@ -362,7 +364,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
  */
 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
				 size_t frame_size,
-				 void **fpstate)
+				 void __user **fpstate)
 {
 	unsigned long sp;
 
@@ -382,9 +384,12 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
 		sp = (unsigned long) ka->sa.sa_restorer;
 
 	if (used_math()) {
-		sp = sp - sig_xstate_ia32_size;
-		*fpstate = (struct _fpstate_ia32 *) sp;
-		if (save_i387_xstate_ia32(*fpstate) < 0)
+		unsigned long fx_aligned, math_size;
+
+		sp = alloc_mathframe(sp, 1, &fx_aligned, &math_size);
+		*fpstate = (struct _fpstate_ia32 __user *) sp;
+		if (save_xstate_sig(*fpstate, (void __user *)fx_aligned,
+				    math_size) < 0)
 			return (void __user *) -1L;
 	}
 
@@ -449,7 +454,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
	 * These are actually not used anymore, but left because some
	 * gdb versions depend on them as a marker.
	 */
-		put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
+		put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
 	} put_user_catch(err);
 
 	if (err)
@@ -526,7 +531,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
	 * Not actually used anymore, but left because some gdb
	 * versions need it.
	 */
-		put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
+		put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
 	} put_user_catch(err);
 
 	err |= copy_siginfo_to_user32(&frame->info, info);
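
The (stack_t __force __user *) casts above follow the usual sparse idiom:
__user marks a pointer as belonging to the user address space, and __force
acknowledges a deliberate address-space cast, legal here because
set_fs(KERNEL_DS) widens the uaccess limit around the call. A minimal sketch
of the pattern (hypothetical caller, not part of this patch):

	/* &kbuf is a kernel pointer, but do_sigaltstack() takes __user
	 * pointers; under set_fs(KERNEL_DS) the access checks accept
	 * kernel addresses, and __force tells sparse the cast is
	 * intentional rather than a bug. */
	mm_segment_t seg = get_fs();
	stack_t kbuf;
	long ret;

	set_fs(KERNEL_DS);
	ret = do_sigaltstack((stack_t __force __user *)&kbuf, NULL, sp);
	set_fs(seg);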
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 4540bece0946..c5b938d92eab 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -287,7 +287,7 @@ asmlinkage long sys32_sigaction(int sig, struct old_sigaction32 __user *act,
 	return ret;
 }
 
-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
			      int options)
 {
 	return compat_sys_wait4(pid, stat_addr, options, NULL);
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 633b6176cf60..16cae425d1f8 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -97,6 +97,7 @@
 #define X86_FEATURE_EXTD_APICID	(3*32+26) /* has extended APICID (8 bits) */
 #define X86_FEATURE_AMD_DCM	(3*32+27) /* multi-node processor */
 #define X86_FEATURE_APERFMPERF	(3*32+28) /* APERFMPERF */
+#define X86_FEATURE_EAGER_FPU	(3*32+29) /* "eagerfpu" Non lazy FPU restore */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* "pni" SSE-3 */
@@ -300,12 +301,14 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_xmm4_2		boot_cpu_has(X86_FEATURE_XMM4_2)
 #define cpu_has_x2apic		boot_cpu_has(X86_FEATURE_X2APIC)
 #define cpu_has_xsave		boot_cpu_has(X86_FEATURE_XSAVE)
+#define cpu_has_xsaveopt	boot_cpu_has(X86_FEATURE_XSAVEOPT)
 #define cpu_has_osxsave		boot_cpu_has(X86_FEATURE_OSXSAVE)
 #define cpu_has_hypervisor	boot_cpu_has(X86_FEATURE_HYPERVISOR)
 #define cpu_has_pclmulqdq	boot_cpu_has(X86_FEATURE_PCLMULQDQ)
 #define cpu_has_perfctr_core	boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
 #define cpu_has_cx8		boot_cpu_has(X86_FEATURE_CX8)
 #define cpu_has_cx16		boot_cpu_has(X86_FEATURE_CX16)
+#define cpu_has_eager_fpu	boot_cpu_has(X86_FEATURE_EAGER_FPU)
 
 #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
 # define cpu_has_invlpg		1
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 0fe13583a028..409b9ccf5518 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -12,6 +12,7 @@
 
 #include <linux/kernel_stat.h>
 #include <linux/regset.h>
+#include <linux/compat.h>
 #include <linux/slab.h>
 #include <asm/asm.h>
 #include <asm/cpufeature.h>
@@ -20,43 +21,76 @@
 #include <asm/user.h>
 #include <asm/uaccess.h>
 #include <asm/xsave.h>
+#include <asm/smap.h>
 
-extern unsigned int sig_xstate_size;
+#ifdef CONFIG_X86_64
+# include <asm/sigcontext32.h>
+# include <asm/user32.h>
+int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+			compat_sigset_t *set, struct pt_regs *regs);
+int ia32_setup_frame(int sig, struct k_sigaction *ka,
+		     compat_sigset_t *set, struct pt_regs *regs);
+#else
+# define user_i387_ia32_struct	user_i387_struct
+# define user32_fxsr_struct	user_fxsr_struct
+# define ia32_setup_frame	__setup_frame
+# define ia32_setup_rt_frame	__setup_rt_frame
+#endif
+
+extern unsigned int mxcsr_feature_mask;
 extern void fpu_init(void);
+extern void eager_fpu_init(void);
 
 DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);
 
+extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
+			      struct task_struct *tsk);
+extern void convert_to_fxsr(struct task_struct *tsk,
+			    const struct user_i387_ia32_struct *env);
+
 extern user_regset_active_fn fpregs_active, xfpregs_active;
 extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
				xstateregs_get;
 extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
				 xstateregs_set;
 
-
 /*
  * xstateregs_active == fpregs_active. Please refer to the comment
  * at the definition of fpregs_active.
  */
 #define xstateregs_active	fpregs_active
 
-extern struct _fpx_sw_bytes fx_sw_reserved;
-#ifdef CONFIG_IA32_EMULATION
-extern unsigned int sig_xstate_ia32_size;
-extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
-struct _fpstate_ia32;
-struct _xstate_ia32;
-extern int save_i387_xstate_ia32(void __user *buf);
-extern int restore_i387_xstate_ia32(void __user *buf);
-#endif
-
 #ifdef CONFIG_MATH_EMULATION
+# define HAVE_HWFP		(boot_cpu_data.hard_math)
 extern void finit_soft_fpu(struct i387_soft_struct *soft);
 #else
+# define HAVE_HWFP		1
 static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
 #endif
 
+static inline int is_ia32_compat_frame(void)
+{
+	return config_enabled(CONFIG_IA32_EMULATION) &&
+	       test_thread_flag(TIF_IA32);
+}
+
+static inline int is_ia32_frame(void)
+{
+	return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
+}
+
+static inline int is_x32_frame(void)
+{
+	return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
+}
+
 #define X87_FSW_ES (1 << 7)	/* Exception Summary */
 
+static __always_inline __pure bool use_eager_fpu(void)
+{
+	return static_cpu_has(X86_FEATURE_EAGER_FPU);
+}
+
 static __always_inline __pure bool use_xsaveopt(void)
 {
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
@@ -72,6 +106,13 @@ static __always_inline __pure bool use_fxsr(void)
	return static_cpu_has(X86_FEATURE_FXSR);
 }
 
+static inline void fx_finit(struct i387_fxsave_struct *fx)
+{
+	memset(fx, 0, xstate_size);
+	fx->cwd = 0x37f;
+	fx->mxcsr = MXCSR_DEFAULT;
+}
+
 extern void __sanitize_i387_state(struct task_struct *);
 
 static inline void sanitize_i387_state(struct task_struct *tsk)
@@ -81,133 +122,104 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
	__sanitize_i387_state(tsk);
 }
 
-#ifdef CONFIG_X86_64
-static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
-{
-	int err;
-
-	/* See comment in fxsave() below. */
-#ifdef CONFIG_AS_FXSAVEQ
-	asm volatile("1:  fxrstorq %[fx]\n\t"
-		     "2:\n"
-		     ".section .fixup,\"ax\"\n"
-		     "3:  movl $-1,%[err]\n"
-		     "    jmp  2b\n"
-		     ".previous\n"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [err] "=r" (err)
-		     : [fx] "m" (*fx), "0" (0));
-#else
-	asm volatile("1:  rex64/fxrstor (%[fx])\n\t"
-		     "2:\n"
-		     ".section .fixup,\"ax\"\n"
-		     "3:  movl $-1,%[err]\n"
-		     "    jmp  2b\n"
-		     ".previous\n"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [err] "=r" (err)
-		     : [fx] "R" (fx), "m" (*fx), "0" (0));
-#endif
-	return err;
+#define user_insn(insn, output, input...)				\
+({									\
+	int err;							\
+	asm volatile(ASM_STAC "\n"					\
+		     "1:" #insn "\n\t"					\
+		     "2: " ASM_CLAC "\n"				\
+		     ".section .fixup,\"ax\"\n"				\
+		     "3:  movl $-1,%[err]\n"				\
+		     "    jmp  2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : [err] "=r" (err), output				\
+		     : "0"(0), input);					\
+	err;								\
+})
+
+#define check_insn(insn, output, input...)				\
+({									\
+	int err;							\
+	asm volatile("1:" #insn "\n\t"					\
+		     "2:\n"						\
+		     ".section .fixup,\"ax\"\n"				\
+		     "3:  movl $-1,%[err]\n"				\
+		     "    jmp  2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : [err] "=r" (err), output				\
+		     : "0"(0), input);					\
+	err;								\
+})
+
+static inline int fsave_user(struct i387_fsave_struct __user *fx)
+{
+	return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
 }
 
 static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 {
-	int err;
-
-	/*
-	 * Clear the bytes not touched by the fxsave and reserved
-	 * for the SW usage.
-	 */
-	err = __clear_user(&fx->sw_reserved,
-			   sizeof(struct _fpx_sw_bytes));
-	if (unlikely(err))
-		return -EFAULT;
-
-	/* See comment in fxsave() below. */
-#ifdef CONFIG_AS_FXSAVEQ
-	asm volatile(ASM_STAC "\n"
-		     "1:  fxsaveq %[fx]\n\t"
-		     "2: " ASM_CLAC "\n"
-		     ".section .fixup,\"ax\"\n"
-		     "3:  movl $-1,%[err]\n"
-		     "    jmp  2b\n"
-		     ".previous\n"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [err] "=r" (err), [fx] "=m" (*fx)
-		     : "0" (0));
-#else
-	asm volatile(ASM_STAC "\n"
-		     "1:  rex64/fxsave (%[fx])\n\t"
-		     "2: " ASM_CLAC "\n"
-		     ".section .fixup,\"ax\"\n"
-		     "3:  movl $-1,%[err]\n"
-		     "    jmp  2b\n"
-		     ".previous\n"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [err] "=r" (err), "=m" (*fx)
-		     : [fx] "R" (fx), "0" (0));
-#endif
-	if (unlikely(err) &&
-	    __clear_user(fx, sizeof(struct i387_fxsave_struct)))
-		err = -EFAULT;
-	/* No need to clear here because the caller clears USED_MATH */
-	return err;
+	if (config_enabled(CONFIG_X86_32))
+		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
+	else if (config_enabled(CONFIG_AS_FXSAVEQ))
+		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
+
+	/* See comment in fpu_fxsave() below. */
+	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
 }
 
-static inline void fpu_fxsave(struct fpu *fpu)
+static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 {
-	/* Using "rex64; fxsave %0" is broken because, if the memory operand
-	   uses any extended registers for addressing, a second REX prefix
-	   will be generated (to the assembler, rex64 followed by semicolon
-	   is a separate instruction), and hence the 64-bitness is lost. */
-#ifdef CONFIG_AS_FXSAVEQ
-	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
-	   starting with gas 2.16. */
-	__asm__ __volatile__("fxsaveq %0"
-			     : "=m" (fpu->state->fxsave));
-#else
-	/* Using, as a workaround, the properly prefixed form below isn't
-	   accepted by any binutils version so far released, complaining that
-	   the same type of prefix is used twice if an extended register is
-	   needed for addressing (fix submitted to mainline 2005-11-21).
-	asm volatile("rex64/fxsave %0"
-		     : "=m" (fpu->state->fxsave));
-	   This, however, we can work around by forcing the compiler to select
-	   an addressing mode that doesn't require extended registers. */
-	asm volatile("rex64/fxsave (%[fx])"
-		     : "=m" (fpu->state->fxsave)
-		     : [fx] "R" (&fpu->state->fxsave));
-#endif
-}
-
-#else  /* CONFIG_X86_32 */
-
-/* perform fxrstor iff the processor has extended states, otherwise frstor */
-static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
-{
-	/*
-	 * The "nop" is needed to make the instructions the same
-	 * length.
-	 */
-	alternative_input(
-		"nop ; frstor %1",
-		"fxrstor %1",
-		X86_FEATURE_FXSR,
-		"m" (*fx));
-
-	return 0;
+	if (config_enabled(CONFIG_X86_32))
+		return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+	else if (config_enabled(CONFIG_AS_FXSAVEQ))
+		return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+
+	/* See comment in fpu_fxsave() below. */
+	return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
+			  "m" (*fx));
+}
+
+static inline int frstor_checking(struct i387_fsave_struct *fx)
+{
+	return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 }
 
 static inline void fpu_fxsave(struct fpu *fpu)
 {
-	asm volatile("fxsave %[fx]"
-		     : [fx] "=m" (fpu->state->fxsave));
+	if (config_enabled(CONFIG_X86_32))
+		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
+	else if (config_enabled(CONFIG_AS_FXSAVEQ))
+		asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave));
+	else {
+		/* Using "rex64; fxsave %0" is broken because, if the memory
+		 * operand uses any extended registers for addressing, a second
+		 * REX prefix will be generated (to the assembler, rex64
+		 * followed by semicolon is a separate instruction), and hence
+		 * the 64-bitness is lost.
+		 *
+		 * Using "fxsaveq %0" would be the ideal choice, but is only
+		 * supported starting with gas 2.16.
+		 *
+		 * Using, as a workaround, the properly prefixed form below
+		 * isn't accepted by any binutils version so far released,
+		 * complaining that the same type of prefix is used twice if
+		 * an extended register is needed for addressing (fix submitted
+		 * to mainline 2005-11-21).
+		 *
+		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
+		 *
+		 * This, however, we can work around by forcing the compiler to
+		 * select an addressing mode that doesn't require extended
+		 * registers.
+		 */
+		asm volatile( "rex64/fxsave (%[fx])"
+			     : "=m" (fpu->state->fxsave)
+			     : [fx] "R" (&fpu->state->fxsave));
+	}
 }
 
-#endif	/* CONFIG_X86_64 */
-
 /*
  * These must be called with preempt disabled. Returns
  * 'true' if the FPU state is still intact.
@@ -250,17 +262,14 @@ static inline int __save_init_fpu(struct task_struct *tsk)
	return fpu_save_init(&tsk->thread.fpu);
 }
 
-static inline int fpu_fxrstor_checking(struct fpu *fpu)
-{
-	return fxrstor_checking(&fpu->state->fxsave);
-}
-
 static inline int fpu_restore_checking(struct fpu *fpu)
 {
	if (use_xsave())
-		return fpu_xrstor_checking(fpu);
+		return fpu_xrstor_checking(&fpu->state->xsave);
+	else if (use_fxsr())
+		return fxrstor_checking(&fpu->state->fxsave);
	else
-		return fpu_fxrstor_checking(fpu);
+		return frstor_checking(&fpu->state->fsave);
 }
 
 static inline int restore_fpu_checking(struct task_struct *tsk)
@@ -312,15 +321,52 @@ static inline void __thread_set_has_fpu(struct task_struct *tsk)
 static inline void __thread_fpu_end(struct task_struct *tsk)
 {
	__thread_clear_has_fpu(tsk);
-	stts();
+	if (!use_eager_fpu())
+		stts();
 }
 
 static inline void __thread_fpu_begin(struct task_struct *tsk)
 {
-	clts();
+	if (!use_eager_fpu())
+		clts();
	__thread_set_has_fpu(tsk);
 }
 
+static inline void __drop_fpu(struct task_struct *tsk)
+{
+	if (__thread_has_fpu(tsk)) {
+		/* Ignore delayed exceptions from user space */
+		asm volatile("1: fwait\n"
+			     "2:\n"
+			     _ASM_EXTABLE(1b, 2b));
+		__thread_fpu_end(tsk);
+	}
+}
+
+static inline void drop_fpu(struct task_struct *tsk)
+{
+	/*
+	 * Forget coprocessor state..
+	 */
+	preempt_disable();
+	tsk->fpu_counter = 0;
+	__drop_fpu(tsk);
+	clear_used_math();
+	preempt_enable();
+}
+
+static inline void drop_init_fpu(struct task_struct *tsk)
+{
+	if (!use_eager_fpu())
+		drop_fpu(tsk);
+	else {
+		if (use_xsave())
+			xrstor_state(init_xstate_buf, -1);
+		else
+			fxrstor_checking(&init_xstate_buf->i387);
+	}
+}
+
 /*
  * FPU state switching for scheduling.
  *
@@ -354,7 +400,12 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 {
	fpu_switch_t fpu;
 
-	fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
+	/*
+	 * If the task has used the math, pre-load the FPU on xsave processors
+	 * or if the past 5 consecutive context-switches used math.
+	 */
+	fpu.preload = tsk_used_math(new) && (use_eager_fpu() ||
+					     new->fpu_counter > 5);
	if (__thread_has_fpu(old)) {
		if (!__save_init_fpu(old))
			cpu = ~0;
@@ -366,14 +417,14 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
			new->fpu_counter++;
			__thread_set_has_fpu(new);
			prefetch(new->thread.fpu.state);
-		} else
+		} else if (!use_eager_fpu())
			stts();
	} else {
		old->fpu_counter = 0;
		old->thread.fpu.last_cpu = ~0;
		if (fpu.preload) {
			new->fpu_counter++;
-			if (fpu_lazy_restore(new, cpu))
+			if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
				fpu.preload = 0;
			else
				prefetch(new->thread.fpu.state);
@@ -393,44 +444,40 @@ static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
 {
	if (fpu.preload) {
		if (unlikely(restore_fpu_checking(new)))
-			__thread_fpu_end(new);
+			drop_init_fpu(new);
	}
 }
 
 /*
  * Signal frame handlers...
  */
-extern int save_i387_xstate(void __user *buf);
-extern int restore_i387_xstate(void __user *buf);
+extern int save_xstate_sig(void __user *buf, void __user *fx, int size);
+extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size);
 
-static inline void __clear_fpu(struct task_struct *tsk)
+static inline int xstate_sigframe_size(void)
 {
-	if (__thread_has_fpu(tsk)) {
-		/* Ignore delayed exceptions from user space */
-		asm volatile("1: fwait\n"
-			     "2:\n"
-			     _ASM_EXTABLE(1b, 2b));
-		__thread_fpu_end(tsk);
+	return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
+}
+
+static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
+{
+	void __user *buf_fx = buf;
+	int size = xstate_sigframe_size();
+
+	if (ia32_frame && use_fxsr()) {
+		buf_fx = buf + sizeof(struct i387_fsave_struct);
+		size += sizeof(struct i387_fsave_struct);
	}
+
+	return __restore_xstate_sig(buf, buf_fx, size);
 }
 
 /*
- * The actual user_fpu_begin/end() functions
- * need to be preemption-safe.
+ * Need to be preemption-safe.
  *
- * NOTE! user_fpu_end() must be used only after you
- * have saved the FP state, and user_fpu_begin() must
- * be used only immediately before restoring it.
- * These functions do not do any save/restore on
- * their own.
+ * NOTE! user_fpu_begin() must be used only immediately before restoring
+ * it. This function does not do any save/restore on their own.
  */
-static inline void user_fpu_end(void)
-{
-	preempt_disable();
-	__thread_fpu_end(current);
-	preempt_enable();
-}
-
 static inline void user_fpu_begin(void)
 {
	preempt_disable();
@@ -439,25 +486,32 @@ static inline void user_fpu_begin(void)
	preempt_enable();
 }
 
+static inline void __save_fpu(struct task_struct *tsk)
+{
+	if (use_xsave())
+		xsave_state(&tsk->thread.fpu.state->xsave, -1);
+	else
+		fpu_fxsave(&tsk->thread.fpu);
+}
+
 /*
  * These disable preemption on their own and are safe
  */
 static inline void save_init_fpu(struct task_struct *tsk)
 {
	WARN_ON_ONCE(!__thread_has_fpu(tsk));
+
+	if (use_eager_fpu()) {
+		__save_fpu(tsk);
+		return;
+	}
+
	preempt_disable();
	__save_init_fpu(tsk);
	__thread_fpu_end(tsk);
	preempt_enable();
 }
 
-static inline void clear_fpu(struct task_struct *tsk)
-{
-	preempt_disable();
-	__clear_fpu(tsk);
-	preempt_enable();
-}
-
 /*
  * i387 state interaction
  */
@@ -512,11 +566,34 @@ static inline void fpu_free(struct fpu *fpu)
	}
 }
 
-static inline void fpu_copy(struct fpu *dst, struct fpu *src)
+static inline void fpu_copy(struct task_struct *dst, struct task_struct *src)
 {
-	memcpy(dst->state, src->state, xstate_size);
+	if (use_eager_fpu()) {
+		memset(&dst->thread.fpu.state->xsave, 0, xstate_size);
+		__save_fpu(dst);
+	} else {
+		struct fpu *dfpu = &dst->thread.fpu;
+		struct fpu *sfpu = &src->thread.fpu;
+
+		unlazy_fpu(src);
+		memcpy(dfpu->state, sfpu->state, xstate_size);
+	}
 }
 
-extern void fpu_finit(struct fpu *fpu);
+static inline unsigned long
+alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
+		unsigned long *size)
+{
+	unsigned long frame_size = xstate_sigframe_size();
+
+	*buf_fx = sp = round_down(sp - frame_size, 64);
+	if (ia32_frame && use_fxsr()) {
+		frame_size += sizeof(struct i387_fsave_struct);
+		sp -= sizeof(struct i387_fsave_struct);
+	}
+
+	*size = frame_size;
+	return sp;
+}
 
 #endif
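
To see what the new helpers above boil down to, consider fsave_user(fx):
user_insn() wraps a single instruction in the STAC/CLAC window required by
SMAP and attaches an exception-table fixup, so a fault while touching user
memory yields -1 instead of an oops. A rough hand expansion (for illustration
only, not compiler output):

	int err;
	asm volatile(ASM_STAC "\n"		/* SMAP: permit user accesses */
		     "1: fnsave %[fx]; fwait\n\t"
		     "2: " ASM_CLAC "\n"	/* SMAP: forbid user accesses */
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"	/* runs only if 1: faults */
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)	/* fault at 1b resumes at 3b */
		     : [err] "=r" (err), [fx] "=m" (*fx)
		     : "0" (0), "m" (*fx));
	/* err is 0 on success, -1 if the user page was inaccessible */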
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 257d9cca214f..ed8089d69094 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -19,12 +19,37 @@ struct pt_regs;
 struct user_i387_struct;
 
 extern int init_fpu(struct task_struct *child);
+extern void fpu_finit(struct fpu *fpu);
 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
 extern void math_state_restore(void);
 
 extern bool irq_fpu_usable(void);
-extern void kernel_fpu_begin(void);
-extern void kernel_fpu_end(void);
+
+/*
+ * Careful: __kernel_fpu_begin/end() must be called with preempt disabled
+ * and they don't touch the preempt state on their own.
+ * If you enable preemption after __kernel_fpu_begin(), preempt notifier
+ * should call the __kernel_fpu_end() to prevent the kernel/user FPU
+ * state from getting corrupted. KVM for example uses this model.
+ *
+ * All other cases use kernel_fpu_begin/end() which disable preemption
+ * during kernel FPU usage.
+ */
+extern void __kernel_fpu_begin(void);
+extern void __kernel_fpu_end(void);
+
+static inline void kernel_fpu_begin(void)
+{
+	WARN_ON_ONCE(!irq_fpu_usable());
+	preempt_disable();
+	__kernel_fpu_begin();
+}
+
+static inline void kernel_fpu_end(void)
+{
+	__kernel_fpu_end();
+	preempt_enable();
+}
 
 /*
  * Some instructions like VIA's padlock instructions generate a spurious
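
The comment block added above distinguishes two calling conventions;
schematically (hypothetical callers, for illustration only):

	/* Common case: kernel_fpu_begin/end() manage preemption. */
	kernel_fpu_begin();	/* warns if !irq_fpu_usable(), then preempt_disable() */
	/* ... use FPU/SSE/AVX instructions ... */
	kernel_fpu_end();	/* preempt_enable() */

	/* KVM-style case: the caller owns preemption and relies on a
	 * preempt notifier to call __kernel_fpu_end() if the task is
	 * scheduled out while the FPU is in kernel use. */
	preempt_disable();
	__kernel_fpu_begin();
	/* ... FPU use ... */
	__kernel_fpu_end();
	preempt_enable();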
diff --git a/arch/x86/include/asm/iommu_table.h b/arch/x86/include/asm/iommu_table.h
index f229b13a5f30..f42a04735a0a 100644
--- a/arch/x86/include/asm/iommu_table.h
+++ b/arch/x86/include/asm/iommu_table.h
@@ -48,7 +48,7 @@ struct iommu_table_entry {
 
 
 #define __IOMMU_INIT(_detect, _depend, _early_init, _late_init, _finish)\
-	static const struct iommu_table_entry const			\
+	static const struct iommu_table_entry				\
		__iommu_entry_##_detect __used				\
	__attribute__ ((unused, __section__(".iommu_table"),		\
			aligned((sizeof(void *)))))			\
@@ -63,10 +63,10 @@ struct iommu_table_entry {
  * to stop detecting the other IOMMUs after yours has been detected.
  */
 #define IOMMU_INIT_POST(_detect)					\
-	__IOMMU_INIT(_detect, pci_swiotlb_detect_4gb,  0, 0, 0)
+	__IOMMU_INIT(_detect, pci_swiotlb_detect_4gb,  NULL, NULL, 0)
 
 #define IOMMU_INIT_POST_FINISH(detect)					\
-	__IOMMU_INIT(_detect, pci_swiotlb_detect_4gb,  0, 0, 1)
+	__IOMMU_INIT(_detect, pci_swiotlb_detect_4gb,  NULL, NULL, 1)
 
 /*
  * A more sophisticated version of IOMMU_INIT. This variant requires:
diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
index 598457cbd0f8..323973f4abf1 100644
--- a/arch/x86/include/asm/signal.h
+++ b/arch/x86/include/asm/signal.h
@@ -31,6 +31,10 @@ typedef struct {
	unsigned long sig[_NSIG_WORDS];
 } sigset_t;
 
+#ifndef CONFIG_COMPAT
+typedef sigset_t compat_sigset_t;
+#endif
+
 #else
 /* Here we must cater to libcs that poke about in kernel headers. */
 
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index 3fda9db48819..4ca1c611b552 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -40,7 +40,7 @@ asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *,
			       struct old_sigaction32 __user *);
 asmlinkage long sys32_alarm(unsigned int);
 
-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
 asmlinkage long sys32_sysfs(int, u32, u32);
 
 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index bb0522850b74..fddb53d63915 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -11,7 +11,8 @@ extern const char VDSO32_PRELINK[];
 #define VDSO32_SYMBOL(base, name)					\
 ({									\
	extern const char VDSO32_##name[];				\
-	(void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
+	(void __user *)(VDSO32_##name - VDSO32_PRELINK +		\
+			(unsigned long)(base));				\
 })
 #endif
 
diff --git a/arch/x86/include/asm/xor_32.h b/arch/x86/include/asm/xor_32.h
index 454570891bdc..aabd5850bdb9 100644
--- a/arch/x86/include/asm/xor_32.h
+++ b/arch/x86/include/asm/xor_32.h
@@ -534,38 +534,6 @@ static struct xor_block_template xor_block_p5_mmx = {
  * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
  */
 
-#define XMMS_SAVE				\
-do {						\
-	preempt_disable();			\
-	cr0 = read_cr0();			\
-	clts();					\
-	asm volatile(				\
-		"movups %%xmm0,(%0)	;\n\t"	\
-		"movups %%xmm1,0x10(%0)	;\n\t"	\
-		"movups %%xmm2,0x20(%0)	;\n\t"	\
-		"movups %%xmm3,0x30(%0)	;\n\t"	\
-		:				\
-		: "r" (xmm_save)		\
-		: "memory");			\
-} while (0)
-
-#define XMMS_RESTORE				\
-do {						\
-	asm volatile(				\
-		"sfence			;\n\t"	\
-		"movups (%0),%%xmm0	;\n\t"	\
-		"movups 0x10(%0),%%xmm1	;\n\t"	\
-		"movups 0x20(%0),%%xmm2	;\n\t"	\
-		"movups 0x30(%0),%%xmm3	;\n\t"	\
-		:				\
-		: "r" (xmm_save)		\
-		: "memory");			\
-	write_cr0(cr0);				\
-	preempt_enable();			\
-} while (0)
-
-#define ALIGN16 __attribute__((aligned(16)))
-
 #define OFFS(x)		"16*("#x")"
 #define PF_OFFS(x)	"256+16*("#x")"
 #define	PF0(x)		"	prefetchnta "PF_OFFS(x)"(%1)		;\n"
@@ -587,10 +555,8 @@ static void
 xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
 {
	unsigned long lines = bytes >> 8;
-	char xmm_save[16*4] ALIGN16;
-	int cr0;
 
-	XMMS_SAVE;
+	kernel_fpu_begin();
 
	asm volatile(
 #undef BLOCK
@@ -633,7 +599,7 @@ xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
	      :
	      : "memory");
 
-	XMMS_RESTORE;
+	kernel_fpu_end();
 }
 
 static void
@@ -641,10 +607,8 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3)
 {
	unsigned long lines = bytes >> 8;
-	char xmm_save[16*4] ALIGN16;
-	int cr0;
 
-	XMMS_SAVE;
+	kernel_fpu_begin();
 
	asm volatile(
 #undef BLOCK
@@ -694,7 +658,7 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	      :
	      : "memory" );
 
-	XMMS_RESTORE;
+	kernel_fpu_end();
 }
 
 static void
@@ -702,10 +666,8 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3, unsigned long *p4)
 {
	unsigned long lines = bytes >> 8;
-	char xmm_save[16*4] ALIGN16;
-	int cr0;
 
-	XMMS_SAVE;
+	kernel_fpu_begin();
 
	asm volatile(
 #undef BLOCK
@@ -762,7 +724,7 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	      :
	      : "memory" );
 
-	XMMS_RESTORE;
+	kernel_fpu_end();
 }
 
 static void
@@ -770,10 +732,8 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3, unsigned long *p4, unsigned long *p5)
 {
	unsigned long lines = bytes >> 8;
-	char xmm_save[16*4] ALIGN16;
-	int cr0;
 
-	XMMS_SAVE;
+	kernel_fpu_begin();
 
	/* Make sure GCC forgets anything it knows about p4 or p5,
	   such that it won't pass to the asm volatile below a
@@ -850,7 +810,7 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	   like assuming they have some legal value. */
	asm("" : "=r" (p4), "=r" (p5));
 
-	XMMS_RESTORE;
+	kernel_fpu_end();
 }
 
 static struct xor_block_template xor_block_pIII_sse = {
diff --git a/arch/x86/include/asm/xor_64.h b/arch/x86/include/asm/xor_64.h
index b9b2323e90fe..5fc06d0b7eb5 100644
--- a/arch/x86/include/asm/xor_64.h
+++ b/arch/x86/include/asm/xor_64.h
@@ -34,41 +34,7 @@
  * no advantages to be gotten from x86-64 here anyways.
  */
 
-typedef struct {
-	unsigned long a, b;
-} __attribute__((aligned(16))) xmm_store_t;
-
-/* Doesn't use gcc to save the XMM registers, because there is no easy way to
-   tell it to do a clts before the register saving. */
-#define XMMS_SAVE				\
-do {						\
-	preempt_disable();			\
-	asm volatile(				\
-		"movq %%cr0,%0		;\n\t"	\
-		"clts			;\n\t"	\
-		"movups %%xmm0,(%1)	;\n\t"	\
-		"movups %%xmm1,0x10(%1)	;\n\t"	\
-		"movups %%xmm2,0x20(%1)	;\n\t"	\
-		"movups %%xmm3,0x30(%1)	;\n\t"	\
-		: "=&r" (cr0)			\
-		: "r" (xmm_save)		\
-		: "memory");			\
-} while (0)
-
-#define XMMS_RESTORE				\
-do {						\
-	asm volatile(				\
-		"sfence			;\n\t"	\
-		"movups (%1),%%xmm0	;\n\t"	\
-		"movups 0x10(%1),%%xmm1	;\n\t"	\
-		"movups 0x20(%1),%%xmm2	;\n\t"	\
-		"movups 0x30(%1),%%xmm3	;\n\t"	\
-		"movq %0,%%cr0		;\n\t"	\
-		:				\
-		: "r" (cr0), "r" (xmm_save)	\
-		: "memory");			\
-	preempt_enable();			\
-} while (0)
+#include <asm/i387.h>
 
 #define OFFS(x)		"16*("#x")"
 #define PF_OFFS(x)	"256+16*("#x")"
@@ -91,10 +57,8 @@ static void
 xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
 {
	unsigned int lines = bytes >> 8;
-	unsigned long cr0;
-	xmm_store_t xmm_save[4];
 
-	XMMS_SAVE;
+	kernel_fpu_begin();
 
	asm volatile(
 #undef BLOCK
@@ -135,7 +99,7 @@ xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
	: [inc] "r" (256UL)
	: "memory");
 
-	XMMS_RESTORE;
+	kernel_fpu_end();
 }
 
 static void
@@ -143,11 +107,8 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3)
 {
	unsigned int lines = bytes >> 8;
-	xmm_store_t xmm_save[4];
-	unsigned long cr0;
-
-	XMMS_SAVE;
 
+	kernel_fpu_begin();
	asm volatile(
 #undef BLOCK
 #define BLOCK(i) \
@@ -194,7 +155,7 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
	: [inc] "r" (256UL)
	: "memory");
-	XMMS_RESTORE;
+	kernel_fpu_end();
 }
 
 static void
@@ -202,10 +163,8 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3, unsigned long *p4)
 {
	unsigned int lines = bytes >> 8;
-	xmm_store_t xmm_save[4];
-	unsigned long cr0;
 
-	XMMS_SAVE;
+	kernel_fpu_begin();
 
	asm volatile(
 #undef BLOCK
@@ -261,7 +220,7 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	: [inc] "r" (256UL)
	: "memory" );
 
-	XMMS_RESTORE;
+	kernel_fpu_end();
 }
 
 static void
@@ -269,10 +228,8 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3, unsigned long *p4, unsigned long *p5)
 {
	unsigned int lines = bytes >> 8;
-	xmm_store_t xmm_save[4];
-	unsigned long cr0;
 
-	XMMS_SAVE;
+	kernel_fpu_begin();
 
	asm volatile(
 #undef BLOCK
@@ -336,7 +293,7 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	: [inc] "r" (256UL)
	: "memory");
 
-	XMMS_RESTORE;
+	kernel_fpu_end();
 }
 
 static struct xor_block_template xor_block_sse = {
diff --git a/arch/x86/include/asm/xor_avx.h b/arch/x86/include/asm/xor_avx.h
index 2510d35f480e..7ea79c5fa1f2 100644
--- a/arch/x86/include/asm/xor_avx.h
+++ b/arch/x86/include/asm/xor_avx.h
@@ -20,32 +20,6 @@
 #include <linux/compiler.h>
 #include <asm/i387.h>
 
-#define ALIGN32 __aligned(32)
-
-#define YMM_SAVED_REGS 4
-
-#define YMMS_SAVE \
-do { \
-	preempt_disable(); \
-	cr0 = read_cr0(); \
-	clts(); \
-	asm volatile("vmovaps %%ymm0, %0" : "=m" (ymm_save[0]) : : "memory"); \
-	asm volatile("vmovaps %%ymm1, %0" : "=m" (ymm_save[32]) : : "memory"); \
-	asm volatile("vmovaps %%ymm2, %0" : "=m" (ymm_save[64]) : : "memory"); \
-	asm volatile("vmovaps %%ymm3, %0" : "=m" (ymm_save[96]) : : "memory"); \
-} while (0);
-
-#define YMMS_RESTORE \
-do { \
-	asm volatile("sfence" : : : "memory"); \
-	asm volatile("vmovaps %0, %%ymm3" : : "m" (ymm_save[96])); \
-	asm volatile("vmovaps %0, %%ymm2" : : "m" (ymm_save[64])); \
-	asm volatile("vmovaps %0, %%ymm1" : : "m" (ymm_save[32])); \
-	asm volatile("vmovaps %0, %%ymm0" : : "m" (ymm_save[0])); \
-	write_cr0(cr0); \
-	preempt_enable(); \
-} while (0);
-
 #define BLOCK4(i) \
		BLOCK(32 * i, 0) \
		BLOCK(32 * (i + 1), 1) \
@@ -60,10 +34,9 @@ do { \
 
 static void xor_avx_2(unsigned long bytes, unsigned long *p0, unsigned long *p1)
 {
-	unsigned long cr0, lines = bytes >> 9;
-	char ymm_save[32 * YMM_SAVED_REGS] ALIGN32;
+	unsigned long lines = bytes >> 9;
 
-	YMMS_SAVE
+	kernel_fpu_begin();
 
	while (lines--) {
 #undef BLOCK
@@ -82,16 +55,15 @@ do { \
		p1 = (unsigned long *)((uintptr_t)p1 + 512);
	}
 
-	YMMS_RESTORE
+	kernel_fpu_end();
 }
 
 static void xor_avx_3(unsigned long bytes, unsigned long *p0, unsigned long *p1,
	unsigned long *p2)
 {
-	unsigned long cr0, lines = bytes >> 9;
-	char ymm_save[32 * YMM_SAVED_REGS] ALIGN32;
+	unsigned long lines = bytes >> 9;
 
-	YMMS_SAVE
+	kernel_fpu_begin();
 
	while (lines--) {
 #undef BLOCK
@@ -113,16 +85,15 @@ do { \
		p2 = (unsigned long *)((uintptr_t)p2 + 512);
	}
 
-	YMMS_RESTORE
+	kernel_fpu_end();
 }
 
 static void xor_avx_4(unsigned long bytes, unsigned long *p0, unsigned long *p1,
	unsigned long *p2, unsigned long *p3)
 {
-	unsigned long cr0, lines = bytes >> 9;
-	char ymm_save[32 * YMM_SAVED_REGS] ALIGN32;
+	unsigned long lines = bytes >> 9;
 
-	YMMS_SAVE
+	kernel_fpu_begin();
 
	while (lines--) {
 #undef BLOCK
@@ -147,16 +118,15 @@ do { \
		p3 = (unsigned long *)((uintptr_t)p3 + 512);
	}
 
-	YMMS_RESTORE
+	kernel_fpu_end();
 }
 
 static void xor_avx_5(unsigned long bytes, unsigned long *p0, unsigned long *p1,
	unsigned long *p2, unsigned long *p3, unsigned long *p4)
 {
-	unsigned long cr0, lines = bytes >> 9;
-	char ymm_save[32 * YMM_SAVED_REGS] ALIGN32;
+	unsigned long lines = bytes >> 9;
 
-	YMMS_SAVE
+	kernel_fpu_begin();
 
	while (lines--) {
 #undef BLOCK
@@ -184,7 +154,7 @@ do { \
		p4 = (unsigned long *)((uintptr_t)p4 + 512);
	}
 
-	YMMS_RESTORE
+	kernel_fpu_end();
 }
 
 static struct xor_block_template xor_block_avx = {
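
The three xor_*.h files above receive the same mechanical conversion: the
hand-rolled register spill/restore around each SIMD loop is replaced by the
generic FPU bracket. In outline (illustrative only):

	/* before: ~30 lines of open-coded bookkeeping per call site */
	preempt_disable();
	/* save %cr0, clts, dump %xmm0-3 (or %ymm0-3) to a stack buffer */
	/* ... SIMD xor loop ... */
	/* sfence, reload the registers, restore %cr0 */
	preempt_enable();

	/* after: the FPU core saves and restores whatever is live */
	kernel_fpu_begin();
	/* ... SIMD xor loop ... */
	kernel_fpu_end();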
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 2a923bd54341..0415cdabb5a6 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -34,17 +34,14 @@
 extern unsigned int xstate_size;
 extern u64 pcntxt_mask;
 extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
+extern struct xsave_struct *init_xstate_buf;
 
 extern void xsave_init(void);
 extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
 extern int init_fpu(struct task_struct *child);
-extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
-			    void __user *fpstate,
-			    struct _fpx_sw_bytes *sw);
 
-static inline int fpu_xrstor_checking(struct fpu *fpu)
+static inline int fpu_xrstor_checking(struct xsave_struct *fx)
 {
-	struct xsave_struct *fx = &fpu->state->xsave;
	int err;
 
	asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
@@ -69,8 +66,7 @@ static inline int xsave_user(struct xsave_struct __user *buf)
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
-	err = __clear_user(&buf->xsave_hdr,
-			   sizeof(struct xsave_hdr_struct));
+	err = __clear_user(&buf->xsave_hdr, sizeof(buf->xsave_hdr));
	if (unlikely(err))
		return -EFAULT;
 
@@ -85,9 +81,6 @@ static inline int xsave_user(struct xsave_struct __user *buf)
		     : [err] "=r" (err)
		     : "D" (buf), "a" (-1), "d" (-1), "0" (0)
		     : "memory");
-	if (unlikely(err) && __clear_user(buf, xstate_size))
-		err = -EFAULT;
-	/* No need to clear here because the caller clears USED_MATH */
	return err;
 }
 
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index c97bb7b5a9f8..d0e910da16c5 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -165,10 +165,15 @@ void __init check_bugs(void)
	print_cpu_info(&boot_cpu_data);
 #endif
	check_config();
-	check_fpu();
	check_hlt();
	check_popad();
	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();
+
+	/*
+	 * kernel_fpu_begin/end() in check_fpu() relies on the patched
+	 * alternative instructions.
+	 */
+	check_fpu();
 }
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 7d35d6594118..44aec5d4dfaf 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1324,7 +1324,6 @@ void __cpuinit cpu_init(void)
	dbg_restore_debug_regs();
 
	fpu_init();
-	xsave_init();
 
	raw_local_save_flags(kernel_eflags);
 
@@ -1379,6 +1378,5 @@ void __cpuinit cpu_init(void)
	dbg_restore_debug_regs();
 
	fpu_init();
-	xsave_init();
 }
 #endif
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index f250431fb505..675a05012449 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -19,24 +19,17 @@
19#include <asm/fpu-internal.h> 19#include <asm/fpu-internal.h>
20#include <asm/user.h> 20#include <asm/user.h>
21 21
22#ifdef CONFIG_X86_64
23# include <asm/sigcontext32.h>
24# include <asm/user32.h>
25#else
26# define save_i387_xstate_ia32 save_i387_xstate
27# define restore_i387_xstate_ia32 restore_i387_xstate
28# define _fpstate_ia32 _fpstate
29# define _xstate_ia32 _xstate
30# define sig_xstate_ia32_size sig_xstate_size
31# define fx_sw_reserved_ia32 fx_sw_reserved
32# define user_i387_ia32_struct user_i387_struct
33# define user32_fxsr_struct user_fxsr_struct
34#endif
35
36/* 22/*
37 * Were we in an interrupt that interrupted kernel mode? 23 * Were we in an interrupt that interrupted kernel mode?
38 * 24 *
39 * We can do a kernel_fpu_begin/end() pair *ONLY* if that 25 * For now, with eagerfpu we will return interrupted kernel FPU
26 * state as not-idle. TBD: Ideally we can change the return value
27 * to something like __thread_has_fpu(current). But we need to
28 * be careful of doing __thread_clear_has_fpu() before saving
29 * the FPU etc for supporting nested uses etc. For now, take
30 * the simple route!
31 *
32 * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
40 * pair does nothing at all: the thread must not have fpu (so 33 * pair does nothing at all: the thread must not have fpu (so
41 * that we don't try to save the FPU state), and TS must 34 * that we don't try to save the FPU state), and TS must
42 * be set (so that the clts/stts pair does nothing that is 35 * be set (so that the clts/stts pair does nothing that is
@@ -44,6 +37,9 @@
44 */ 37 */
45static inline bool interrupted_kernel_fpu_idle(void) 38static inline bool interrupted_kernel_fpu_idle(void)
46{ 39{
40 if (use_eager_fpu())
41 return 0;
42
47 return !__thread_has_fpu(current) && 43 return !__thread_has_fpu(current) &&
48 (read_cr0() & X86_CR0_TS); 44 (read_cr0() & X86_CR0_TS);
49} 45}
@@ -77,29 +73,29 @@ bool irq_fpu_usable(void)
77} 73}
78EXPORT_SYMBOL(irq_fpu_usable); 74EXPORT_SYMBOL(irq_fpu_usable);
79 75
80void kernel_fpu_begin(void) 76void __kernel_fpu_begin(void)
81{ 77{
82 struct task_struct *me = current; 78 struct task_struct *me = current;
83 79
84 WARN_ON_ONCE(!irq_fpu_usable());
85 preempt_disable();
86 if (__thread_has_fpu(me)) { 80 if (__thread_has_fpu(me)) {
87 __save_init_fpu(me); 81 __save_init_fpu(me);
88 __thread_clear_has_fpu(me); 82 __thread_clear_has_fpu(me);
89 /* We do 'stts()' in kernel_fpu_end() */ 83 /* We do 'stts()' in __kernel_fpu_end() */
90 } else { 84 } else if (!use_eager_fpu()) {
91 this_cpu_write(fpu_owner_task, NULL); 85 this_cpu_write(fpu_owner_task, NULL);
92 clts(); 86 clts();
93 } 87 }
94} 88}
95EXPORT_SYMBOL(kernel_fpu_begin); 89EXPORT_SYMBOL(__kernel_fpu_begin);
96 90
97void kernel_fpu_end(void) 91void __kernel_fpu_end(void)
98{ 92{
99 stts(); 93 if (use_eager_fpu())
100 preempt_enable(); 94 math_state_restore();
95 else
96 stts();
101} 97}
102EXPORT_SYMBOL(kernel_fpu_end); 98EXPORT_SYMBOL(__kernel_fpu_end);
103 99
104void unlazy_fpu(struct task_struct *tsk) 100void unlazy_fpu(struct task_struct *tsk)
105{ 101{
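The WARN_ON_ONCE() and preempt handling removed here do not disappear: the non-underscore kernel_fpu_begin()/end() presumably become inline wrappers in <asm/i387.h> (that header changes in the diffstat but its hunks are not shown). A sketch consistent with the lines deleted above:

    static inline void kernel_fpu_begin(void)
    {
            WARN_ON_ONCE(!irq_fpu_usable());
            preempt_disable();
            __kernel_fpu_begin();
    }

    static inline void kernel_fpu_end(void)
    {
            __kernel_fpu_end();
            preempt_enable();
    }

This split lets callers that already run with preemption disabled, such as KVM's vcpu load/put paths further down in this diff, use the __-variants directly.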
@@ -113,23 +109,15 @@ void unlazy_fpu(struct task_struct *tsk)
113} 109}
114EXPORT_SYMBOL(unlazy_fpu); 110EXPORT_SYMBOL(unlazy_fpu);
115 111
116#ifdef CONFIG_MATH_EMULATION 112unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
117# define HAVE_HWFP (boot_cpu_data.hard_math)
118#else
119# define HAVE_HWFP 1
120#endif
121
122static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
123unsigned int xstate_size; 113unsigned int xstate_size;
124EXPORT_SYMBOL_GPL(xstate_size); 114EXPORT_SYMBOL_GPL(xstate_size);
125unsigned int sig_xstate_ia32_size = sizeof(struct _fpstate_ia32);
126static struct i387_fxsave_struct fx_scratch __cpuinitdata; 115static struct i387_fxsave_struct fx_scratch __cpuinitdata;
127 116
128static void __cpuinit mxcsr_feature_mask_init(void) 117static void __cpuinit mxcsr_feature_mask_init(void)
129{ 118{
130 unsigned long mask = 0; 119 unsigned long mask = 0;
131 120
132 clts();
133 if (cpu_has_fxsr) { 121 if (cpu_has_fxsr) {
134 memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct)); 122 memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
135 asm volatile("fxsave %0" : : "m" (fx_scratch)); 123 asm volatile("fxsave %0" : : "m" (fx_scratch));
@@ -138,7 +126,6 @@ static void __cpuinit mxcsr_feature_mask_init(void)
138 mask = 0x0000ffbf; 126 mask = 0x0000ffbf;
139 } 127 }
140 mxcsr_feature_mask &= mask; 128 mxcsr_feature_mask &= mask;
141 stts();
142} 129}
143 130
144static void __cpuinit init_thread_xstate(void) 131static void __cpuinit init_thread_xstate(void)
@@ -192,9 +179,8 @@ void __cpuinit fpu_init(void)
192 init_thread_xstate(); 179 init_thread_xstate();
193 180
194 mxcsr_feature_mask_init(); 181 mxcsr_feature_mask_init();
195 /* clean state in init */ 182 xsave_init();
196 current_thread_info()->status = 0; 183 eager_fpu_init();
197 clear_used_math();
198} 184}
199 185
200void fpu_finit(struct fpu *fpu) 186void fpu_finit(struct fpu *fpu)
@@ -205,12 +191,7 @@ void fpu_finit(struct fpu *fpu)
205 } 191 }
206 192
207 if (cpu_has_fxsr) { 193 if (cpu_has_fxsr) {
208 struct i387_fxsave_struct *fx = &fpu->state->fxsave; 194 fx_finit(&fpu->state->fxsave);
209
210 memset(fx, 0, xstate_size);
211 fx->cwd = 0x37f;
212 if (cpu_has_xmm)
213 fx->mxcsr = MXCSR_DEFAULT;
214 } else { 195 } else {
215 struct i387_fsave_struct *fp = &fpu->state->fsave; 196 struct i387_fsave_struct *fp = &fpu->state->fsave;
216 memset(fp, 0, xstate_size); 197 memset(fp, 0, xstate_size);
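The open-coded fxsave initialization deleted here is factored into fx_finit(); reconstructing it from the removed lines, the helper (which lands in fpu-internal.h, not shown in this diff) is presumably:

    static inline void fx_finit(struct i387_fxsave_struct *fx)
    {
            memset(fx, 0, xstate_size);
            fx->cwd = 0x37f;                        /* x87 default control word */
            if (cpu_has_xmm)
                    fx->mxcsr = MXCSR_DEFAULT;      /* SSE default control/status */
    }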
@@ -454,7 +435,7 @@ static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
454 * FXSR floating point environment conversions. 435 * FXSR floating point environment conversions.
455 */ 436 */
456 437
457static void 438void
458convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk) 439convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
459{ 440{
460 struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave; 441 struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
@@ -491,8 +472,8 @@ convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
491 memcpy(&to[i], &from[i], sizeof(to[0])); 472 memcpy(&to[i], &from[i], sizeof(to[0]));
492} 473}
493 474
494static void convert_to_fxsr(struct task_struct *tsk, 475void convert_to_fxsr(struct task_struct *tsk,
495 const struct user_i387_ia32_struct *env) 476 const struct user_i387_ia32_struct *env)
496 477
497{ 478{
498 struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave; 479 struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
@@ -589,223 +570,6 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
589} 570}
590 571
591/* 572/*
592 * Signal frame handlers.
593 */
594
595static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
596{
597 struct task_struct *tsk = current;
598 struct i387_fsave_struct *fp = &tsk->thread.fpu.state->fsave;
599
600 fp->status = fp->swd;
601 if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct)))
602 return -1;
603 return 1;
604}
605
606static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
607{
608 struct task_struct *tsk = current;
609 struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
610 struct user_i387_ia32_struct env;
611 int err = 0;
612
613 convert_from_fxsr(&env, tsk);
614 if (__copy_to_user(buf, &env, sizeof(env)))
615 return -1;
616
617 err |= __put_user(fx->swd, &buf->status);
618 err |= __put_user(X86_FXSR_MAGIC, &buf->magic);
619 if (err)
620 return -1;
621
622 if (__copy_to_user(&buf->_fxsr_env[0], fx, xstate_size))
623 return -1;
624 return 1;
625}
626
627static int save_i387_xsave(void __user *buf)
628{
629 struct task_struct *tsk = current;
630 struct _fpstate_ia32 __user *fx = buf;
631 int err = 0;
632
633
634 sanitize_i387_state(tsk);
635
636 /*
637 * For legacy compatible, we always set FP/SSE bits in the bit
638 * vector while saving the state to the user context.
639 * This will enable us capturing any changes(during sigreturn) to
640 * the FP/SSE bits by the legacy applications which don't touch
641 * xstate_bv in the xsave header.
642 *
643 * xsave aware applications can change the xstate_bv in the xsave
644 * header as well as change any contents in the memory layout.
645 * xrestore as part of sigreturn will capture all the changes.
646 */
647 tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
648
649 if (save_i387_fxsave(fx) < 0)
650 return -1;
651
652 err = __copy_to_user(&fx->sw_reserved, &fx_sw_reserved_ia32,
653 sizeof(struct _fpx_sw_bytes));
654 err |= __put_user(FP_XSTATE_MAGIC2,
655 (__u32 __user *) (buf + sig_xstate_ia32_size
656 - FP_XSTATE_MAGIC2_SIZE));
657 if (err)
658 return -1;
659
660 return 1;
661}
662
663int save_i387_xstate_ia32(void __user *buf)
664{
665 struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf;
666 struct task_struct *tsk = current;
667
668 if (!used_math())
669 return 0;
670
671 if (!access_ok(VERIFY_WRITE, buf, sig_xstate_ia32_size))
672 return -EACCES;
673 /*
674 * This will cause a "finit" to be triggered by the next
675 * attempted FPU operation by the 'current' process.
676 */
677 clear_used_math();
678
679 if (!HAVE_HWFP) {
680 return fpregs_soft_get(current, NULL,
681 0, sizeof(struct user_i387_ia32_struct),
682 NULL, fp) ? -1 : 1;
683 }
684
685 unlazy_fpu(tsk);
686
687 if (cpu_has_xsave)
688 return save_i387_xsave(fp);
689 if (cpu_has_fxsr)
690 return save_i387_fxsave(fp);
691 else
692 return save_i387_fsave(fp);
693}
694
695static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
696{
697 struct task_struct *tsk = current;
698
699 return __copy_from_user(&tsk->thread.fpu.state->fsave, buf,
700 sizeof(struct i387_fsave_struct));
701}
702
703static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
704 unsigned int size)
705{
706 struct task_struct *tsk = current;
707 struct user_i387_ia32_struct env;
708 int err;
709
710 err = __copy_from_user(&tsk->thread.fpu.state->fxsave, &buf->_fxsr_env[0],
711 size);
712 /* mxcsr reserved bits must be masked to zero for security reasons */
713 tsk->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
714 if (err || __copy_from_user(&env, buf, sizeof(env)))
715 return 1;
716 convert_to_fxsr(tsk, &env);
717
718 return 0;
719}
720
721static int restore_i387_xsave(void __user *buf)
722{
723 struct _fpx_sw_bytes fx_sw_user;
724 struct _fpstate_ia32 __user *fx_user =
725 ((struct _fpstate_ia32 __user *) buf);
726 struct i387_fxsave_struct __user *fx =
727 (struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0];
728 struct xsave_hdr_struct *xsave_hdr =
729 &current->thread.fpu.state->xsave.xsave_hdr;
730 u64 mask;
731 int err;
732
733 if (check_for_xstate(fx, buf, &fx_sw_user))
734 goto fx_only;
735
736 mask = fx_sw_user.xstate_bv;
737
738 err = restore_i387_fxsave(buf, fx_sw_user.xstate_size);
739
740 xsave_hdr->xstate_bv &= pcntxt_mask;
741 /*
742 * These bits must be zero.
743 */
744 xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0;
745
746 /*
747 * Init the state that is not present in the memory layout
748 * and enabled by the OS.
749 */
750 mask = ~(pcntxt_mask & ~mask);
751 xsave_hdr->xstate_bv &= mask;
752
753 return err;
754fx_only:
755 /*
756 * Couldn't find the extended state information in the memory
757 * layout. Restore the FP/SSE and init the other extended state
758 * enabled by the OS.
759 */
760 xsave_hdr->xstate_bv = XSTATE_FPSSE;
761 return restore_i387_fxsave(buf, sizeof(struct i387_fxsave_struct));
762}
763
764int restore_i387_xstate_ia32(void __user *buf)
765{
766 int err;
767 struct task_struct *tsk = current;
768 struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf;
769
770 if (HAVE_HWFP)
771 clear_fpu(tsk);
772
773 if (!buf) {
774 if (used_math()) {
775 clear_fpu(tsk);
776 clear_used_math();
777 }
778
779 return 0;
780 } else
781 if (!access_ok(VERIFY_READ, buf, sig_xstate_ia32_size))
782 return -EACCES;
783
784 if (!used_math()) {
785 err = init_fpu(tsk);
786 if (err)
787 return err;
788 }
789
790 if (HAVE_HWFP) {
791 if (cpu_has_xsave)
792 err = restore_i387_xsave(buf);
793 else if (cpu_has_fxsr)
794 err = restore_i387_fxsave(fp, sizeof(struct
795 i387_fxsave_struct));
796 else
797 err = restore_i387_fsave(fp);
798 } else {
799 err = fpregs_soft_set(current, NULL,
800 0, sizeof(struct user_i387_ia32_struct),
801 NULL, fp) != 0;
802 }
803 set_used_math();
804
805 return err;
806}
807
808/*
809 * FPU state for core dumps. 573 * FPU state for core dumps.
810 * This is only used for a.out dumps now. 574 * This is only used for a.out dumps now.
811 * It is declared generically using elf_fpregset_t (which is 575 * It is declared generically using elf_fpregset_t (which is
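The ia32 signal save/restore entry points deleted above are superseded by the unified helpers added to xsave.c later in this diff; the replacement surface, with signatures as they appear in the xsave.c hunks below, is:

    int save_xstate_sig(void __user *buf, void __user *buf_fx, int size);
    int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size);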
diff --git a/arch/x86/kernel/probe_roms.c b/arch/x86/kernel/probe_roms.c
index 0bc72e2069e3..d5f15c3f7b25 100644
--- a/arch/x86/kernel/probe_roms.c
+++ b/arch/x86/kernel/probe_roms.c
@@ -150,7 +150,7 @@ static struct resource *find_oprom(struct pci_dev *pdev)
150 return oprom; 150 return oprom;
151} 151}
152 152
153void *pci_map_biosrom(struct pci_dev *pdev) 153void __iomem *pci_map_biosrom(struct pci_dev *pdev)
154{ 154{
155 struct resource *oprom = find_oprom(pdev); 155 struct resource *oprom = find_oprom(pdev);
156 156
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index ef6a8456f719..dc3567e083f9 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -66,15 +66,13 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
66{ 66{
67 int ret; 67 int ret;
68 68
69 unlazy_fpu(src);
70
71 *dst = *src; 69 *dst = *src;
72 if (fpu_allocated(&src->thread.fpu)) { 70 if (fpu_allocated(&src->thread.fpu)) {
73 memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu)); 71 memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
74 ret = fpu_alloc(&dst->thread.fpu); 72 ret = fpu_alloc(&dst->thread.fpu);
75 if (ret) 73 if (ret)
76 return ret; 74 return ret;
77 fpu_copy(&dst->thread.fpu, &src->thread.fpu); 75 fpu_copy(dst, src);
78 } 76 }
79 return 0; 77 return 0;
80} 78}
@@ -97,16 +95,6 @@ void arch_task_cache_init(void)
97 SLAB_PANIC | SLAB_NOTRACK, NULL); 95 SLAB_PANIC | SLAB_NOTRACK, NULL);
98} 96}
99 97
100static inline void drop_fpu(struct task_struct *tsk)
101{
102 /*
103 * Forget coprocessor state..
104 */
105 tsk->fpu_counter = 0;
106 clear_fpu(tsk);
107 clear_used_math();
108}
109
110/* 98/*
111 * Free current thread data structures etc.. 99 * Free current thread data structures etc..
112 */ 100 */
@@ -163,7 +151,13 @@ void flush_thread(void)
163 151
164 flush_ptrace_hw_breakpoint(tsk); 152 flush_ptrace_hw_breakpoint(tsk);
165 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); 153 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
166 drop_fpu(tsk); 154 drop_init_fpu(tsk);
155 /*
156 * Free the FPU state for non xsave platforms. They get reallocated
157 * lazily at the first use.
158 */
159 if (!use_eager_fpu())
160 free_thread_xstate(tsk);
167} 161}
168 162
169static void hard_disable_TSC(void) 163static void hard_disable_TSC(void)
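flush_thread() now calls drop_init_fpu() instead of the local drop_fpu() removed above. drop_init_fpu() is defined in fpu-internal.h (changed by this merge, not shown); judging from its uses here and in traps.c below, it plausibly looks like this sketch, where the eager case reloads the init state so the registers never hold stale contents:

    static inline void drop_init_fpu(struct task_struct *tsk)
    {
            if (!use_eager_fpu()) {
                    drop_fpu(tsk);
            } else {
                    /* eager mode: registers must always be valid */
                    if (use_xsave())
                            xrstor_state(init_xstate_buf, -1);
                    else
                            fxrstor_checking(&init_xstate_buf->i387);
            }
    }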
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 516fa186121b..b9ff83c7135b 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -190,10 +190,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
190 regs->cs = __USER_CS; 190 regs->cs = __USER_CS;
191 regs->ip = new_ip; 191 regs->ip = new_ip;
192 regs->sp = new_sp; 192 regs->sp = new_sp;
193 /*
194 * Free the old FP and other extended state
195 */
196 free_thread_xstate(current);
197} 193}
198EXPORT_SYMBOL_GPL(start_thread); 194EXPORT_SYMBOL_GPL(start_thread);
199 195
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 0a980c9d7cb8..8a6d20ce1978 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -232,10 +232,6 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
232 regs->cs = _cs; 232 regs->cs = _cs;
233 regs->ss = _ss; 233 regs->ss = _ss;
234 regs->flags = X86_EFLAGS_IF; 234 regs->flags = X86_EFLAGS_IF;
235 /*
236 * Free the old FP and other extended state
237 */
238 free_thread_xstate(current);
239} 235}
240 236
241void 237void
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index c4c6a5c2bf0f..861a9d1a463d 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1332,9 +1332,6 @@ static const struct user_regset_view user_x86_64_view = {
1332#define genregs32_get genregs_get 1332#define genregs32_get genregs_get
1333#define genregs32_set genregs_set 1333#define genregs32_set genregs_set
1334 1334
1335#define user_i387_ia32_struct user_i387_struct
1336#define user32_fxsr_struct user_fxsr_struct
1337
1338#endif /* CONFIG_X86_64 */ 1335#endif /* CONFIG_X86_64 */
1339 1336
1340#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION 1337#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 932612887e92..036bddb46236 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -118,7 +118,7 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
118 get_user_ex(*pax, &sc->ax); 118 get_user_ex(*pax, &sc->ax);
119 } get_user_catch(err); 119 } get_user_catch(err);
120 120
121 err |= restore_i387_xstate(buf); 121 err |= restore_xstate_sig(buf, config_enabled(CONFIG_X86_32));
122 122
123 return err; 123 return err;
124} 124}
@@ -207,35 +207,32 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
207 void __user **fpstate) 207 void __user **fpstate)
208{ 208{
209 /* Default to using normal stack */ 209 /* Default to using normal stack */
210 unsigned long math_size = 0;
210 unsigned long sp = regs->sp; 211 unsigned long sp = regs->sp;
212 unsigned long buf_fx = 0;
211 int onsigstack = on_sig_stack(sp); 213 int onsigstack = on_sig_stack(sp);
212 214
213#ifdef CONFIG_X86_64
214 /* redzone */ 215 /* redzone */
215 sp -= 128; 216 if (config_enabled(CONFIG_X86_64))
216#endif /* CONFIG_X86_64 */ 217 sp -= 128;
217 218
218 if (!onsigstack) { 219 if (!onsigstack) {
219 /* This is the X/Open sanctioned signal stack switching. */ 220 /* This is the X/Open sanctioned signal stack switching. */
220 if (ka->sa.sa_flags & SA_ONSTACK) { 221 if (ka->sa.sa_flags & SA_ONSTACK) {
221 if (current->sas_ss_size) 222 if (current->sas_ss_size)
222 sp = current->sas_ss_sp + current->sas_ss_size; 223 sp = current->sas_ss_sp + current->sas_ss_size;
223 } else { 224 } else if (config_enabled(CONFIG_X86_32) &&
224#ifdef CONFIG_X86_32 225 (regs->ss & 0xffff) != __USER_DS &&
225 /* This is the legacy signal stack switching. */ 226 !(ka->sa.sa_flags & SA_RESTORER) &&
226 if ((regs->ss & 0xffff) != __USER_DS && 227 ka->sa.sa_restorer) {
227 !(ka->sa.sa_flags & SA_RESTORER) && 228 /* This is the legacy signal stack switching. */
228 ka->sa.sa_restorer)
229 sp = (unsigned long) ka->sa.sa_restorer; 229 sp = (unsigned long) ka->sa.sa_restorer;
230#endif /* CONFIG_X86_32 */
231 } 230 }
232 } 231 }
233 232
234 if (used_math()) { 233 if (used_math()) {
235 sp -= sig_xstate_size; 234 sp = alloc_mathframe(sp, config_enabled(CONFIG_X86_32),
236#ifdef CONFIG_X86_64 235 &buf_fx, &math_size);
237 sp = round_down(sp, 64);
238#endif /* CONFIG_X86_64 */
239 *fpstate = (void __user *)sp; 236 *fpstate = (void __user *)sp;
240 } 237 }
241 238
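alloc_mathframe() replaces the open-coded "sp -= sig_xstate_size; round_down(sp, 64)" sequence. Its definition is not in this diff; a sketch consistent with how it is called here, assuming a helper xstate_sigframe_size() that returns the state size plus the MAGIC2 sentinel:

    static inline unsigned long
    alloc_mathframe(unsigned long sp, int ia32_frame,
                    unsigned long *buf_fx, unsigned long *size)
    {
            unsigned long frame_size = xstate_sigframe_size();

            *buf_fx = sp = round_down(sp - frame_size, 64);
            if (ia32_frame && use_fxsr()) {
                    frame_size += sizeof(struct i387_fsave_struct);
                    sp -= sizeof(struct i387_fsave_struct);
            }
            *size = frame_size;

            return sp;
    }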
@@ -248,8 +245,9 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
248 if (onsigstack && !likely(on_sig_stack(sp))) 245 if (onsigstack && !likely(on_sig_stack(sp)))
249 return (void __user *)-1L; 246 return (void __user *)-1L;
250 247
251 /* save i387 state */ 248 /* save i387 and extended state */
252 if (used_math() && save_i387_xstate(*fpstate) < 0) 249 if (used_math() &&
250 save_xstate_sig(*fpstate, (void __user *)buf_fx, math_size) < 0)
253 return (void __user *)-1L; 251 return (void __user *)-1L;
254 252
255 return (void __user *)sp; 253 return (void __user *)sp;
@@ -385,7 +383,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
385 */ 383 */
386 put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode); 384 put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
387 } put_user_catch(err); 385 } put_user_catch(err);
388 386
389 err |= copy_siginfo_to_user(&frame->info, info); 387 err |= copy_siginfo_to_user(&frame->info, info);
390 err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate, 388 err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
391 regs, set->sig[0]); 389 regs, set->sig[0]);
@@ -477,6 +475,75 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
477} 475}
478#endif /* CONFIG_X86_32 */ 476#endif /* CONFIG_X86_32 */
479 477
478static int x32_setup_rt_frame(int sig, struct k_sigaction *ka,
479 siginfo_t *info, compat_sigset_t *set,
480 struct pt_regs *regs)
481{
482#ifdef CONFIG_X86_X32_ABI
483 struct rt_sigframe_x32 __user *frame;
484 void __user *restorer;
485 int err = 0;
486 void __user *fpstate = NULL;
487
488 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
489
490 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
491 return -EFAULT;
492
493 if (ka->sa.sa_flags & SA_SIGINFO) {
494 if (copy_siginfo_to_user32(&frame->info, info))
495 return -EFAULT;
496 }
497
498 put_user_try {
499 /* Create the ucontext. */
500 if (cpu_has_xsave)
501 put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
502 else
503 put_user_ex(0, &frame->uc.uc_flags);
504 put_user_ex(0, &frame->uc.uc_link);
505 put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
506 put_user_ex(sas_ss_flags(regs->sp),
507 &frame->uc.uc_stack.ss_flags);
508 put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
509 put_user_ex(0, &frame->uc.uc__pad0);
510
511 if (ka->sa.sa_flags & SA_RESTORER) {
512 restorer = ka->sa.sa_restorer;
513 } else {
514 /* could use a vstub here */
515 restorer = NULL;
516 err |= -EFAULT;
517 }
518 put_user_ex(restorer, &frame->pretcode);
519 } put_user_catch(err);
520
521 err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
522 regs, set->sig[0]);
523 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
524
525 if (err)
526 return -EFAULT;
527
528 /* Set up registers for signal handler */
529 regs->sp = (unsigned long) frame;
530 regs->ip = (unsigned long) ka->sa.sa_handler;
531
532 /* We use the x32 calling convention here... */
533 regs->di = sig;
534 regs->si = (unsigned long) &frame->info;
535 regs->dx = (unsigned long) &frame->uc;
536
537 loadsegment(ds, __USER_DS);
538 loadsegment(es, __USER_DS);
539
540 regs->cs = __USER_CS;
541 regs->ss = __USER_DS;
542#endif /* CONFIG_X86_X32_ABI */
543
544 return 0;
545}
546
480#ifdef CONFIG_X86_32 547#ifdef CONFIG_X86_32
481/* 548/*
482 * Atomically swap in the new signal mask, and wait for a signal. 549 * Atomically swap in the new signal mask, and wait for a signal.
@@ -615,55 +682,22 @@ static int signr_convert(int sig)
615 return sig; 682 return sig;
616} 683}
617 684
618#ifdef CONFIG_X86_32
619
620#define is_ia32 1
621#define ia32_setup_frame __setup_frame
622#define ia32_setup_rt_frame __setup_rt_frame
623
624#else /* !CONFIG_X86_32 */
625
626#ifdef CONFIG_IA32_EMULATION
627#define is_ia32 test_thread_flag(TIF_IA32)
628#else /* !CONFIG_IA32_EMULATION */
629#define is_ia32 0
630#endif /* CONFIG_IA32_EMULATION */
631
632#ifdef CONFIG_X86_X32_ABI
633#define is_x32 test_thread_flag(TIF_X32)
634
635static int x32_setup_rt_frame(int sig, struct k_sigaction *ka,
636 siginfo_t *info, compat_sigset_t *set,
637 struct pt_regs *regs);
638#else /* !CONFIG_X86_X32_ABI */
639#define is_x32 0
640#endif /* CONFIG_X86_X32_ABI */
641
642int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
643 sigset_t *set, struct pt_regs *regs);
644int ia32_setup_frame(int sig, struct k_sigaction *ka,
645 sigset_t *set, struct pt_regs *regs);
646
647#endif /* CONFIG_X86_32 */
648
649static int 685static int
650setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 686setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
651 struct pt_regs *regs) 687 struct pt_regs *regs)
652{ 688{
653 int usig = signr_convert(sig); 689 int usig = signr_convert(sig);
654 sigset_t *set = sigmask_to_save(); 690 sigset_t *set = sigmask_to_save();
691 compat_sigset_t *cset = (compat_sigset_t *) set;
655 692
656 /* Set up the stack frame */ 693 /* Set up the stack frame */
657 if (is_ia32) { 694 if (is_ia32_frame()) {
658 if (ka->sa.sa_flags & SA_SIGINFO) 695 if (ka->sa.sa_flags & SA_SIGINFO)
659 return ia32_setup_rt_frame(usig, ka, info, set, regs); 696 return ia32_setup_rt_frame(usig, ka, info, cset, regs);
660 else 697 else
661 return ia32_setup_frame(usig, ka, set, regs); 698 return ia32_setup_frame(usig, ka, cset, regs);
662#ifdef CONFIG_X86_X32_ABI 699 } else if (is_x32_frame()) {
663 } else if (is_x32) { 700 return x32_setup_rt_frame(usig, ka, info, cset, regs);
664 return x32_setup_rt_frame(usig, ka, info,
665 (compat_sigset_t *)set, regs);
666#endif
667 } else { 701 } else {
668 return __setup_rt_frame(sig, ka, info, set, regs); 702 return __setup_rt_frame(sig, ka, info, set, regs);
669 } 703 }
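is_ia32_frame() and is_x32_frame() replace the is_ia32/is_x32 macro maze deleted above; they are presumably config_enabled()-based inlines in a shared header (not shown here). A sketch that matches the deleted logic:

    static inline int is_ia32_compat_frame(void)
    {
            return config_enabled(CONFIG_IA32_EMULATION) &&
                   test_thread_flag(TIF_IA32);
    }

    static inline int is_ia32_frame(void)
    {
            return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
    }

    static inline int is_x32_frame(void)
    {
            return config_enabled(CONFIG_X86_X32_ABI) &&
                   test_thread_flag(TIF_X32);
    }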
@@ -827,73 +861,6 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
827} 861}
828 862
829#ifdef CONFIG_X86_X32_ABI 863#ifdef CONFIG_X86_X32_ABI
830static int x32_setup_rt_frame(int sig, struct k_sigaction *ka,
831 siginfo_t *info, compat_sigset_t *set,
832 struct pt_regs *regs)
833{
834 struct rt_sigframe_x32 __user *frame;
835 void __user *restorer;
836 int err = 0;
837 void __user *fpstate = NULL;
838
839 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
840
841 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
842 return -EFAULT;
843
844 if (ka->sa.sa_flags & SA_SIGINFO) {
845 if (copy_siginfo_to_user32(&frame->info, info))
846 return -EFAULT;
847 }
848
849 put_user_try {
850 /* Create the ucontext. */
851 if (cpu_has_xsave)
852 put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
853 else
854 put_user_ex(0, &frame->uc.uc_flags);
855 put_user_ex(0, &frame->uc.uc_link);
856 put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
857 put_user_ex(sas_ss_flags(regs->sp),
858 &frame->uc.uc_stack.ss_flags);
859 put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
860 put_user_ex(0, &frame->uc.uc__pad0);
861
862 if (ka->sa.sa_flags & SA_RESTORER) {
863 restorer = ka->sa.sa_restorer;
864 } else {
865 /* could use a vstub here */
866 restorer = NULL;
867 err |= -EFAULT;
868 }
869 put_user_ex(restorer, &frame->pretcode);
870 } put_user_catch(err);
871
872 err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
873 regs, set->sig[0]);
874 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
875
876 if (err)
877 return -EFAULT;
878
879 /* Set up registers for signal handler */
880 regs->sp = (unsigned long) frame;
881 regs->ip = (unsigned long) ka->sa.sa_handler;
882
883 /* We use the x32 calling convention here... */
884 regs->di = sig;
885 regs->si = (unsigned long) &frame->info;
886 regs->dx = (unsigned long) &frame->uc;
887
888 loadsegment(ds, __USER_DS);
889 loadsegment(es, __USER_DS);
890
891 regs->cs = __USER_CS;
892 regs->ss = __USER_DS;
893
894 return 0;
895}
896
897asmlinkage long sys32_x32_rt_sigreturn(struct pt_regs *regs) 864asmlinkage long sys32_x32_rt_sigreturn(struct pt_regs *regs)
898{ 865{
899 struct rt_sigframe_x32 __user *frame; 866 struct rt_sigframe_x32 __user *frame;
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index b481341c9369..4f4aba0551b0 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -613,11 +613,12 @@ void math_state_restore(void)
613 } 613 }
614 614
615 __thread_fpu_begin(tsk); 615 __thread_fpu_begin(tsk);
616
616 /* 617 /*
 617 * Paranoid restore. Send a SIGSEGV if we fail to restore the state. 618 * Paranoid restore. Send a SIGSEGV if we fail to restore the state.
618 */ 619 */
619 if (unlikely(restore_fpu_checking(tsk))) { 620 if (unlikely(restore_fpu_checking(tsk))) {
620 __thread_fpu_end(tsk); 621 drop_init_fpu(tsk);
621 force_sig(SIGSEGV, tsk); 622 force_sig(SIGSEGV, tsk);
622 return; 623 return;
623 } 624 }
@@ -629,6 +630,8 @@ EXPORT_SYMBOL_GPL(math_state_restore);
629dotraplinkage void __kprobes 630dotraplinkage void __kprobes
630do_device_not_available(struct pt_regs *regs, long error_code) 631do_device_not_available(struct pt_regs *regs, long error_code)
631{ 632{
633 BUG_ON(use_eager_fpu());
634
632#ifdef CONFIG_MATH_EMULATION 635#ifdef CONFIG_MATH_EMULATION
633 if (read_cr0() & X86_CR0_EM) { 636 if (read_cr0() & X86_CR0_EM) {
634 struct math_emu_info info = { }; 637 struct math_emu_info info = { };
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 3d3e20709119..4e89b3dd408d 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -10,9 +10,7 @@
10#include <linux/compat.h> 10#include <linux/compat.h>
11#include <asm/i387.h> 11#include <asm/i387.h>
12#include <asm/fpu-internal.h> 12#include <asm/fpu-internal.h>
13#ifdef CONFIG_IA32_EMULATION 13#include <asm/sigframe.h>
14#include <asm/sigcontext32.h>
15#endif
16#include <asm/xcr.h> 14#include <asm/xcr.h>
17 15
18/* 16/*
@@ -23,13 +21,9 @@ u64 pcntxt_mask;
23/* 21/*
24 * Represents init state for the supported extended state. 22 * Represents init state for the supported extended state.
25 */ 23 */
26static struct xsave_struct *init_xstate_buf; 24struct xsave_struct *init_xstate_buf;
27
28struct _fpx_sw_bytes fx_sw_reserved;
29#ifdef CONFIG_IA32_EMULATION
30struct _fpx_sw_bytes fx_sw_reserved_ia32;
31#endif
32 25
26static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;
33static unsigned int *xstate_offsets, *xstate_sizes, xstate_features; 27static unsigned int *xstate_offsets, *xstate_sizes, xstate_features;
34 28
35/* 29/*
@@ -44,9 +38,9 @@ static unsigned int *xstate_offsets, *xstate_sizes, xstate_features;
44 */ 38 */
45void __sanitize_i387_state(struct task_struct *tsk) 39void __sanitize_i387_state(struct task_struct *tsk)
46{ 40{
47 u64 xstate_bv;
48 int feature_bit = 0x2;
49 struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave; 41 struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
42 int feature_bit = 0x2;
43 u64 xstate_bv;
50 44
51 if (!fx) 45 if (!fx)
52 return; 46 return;
@@ -104,213 +98,326 @@ void __sanitize_i387_state(struct task_struct *tsk)
104 * Check for the presence of extended state information in the 98 * Check for the presence of extended state information in the
105 * user fpstate pointer in the sigcontext. 99 * user fpstate pointer in the sigcontext.
106 */ 100 */
107int check_for_xstate(struct i387_fxsave_struct __user *buf, 101static inline int check_for_xstate(struct i387_fxsave_struct __user *buf,
108 void __user *fpstate, 102 void __user *fpstate,
109 struct _fpx_sw_bytes *fx_sw_user) 103 struct _fpx_sw_bytes *fx_sw)
110{ 104{
111 int min_xstate_size = sizeof(struct i387_fxsave_struct) + 105 int min_xstate_size = sizeof(struct i387_fxsave_struct) +
112 sizeof(struct xsave_hdr_struct); 106 sizeof(struct xsave_hdr_struct);
113 unsigned int magic2; 107 unsigned int magic2;
114 int err;
115 108
116 err = __copy_from_user(fx_sw_user, &buf->sw_reserved[0], 109 if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw)))
117 sizeof(struct _fpx_sw_bytes)); 110 return -1;
118 if (err)
119 return -EFAULT;
120 111
121 /* 112 /* Check for the first magic field and other error scenarios. */
122 * First Magic check failed. 113 if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
123 */ 114 fx_sw->xstate_size < min_xstate_size ||
124 if (fx_sw_user->magic1 != FP_XSTATE_MAGIC1) 115 fx_sw->xstate_size > xstate_size ||
125 return -EINVAL; 116 fx_sw->xstate_size > fx_sw->extended_size)
117 return -1;
126 118
127 /* 119 /*
128 * Check for error scenarios.
129 */
130 if (fx_sw_user->xstate_size < min_xstate_size ||
131 fx_sw_user->xstate_size > xstate_size ||
132 fx_sw_user->xstate_size > fx_sw_user->extended_size)
133 return -EINVAL;
134
135 err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
136 fx_sw_user->extended_size -
137 FP_XSTATE_MAGIC2_SIZE));
138 if (err)
139 return err;
140 /*
141 * Check for the presence of second magic word at the end of memory 120 * Check for the presence of second magic word at the end of memory
142 * layout. This detects the case where the user just copied the legacy 121 * layout. This detects the case where the user just copied the legacy
 143 * fpstate layout without copying the extended state information 122 * fpstate layout without copying the extended state information
144 * in the memory layout. 123 * in the memory layout.
145 */ 124 */
146 if (magic2 != FP_XSTATE_MAGIC2) 125 if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size))
147 return -EFAULT; 126 || magic2 != FP_XSTATE_MAGIC2)
127 return -1;
148 128
149 return 0; 129 return 0;
150} 130}
151 131
152#ifdef CONFIG_X86_64
153/* 132/*
154 * Signal frame handlers. 133 * Signal frame handlers.
155 */ 134 */
156 135static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
157int save_i387_xstate(void __user *buf)
158{ 136{
159 struct task_struct *tsk = current; 137 if (use_fxsr()) {
160 int err = 0; 138 struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
161 139 struct user_i387_ia32_struct env;
162 if (!access_ok(VERIFY_WRITE, buf, sig_xstate_size)) 140 struct _fpstate_ia32 __user *fp = buf;
163 return -EACCES;
164 141
165 BUG_ON(sig_xstate_size < xstate_size); 142 convert_from_fxsr(&env, tsk);
166 143
167 if ((unsigned long)buf % 64) 144 if (__copy_to_user(buf, &env, sizeof(env)) ||
168 pr_err("%s: bad fpstate %p\n", __func__, buf); 145 __put_user(xsave->i387.swd, &fp->status) ||
169 146 __put_user(X86_FXSR_MAGIC, &fp->magic))
170 if (!used_math()) 147 return -1;
171 return 0;
172
173 if (user_has_fpu()) {
174 if (use_xsave())
175 err = xsave_user(buf);
176 else
177 err = fxsave_user(buf);
178
179 if (err)
180 return err;
181 user_fpu_end();
182 } else { 148 } else {
183 sanitize_i387_state(tsk); 149 struct i387_fsave_struct __user *fp = buf;
184 if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave, 150 u32 swd;
185 xstate_size)) 151 if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status))
186 return -1; 152 return -1;
187 } 153 }
188 154
189 clear_used_math(); /* trigger finit */ 155 return 0;
156}
190 157
191 if (use_xsave()) { 158static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
192 struct _fpstate __user *fx = buf; 159{
193 struct _xstate __user *x = buf; 160 struct xsave_struct __user *x = buf;
194 u64 xstate_bv; 161 struct _fpx_sw_bytes *sw_bytes;
162 u32 xstate_bv;
163 int err;
195 164
196 err = __copy_to_user(&fx->sw_reserved, &fx_sw_reserved, 165 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
197 sizeof(struct _fpx_sw_bytes)); 166 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
167 err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
198 168
199 err |= __put_user(FP_XSTATE_MAGIC2, 169 if (!use_xsave())
200 (__u32 __user *) (buf + sig_xstate_size 170 return err;
201 - FP_XSTATE_MAGIC2_SIZE));
202 171
203 /* 172 err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
204 * Read the xstate_bv which we copied (directly from the cpu or
205 * from the state in task struct) to the user buffers and
206 * set the FP/SSE bits.
207 */
208 err |= __get_user(xstate_bv, &x->xstate_hdr.xstate_bv);
209 173
210 /* 174 /*
211 * For legacy compatible, we always set FP/SSE bits in the bit 175 * Read the xstate_bv which we copied (directly from the cpu or
212 * vector while saving the state to the user context. This will 176 * from the state in task struct) to the user buffers.
213 * enable us capturing any changes(during sigreturn) to 177 */
214 * the FP/SSE bits by the legacy applications which don't touch 178 err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
215 * xstate_bv in the xsave header.
216 *
217 * xsave aware apps can change the xstate_bv in the xsave
218 * header as well as change any contents in the memory layout.
219 * xrestore as part of sigreturn will capture all the changes.
220 */
221 xstate_bv |= XSTATE_FPSSE;
222 179
223 err |= __put_user(xstate_bv, &x->xstate_hdr.xstate_bv); 180 /*
 181 * For legacy compatibility, we always set the FP/SSE bits in the bit
 182 * vector while saving the state to the user context. This lets us
 183 * capture any changes made (during sigreturn) to the FP/SSE bits by
 184 * legacy applications, which don't touch xstate_bv in the xsave
 185 * header.
 186 *
 187 * xsave-aware apps can change the xstate_bv in the xsave header as
 188 * well as change any contents in the memory layout. xrstor as part
 189 * of sigreturn will capture all the changes.
190 */
191 xstate_bv |= XSTATE_FPSSE;
224 192
225 if (err) 193 err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
226 return err;
227 }
228 194
229 return 1; 195 return err;
196}
197
198static inline int save_user_xstate(struct xsave_struct __user *buf)
199{
200 int err;
201
202 if (use_xsave())
203 err = xsave_user(buf);
204 else if (use_fxsr())
205 err = fxsave_user((struct i387_fxsave_struct __user *) buf);
206 else
207 err = fsave_user((struct i387_fsave_struct __user *) buf);
208
209 if (unlikely(err) && __clear_user(buf, xstate_size))
210 err = -EFAULT;
211 return err;
230} 212}
231 213
232/* 214/*
233 * Restore the extended state if present. Otherwise, restore the FP/SSE 215 * Save the fpu, extended register state to the user signal frame.
234 * state. 216 *
217 * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save
218 * state is copied.
219 * 'buf' points to the 'buf_fx' or to the fsave header followed by 'buf_fx'.
220 *
221 * buf == buf_fx for 64-bit frames and 32-bit fsave frame.
222 * buf != buf_fx for 32-bit frames with fxstate.
223 *
224 * If the fpu, extended register state is live, save the state directly
225 * to the user frame pointed by the aligned pointer 'buf_fx'. Otherwise,
226 * copy the thread's fpu state to the user frame starting at 'buf_fx'.
227 *
228 * If this is a 32-bit frame with fxstate, put a fsave header before
229 * the aligned state at 'buf_fx'.
230 *
231 * For [f]xsave state, update the SW reserved fields in the [f]xsave frame
232 * indicating the absence/presence of the extended state to the user.
235 */ 233 */
236static int restore_user_xstate(void __user *buf) 234int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
237{ 235{
238 struct _fpx_sw_bytes fx_sw_user; 236 struct xsave_struct *xsave = &current->thread.fpu.state->xsave;
239 u64 mask; 237 struct task_struct *tsk = current;
240 int err; 238 int ia32_fxstate = (buf != buf_fx);
241 239
242 if (((unsigned long)buf % 64) || 240 ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
243 check_for_xstate(buf, buf, &fx_sw_user)) 241 config_enabled(CONFIG_IA32_EMULATION));
244 goto fx_only;
245 242
246 mask = fx_sw_user.xstate_bv; 243 if (!access_ok(VERIFY_WRITE, buf, size))
244 return -EACCES;
247 245
248 /* 246 if (!HAVE_HWFP)
249 * restore the state passed by the user. 247 return fpregs_soft_get(current, NULL, 0,
250 */ 248 sizeof(struct user_i387_ia32_struct), NULL,
251 err = xrestore_user(buf, mask); 249 (struct _fpstate_ia32 __user *) buf) ? -1 : 1;
252 if (err)
253 return err;
254 250
255 /* 251 if (user_has_fpu()) {
256 * init the state skipped by the user. 252 /* Save the live register state to the user directly. */
257 */ 253 if (save_user_xstate(buf_fx))
258 mask = pcntxt_mask & ~mask; 254 return -1;
259 if (unlikely(mask)) 255 /* Update the thread's fxstate to save the fsave header. */
260 xrstor_state(init_xstate_buf, mask); 256 if (ia32_fxstate)
257 fpu_fxsave(&tsk->thread.fpu);
258 } else {
259 sanitize_i387_state(tsk);
260 if (__copy_to_user(buf_fx, xsave, xstate_size))
261 return -1;
262 }
263
264 /* Save the fsave header for the 32-bit frames. */
265 if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
266 return -1;
267
268 if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
269 return -1;
270
271 drop_init_fpu(tsk); /* trigger finit */
261 272
262 return 0; 273 return 0;
274}
263 275
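A picture of the two frame layouts described in the comment above (reconstructed from that comment, not text from the patch):

    /*
     * 64-bit frames and 32-bit fsave frames (buf == buf_fx):
     *
     *   buf/buf_fx -> [f|fx|x]save state, 64-byte aligned;
     *                 sw_reserved carries struct _fpx_sw_bytes
     *   buf_fx + xstate_size -> FP_XSTATE_MAGIC2 sentinel
     *
     * 32-bit frames with fxstate (buf != buf_fx):
     *
     *   buf    -> fsave header (legacy struct _fpstate_ia32 area)
     *   buf_fx -> aligned [f]xsave state and sentinel, as above
     */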
264fx_only: 276static inline void
265 /* 277sanitize_restored_xstate(struct task_struct *tsk,
266 * couldn't find the extended state information in the 278 struct user_i387_ia32_struct *ia32_env,
267 * memory layout. Restore just the FP/SSE and init all 279 u64 xstate_bv, int fx_only)
268 * the other extended state. 280{
269 */ 281 struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
270 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE); 282 struct xsave_hdr_struct *xsave_hdr = &xsave->xsave_hdr;
271 return fxrstor_checking((__force struct i387_fxsave_struct *)buf); 283
284 if (use_xsave()) {
285 /* These bits must be zero. */
286 xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0;
287
288 /*
289 * Init the state that is not present in the memory
290 * layout and not enabled by the OS.
291 */
292 if (fx_only)
293 xsave_hdr->xstate_bv = XSTATE_FPSSE;
294 else
295 xsave_hdr->xstate_bv &= (pcntxt_mask & xstate_bv);
296 }
297
298 if (use_fxsr()) {
299 /*
 300 * mxcsr reserved bits must be masked to zero for security
301 * reasons.
302 */
303 xsave->i387.mxcsr &= mxcsr_feature_mask;
304
305 convert_to_fxsr(tsk, ia32_env);
306 }
272} 307}
273 308
274/* 309/*
275 * This restores directly out of user space. Exceptions are handled. 310 * Restore the extended state if present. Otherwise, restore the FP/SSE state.
276 */ 311 */
277int restore_i387_xstate(void __user *buf) 312static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
278{ 313{
314 if (use_xsave()) {
315 if ((unsigned long)buf % 64 || fx_only) {
316 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
317 xrstor_state(init_xstate_buf, init_bv);
318 return fxrstor_checking((__force void *) buf);
319 } else {
320 u64 init_bv = pcntxt_mask & ~xbv;
321 if (unlikely(init_bv))
322 xrstor_state(init_xstate_buf, init_bv);
323 return xrestore_user(buf, xbv);
324 }
325 } else if (use_fxsr()) {
326 return fxrstor_checking((__force void *) buf);
327 } else
328 return frstor_checking((__force void *) buf);
329}
330
331int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
332{
333 int ia32_fxstate = (buf != buf_fx);
279 struct task_struct *tsk = current; 334 struct task_struct *tsk = current;
280 int err = 0; 335 int state_size = xstate_size;
336 u64 xstate_bv = 0;
337 int fx_only = 0;
338
339 ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
340 config_enabled(CONFIG_IA32_EMULATION));
281 341
282 if (!buf) { 342 if (!buf) {
283 if (used_math()) 343 drop_init_fpu(tsk);
284 goto clear;
285 return 0; 344 return 0;
286 } else 345 }
287 if (!access_ok(VERIFY_READ, buf, sig_xstate_size))
288 return -EACCES;
289 346
290 if (!used_math()) { 347 if (!access_ok(VERIFY_READ, buf, size))
291 err = init_fpu(tsk); 348 return -EACCES;
292 if (err) 349
293 return err; 350 if (!used_math() && init_fpu(tsk))
351 return -1;
352
353 if (!HAVE_HWFP) {
354 return fpregs_soft_set(current, NULL,
355 0, sizeof(struct user_i387_ia32_struct),
356 NULL, buf) != 0;
294 } 357 }
295 358
296 user_fpu_begin(); 359 if (use_xsave()) {
297 if (use_xsave()) 360 struct _fpx_sw_bytes fx_sw_user;
298 err = restore_user_xstate(buf); 361 if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
299 else 362 /*
300 err = fxrstor_checking((__force struct i387_fxsave_struct *) 363 * Couldn't find the extended state information in the
301 buf); 364 * memory layout. Restore just the FP/SSE and init all
302 if (unlikely(err)) { 365 * the other extended state.
366 */
367 state_size = sizeof(struct i387_fxsave_struct);
368 fx_only = 1;
369 } else {
370 state_size = fx_sw_user.xstate_size;
371 xstate_bv = fx_sw_user.xstate_bv;
372 }
373 }
374
375 if (ia32_fxstate) {
376 /*
377 * For 32-bit frames with fxstate, copy the user state to the
378 * thread's fpu state, reconstruct fxstate from the fsave
379 * header. Sanitize the copied state etc.
380 */
381 struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
382 struct user_i387_ia32_struct env;
383 int err = 0;
384
385 /*
 386 * Drop the current fpu, which clears used_math(). This ensures
 387 * that a context switch during the copy of the new state cannot
 388 * save or restore the half-copied intermediate state and thereby
 389 * corrupt the newly restored state.
 390 * We will be ready to save/restore the state again only once
 391 * set_used_math() is set.
392 */
393 drop_fpu(tsk);
394
395 if (__copy_from_user(xsave, buf_fx, state_size) ||
396 __copy_from_user(&env, buf, sizeof(env))) {
397 err = -1;
398 } else {
399 sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
400 set_used_math();
401 }
402
403 if (use_eager_fpu())
404 math_state_restore();
405
406 return err;
407 } else {
303 /* 408 /*
304 * Encountered an error while doing the restore from the 409 * For 64-bit frames and 32-bit fsave frames, restore the user
305 * user buffer, clear the fpu state. 410 * state to the registers directly (with exceptions handled).
306 */ 411 */
307clear: 412 user_fpu_begin();
308 clear_fpu(tsk); 413 if (restore_user_xstate(buf_fx, xstate_bv, fx_only)) {
309 clear_used_math(); 414 drop_init_fpu(tsk);
415 return -1;
416 }
310 } 417 }
311 return err; 418
419 return 0;
312} 420}
313#endif
314 421
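restore_sigcontext() (see the signal.c hunk earlier in this diff) calls restore_xstate_sig(buf, ia32_frame); that wrapper around __restore_xstate_sig() is not shown here, but given the buf/buf_fx contract above it is presumably along these lines (xstate_sigframe_size() is the same assumed helper as in the alloc_mathframe() sketch):

    static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
    {
            void __user *buf_fx = buf;
            int size = xstate_sigframe_size();

            if (ia32_frame && use_fxsr()) {
                    buf_fx = buf + sizeof(struct i387_fsave_struct);
                    size += sizeof(struct i387_fsave_struct);
            }

            return __restore_xstate_sig(buf, buf_fx, size);
    }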
315/* 422/*
316 * Prepare the SW reserved portion of the fxsave memory layout, indicating 423 * Prepare the SW reserved portion of the fxsave memory layout, indicating
@@ -321,31 +428,22 @@ clear:
321 */ 428 */
322static void prepare_fx_sw_frame(void) 429static void prepare_fx_sw_frame(void)
323{ 430{
324 int size_extended = (xstate_size - sizeof(struct i387_fxsave_struct)) + 431 int fsave_header_size = sizeof(struct i387_fsave_struct);
325 FP_XSTATE_MAGIC2_SIZE; 432 int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;
326 433
327 sig_xstate_size = sizeof(struct _fpstate) + size_extended; 434 if (config_enabled(CONFIG_X86_32))
328 435 size += fsave_header_size;
329#ifdef CONFIG_IA32_EMULATION
330 sig_xstate_ia32_size = sizeof(struct _fpstate_ia32) + size_extended;
331#endif
332
333 memset(&fx_sw_reserved, 0, sizeof(fx_sw_reserved));
334 436
335 fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1; 437 fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
336 fx_sw_reserved.extended_size = sig_xstate_size; 438 fx_sw_reserved.extended_size = size;
337 fx_sw_reserved.xstate_bv = pcntxt_mask; 439 fx_sw_reserved.xstate_bv = pcntxt_mask;
338 fx_sw_reserved.xstate_size = xstate_size; 440 fx_sw_reserved.xstate_size = xstate_size;
339#ifdef CONFIG_IA32_EMULATION
340 memcpy(&fx_sw_reserved_ia32, &fx_sw_reserved,
341 sizeof(struct _fpx_sw_bytes));
342 fx_sw_reserved_ia32.extended_size = sig_xstate_ia32_size;
343#endif
344}
345 441
346#ifdef CONFIG_X86_64 442 if (config_enabled(CONFIG_IA32_EMULATION)) {
347unsigned int sig_xstate_size = sizeof(struct _fpstate); 443 fx_sw_reserved_ia32 = fx_sw_reserved;
348#endif 444 fx_sw_reserved_ia32.extended_size += fsave_header_size;
445 }
446}
349 447
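A worked example of the sizes prepare_fx_sw_frame() records, assuming an AVX-capable CPU (the numbers are the architectural state sizes, not values taken from this patch):

    /*
     *   xstate_size   = 512 (fxsave) + 64 (xsave header) + 256 (ymm) = 832
     *   extended_size = xstate_size + FP_XSTATE_MAGIC2_SIZE (4)      = 836
     *
     * On CONFIG_X86_32, and for the ia32 copy under IA32_EMULATION,
     * extended_size grows by sizeof(struct i387_fsave_struct) (112) for
     * the fsave header that precedes the 64-byte-aligned state.
     */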
350/* 448/*
351 * Enable the extended processor state save/restore feature 449 * Enable the extended processor state save/restore feature
@@ -384,19 +482,21 @@ static void __init setup_xstate_features(void)
384/* 482/*
385 * setup the xstate image representing the init state 483 * setup the xstate image representing the init state
386 */ 484 */
387static void __init setup_xstate_init(void) 485static void __init setup_init_fpu_buf(void)
388{ 486{
389 setup_xstate_features();
390
391 /* 487 /*
392 * Setup init_xstate_buf to represent the init state of 488 * Setup init_xstate_buf to represent the init state of
393 * all the features managed by the xsave 489 * all the features managed by the xsave
394 */ 490 */
395 init_xstate_buf = alloc_bootmem_align(xstate_size, 491 init_xstate_buf = alloc_bootmem_align(xstate_size,
396 __alignof__(struct xsave_struct)); 492 __alignof__(struct xsave_struct));
397 init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT; 493 fx_finit(&init_xstate_buf->i387);
494
495 if (!cpu_has_xsave)
496 return;
497
498 setup_xstate_features();
398 499
399 clts();
400 /* 500 /*
401 * Init all the features state with header_bv being 0x0 501 * Init all the features state with header_bv being 0x0
402 */ 502 */
@@ -406,9 +506,21 @@ static void __init setup_xstate_init(void)
406 * of any feature which is not represented by all zero's. 506 * of any feature which is not represented by all zero's.
407 */ 507 */
408 xsave_state(init_xstate_buf, -1); 508 xsave_state(init_xstate_buf, -1);
409 stts();
410} 509}
411 510
511static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO;
512static int __init eager_fpu_setup(char *s)
513{
514 if (!strcmp(s, "on"))
515 eagerfpu = ENABLE;
516 else if (!strcmp(s, "off"))
517 eagerfpu = DISABLE;
518 else if (!strcmp(s, "auto"))
519 eagerfpu = AUTO;
520 return 1;
521}
522__setup("eagerfpu=", eager_fpu_setup);
523
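The corresponding Documentation/kernel-parameters.txt entry (the first file in the diffstat) is not shown in this section; it presumably documents the three values accepted here, roughly:

    eagerfpu=       [X86]
                    on      enable eager fpu restore
                    off     disable eager fpu restore
                    auto    select the default from the CPU capabilities
                            (xsaveopt auto-enables it, see
                            xstate_enable_boot_cpu() below)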
412/* 524/*
413 * Enable and initialize the xsave feature. 525 * Enable and initialize the xsave feature.
414 */ 526 */
@@ -445,8 +557,11 @@ static void __init xstate_enable_boot_cpu(void)
445 557
446 update_regset_xstate_info(xstate_size, pcntxt_mask); 558 update_regset_xstate_info(xstate_size, pcntxt_mask);
447 prepare_fx_sw_frame(); 559 prepare_fx_sw_frame();
560 setup_init_fpu_buf();
448 561
449 setup_xstate_init(); 562 /* Auto enable eagerfpu for xsaveopt */
563 if (cpu_has_xsaveopt && eagerfpu != DISABLE)
564 eagerfpu = ENABLE;
450 565
451 pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x\n", 566 pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x\n",
452 pcntxt_mask, xstate_size); 567 pcntxt_mask, xstate_size);
@@ -471,3 +586,43 @@ void __cpuinit xsave_init(void)
471 next_func = xstate_enable; 586 next_func = xstate_enable;
472 this_func(); 587 this_func();
473} 588}
589
590static inline void __init eager_fpu_init_bp(void)
591{
592 current->thread.fpu.state =
593 alloc_bootmem_align(xstate_size, __alignof__(struct xsave_struct));
594 if (!init_xstate_buf)
595 setup_init_fpu_buf();
596}
597
598void __cpuinit eager_fpu_init(void)
599{
600 static __refdata void (*boot_func)(void) = eager_fpu_init_bp;
601
602 clear_used_math();
603 current_thread_info()->status = 0;
604
605 if (eagerfpu == ENABLE)
606 setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
607
608 if (!cpu_has_eager_fpu) {
609 stts();
610 return;
611 }
612
613 if (boot_func) {
614 boot_func();
615 boot_func = NULL;
616 }
617
618 /*
 619 * This is the same as math_state_restore(), but use_xsave() is
620 * not yet patched to use math_state_restore().
621 */
622 init_fpu(current);
623 __thread_fpu_begin(current);
624 if (cpu_has_xsave)
625 xrstor_state(init_xstate_buf, -1);
626 else
627 fxrstor_checking(&init_xstate_buf->i387);
628}
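cpu_has_eager_fpu and use_eager_fpu() key off the synthetic X86_FEATURE_EAGER_FPU bit forced above (cpufeature.h changes by three lines in the diffstat). The predicate is presumably the usual static_cpu_has() pattern:

    static __always_inline __pure bool use_eager_fpu(void)
    {
            return static_cpu_has(X86_FEATURE_EAGER_FPU);
    }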
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c00f03de1b79..70dfcec3c463 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1493,8 +1493,12 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
1493#ifdef CONFIG_X86_64 1493#ifdef CONFIG_X86_64
1494 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); 1494 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
1495#endif 1495#endif
1496 if (user_has_fpu()) 1496 /*
1497 clts(); 1497 * If the FPU is not active (through the host task or
1498 * the guest vcpu), then restore the cr0.TS bit.
1499 */
1500 if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded)
1501 stts();
1498 load_gdt(&__get_cpu_var(host_gdt)); 1502 load_gdt(&__get_cpu_var(host_gdt));
1499} 1503}
1500 1504
@@ -3730,7 +3734,7 @@ static void vmx_set_constant_host_state(void)
3730 unsigned long tmpl; 3734 unsigned long tmpl;
3731 struct desc_ptr dt; 3735 struct desc_ptr dt;
3732 3736
3733 vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */ 3737 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
3734 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */ 3738 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
3735 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ 3739 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
3736 3740
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 148ed666e311..02b2cd520693 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5972,7 +5972,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
5972 */ 5972 */
5973 kvm_put_guest_xcr0(vcpu); 5973 kvm_put_guest_xcr0(vcpu);
5974 vcpu->guest_fpu_loaded = 1; 5974 vcpu->guest_fpu_loaded = 1;
5975 unlazy_fpu(current); 5975 __kernel_fpu_begin();
5976 fpu_restore_checking(&vcpu->arch.guest_fpu); 5976 fpu_restore_checking(&vcpu->arch.guest_fpu);
5977 trace_kvm_fpu(1); 5977 trace_kvm_fpu(1);
5978} 5978}
@@ -5986,6 +5986,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
5986 5986
5987 vcpu->guest_fpu_loaded = 0; 5987 vcpu->guest_fpu_loaded = 0;
5988 fpu_save_init(&vcpu->arch.guest_fpu); 5988 fpu_save_init(&vcpu->arch.guest_fpu);
5989 __kernel_fpu_end();
5989 ++vcpu->stat.fpu_reload; 5990 ++vcpu->stat.fpu_reload;
5990 kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu); 5991 kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
5991 trace_kvm_fpu(0); 5992 trace_kvm_fpu(0);
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 39809035320a..4af12e1844d5 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -203,8 +203,8 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
203 * we set it now, so we can trap and pass that trap to the Guest if it 203 * we set it now, so we can trap and pass that trap to the Guest if it
204 * uses the FPU. 204 * uses the FPU.
205 */ 205 */
206 if (cpu->ts) 206 if (cpu->ts && user_has_fpu())
207 unlazy_fpu(current); 207 stts();
208 208
209 /* 209 /*
210 * SYSENTER is an optimized way of doing system calls. We can't allow 210 * SYSENTER is an optimized way of doing system calls. We can't allow
@@ -234,6 +234,10 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
234 if (boot_cpu_has(X86_FEATURE_SEP)) 234 if (boot_cpu_has(X86_FEATURE_SEP))
235 wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0); 235 wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
236 236
237 /* Clear the host TS bit if it was set above. */
238 if (cpu->ts && user_has_fpu())
239 clts();
240
237 /* 241 /*
238 * If the Guest page faulted, then the cr2 register will tell us the 242 * If the Guest page faulted, then the cr2 register will tell us the
239 * bad virtual address. We have to grab this now, because once we 243 * bad virtual address. We have to grab this now, because once we
@@ -249,7 +253,7 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
249 * a different CPU. So all the critical stuff should be done 253 * a different CPU. So all the critical stuff should be done
250 * before this. 254 * before this.
251 */ 255 */
252 else if (cpu->regs->trapnum == 7) 256 else if (cpu->regs->trapnum == 7 && !user_has_fpu())
253 math_state_restore(); 257 math_state_restore();
254} 258}
255 259
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 4f50145f6483..662c5f7eb0c5 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -109,7 +109,9 @@ static int sp_probe(struct platform_device *pdev)
109 priv = netdev_priv(dev); 109 priv = netdev_priv(dev);
110 110
111 dev->irq = res_irq->start; 111 dev->irq = res_irq->start;
112 priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED); 112 priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
113 if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
114 priv->irq_flags |= IRQF_SHARED;
113 priv->reg_base = addr; 115 priv->reg_base = addr;
114 /* The CAN clock frequency is half the oscillator clock frequency */ 116 /* The CAN clock frequency is half the oscillator clock frequency */
115 priv->can.clock.freq = pdata->osc_freq / 2; 117 priv->can.clock.freq = pdata->osc_freq / 2;
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index 310596175676..b595d3422b9f 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -150,7 +150,7 @@ int softing_load_fw(const char *file, struct softing *card,
150 const uint8_t *mem, *end, *dat; 150 const uint8_t *mem, *end, *dat;
151 uint16_t type, len; 151 uint16_t type, len;
152 uint32_t addr; 152 uint32_t addr;
153 uint8_t *buf = NULL; 153 uint8_t *buf = NULL, *new_buf;
154 int buflen = 0; 154 int buflen = 0;
155 int8_t type_end = 0; 155 int8_t type_end = 0;
156 156
@@ -199,11 +199,12 @@ int softing_load_fw(const char *file, struct softing *card,
199 if (len > buflen) { 199 if (len > buflen) {
200 /* align buflen */ 200 /* align buflen */
201 buflen = (len + (1024-1)) & ~(1024-1); 201 buflen = (len + (1024-1)) & ~(1024-1);
202 buf = krealloc(buf, buflen, GFP_KERNEL); 202 new_buf = krealloc(buf, buflen, GFP_KERNEL);
203 if (!buf) { 203 if (!new_buf) {
204 ret = -ENOMEM; 204 ret = -ENOMEM;
205 goto failed; 205 goto failed;
206 } 206 }
207 buf = new_buf;
207 } 208 }
208 /* verify record data */ 209 /* verify record data */
209 memcpy_fromio(buf, &dpram[addr + offset], len); 210 memcpy_fromio(buf, &dpram[addr + offset], len);
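This hunk fixes the classic krealloc() leak: on failure krealloc() returns NULL but leaves the old allocation alive, so assigning the result straight back to 'buf' dropped the only reference to it. The safe pattern in isolation (grow_buffer is a hypothetical helper for illustration):

    #include <linux/slab.h>

    static int grow_buffer(u8 **buf, size_t newlen)
    {
            u8 *tmp = krealloc(*buf, newlen, GFP_KERNEL);

            if (!tmp)
                    return -ENOMEM; /* *buf is still valid; caller frees it */
            *buf = tmp;
            return 0;
    }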
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 463b9ec57d80..6d1a24acb77e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1708,9 +1708,6 @@ struct bnx2x_func_init_params {
1708 continue; \ 1708 continue; \
1709 else 1709 else
1710 1710
1711#define for_each_napi_rx_queue(bp, var) \
1712 for ((var) = 0; (var) < bp->num_napi_queues; (var)++)
1713
1714/* Skip OOO FP */ 1711/* Skip OOO FP */
1715#define for_each_tx_queue(bp, var) \ 1712#define for_each_tx_queue(bp, var) \
1716 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ 1713 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e879e19eb0d6..af20c6ee2cd9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2046,6 +2046,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2046 */ 2046 */
2047 bnx2x_setup_tc(bp->dev, bp->max_cos); 2047 bnx2x_setup_tc(bp->dev, bp->max_cos);
2048 2048
2049 /* Add all NAPI objects */
2050 bnx2x_add_all_napi(bp);
2049 bnx2x_napi_enable(bp); 2051 bnx2x_napi_enable(bp);
2050 2052
2051 /* set pf load just before approaching the MCP */ 2053 /* set pf load just before approaching the MCP */
@@ -2408,6 +2410,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2408 2410
2409 /* Disable HW interrupts, NAPI */ 2411 /* Disable HW interrupts, NAPI */
2410 bnx2x_netif_stop(bp, 1); 2412 bnx2x_netif_stop(bp, 1);
2413 /* Delete all NAPI objects */
2414 bnx2x_del_all_napi(bp);
2411 2415
2412 /* Release IRQs */ 2416 /* Release IRQs */
2413 bnx2x_free_irq(bp); 2417 bnx2x_free_irq(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index dfa757e74296..21b553229ea4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -792,7 +792,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
792 bp->num_napi_queues = bp->num_queues; 792 bp->num_napi_queues = bp->num_queues;
793 793
794 /* Add NAPI objects */ 794 /* Add NAPI objects */
795 for_each_napi_rx_queue(bp, i) 795 for_each_rx_queue(bp, i)
796 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 796 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
797 bnx2x_poll, BNX2X_NAPI_WEIGHT); 797 bnx2x_poll, BNX2X_NAPI_WEIGHT);
798} 798}
@@ -801,7 +801,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
801{ 801{
802 int i; 802 int i;
803 803
804 for_each_napi_rx_queue(bp, i) 804 for_each_rx_queue(bp, i)
805 netif_napi_del(&bnx2x_fp(bp, i, napi)); 805 netif_napi_del(&bnx2x_fp(bp, i, napi));
806} 806}
807 807
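
Dropping for_each_napi_rx_queue() leaves bnx2x_add_all_napi() and bnx2x_del_all_napi() iterating with the same for_each_rx_queue() helper, so the added and deleted sets are identical by construction. That is the general NAPI contract: every netif_napi_add() needs a matching netif_napi_del() before the net_device is freed. A generic sketch with hypothetical names (rings, nr_rx, my_poll):

    int i;

    for (i = 0; i < nr_rx; i++)     /* setup */
            netif_napi_add(dev, &rings[i].napi, my_poll, 64);

    for (i = 0; i < nr_rx; i++)     /* teardown walks the same set */
            netif_napi_del(&rings[i].napi);
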
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index fc4e0e3885b0..c37a68d68090 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2888,11 +2888,9 @@ static void bnx2x_get_channels(struct net_device *dev,
2888 */ 2888 */
2889static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss) 2889static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
2890{ 2890{
2891 bnx2x_del_all_napi(bp);
2892 bnx2x_disable_msi(bp); 2891 bnx2x_disable_msi(bp);
2893 BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE; 2892 BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
2894 bnx2x_set_int_mode(bp); 2893 bnx2x_set_int_mode(bp);
2895 bnx2x_add_all_napi(bp);
2896} 2894}
2897 2895
2898/** 2896/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 02b5a343b195..21054987257a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8427,6 +8427,8 @@ unload_error:
8427 8427
8428 /* Disable HW interrupts, NAPI */ 8428 /* Disable HW interrupts, NAPI */
8429 bnx2x_netif_stop(bp, 1); 8429 bnx2x_netif_stop(bp, 1);
8430 /* Delete all NAPI objects */
8431 bnx2x_del_all_napi(bp);
8430 8432
8431 /* Release IRQs */ 8433 /* Release IRQs */
8432 bnx2x_free_irq(bp); 8434 bnx2x_free_irq(bp);
@@ -11229,10 +11231,12 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11229static void poll_bnx2x(struct net_device *dev) 11231static void poll_bnx2x(struct net_device *dev)
11230{ 11232{
11231 struct bnx2x *bp = netdev_priv(dev); 11233 struct bnx2x *bp = netdev_priv(dev);
11234 int i;
11232 11235
11233 disable_irq(bp->pdev->irq); 11236 for_each_eth_queue(bp, i) {
11234 bnx2x_interrupt(bp->pdev->irq, dev); 11237 struct bnx2x_fastpath *fp = &bp->fp[i];
11235 enable_irq(bp->pdev->irq); 11238 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
11239 }
11236} 11240}
11237#endif 11241#endif
11238 11242
@@ -11899,9 +11903,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11899 */ 11903 */
11900 bnx2x_set_int_mode(bp); 11904 bnx2x_set_int_mode(bp);
11901 11905
11902 /* Add all NAPI objects */
11903 bnx2x_add_all_napi(bp);
11904
11905 rc = register_netdev(dev); 11906 rc = register_netdev(dev);
11906 if (rc) { 11907 if (rc) {
11907 dev_err(&pdev->dev, "Cannot register net device\n"); 11908 dev_err(&pdev->dev, "Cannot register net device\n");
@@ -11976,9 +11977,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11976 11977
11977 unregister_netdev(dev); 11978 unregister_netdev(dev);
11978 11979
11979 /* Delete all NAPI objects */
11980 bnx2x_del_all_napi(bp);
11981
11982 /* Power on: we can't let PCI layer write to us while we are in D3 */ 11980 /* Power on: we can't let PCI layer write to us while we are in D3 */
11983 bnx2x_set_power_state(bp, PCI_D0); 11981 bnx2x_set_power_state(bp, PCI_D0);
11984 11982
@@ -12025,6 +12023,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12025 bnx2x_tx_disable(bp); 12023 bnx2x_tx_disable(bp);
12026 12024
12027 bnx2x_netif_stop(bp, 0); 12025 bnx2x_netif_stop(bp, 0);
12026 /* Delete all NAPI objects */
12027 bnx2x_del_all_napi(bp);
12028 12028
12029 del_timer_sync(&bp->timer); 12029 del_timer_sync(&bp->timer);
12030 12030
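
Taken together, the bnx2x hunks move NAPI registration out of probe/remove and into the load/unload paths, so the per-queue NAPI contexts exist only while the NIC is actually up. A condensed view of the resulting ordering (elided code marked with ...):

    bnx2x_nic_load():
            bnx2x_add_all_napi(bp);         /* allocate per-queue NAPI contexts */
            bnx2x_napi_enable(bp);
            ...

    bnx2x_nic_unload():
            ...
            bnx2x_netif_stop(bp, 1);        /* HW interrupts and NAPI quiesced */
            bnx2x_del_all_napi(bp);         /* torn down before IRQs are released */
            bnx2x_free_irq(bp);

The netpoll hook follows the same logic: instead of disabling the PCI IRQ and re-running the interrupt handler, poll_bnx2x() now schedules each ethernet queue's NAPI context, which is safe precisely because those contexts are guaranteed to exist whenever the interface is up.
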
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 845b2020f291..138446957786 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1243,6 +1243,7 @@ static void set_multicast_list(struct net_device *dev)
1243{ 1243{
1244 struct net_local *lp = netdev_priv(dev); 1244 struct net_local *lp = netdev_priv(dev);
1245 unsigned long flags; 1245 unsigned long flags;
1246 u16 cfg;
1246 1247
1247 spin_lock_irqsave(&lp->lock, flags); 1248 spin_lock_irqsave(&lp->lock, flags);
1248 if (dev->flags & IFF_PROMISC) 1249 if (dev->flags & IFF_PROMISC)
@@ -1260,11 +1261,10 @@ static void set_multicast_list(struct net_device *dev)
1260 /* in promiscuous mode, we accept errored packets, 1261 /* in promiscuous mode, we accept errored packets,
1261 * so we have to enable interrupts on them also 1262 * so we have to enable interrupts on them also
1262 */ 1263 */
1263 writereg(dev, PP_RxCFG, 1264 cfg = lp->curr_rx_cfg;
1264 (lp->curr_rx_cfg | 1265 if (lp->rx_mode == RX_ALL_ACCEPT)
1265 (lp->rx_mode == RX_ALL_ACCEPT) 1266 cfg |= RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL;
1266 ? (RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL) 1267 writereg(dev, PP_RxCFG, cfg);
1267 : 0));
1268 spin_unlock_irqrestore(&lp->lock, flags); 1268 spin_unlock_irqrestore(&lp->lock, flags);
1269} 1269}
1270 1270
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 7fac97b4bb59..8c63d06ab12b 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -259,7 +259,7 @@ int be_process_mcc(struct be_adapter *adapter)
259 int num = 0, status = 0; 259 int num = 0, status = 0;
260 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 260 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
261 261
262 spin_lock_bh(&adapter->mcc_cq_lock); 262 spin_lock(&adapter->mcc_cq_lock);
263 while ((compl = be_mcc_compl_get(adapter))) { 263 while ((compl = be_mcc_compl_get(adapter))) {
264 if (compl->flags & CQE_FLAGS_ASYNC_MASK) { 264 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
265 /* Interpret flags as an async trailer */ 265 /* Interpret flags as an async trailer */
@@ -280,7 +280,7 @@ int be_process_mcc(struct be_adapter *adapter)
280 if (num) 280 if (num)
281 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num); 281 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
282 282
283 spin_unlock_bh(&adapter->mcc_cq_lock); 283 spin_unlock(&adapter->mcc_cq_lock);
284 return status; 284 return status;
285} 285}
286 286
@@ -295,7 +295,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
295 if (be_error(adapter)) 295 if (be_error(adapter))
296 return -EIO; 296 return -EIO;
297 297
298 local_bh_disable();
298 status = be_process_mcc(adapter); 299 status = be_process_mcc(adapter);
300 local_bh_enable();
299 301
300 if (atomic_read(&mcc_obj->q.used) == 0) 302 if (atomic_read(&mcc_obj->q.used) == 0)
301 break; 303 break;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 90a903d83d87..78b8aa8069f0 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3763,7 +3763,9 @@ static void be_worker(struct work_struct *work)
3763 /* when interrupts are not yet enabled, just reap any pending 3763 /* when interrupts are not yet enabled, just reap any pending
3764 * mcc completions */ 3764 * mcc completions */
3765 if (!netif_running(adapter->netdev)) { 3765 if (!netif_running(adapter->netdev)) {
3766 local_bh_disable();
3766 be_process_mcc(adapter); 3767 be_process_mcc(adapter);
3768 local_bh_enable();
3767 goto reschedule; 3769 goto reschedule;
3768 } 3770 }
3769 3771
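
The be_cmds.c and be_main.c hunks redistribute the locking work: be_process_mcc() now takes mcc_cq_lock with a plain spin_lock(), and each process-context caller brackets the call with local_bh_disable()/local_bh_enable(). Presumably the completion queue is also drained from softirq context, where bottom halves are already disabled and the _bh lock variant would be redundant; moving the BH fencing to the callers keeps the lock usage uniform across both paths. The caller-side pattern, as used in both hunks:

    local_bh_disable();             /* fence out the softirq-side consumer */
    status = be_process_mcc(adapter);
    local_bh_enable();
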
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4605f7246687..d3233f59a82e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1041,7 +1041,7 @@ static int gfar_probe(struct platform_device *ofdev)
1041 1041
1042 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { 1042 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1043 dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 1043 dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1044 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 1044 dev->features |= NETIF_F_HW_VLAN_RX;
1045 } 1045 }
1046 1046
1047 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { 1047 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index cd153326c3cf..cb3356c9af80 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -310,6 +310,7 @@ struct e1000_adapter {
310 */ 310 */
311 struct e1000_ring *tx_ring /* One per active queue */ 311 struct e1000_ring *tx_ring /* One per active queue */
312 ____cacheline_aligned_in_smp; 312 ____cacheline_aligned_in_smp;
313 u32 tx_fifo_limit;
313 314
314 struct napi_struct napi; 315 struct napi_struct napi;
315 316
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 46c3b1f9ff89..d01a099475a1 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3517,6 +3517,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
3517 } 3517 }
3518 3518
3519 /* 3519 /*
3520 * Alignment of Tx data is on an arbitrary byte boundary with the
3521 * maximum size per Tx descriptor limited only to the transmit
3522 * allocation of the packet buffer minus 96 bytes with an upper
3523 * limit of 24KB due to receive synchronization limitations.
3524 */
3525 adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
3526 24 << 10);
3527
3528 /*
3520 * Disable Adaptive Interrupt Moderation if 2 full packets cannot 3529 * Disable Adaptive Interrupt Moderation if 2 full packets cannot
3521 * fit in receive buffer. 3530 * fit in receive buffer.
3522 */ 3531 */
@@ -4785,12 +4794,9 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
4785 return 1; 4794 return 1;
4786} 4795}
4787 4796
4788#define E1000_MAX_PER_TXD 8192
4789#define E1000_MAX_TXD_PWR 12
4790
4791static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, 4797static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
4792 unsigned int first, unsigned int max_per_txd, 4798 unsigned int first, unsigned int max_per_txd,
4793 unsigned int nr_frags, unsigned int mss) 4799 unsigned int nr_frags)
4794{ 4800{
4795 struct e1000_adapter *adapter = tx_ring->adapter; 4801 struct e1000_adapter *adapter = tx_ring->adapter;
4796 struct pci_dev *pdev = adapter->pdev; 4802 struct pci_dev *pdev = adapter->pdev;
@@ -5023,20 +5029,19 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
5023 5029
5024static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) 5030static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
5025{ 5031{
5032 BUG_ON(size > tx_ring->count);
5033
5026 if (e1000_desc_unused(tx_ring) >= size) 5034 if (e1000_desc_unused(tx_ring) >= size)
5027 return 0; 5035 return 0;
5028 return __e1000_maybe_stop_tx(tx_ring, size); 5036 return __e1000_maybe_stop_tx(tx_ring, size);
5029} 5037}
5030 5038
5031#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
5032static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 5039static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5033 struct net_device *netdev) 5040 struct net_device *netdev)
5034{ 5041{
5035 struct e1000_adapter *adapter = netdev_priv(netdev); 5042 struct e1000_adapter *adapter = netdev_priv(netdev);
5036 struct e1000_ring *tx_ring = adapter->tx_ring; 5043 struct e1000_ring *tx_ring = adapter->tx_ring;
5037 unsigned int first; 5044 unsigned int first;
5038 unsigned int max_per_txd = E1000_MAX_PER_TXD;
5039 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
5040 unsigned int tx_flags = 0; 5045 unsigned int tx_flags = 0;
5041 unsigned int len = skb_headlen(skb); 5046 unsigned int len = skb_headlen(skb);
5042 unsigned int nr_frags; 5047 unsigned int nr_frags;
@@ -5056,18 +5061,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5056 } 5061 }
5057 5062
5058 mss = skb_shinfo(skb)->gso_size; 5063 mss = skb_shinfo(skb)->gso_size;
5059 /*
5060 * The controller does a simple calculation to
5061 * make sure there is enough room in the FIFO before
5062 * initiating the DMA for each buffer. The calc is:
5063 * 4 = ceil(buffer len/mss). To make sure we don't
5064 * overrun the FIFO, adjust the max buffer len if mss
5065 * drops.
5066 */
5067 if (mss) { 5064 if (mss) {
5068 u8 hdr_len; 5065 u8 hdr_len;
5069 max_per_txd = min(mss << 2, max_per_txd);
5070 max_txd_pwr = fls(max_per_txd) - 1;
5071 5066
5072 /* 5067 /*
5073 * TSO Workaround for 82571/2/3 Controllers -- if skb->data 5068 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
@@ -5097,12 +5092,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5097 count++; 5092 count++;
5098 count++; 5093 count++;
5099 5094
5100 count += TXD_USE_COUNT(len, max_txd_pwr); 5095 count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
5101 5096
5102 nr_frags = skb_shinfo(skb)->nr_frags; 5097 nr_frags = skb_shinfo(skb)->nr_frags;
5103 for (f = 0; f < nr_frags; f++) 5098 for (f = 0; f < nr_frags; f++)
5104 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), 5099 count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
5105 max_txd_pwr); 5100 adapter->tx_fifo_limit);
5106 5101
5107 if (adapter->hw.mac.tx_pkt_filtering) 5102 if (adapter->hw.mac.tx_pkt_filtering)
5108 e1000_transfer_dhcp_info(adapter, skb); 5103 e1000_transfer_dhcp_info(adapter, skb);
@@ -5144,15 +5139,18 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5144 tx_flags |= E1000_TX_FLAGS_NO_FCS; 5139 tx_flags |= E1000_TX_FLAGS_NO_FCS;
5145 5140
5146 /* if count is 0 then mapping error has occurred */ 5141 /* if count is 0 then mapping error has occurred */
5147 count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss); 5142 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
5143 nr_frags);
5148 if (count) { 5144 if (count) {
5149 skb_tx_timestamp(skb); 5145 skb_tx_timestamp(skb);
5150 5146
5151 netdev_sent_queue(netdev, skb->len); 5147 netdev_sent_queue(netdev, skb->len);
5152 e1000_tx_queue(tx_ring, tx_flags, count); 5148 e1000_tx_queue(tx_ring, tx_flags, count);
5153 /* Make sure there is space in the ring for the next send. */ 5149 /* Make sure there is space in the ring for the next send. */
5154 e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2); 5150 e1000_maybe_stop_tx(tx_ring,
5155 5151 (MAX_SKB_FRAGS *
5152 DIV_ROUND_UP(PAGE_SIZE,
5153 adapter->tx_fifo_limit) + 2));
5156 } else { 5154 } else {
5157 dev_kfree_skb_any(skb); 5155 dev_kfree_skb_any(skb);
5158 tx_ring->buffer_info[first].time_stamp = 0; 5156 tx_ring->buffer_info[first].time_stamp = 0;
@@ -6327,8 +6325,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
6327 adapter->hw.phy.autoneg_advertised = 0x2f; 6325 adapter->hw.phy.autoneg_advertised = 0x2f;
6328 6326
6329 /* ring size defaults */ 6327 /* ring size defaults */
6330 adapter->rx_ring->count = 256; 6328 adapter->rx_ring->count = E1000_DEFAULT_RXD;
6331 adapter->tx_ring->count = 256; 6329 adapter->tx_ring->count = E1000_DEFAULT_TXD;
6332 6330
6333 /* 6331 /*
6334 * Initial Wake on LAN setting - If APM wake is enabled in 6332 * Initial Wake on LAN setting - If APM wake is enabled in
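
The e1000e series replaces the compile-time E1000_MAX_PER_TXD / E1000_MAX_TXD_PWR descriptor split with a per-adapter tx_fifo_limit derived at reset time from the transmit packet-buffer allocation (the upper 16 bits of PBA, in KB), and swaps the power-of-two TXD_USE_COUNT() macro for DIV_ROUND_UP(). As a worked example under the 24 KB cap: a 60 KB TSO head needs DIV_ROUND_UP(61440, 24576) = 3 descriptors, where the old scheme charged (61440 >> 13) + 1 = 8 against its fixed 8 KB-per-descriptor bound. Counting sketch, matching the xmit hunk above:

    /* one descriptor per tx_fifo_limit-sized chunk of every buffer */
    count += DIV_ROUND_UP(skb_headlen(skb), adapter->tx_fifo_limit);
    for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
            count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
                                  adapter->tx_fifo_limit);
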
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 8cba2df82b18..5faedd855b77 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -863,8 +863,8 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
863 &ip_entry->ip4dst, &ip_entry->pdst); 863 &ip_entry->ip4dst, &ip_entry->pdst);
864 if (rc != 0) { 864 if (rc != 0) {
865 rc = efx_filter_get_ipv4_full( 865 rc = efx_filter_get_ipv4_full(
866 &spec, &proto, &ip_entry->ip4src, &ip_entry->psrc, 866 &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
867 &ip_entry->ip4dst, &ip_entry->pdst); 867 &ip_entry->ip4src, &ip_entry->psrc);
868 EFX_WARN_ON_PARANOID(rc); 868 EFX_WARN_ON_PARANOID(rc);
869 ip_mask->ip4src = ~0; 869 ip_mask->ip4src = ~0;
870 ip_mask->psrc = ~0; 870 ip_mask->psrc = ~0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index e2d083228f3a..719be3912aa9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -22,6 +22,9 @@
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#ifndef __COMMON_H__
26#define __COMMON_H__
27
25#include <linux/etherdevice.h> 28#include <linux/etherdevice.h>
26#include <linux/netdevice.h> 29#include <linux/netdevice.h>
27#include <linux/phy.h> 30#include <linux/phy.h>
@@ -366,3 +369,5 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
366 369
367extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); 370extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
368extern const struct stmmac_ring_mode_ops ring_mode_ops; 371extern const struct stmmac_ring_mode_ops ring_mode_ops;
372
373#endif /* __COMMON_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index 9820ec842cc0..223adf95fd03 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -20,6 +20,10 @@
20 20
21 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 21 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
22*******************************************************************************/ 22*******************************************************************************/
23
24#ifndef __DESCS_H__
25#define __DESCS_H__
26
23struct dma_desc { 27struct dma_desc {
24 /* Receive descriptor */ 28 /* Receive descriptor */
25 union { 29 union {
@@ -166,3 +170,5 @@ enum tdes_csum_insertion {
166 * is not calculated */ 170 * is not calculated */
167 cic_full = 3, /* IP header and pseudoheader */ 171 cic_full = 3, /* IP header and pseudoheader */
168}; 172};
173
174#endif /* __DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index dd8d6e19dff6..7ee9499a6e38 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -27,6 +27,9 @@
27 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 27 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
28*******************************************************************************/ 28*******************************************************************************/
29 29
30#ifndef __DESC_COM_H__
31#define __DESC_COM_H__
32
30#if defined(CONFIG_STMMAC_RING) 33#if defined(CONFIG_STMMAC_RING)
31static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end) 34static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
32{ 35{
@@ -124,3 +127,5 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
124 p->des01.tx.buffer1_size = len; 127 p->des01.tx.buffer1_size = len;
125} 128}
126#endif 129#endif
130
131#endif /* __DESC_COM_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
index 7c6d857a9cc7..2ec6aeae349e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
@@ -22,6 +22,9 @@
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#ifndef __DWMAC100_H__
26#define __DWMAC100_H__
27
25#include <linux/phy.h> 28#include <linux/phy.h>
26#include "common.h" 29#include "common.h"
27 30
@@ -119,3 +122,5 @@ enum ttc_control {
119#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Counter */ 122
120 123
121extern const struct stmmac_dma_ops dwmac100_dma_ops; 124extern const struct stmmac_dma_ops dwmac100_dma_ops;
125
126#endif /* __DWMAC100_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index f90fcb5f9573..0e4cacedc1f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -19,6 +19,8 @@
19 19
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/ 21*******************************************************************************/
22#ifndef __DWMAC1000_H__
23#define __DWMAC1000_H__
22 24
23#include <linux/phy.h> 25#include <linux/phy.h>
24#include "common.h" 26#include "common.h"
@@ -229,6 +231,7 @@ enum rtc_control {
229#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 231#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
230 232
231/* Synopsys Core versions */ 233/* Synopsys Core versions */
232#define DWMAC_CORE_3_40 34 234#define DWMAC_CORE_3_40 0x34
233 235
234extern const struct stmmac_dma_ops dwmac1000_dma_ops; 236extern const struct stmmac_dma_ops dwmac1000_dma_ops;
237#endif /* __DWMAC1000_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index e678ce39d014..e49c9a0fd6ff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -22,6 +22,9 @@
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#ifndef __DWMAC_DMA_H__
26#define __DWMAC_DMA_H__
27
25/* DMA CRS Control and Status Register Mapping */ 28/* DMA CRS Control and Status Register Mapping */
26#define DMA_BUS_MODE 0x00001000 /* Bus Mode */ 29#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
27#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */ 30#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
@@ -109,3 +112,5 @@ extern void dwmac_dma_start_rx(void __iomem *ioaddr);
109extern void dwmac_dma_stop_rx(void __iomem *ioaddr); 112extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
110extern int dwmac_dma_interrupt(void __iomem *ioaddr, 113extern int dwmac_dma_interrupt(void __iomem *ioaddr,
111 struct stmmac_extra_stats *x); 114 struct stmmac_extra_stats *x);
115
116#endif /* __DWMAC_DMA_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index a38352024cb8..67995ef25251 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -22,6 +22,9 @@
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#ifndef __MMC_H__
26#define __MMC_H__
27
25/* MMC control register */ 28/* MMC control register */
26/* When set, all counter are reset */ 29/* When set, all counter are reset */
27#define MMC_CNTRL_COUNTER_RESET 0x1 30#define MMC_CNTRL_COUNTER_RESET 0x1
@@ -129,3 +132,5 @@ struct stmmac_counters {
129extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode); 132extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
130extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr); 133extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
131extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc); 134extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
135
136#endif /* __MMC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index c07cfe989f6e..0c74a702d461 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -33,7 +33,7 @@
33#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */ 33#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */
34#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */ 34#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */
35#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */ 35#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */
36#define MMC_DEFAUL_MASK 0xffffffff 36#define MMC_DEFAULT_MASK 0xffffffff
37 37
38/* MMC TX counter registers */ 38/* MMC TX counter registers */
39 39
@@ -147,8 +147,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
147/* To mask all interrupts. */ 147
148void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) 148void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
149{ 149{
150 writel(MMC_DEFAUL_MASK, ioaddr + MMC_RX_INTR_MASK); 150 writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
151 writel(MMC_DEFAUL_MASK, ioaddr + MMC_TX_INTR_MASK); 151 writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
152} 152}
153 153
154/* This reads the MAC core counters (if actually supported). 154
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f2d3665430ad..e872e1da3137 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -20,6 +20,9 @@
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/ 21*******************************************************************************/
22 22
23#ifndef __STMMAC_H__
24#define __STMMAC_H__
25
23#define STMMAC_RESOURCE_NAME "stmmaceth" 26#define STMMAC_RESOURCE_NAME "stmmaceth"
24#define DRV_MODULE_VERSION "March_2012" 27#define DRV_MODULE_VERSION "March_2012"
25 28
@@ -166,3 +169,5 @@ static inline void stmmac_unregister_pci(void)
166{ 169{
167} 170}
168#endif /* CONFIG_STMMAC_PCI */ 171#endif /* CONFIG_STMMAC_PCI */
172
173#endif /* __STMMAC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
index 6863590d184b..aea9b14cdfbe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
@@ -21,6 +21,8 @@
21 21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24#ifndef __STMMAC_TIMER_H__
25#define __STMMAC_TIMER_H__
24 26
25struct stmmac_timer { 27struct stmmac_timer {
26 void (*timer_start) (unsigned int new_freq); 28 void (*timer_start) (unsigned int new_freq);
@@ -40,3 +42,5 @@ void stmmac_schedule(struct net_device *dev);
40extern int tmu2_register_user(void *fnt, void *data); 42extern int tmu2_register_user(void *fnt, void *data);
41extern void tmu2_unregister_user(void); 43extern void tmu2_unregister_user(void);
42#endif 44#endif
45
46#endif /* __STMMAC_TIMER_H__ */
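
All of the stmmac header hunks apply the same mechanical fix: each header gains a classic include guard so that double inclusion is harmless. The pattern, with __EXAMPLE_H__ standing in for the per-file macro:

    #ifndef __EXAMPLE_H__
    #define __EXAMPLE_H__

    /* declarations ... */

    #endif /* __EXAMPLE_H__ */

The guard macro only has to be unique per header, which is why each file derives it from its own name.
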
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index cd7ee204e94a..a9ca4a03d31b 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -394,8 +394,10 @@ static int __devexit davinci_mdio_remove(struct platform_device *pdev)
394 struct device *dev = &pdev->dev; 394 struct device *dev = &pdev->dev;
395 struct davinci_mdio_data *data = dev_get_drvdata(dev); 395 struct davinci_mdio_data *data = dev_get_drvdata(dev);
396 396
397 if (data->bus) 397 if (data->bus) {
398 mdiobus_unregister(data->bus);
398 mdiobus_free(data->bus); 399 mdiobus_free(data->bus);
400 }
399 401
400 if (data->clk) 402 if (data->clk)
401 clk_put(data->clk); 403 clk_put(data->clk);
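
The davinci_mdio remove() fix restores the required teardown pairing: a bus that was registered must be unregistered before it is freed, otherwise the MDIO core keeps a reference to freed memory. A sketch of the symmetric lifecycle using the standard mdiobus API:

    struct mii_bus *bus;
    int rc;

    bus = mdiobus_alloc();          /* probe side */
    if (!bus)
            return -ENOMEM;
    rc = mdiobus_register(bus);
    if (rc) {
            mdiobus_free(bus);      /* never registered, free directly */
            return rc;
    }

    mdiobus_unregister(bus);        /* remove side: always unregister first */
    mdiobus_free(bus);
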
diff --git a/drivers/net/fddi/skfp/pmf.c b/drivers/net/fddi/skfp/pmf.c
index 24d8566cfd8b..441b4dc79450 100644
--- a/drivers/net/fddi/skfp/pmf.c
+++ b/drivers/net/fddi/skfp/pmf.c
@@ -673,7 +673,7 @@ void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para,
673 sm_pm_get_ls(smc,port_to_mib(smc,port))) ; 673 sm_pm_get_ls(smc,port_to_mib(smc,port))) ;
674 break ; 674 break ;
675 case SMT_P_REASON : 675 case SMT_P_REASON :
676 * (u_long *) to = 0 ; 676 *(u32 *)to = 0 ;
677 sp_len = 4 ; 677 sp_len = 4 ;
678 goto sp_done ; 678 goto sp_done ;
679 case SMT_P1033 : /* time stamp */ 679 case SMT_P1033 : /* time stamp */
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 328397c66730..adfab3fc5478 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -413,7 +413,9 @@ static const struct usb_device_id products[] = {
413 413
414 /* 5. Gobi 2000 and 3000 devices */ 414 /* 5. Gobi 2000 and 3000 devices */
415 {QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */ 415 {QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
416 {QMI_GOBI_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
416 {QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */ 417 {QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */
418 {QMI_GOBI_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */
417 {QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */ 419 {QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
418 {QMI_GOBI_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */ 420 {QMI_GOBI_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */
419 {QMI_GOBI_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */ 421 {QMI_GOBI_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */
@@ -441,6 +443,8 @@ static const struct usb_device_id products[] = {
441 {QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */ 443 {QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */
442 {QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */ 444 {QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */
443 {QMI_GOBI_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */ 445 {QMI_GOBI_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */
446 {QMI_GOBI_DEVICE(0x12d1, 0x14f1)}, /* Sony Gobi 3000 Composite */
447 {QMI_GOBI_DEVICE(0x1410, 0xa021)}, /* Foxconn Gobi 3000 Modem device (Novatel E396) */
444 448
445 { } /* END */ 449 { } /* END */
446}; 450};
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 8531c1caac28..fd4b26d46fd5 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1573,7 +1573,7 @@ int usbnet_resume (struct usb_interface *intf)
1573 netif_device_present(dev->net) && 1573 netif_device_present(dev->net) &&
1574 !timer_pending(&dev->delay) && 1574 !timer_pending(&dev->delay) &&
1575 !test_bit(EVENT_RX_HALT, &dev->flags)) 1575 !test_bit(EVENT_RX_HALT, &dev->flags))
1576 rx_alloc_submit(dev, GFP_KERNEL); 1576 rx_alloc_submit(dev, GFP_NOIO);
1577 1577
1578 if (!(dev->txq.qlen >= TX_QLEN(dev))) 1578 if (!(dev->txq.qlen >= TX_QLEN(dev)))
1579 netif_tx_wake_all_queues(dev->net); 1579 netif_tx_wake_all_queues(dev->net);
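
The usbnet one-liner is a resume-path correctness fix: a GFP_KERNEL allocation may block waiting on writeback that itself needs this (possibly storage-backing) USB device, deadlocking resume. GFP_NOIO forbids the allocator from initiating new I/O during reclaim:

    /* resume context: reclaim must not issue I/O that depends on us */
    rx_alloc_submit(dev, GFP_NOIO);

The same rule applies to any allocation reachable from a driver's resume callback.
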
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 4026c906cc7b..b7e0258887e7 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -1482,7 +1482,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
1482 case AR5K_EEPROM_MODE_11A: 1482 case AR5K_EEPROM_MODE_11A:
1483 offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version); 1483 offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version);
1484 rate_pcal_info = ee->ee_rate_tpwr_a; 1484 rate_pcal_info = ee->ee_rate_tpwr_a;
1485 ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_CHAN; 1485 ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_RATE_CHAN;
1486 break; 1486 break;
1487 case AR5K_EEPROM_MODE_11B: 1487 case AR5K_EEPROM_MODE_11B:
1488 offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version); 1488 offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version);
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index dc2bcfeadeb4..94a9bbea6874 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -182,6 +182,7 @@
182#define AR5K_EEPROM_EEP_DELTA 10 182#define AR5K_EEPROM_EEP_DELTA 10
183#define AR5K_EEPROM_N_MODES 3 183#define AR5K_EEPROM_N_MODES 3
184#define AR5K_EEPROM_N_5GHZ_CHAN 10 184#define AR5K_EEPROM_N_5GHZ_CHAN 10
185#define AR5K_EEPROM_N_5GHZ_RATE_CHAN 8
185#define AR5K_EEPROM_N_2GHZ_CHAN 3 186#define AR5K_EEPROM_N_2GHZ_CHAN 3
186#define AR5K_EEPROM_N_2GHZ_CHAN_2413 4 187#define AR5K_EEPROM_N_2GHZ_CHAN_2413 4
187#define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4 188#define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 192ad5c1fcc8..a5edebeb0b4f 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -1233,6 +1233,9 @@ uint brcms_reset(struct brcms_info *wl)
1233 /* dpc will not be rescheduled */ 1233 /* dpc will not be rescheduled */
1234 wl->resched = false; 1234 wl->resched = false;
1235 1235
1236 /* inform publicly that interface is down */
1237 wl->pub->up = false;
1238
1236 return 0; 1239 return 0;
1237} 1240}
1238 1241
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 95aa8e1683ec..83324b321652 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -2042,7 +2042,8 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
2042 return; 2042 return;
2043 } 2043 }
2044 len = ETH_ALEN; 2044 len = ETH_ALEN;
2045 ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, &bssid, &len); 2045 ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, bssid,
2046 &len);
2046 if (ret) { 2047 if (ret) {
2047 IPW_DEBUG_INFO("failed querying ordinals at line %d\n", 2048 IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
2048 __LINE__); 2049 __LINE__);
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 46782f1102ac..a47b306b522c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -124,6 +124,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
124 const struct fw_img *img; 124 const struct fw_img *img;
125 size_t bufsz; 125 size_t bufsz;
126 126
127 if (!iwl_is_ready_rf(priv))
128 return -EAGAIN;
129
127 /* default is to dump the entire data segment */ 130 /* default is to dump the entire data segment */
128 if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { 131 if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
129 priv->dbgfs_sram_offset = 0x800000; 132 priv->dbgfs_sram_offset = 0x800000;
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index d9694c58208c..4ffc18dc3a57 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -350,7 +350,7 @@ int iwl_queue_space(const struct iwl_queue *q);
350/***************************************************** 350/*****************************************************
351* Error handling 351* Error handling
352******************************************************/ 352******************************************************/
353int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display); 353int iwl_dump_fh(struct iwl_trans *trans, char **buf);
354void iwl_dump_csr(struct iwl_trans *trans); 354void iwl_dump_csr(struct iwl_trans *trans);
355 355
356/***************************************************** 356/*****************************************************
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 39a6ca1f009c..d1a61ba6247a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -555,7 +555,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
555 } 555 }
556 556
557 iwl_dump_csr(trans); 557 iwl_dump_csr(trans);
558 iwl_dump_fh(trans, NULL, false); 558 iwl_dump_fh(trans, NULL);
559 559
560 iwl_op_mode_nic_error(trans->op_mode); 560 iwl_op_mode_nic_error(trans->op_mode);
561} 561}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 939c2f78df58..1e86ea2266d4 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1649,13 +1649,9 @@ static const char *get_fh_string(int cmd)
1649#undef IWL_CMD 1649#undef IWL_CMD
1650} 1650}
1651 1651
1652int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display) 1652int iwl_dump_fh(struct iwl_trans *trans, char **buf)
1653{ 1653{
1654 int i; 1654 int i;
1655#ifdef CONFIG_IWLWIFI_DEBUG
1656 int pos = 0;
1657 size_t bufsz = 0;
1658#endif
1659 static const u32 fh_tbl[] = { 1655 static const u32 fh_tbl[] = {
1660 FH_RSCSR_CHNL0_STTS_WPTR_REG, 1656 FH_RSCSR_CHNL0_STTS_WPTR_REG,
1661 FH_RSCSR_CHNL0_RBDCB_BASE_REG, 1657 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
@@ -1667,29 +1663,35 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
1667 FH_TSSR_TX_STATUS_REG, 1663 FH_TSSR_TX_STATUS_REG,
1668 FH_TSSR_TX_ERROR_REG 1664 FH_TSSR_TX_ERROR_REG
1669 }; 1665 };
1670#ifdef CONFIG_IWLWIFI_DEBUG 1666
1671 if (display) { 1667#ifdef CONFIG_IWLWIFI_DEBUGFS
1672 bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; 1668 if (buf) {
1669 int pos = 0;
1670 size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
1671
1673 *buf = kmalloc(bufsz, GFP_KERNEL); 1672 *buf = kmalloc(bufsz, GFP_KERNEL);
1674 if (!*buf) 1673 if (!*buf)
1675 return -ENOMEM; 1674 return -ENOMEM;
1675
1676 pos += scnprintf(*buf + pos, bufsz - pos, 1676 pos += scnprintf(*buf + pos, bufsz - pos,
1677 "FH register values:\n"); 1677 "FH register values:\n");
1678 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { 1678
1679 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
1679 pos += scnprintf(*buf + pos, bufsz - pos, 1680 pos += scnprintf(*buf + pos, bufsz - pos,
1680 " %34s: 0X%08x\n", 1681 " %34s: 0X%08x\n",
1681 get_fh_string(fh_tbl[i]), 1682 get_fh_string(fh_tbl[i]),
1682 iwl_read_direct32(trans, fh_tbl[i])); 1683 iwl_read_direct32(trans, fh_tbl[i]));
1683 } 1684
1684 return pos; 1685 return pos;
1685 } 1686 }
1686#endif 1687#endif
1688
1687 IWL_ERR(trans, "FH register values:\n"); 1689 IWL_ERR(trans, "FH register values:\n");
1688 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { 1690 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
1689 IWL_ERR(trans, " %34s: 0X%08x\n", 1691 IWL_ERR(trans, " %34s: 0X%08x\n",
1690 get_fh_string(fh_tbl[i]), 1692 get_fh_string(fh_tbl[i]),
1691 iwl_read_direct32(trans, fh_tbl[i])); 1693 iwl_read_direct32(trans, fh_tbl[i]));
1692 } 1694
1693 return 0; 1695 return 0;
1694} 1696}
1695 1697
@@ -1982,11 +1984,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
1982 size_t count, loff_t *ppos) 1984 size_t count, loff_t *ppos)
1983{ 1985{
1984 struct iwl_trans *trans = file->private_data; 1986 struct iwl_trans *trans = file->private_data;
1985 char *buf; 1987 char *buf = NULL;
1986 int pos = 0; 1988 int pos = 0;
1987 ssize_t ret = -EFAULT; 1989 ssize_t ret = -EFAULT;
1988 1990
1989 ret = pos = iwl_dump_fh(trans, &buf, true); 1991 ret = pos = iwl_dump_fh(trans, &buf);
1990 if (buf) { 1992 if (buf) {
1991 ret = simple_read_from_buffer(user_buf, 1993 ret = simple_read_from_buffer(user_buf,
1992 count, ppos, buf, pos); 1994 count, ppos, buf, pos);
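
The iwl_dump_fh() rework folds the old display flag into the buf argument: NULL means dump the FH registers to the log via IWL_ERR(), non-NULL means allocate and fill a string, with the buffer mode now compiled under CONFIG_IWLWIFI_DEBUGFS, the option its only user (the debugfs read above) is built with. Caller-side sketch of both modes, reusing the names from that hunk:

    char *buf = NULL;
    int pos;

    /* debugfs read: request a formatted buffer and copy it to userspace */
    pos = iwl_dump_fh(trans, &buf);
    if (pos > 0 && buf) {
            ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
            kfree(buf);
    }

    /* error path: no buffer, registers go straight to the kernel log */
    iwl_dump_fh(trans, NULL);
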
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 30899901aef5..650f79a1f2bd 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -57,8 +57,7 @@
57static const struct ethtool_ops xennet_ethtool_ops; 57static const struct ethtool_ops xennet_ethtool_ops;
58 58
59struct netfront_cb { 59struct netfront_cb {
60 struct page *page; 60 int pull_to;
61 unsigned offset;
62}; 61};
63 62
64#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) 63#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
@@ -867,15 +866,9 @@ static int handle_incoming_queue(struct net_device *dev,
867 struct sk_buff *skb; 866 struct sk_buff *skb;
868 867
869 while ((skb = __skb_dequeue(rxq)) != NULL) { 868 while ((skb = __skb_dequeue(rxq)) != NULL) {
870 struct page *page = NETFRONT_SKB_CB(skb)->page; 869 int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
871 void *vaddr = page_address(page);
872 unsigned offset = NETFRONT_SKB_CB(skb)->offset;
873
874 memcpy(skb->data, vaddr + offset,
875 skb_headlen(skb));
876 870
877 if (page != skb_frag_page(&skb_shinfo(skb)->frags[0])) 871 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
878 __free_page(page);
879 872
880 /* Ethernet work: Delayed to here as it peeks the header. */ 873 /* Ethernet work: Delayed to here as it peeks the header. */
881 skb->protocol = eth_type_trans(skb, dev); 874 skb->protocol = eth_type_trans(skb, dev);
@@ -913,7 +906,6 @@ static int xennet_poll(struct napi_struct *napi, int budget)
913 struct sk_buff_head errq; 906 struct sk_buff_head errq;
914 struct sk_buff_head tmpq; 907 struct sk_buff_head tmpq;
915 unsigned long flags; 908 unsigned long flags;
916 unsigned int len;
917 int err; 909 int err;
918 910
919 spin_lock(&np->rx_lock); 911 spin_lock(&np->rx_lock);
@@ -955,24 +947,13 @@ err:
955 } 947 }
956 } 948 }
957 949
958 NETFRONT_SKB_CB(skb)->page = 950 NETFRONT_SKB_CB(skb)->pull_to = rx->status;
959 skb_frag_page(&skb_shinfo(skb)->frags[0]); 951 if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
960 NETFRONT_SKB_CB(skb)->offset = rx->offset; 952 NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
961
962 len = rx->status;
963 if (len > RX_COPY_THRESHOLD)
964 len = RX_COPY_THRESHOLD;
965 skb_put(skb, len);
966 953
967 if (rx->status > len) { 954 skb_shinfo(skb)->frags[0].page_offset = rx->offset;
968 skb_shinfo(skb)->frags[0].page_offset = 955 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
969 rx->offset + len; 956 skb->data_len = rx->status;
970 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len);
971 skb->data_len = rx->status - len;
972 } else {
973 __skb_fill_page_desc(skb, 0, NULL, 0, 0);
974 skb_shinfo(skb)->nr_frags = 0;
975 }
976 957
977 i = xennet_fill_frags(np, skb, &tmpq); 958 i = xennet_fill_frags(np, skb, &tmpq);
978 959
@@ -999,7 +980,7 @@ err:
999 * receive throughput using the standard receive 980
1000 * buffer size was cut by 25%(!!!). 981 * buffer size was cut by 25%(!!!).
1001 */ 982 */
1002 skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); 983 skb->truesize += skb->data_len - RX_COPY_THRESHOLD;
1003 skb->len += skb->data_len; 984 skb->len += skb->data_len;
1004 985
1005 if (rx->flags & XEN_NETRXF_csum_blank) 986 if (rx->flags & XEN_NETRXF_csum_blank)
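
The xen-netfront rework drops the page/offset bookkeeping and the eager memcpy: the ring handler leaves the whole payload in frag 0 and records only how many bytes should end up linear (pull_to, clamped to RX_COPY_THRESHOLD), and handle_incoming_queue() linearizes exactly that much once the skb is complete. A condensed sketch of the two halves:

    /* RX ring handler: remember the target headlen, keep data in frag 0 */
    NETFRONT_SKB_CB(skb)->pull_to = min_t(int, rx->status, RX_COPY_THRESHOLD);

    /* dequeue path: move just that much into the linear area */
    __pskb_pull_tail(skb, pull_to - skb_headlen(skb));

__pskb_pull_tail() grows the skb head as needed, which is what lets the fixed-size copy and the manual page free go away.
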
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 074923ce593d..f0cf934ba877 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1576,9 +1576,14 @@ cifs_readv_callback(struct mid_q_entry *mid)
1576 /* result already set, check signature */ 1576 /* result already set, check signature */
1577 if (server->sec_mode & 1577 if (server->sec_mode &
1578 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { 1578 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
1579 if (cifs_verify_signature(rdata->iov, rdata->nr_iov, 1579 int rc = 0;
1580 server, mid->sequence_number + 1)) 1580
1581 cERROR(1, "Unexpected SMB signature"); 1581 rc = cifs_verify_signature(rdata->iov, rdata->nr_iov,
1582 server,
1583 mid->sequence_number + 1);
1584 if (rc)
1585 cERROR(1, "SMB signature verification returned "
1586 "error = %d", rc);
1582 } 1587 }
1583 /* FIXME: should this be counted toward the initiating task? */ 1588 /* FIXME: should this be counted toward the initiating task? */
1584 task_io_account_read(rdata->bytes); 1589 task_io_account_read(rdata->bytes);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index cbe709ad6663..781025be48bc 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -356,19 +356,12 @@ cifs_create_get_file_info:
356cifs_create_set_dentry: 356cifs_create_set_dentry:
357 if (rc != 0) { 357 if (rc != 0) {
358 cFYI(1, "Create worked, get_inode_info failed rc = %d", rc); 358 cFYI(1, "Create worked, get_inode_info failed rc = %d", rc);
359 CIFSSMBClose(xid, tcon, *fileHandle);
359 goto out; 360 goto out;
360 } 361 }
361 d_drop(direntry); 362 d_drop(direntry);
362 d_add(direntry, newinode); 363 d_add(direntry, newinode);
363 364
364 /* ENOENT for create? How weird... */
365 rc = -ENOENT;
366 if (!newinode) {
367 CIFSSMBClose(xid, tcon, *fileHandle);
368 goto out;
369 }
370 rc = 0;
371
372out: 365out:
373 kfree(buf); 366 kfree(buf);
374 kfree(full_path); 367 kfree(full_path);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 7354877fa3bd..cb79c7edecb0 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -124,10 +124,10 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
124{ 124{
125 struct cifsInodeInfo *cifs_i = CIFS_I(inode); 125 struct cifsInodeInfo *cifs_i = CIFS_I(inode);
126 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 126 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
127 unsigned long oldtime = cifs_i->time;
128 127
129 cifs_revalidate_cache(inode, fattr); 128 cifs_revalidate_cache(inode, fattr);
130 129
130 spin_lock(&inode->i_lock);
131 inode->i_atime = fattr->cf_atime; 131 inode->i_atime = fattr->cf_atime;
132 inode->i_mtime = fattr->cf_mtime; 132 inode->i_mtime = fattr->cf_mtime;
133 inode->i_ctime = fattr->cf_ctime; 133 inode->i_ctime = fattr->cf_ctime;
@@ -148,9 +148,6 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
148 else 148 else
149 cifs_i->time = jiffies; 149 cifs_i->time = jiffies;
150 150
151 cFYI(1, "inode 0x%p old_time=%ld new_time=%ld", inode,
152 oldtime, cifs_i->time);
153
154 cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING; 151 cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING;
155 152
156 cifs_i->server_eof = fattr->cf_eof; 153 cifs_i->server_eof = fattr->cf_eof;
@@ -158,7 +155,6 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
158 * Can't safely change the file size here if the client is writing to 155 * Can't safely change the file size here if the client is writing to
159 * it due to potential races. 156 * it due to potential races.
160 */ 157 */
161 spin_lock(&inode->i_lock);
162 if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) { 158 if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) {
163 i_size_write(inode, fattr->cf_eof); 159 i_size_write(inode, fattr->cf_eof);
164 160
@@ -859,12 +855,14 @@ struct inode *cifs_root_iget(struct super_block *sb)
859 855
860 if (rc && tcon->ipc) { 856 if (rc && tcon->ipc) {
861 cFYI(1, "ipc connection - fake read inode"); 857 cFYI(1, "ipc connection - fake read inode");
858 spin_lock(&inode->i_lock);
862 inode->i_mode |= S_IFDIR; 859 inode->i_mode |= S_IFDIR;
863 set_nlink(inode, 2); 860 set_nlink(inode, 2);
864 inode->i_op = &cifs_ipc_inode_ops; 861 inode->i_op = &cifs_ipc_inode_ops;
865 inode->i_fop = &simple_dir_operations; 862 inode->i_fop = &simple_dir_operations;
866 inode->i_uid = cifs_sb->mnt_uid; 863 inode->i_uid = cifs_sb->mnt_uid;
867 inode->i_gid = cifs_sb->mnt_gid; 864 inode->i_gid = cifs_sb->mnt_gid;
865 spin_unlock(&inode->i_lock);
868 } else if (rc) { 866 } else if (rc) {
869 iget_failed(inode); 867 iget_failed(inode);
870 inode = ERR_PTR(rc); 868 inode = ERR_PTR(rc);
@@ -1110,6 +1108,15 @@ undo_setattr:
1110 goto out_close; 1108 goto out_close;
1111} 1109}
1112 1110
1111/* copied from fs/nfs/dir.c with small changes */
1112static void
1113cifs_drop_nlink(struct inode *inode)
1114{
1115 spin_lock(&inode->i_lock);
1116 if (inode->i_nlink > 0)
1117 drop_nlink(inode);
1118 spin_unlock(&inode->i_lock);
1119}
1113 1120
1114/* 1121/*
1115 * If dentry->d_inode is null (usually meaning the cached dentry 1122 * If dentry->d_inode is null (usually meaning the cached dentry
@@ -1166,13 +1173,13 @@ retry_std_delete:
1166psx_del_no_retry: 1173psx_del_no_retry:
1167 if (!rc) { 1174 if (!rc) {
1168 if (inode) 1175 if (inode)
1169 drop_nlink(inode); 1176 cifs_drop_nlink(inode);
1170 } else if (rc == -ENOENT) { 1177 } else if (rc == -ENOENT) {
1171 d_drop(dentry); 1178 d_drop(dentry);
1172 } else if (rc == -ETXTBSY) { 1179 } else if (rc == -ETXTBSY) {
1173 rc = cifs_rename_pending_delete(full_path, dentry, xid); 1180 rc = cifs_rename_pending_delete(full_path, dentry, xid);
1174 if (rc == 0) 1181 if (rc == 0)
1175 drop_nlink(inode); 1182 cifs_drop_nlink(inode);
1176 } else if ((rc == -EACCES) && (dosattr == 0) && inode) { 1183 } else if ((rc == -EACCES) && (dosattr == 0) && inode) {
1177 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); 1184 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
1178 if (attrs == NULL) { 1185 if (attrs == NULL) {
@@ -1241,9 +1248,10 @@ cifs_mkdir_qinfo(struct inode *inode, struct dentry *dentry, umode_t mode,
1241 * setting nlink not necessary except in cases where we failed to get it 1248 * setting nlink not necessary except in cases where we failed to get it
1242 * from the server or was set bogus 1249 * from the server or was set bogus
1243 */ 1250 */
1251 spin_lock(&dentry->d_inode->i_lock);
1244 if ((dentry->d_inode) && (dentry->d_inode->i_nlink < 2)) 1252 if ((dentry->d_inode) && (dentry->d_inode->i_nlink < 2))
1245 set_nlink(dentry->d_inode, 2); 1253 set_nlink(dentry->d_inode, 2);
1246 1254 spin_unlock(&dentry->d_inode->i_lock);
1247 mode &= ~current_umask(); 1255 mode &= ~current_umask();
1248 /* must turn on setgid bit if parent dir has it */ 1256 /* must turn on setgid bit if parent dir has it */
1249 if (inode->i_mode & S_ISGID) 1257 if (inode->i_mode & S_ISGID)
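
The cifs inode hunks share one theme: link-count and attribute updates on a live inode now happen under inode->i_lock, matching how the VFS serializes i_nlink and friends elsewhere. cifs_drop_nlink(), added above and modelled on fs/nfs/dir.c, is the reusable piece; the open-coded sites follow the same shape:

    spin_lock(&inode->i_lock);
    if (inode->i_nlink > 0)
            drop_nlink(inode);      /* guarded so i_nlink never underflows */
    spin_unlock(&inode->i_lock);

The inc_nlink() call in link.c and the set_nlink() in the mkdir path take the same lock for the same reason.
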
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 09e4b3ae4564..e6ce3b112875 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -433,7 +433,9 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
433 if (old_file->d_inode) { 433 if (old_file->d_inode) {
434 cifsInode = CIFS_I(old_file->d_inode); 434 cifsInode = CIFS_I(old_file->d_inode);
435 if (rc == 0) { 435 if (rc == 0) {
436 spin_lock(&old_file->d_inode->i_lock);
436 inc_nlink(old_file->d_inode); 437 inc_nlink(old_file->d_inode);
438 spin_unlock(&old_file->d_inode->i_lock);
437/* BB should we make this contingent on superblock flag NOATIME? */ 439/* BB should we make this contingent on superblock flag NOATIME? */
438/* old_file->d_inode->i_ctime = CURRENT_TIME;*/ 440/* old_file->d_inode->i_ctime = CURRENT_TIME;*/
439 /* parent dir timestamps will update from srv 441 /* parent dir timestamps will update from srv
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index a4ff5d547554..e4d3b9964167 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -52,7 +52,8 @@ check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid)
52 cERROR(1, "Bad protocol string signature header %x", 52 cERROR(1, "Bad protocol string signature header %x",
53 *(unsigned int *) hdr->ProtocolId); 53 *(unsigned int *) hdr->ProtocolId);
54 if (mid != hdr->MessageId) 54 if (mid != hdr->MessageId)
55 cERROR(1, "Mids do not match"); 55 cERROR(1, "Mids do not match: %llu and %llu", mid,
56 hdr->MessageId);
56 } 57 }
57 cERROR(1, "Bad SMB detected. The Mid=%llu", hdr->MessageId); 58 cERROR(1, "Bad SMB detected. The Mid=%llu", hdr->MessageId);
58 return 1; 59 return 1;
@@ -107,7 +108,7 @@ smb2_check_message(char *buf, unsigned int length)
107 * ie Validate the wct via smb2_struct_sizes table above 108 * ie Validate the wct via smb2_struct_sizes table above
108 */ 109 */
109 110
110 if (length < 2 + sizeof(struct smb2_hdr)) { 111 if (length < sizeof(struct smb2_pdu)) {
111 if ((length >= sizeof(struct smb2_hdr)) && (hdr->Status != 0)) { 112 if ((length >= sizeof(struct smb2_hdr)) && (hdr->Status != 0)) {
112 pdu->StructureSize2 = 0; 113 pdu->StructureSize2 = 0;
113 /* 114 /*
@@ -121,15 +122,15 @@ smb2_check_message(char *buf, unsigned int length)
121 return 1; 122 return 1;
122 } 123 }
123 if (len > CIFSMaxBufSize + MAX_SMB2_HDR_SIZE - 4) { 124 if (len > CIFSMaxBufSize + MAX_SMB2_HDR_SIZE - 4) {
124 cERROR(1, "SMB length greater than maximum, mid=%lld", mid); 125 cERROR(1, "SMB length greater than maximum, mid=%llu", mid);
125 return 1; 126 return 1;
126 } 127 }
127 128
128 if (check_smb2_hdr(hdr, mid)) 129 if (check_smb2_hdr(hdr, mid))
129 return 1; 130 return 1;
130 131
131 if (hdr->StructureSize != SMB2_HEADER_SIZE) { 132 if (hdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
132 cERROR(1, "Illegal structure size %d", 133 cERROR(1, "Illegal structure size %u",
133 le16_to_cpu(hdr->StructureSize)); 134 le16_to_cpu(hdr->StructureSize));
134 return 1; 135 return 1;
135 } 136 }
@@ -161,8 +162,9 @@ smb2_check_message(char *buf, unsigned int length)
161 if (4 + len != clc_len) { 162 if (4 + len != clc_len) {
162 cFYI(1, "Calculated size %u length %u mismatch mid %llu", 163 cFYI(1, "Calculated size %u length %u mismatch mid %llu",
163 clc_len, 4 + len, mid); 164 clc_len, 4 + len, mid);
164 if (clc_len == 4 + len + 1) /* BB FIXME (fix samba) */ 165 /* server can return one byte more */
165 return 0; /* BB workaround Samba 3 bug SessSetup rsp */ 166 if (clc_len == 4 + len + 1)
167 return 0;
166 return 1; 168 return 1;
167 } 169 }
168 return 0; 170 return 0;
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index f37a1b41b402..c5fbfac5d576 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -87,10 +87,6 @@
87 87
88#define SMB2_PROTO_NUMBER __constant_cpu_to_le32(0x424d53fe) 88#define SMB2_PROTO_NUMBER __constant_cpu_to_le32(0x424d53fe)
89 89
90#define SMB2_HEADER_SIZE __constant_le16_to_cpu(64)
91
92#define SMB2_ERROR_STRUCTURE_SIZE2 __constant_le16_to_cpu(9)
93
94/* 90/*
95 * SMB2 Header Definition 91 * SMB2 Header Definition
96 * 92 *
@@ -99,6 +95,9 @@
99 * "PDU" : "Protocol Data Unit" (ie a network "frame") 95 * "PDU" : "Protocol Data Unit" (ie a network "frame")
100 * 96 *
101 */ 97 */
98
99#define SMB2_HEADER_STRUCTURE_SIZE __constant_le16_to_cpu(64)
100
102struct smb2_hdr { 101struct smb2_hdr {
103 __be32 smb2_buf_length; /* big endian on wire */ 102 __be32 smb2_buf_length; /* big endian on wire */
104 /* length is only two or three bytes - with 103 /* length is only two or three bytes - with
@@ -140,6 +139,9 @@ struct smb2_pdu {
140 * command code name for the struct. Note that structures must be packed. 139 * command code name for the struct. Note that structures must be packed.
141 * 140 *
142 */ 141 */
142
143#define SMB2_ERROR_STRUCTURE_SIZE2 __constant_le16_to_cpu(9)
144
143struct smb2_err_rsp { 145struct smb2_err_rsp {
144 struct smb2_hdr hdr; 146 struct smb2_hdr hdr;
145 __le16 StructureSize; 147 __le16 StructureSize;
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 83867ef348df..d9b639b95fa8 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -503,13 +503,16 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
 	/* convert the length into a more usable form */
 	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
 		struct kvec iov;
+		int rc = 0;

 		iov.iov_base = mid->resp_buf;
 		iov.iov_len = len;
 		/* FIXME: add code to kill session */
-		if (cifs_verify_signature(&iov, 1, server,
-					  mid->sequence_number + 1) != 0)
-			cERROR(1, "Unexpected SMB signature");
+		rc = cifs_verify_signature(&iov, 1, server,
+					   mid->sequence_number + 1);
+		if (rc)
+			cERROR(1, "SMB signature verification returned error = "
+			       "%d", rc);
 	}

 	/* BB special case reconnect tid and uid here? */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index fc3526077348..6b4565c440c8 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2149,7 +2149,7 @@
 #define PCI_DEVICE_ID_TIGON3_5704S	0x16a8
 #define PCI_DEVICE_ID_NX2_57800_VF	0x16a9
 #define PCI_DEVICE_ID_NX2_5706S		0x16aa
-#define PCI_DEVICE_ID_NX2_57840_MF	0x16ab
+#define PCI_DEVICE_ID_NX2_57840_MF	0x16a4
 #define PCI_DEVICE_ID_NX2_5708S		0x16ac
 #define PCI_DEVICE_ID_NX2_57840_VF	0x16ad
 #define PCI_DEVICE_ID_NX2_57810_MF	0x16ae
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index e1ce1048fe5f..4a045cda9c60 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -18,6 +18,7 @@ struct nf_conntrack_ecache {
 	u16 ctmask;		/* bitmask of ct events to be delivered */
 	u16 expmask;		/* bitmask of expect events to be delivered */
 	u32 pid;		/* netlink pid of destroyer */
+	struct timer_list timeout;
 };

 static inline struct nf_conntrack_ecache *
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 346b1eb83a1f..e4ba3e70c174 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -168,24 +168,16 @@ static void poll_napi(struct net_device *dev)
 	struct napi_struct *napi;
 	int budget = 16;

-	WARN_ON_ONCE(!irqs_disabled());
-
 	list_for_each_entry(napi, &dev->napi_list, dev_list) {
-		local_irq_enable();
 		if (napi->poll_owner != smp_processor_id() &&
 		    spin_trylock(&napi->poll_lock)) {
-			rcu_read_lock_bh();
 			budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
 					       napi, budget);
-			rcu_read_unlock_bh();
 			spin_unlock(&napi->poll_lock);

-			if (!budget) {
-				local_irq_disable();
+			if (!budget)
 				break;
-			}
 		}
-		local_irq_disable();
 	}
 }

diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 8eec8f4a0536..ebdf06f938bf 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -124,6 +124,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
 static struct kmem_cache *mrt_cachep __read_mostly;

 static struct mr_table *ipmr_new_table(struct net *net, u32 id);
+static void ipmr_free_table(struct mr_table *mrt);
+
 static int ip_mr_forward(struct net *net, struct mr_table *mrt,
 			 struct sk_buff *skb, struct mfc_cache *cache,
 			 int local);
@@ -131,6 +133,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
 			     struct sk_buff *pkt, vifi_t vifi, int assert);
 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 			      struct mfc_cache *c, struct rtmsg *rtm);
+static void mroute_clean_tables(struct mr_table *mrt);
 static void ipmr_expire_process(unsigned long arg);

 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -271,7 +274,7 @@ static void __net_exit ipmr_rules_exit(struct net *net)

 	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
 		list_del(&mrt->list);
-		kfree(mrt);
+		ipmr_free_table(mrt);
 	}
 	fib_rules_unregister(net->ipv4.mr_rules_ops);
 }
@@ -299,7 +302,7 @@ static int __net_init ipmr_rules_init(struct net *net)

 static void __net_exit ipmr_rules_exit(struct net *net)
 {
-	kfree(net->ipv4.mrt);
+	ipmr_free_table(net->ipv4.mrt);
 }
 #endif

@@ -336,6 +339,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
 	return mrt;
 }

+static void ipmr_free_table(struct mr_table *mrt)
+{
+	del_timer_sync(&mrt->ipmr_expire_timer);
+	mroute_clean_tables(mrt);
+	kfree(mrt);
+}
+
 /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

 static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
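
[Editorial note: the new ipmr_free_table() above encodes a teardown ordering worth spelling out: the expire timer must be stopped with del_timer_sync() before the table it dereferences is cleaned and freed. A minimal sketch of that ordering follows; my_table, my_table_clean() and the field names are invented for illustration.]

#include <linux/timer.h>
#include <linux/slab.h>

struct my_table {
	struct timer_list expire_timer;
};

void my_table_clean(struct my_table *t);

static void my_table_free(struct my_table *t)
{
	/* del_timer() alone could return while the handler is still
	 * running on another CPU and dereferencing 't'; del_timer_sync()
	 * waits for it, which makes the kfree() below safe. */
	del_timer_sync(&t->expire_timer);
	my_table_clean(t);
	kfree(t);
}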
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index 4ad9cf173992..9c87cde28ff8 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -502,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
 		ret = nf_ct_expect_related(rtcp_exp);
 		if (ret == 0)
 			break;
-		else if (ret != -EBUSY) {
+		else if (ret == -EBUSY) {
+			nf_ct_unexpect_related(rtp_exp);
+			continue;
+		} else if (ret < 0) {
 			nf_ct_unexpect_related(rtp_exp);
 			port = 0;
 			break;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index fd9ecb52c66b..82cf2a722b23 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -934,12 +934,14 @@ static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
 	if (mtu < ip_rt_min_pmtu)
 		mtu = ip_rt_min_pmtu;

+	rcu_read_lock();
 	if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) {
 		struct fib_nh *nh = &FIB_RES_NH(res);

 		update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
 				      jiffies + ip_rt_mtu_expires);
 	}
+	rcu_read_unlock();
 	return mtu;
 }

@@ -956,7 +958,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
 		dst->obsolete = DST_OBSOLETE_KILL;
 	} else {
 		rt->rt_pmtu = mtu;
-		dst_set_expires(&rt->dst, ip_rt_mtu_expires);
+		rt->dst.expires = max(1UL, jiffies + ip_rt_mtu_expires);
 	}
 }

@@ -1263,7 +1265,7 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
 {
 	struct rtable *rt = (struct rtable *) dst;

-	if (dst->flags & DST_NOCACHE) {
+	if (!list_empty(&rt->rt_uncached)) {
 		spin_lock_bh(&rt_uncached_lock);
 		list_del(&rt->rt_uncached);
 		spin_unlock_bh(&rt_uncached_lock);
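
[Editorial note: the first route.c hunk above adds rcu_read_lock()/rcu_read_unlock() around fib_lookup() because the lookup result is only guaranteed to stay alive inside an RCU read-side critical section. A sketch of that rule in isolation; my_table, my_entry, my_lookup() and my_touch() are invented names.]

#include <linux/rcupdate.h>

struct my_table;
struct my_entry;

struct my_entry *my_lookup(struct my_table *tbl, unsigned int key);
void my_touch(struct my_entry *e);

static void my_use(struct my_table *tbl, unsigned int key)
{
	struct my_entry *e;

	rcu_read_lock();	/* the lookup result is only stable in here */
	e = my_lookup(tbl, key);
	if (e)
		my_touch(e);	/* every use must finish before the unlock */
	rcu_read_unlock();	/* after this, e may be freed at any time */
}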
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 85308b90df80..6e38c6c23caa 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2926,13 +2926,14 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
  * tcp_xmit_retransmit_queue().
  */
 static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
-				  int newly_acked_sacked, bool is_dupack,
+				  int prior_sacked, bool is_dupack,
 				  int flag)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
 				    (tcp_fackets_out(tp) > tp->reordering));
+	int newly_acked_sacked = 0;
 	int fast_rexmit = 0;

 	if (WARN_ON(!tp->packets_out && tp->sacked_out))
@@ -2992,6 +2993,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
 				tcp_add_reno_sack(sk);
 		} else
 			do_lost = tcp_try_undo_partial(sk, pkts_acked);
+		newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
 		break;
 	case TCP_CA_Loss:
 		if (flag & FLAG_DATA_ACKED)
@@ -3013,6 +3015,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
 			if (is_dupack)
 				tcp_add_reno_sack(sk);
 		}
+		newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;

 		if (icsk->icsk_ca_state <= TCP_CA_Disorder)
 			tcp_try_undo_dsack(sk);
@@ -3590,7 +3593,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	int prior_packets;
 	int prior_sacked = tp->sacked_out;
 	int pkts_acked = 0;
-	int newly_acked_sacked = 0;
 	bool frto_cwnd = false;

 	/* If the ack is older than previous acks
@@ -3666,8 +3668,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);

 		pkts_acked = prior_packets - tp->packets_out;
-		newly_acked_sacked = (prior_packets - prior_sacked) -
-				     (tp->packets_out - tp->sacked_out);

 		if (tp->frto_counter)
 			frto_cwnd = tcp_process_frto(sk, flag);
@@ -3681,7 +3681,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		    tcp_may_raise_cwnd(sk, flag))
 			tcp_cong_avoid(sk, ack, prior_in_flight);
 		is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
-		tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+		tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
 				      is_dupack, flag);
 	} else {
 		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
@@ -3698,7 +3698,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 no_queue:
 	/* If data was DSACKed, see if we can undo a cwnd reduction. */
 	if (flag & FLAG_DSACKING_ACK)
-		tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+		tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
 				      is_dupack, flag);
 	/* If this ack opens up a zero window, clear backoff.  It was
 	 * being used to time the probes, and is probably far higher than
@@ -3718,8 +3718,7 @@ old_ack:
 	 */
 	if (TCP_SKB_CB(skb)->sacked) {
 		flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
-		newly_acked_sacked = tp->sacked_out - prior_sacked;
-		tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+		tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
 				      is_dupack, flag);
 	}

diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 6dc7fd353ef5..282f3723ee19 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -167,8 +167,6 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 	struct esp_data *esp = x->data;

 	/* skb is pure payload to encrypt */
-	err = -ENOMEM;
-
 	aead = esp->aead;
 	alen = crypto_aead_authsize(aead);

@@ -203,8 +201,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 	}

 	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
-	if (!tmp)
+	if (!tmp) {
+		err = -ENOMEM;
 		goto error;
+	}

 	seqhi = esp_tmp_seqhi(tmp);
 	iv = esp_tmp_iv(aead, tmp, seqhilen);
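
[Editorial note: this esp6 change, like the matching hunks in ip_vs_ctl.c, nfnetlink_log.c and xfrm_state.c further down, fixes the same bug class: a goto error path that leaves the error variable holding 0 or a stale value. Setting the errno at the failure site keeps each exit self-documenting. A generic sketch; my_ctx and my_init() are invented names.]

#include <linux/slab.h>

struct my_ctx {
	void *buf;
};

static int my_init(struct my_ctx *ctx)
{
	int err;

	ctx->buf = kmalloc(128, GFP_KERNEL);
	if (!ctx->buf) {
		err = -ENOMEM;	/* set the errno where the failure happens... */
		goto error;	/* ...so the unwind path just returns it */
	}
	return 0;
error:
	return err;
}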
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 393355d37b47..513cab08a986 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1347,11 +1347,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
 	/* Remove from tunnel list */
 	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
 	list_del_rcu(&tunnel->list);
+	kfree_rcu(tunnel, rcu);
 	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
-	synchronize_rcu();

 	atomic_dec(&l2tp_tunnel_count);
-	kfree(tunnel);
 }

 /* Create a socket for the tunnel, if one isn't set up by
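
[Editorial note: the hunk above swaps a synchronize_rcu() + kfree() pair, which blocks the caller for a full grace period, for kfree_rcu(), which queues the free and returns immediately; the struct only needs an embedded rcu_head, added to struct l2tp_tunnel in the header change below. A sketch of the before/after shape; struct my_obj is invented.]

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	struct list_head list;
	struct rcu_head rcu;	/* storage kfree_rcu() uses */
};

static void my_obj_del(struct my_obj *obj)
{
	list_del_rcu(&obj->list);
	/* Old form blocked here for a whole grace period:
	 *	synchronize_rcu();
	 *	kfree(obj);
	 * kfree_rcu() defers the free past the grace period instead. */
	kfree_rcu(obj, rcu);
}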
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index a38ec6cdeee1..56d583e083a7 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -163,6 +163,7 @@ struct l2tp_tunnel_cfg {

 struct l2tp_tunnel {
 	int			magic;		/* Should be L2TP_TUNNEL_MAGIC */
+	struct rcu_head rcu;
 	rwlock_t		hlist_lock;	/* protect session_hlist */
 	struct hlist_head	session_hlist[L2TP_HASH_SIZE];
 						/* hashed list of sessions,
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index acf712ffb5e6..c5e8c9c31f76 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1811,37 +1811,31 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 			meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
 					sdata, NULL, NULL);
 		} else {
-			int is_mesh_mcast = 1;
-			const u8 *mesh_da;
+			/* DS -> MBSS (802.11-2012 13.11.3.3).
+			 * For unicast with unknown forwarding information,
+			 * destination might be in the MBSS or if that fails
+			 * forwarded to another mesh gate. In either case
+			 * resolution will be handled in ieee80211_xmit(), so
+			 * leave the original DA. This also works for mcast */
+			const u8 *mesh_da = skb->data;
+
+			if (mppath)
+				mesh_da = mppath->mpp;
+			else if (mpath)
+				mesh_da = mpath->dst;
+			rcu_read_unlock();

-			if (is_multicast_ether_addr(skb->data))
-				/* DA TA mSA AE:SA */
-				mesh_da = skb->data;
-			else {
-				static const u8 bcast[ETH_ALEN] =
-					{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-				if (mppath) {
-					/* RA TA mDA mSA AE:DA SA */
-					mesh_da = mppath->mpp;
-					is_mesh_mcast = 0;
-				} else if (mpath) {
-					mesh_da = mpath->dst;
-					is_mesh_mcast = 0;
-				} else {
-					/* DA TA mSA AE:SA */
-					mesh_da = bcast;
-				}
-			}
 			hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
 					mesh_da, sdata->vif.addr);
-			rcu_read_unlock();
-			if (is_mesh_mcast)
+			if (is_multicast_ether_addr(mesh_da))
+				/* DA TA mSA AE:SA */
 				meshhdrlen =
 					ieee80211_new_mesh_header(&mesh_hdr,
 							sdata,
 							skb->data + ETH_ALEN,
 							NULL);
 			else
+				/* RA TA mDA mSA AE:DA SA */
 				meshhdrlen =
 					ieee80211_new_mesh_header(&mesh_hdr,
 							sdata,
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 72bf32a84874..f51013c07b9f 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1171,8 +1171,10 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
 		goto out_err;
 	}
 	svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
-	if (!svc->stats.cpustats)
+	if (!svc->stats.cpustats) {
+		ret = -ENOMEM;
 		goto out_err;
+	}

 	/* I'm the first user of the service */
 	atomic_set(&svc->usecnt, 0);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index cf4875565d67..2ceec64b19f9 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -249,12 +249,15 @@ static void death_by_event(unsigned long ul_conntrack)
 {
 	struct nf_conn *ct = (void *)ul_conntrack;
 	struct net *net = nf_ct_net(ct);
+	struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+	BUG_ON(ecache == NULL);

 	if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
 		/* bad luck, let's retry again */
-		ct->timeout.expires = jiffies +
+		ecache->timeout.expires = jiffies +
 			(random32() % net->ct.sysctl_events_retry_timeout);
-		add_timer(&ct->timeout);
+		add_timer(&ecache->timeout);
 		return;
 	}
 	/* we've got the event delivered, now it's dying */
@@ -268,6 +271,9 @@ static void death_by_event(unsigned long ul_conntrack)
 void nf_ct_insert_dying_list(struct nf_conn *ct)
 {
 	struct net *net = nf_ct_net(ct);
+	struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+	BUG_ON(ecache == NULL);

 	/* add this conntrack to the dying list */
 	spin_lock_bh(&nf_conntrack_lock);
@@ -275,10 +281,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct)
 		       &net->ct.dying);
 	spin_unlock_bh(&nf_conntrack_lock);
 	/* set a new timer to retry event delivery */
-	setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
-	ct->timeout.expires = jiffies +
+	setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
+	ecache->timeout.expires = jiffies +
 		(random32() % net->ct.sysctl_events_retry_timeout);
-	add_timer(&ct->timeout);
+	add_timer(&ecache->timeout);
 }
 EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);

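
[Editorial note: the conntrack hunks above stop reusing ct->timeout, which still belongs to the normal expiry machinery, and give event-delivery retries their own timer inside the ecache extension (the field added to nf_conntrack_ecache.h earlier). The timer idiom itself is the setup_timer()/add_timer() pair of this kernel era; a minimal sketch with invented names follows.]

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_state {
	struct timer_list retry;
};

static void my_retry_fn(unsigned long data)
{
	struct my_state *st = (struct my_state *)data;
	/* ... retry the deferred work; may re-arm st->retry ... */
}

static void my_arm_retry(struct my_state *st, unsigned long delay)
{
	setup_timer(&st->retry, my_retry_fn, (unsigned long)st);
	st->retry.expires = jiffies + delay;
	add_timer(&st->retry);
}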
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index da4fc37a8578..9807f3278fcb 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2790,7 +2790,8 @@ static int __init ctnetlink_init(void)
 		goto err_unreg_subsys;
 	}

-	if (register_pernet_subsys(&ctnetlink_net_ops)) {
+	ret = register_pernet_subsys(&ctnetlink_net_ops);
+	if (ret < 0) {
 		pr_err("ctnetlink_init: cannot register pernet operations\n");
 		goto err_unreg_exp_subsys;
 	}
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 169ab59ed9d4..14e2f3903142 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -480,7 +480,7 @@ __build_packet_message(struct nfulnl_instance *inst,
 	}

 	if (indev && skb_mac_header_was_set(skb)) {
-		if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
+		if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
 		    nla_put_be16(inst->skb, NFULA_HWLEN,
 				 htons(skb->dev->hard_header_len)) ||
 		    nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
@@ -996,8 +996,10 @@ static int __init nfnetlink_log_init(void)

 #ifdef CONFIG_PROC_FS
 	if (!proc_create("nfnetlink_log", 0440,
-			 proc_net_netfilter, &nful_file_ops))
+			 proc_net_netfilter, &nful_file_ops)) {
+		status = -ENOMEM;
 		goto cleanup_logger;
+	}
 #endif
 	return status;

diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 1445d73533ed..527023823b5c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1373,7 +1373,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 		dst_pid = addr->nl_pid;
 		dst_group = ffs(addr->nl_groups);
 		err = -EPERM;
-		if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
+		if ((dst_group || dst_pid) &&
+		    !netlink_capable(sock, NL_NONROOT_SEND))
 			goto out;
 	} else {
 		dst_pid = nlk->dst_pid;
@@ -2147,6 +2148,7 @@ static void __init netlink_add_usersock_entry(void)
 	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
 	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
 	nl_table[NETLINK_USERSOCK].registered = 1;
+	nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;

 	netlink_table_ungrab();
 }
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index aee7196aac36..c5c9e2a54218 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1273,7 +1273,7 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
 	spin_unlock(&f->lock);
 }

-bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
+static bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
 {
 	if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
 		return true;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 87cd0e4d4282..210be48d8ae3 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1994,8 +1994,10 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
 		goto error;

 	x->outer_mode = xfrm_get_mode(x->props.mode, family);
-	if (x->outer_mode == NULL)
+	if (x->outer_mode == NULL) {
+		err = -EPROTONOSUPPORT;
 		goto error;
+	}

 	if (init_replay) {
 		err = xfrm_init_replay(x);