Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/cpu/addon_cpuid_features.c |  34
 arch/x86/kernel/cpu/common.c               |  74
 arch/x86/kernel/cpu/intel.c                |  14
 arch/x86/kernel/entry_64.S                 |   2
 arch/x86/kernel/head_32.S                  |  13
 arch/x86/kernel/io_apic.c                  |  34
 arch/x86/kernel/paravirt-spinlocks.c       |  10
 arch/x86/kernel/paravirt.c                 |   1
 arch/x86/kernel/signal.c                   | 291
 9 files changed, 231 insertions(+), 242 deletions(-)
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 2cf23634b6d9..4e581fdc0a5a 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -143,37 +143,3 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 	return;
 #endif
 }
-
-#ifdef CONFIG_X86_PAT
-void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
-{
-	if (!cpu_has_pat)
-		pat_disable("PAT not supported by CPU.");
-
-	switch (c->x86_vendor) {
-	case X86_VENDOR_INTEL:
-		/*
-		 * There is a known erratum on Pentium III and Core Solo
-		 * and Core Duo CPUs.
-		 * " Page with PAT set to WC while associated MTRR is UC
-		 * may consolidate to UC "
-		 * Because of this erratum, it is better to stick with
-		 * setting WC in MTRR rather than using PAT on these CPUs.
-		 *
-		 * Enable PAT WC only on P4, Core 2 or later CPUs.
-		 */
-		if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
-			return;
-
-		pat_disable("PAT WC disabled due to known CPU erratum.");
-		return;
-
-	case X86_VENDOR_AMD:
-	case X86_VENDOR_CENTAUR:
-	case X86_VENDOR_TRANSMETA:
-		return;
-	}
-
-	pat_disable("PAT disabled. Not yet verified on this CPU type.");
-}
-#endif
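
The Intel branch of the deleted validate_pat_support() boils down to one family/model test, which resurfaces in intel.c below as a clear_cpu_cap() call. Read on its own, the gate looks like this (a sketch only; the helper name and plain-integer arguments are assumptions for illustration, not kernel API):

	#include <stdbool.h>

	/* Illustrative predicate: PAT write-combining is treated as safe only
	 * on family > 6 (P4 and later) or family 6, model >= 15 (Core 2 and
	 * later); older family-6 parts are subject to the WC/UC consolidation
	 * erratum quoted in the comment above.
	 */
	static bool intel_pat_wc_safe(unsigned int family, unsigned int model)
	{
		return family > 6 || (family == 6 && model >= 15);
	}
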
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 652fdc9a757a..275e2cb43b91 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -224,6 +224,49 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 #endif
 
 /*
+ * Some CPU features depend on higher CPUID levels, which may not always
+ * be available due to CPUID level capping or broken virtualization
+ * software.  Add those features to this table to auto-disable them.
+ */
+struct cpuid_dependent_feature {
+	u32 feature;
+	u32 level;
+};
+static const struct cpuid_dependent_feature __cpuinitconst
+cpuid_dependent_features[] = {
+	{ X86_FEATURE_MWAIT,	0x00000005 },
+	{ X86_FEATURE_DCA,	0x00000009 },
+	{ X86_FEATURE_XSAVE,	0x0000000d },
+	{ 0, 0 }
+};
+
+static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+{
+	const struct cpuid_dependent_feature *df;
+	for (df = cpuid_dependent_features; df->feature; df++) {
+		/*
+		 * Note: cpuid_level is set to -1 if unavailable, but
+		 * extended_cpuid_level is set to 0 if unavailable
+		 * and the legitimate extended levels are all negative
+		 * when signed; hence the weird messing around with
+		 * signs here...
+		 */
+		if (cpu_has(c, df->feature) &&
+		    ((s32)df->level < 0 ?
+		     (u32)df->level > (u32)c->extended_cpuid_level :
+		     (s32)df->level > (s32)c->cpuid_level)) {
+			clear_cpu_cap(c, df->feature);
+			if (warn)
+				printk(KERN_WARNING
+				       "CPU: CPU feature %s disabled "
+				       "due to lack of CPUID level 0x%x\n",
+				       x86_cap_flags[df->feature],
+				       df->level);
+		}
+	}
+}
+
+/*
  * Naming convention should be: <Name> [(<Codename>)]
  * This table only is used unless init_<vendor>() below doesn't set it;
  * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
@@ -586,11 +629,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	if (this_cpu->c_early_init)
 		this_cpu->c_early_init(c);
 
-	validate_pat_support(c);
-
 #ifdef CONFIG_SMP
 	c->cpu_index = boot_cpu_id;
 #endif
+	filter_cpuid_features(c, false);
 }
 
 void __init early_cpu_init(void)
@@ -724,6 +766,9 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	 * we do "generic changes."
 	 */
 
+	/* Filter out anything that depends on CPUID levels we don't have */
+	filter_cpuid_features(c, true);
+
 	/* If the model name is still unset, do table lookup. */
 	if (!c->x86_model_id[0]) {
 		char *p;
@@ -1053,22 +1098,19 @@ void __cpuinit cpu_init(void)
 	 */
 	if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
 		arch_kgdb_ops.correct_hw_break();
-	else {
+	else
 #endif
-	/*
-	 * Clear all 6 debug registers:
-	 */
-
-	set_debugreg(0UL, 0);
-	set_debugreg(0UL, 1);
-	set_debugreg(0UL, 2);
-	set_debugreg(0UL, 3);
-	set_debugreg(0UL, 6);
-	set_debugreg(0UL, 7);
-#ifdef CONFIG_KGDB
-	/* If the kgdb is connected no debug regs should be altered. */
-	}
-#endif
+	{
+		/*
+		 * Clear all 6 debug registers:
+		 */
+		set_debugreg(0UL, 0);
+		set_debugreg(0UL, 1);
+		set_debugreg(0UL, 2);
+		set_debugreg(0UL, 3);
+		set_debugreg(0UL, 6);
+		set_debugreg(0UL, 7);
+	}
 
 	fpu_init();
 
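
The new filter_cpuid_features() is table-driven: each entry names a feature bit and the CPUID leaf it needs, and the sign trick works because extended leaves (0x80000000 and up) are negative when viewed as s32. A stand-alone model of the level comparison (hypothetical helper for illustration, not kernel code):

	#include <stdbool.h>
	#include <stdint.h>

	/* Hypothetical model of the check above: a required leaf that is
	 * negative as s32 is an extended leaf and is compared unsigned
	 * against the extended maximum; anything else is compared signed
	 * against the basic maximum (which is -1 when CPUID is absent).
	 */
	static bool cpuid_level_missing(uint32_t required, int32_t basic_max,
					uint32_t extended_max)
	{
		if ((int32_t)required < 0)
			return required > extended_max;
		return (int32_t)required > basic_max;
	}

For example, cpuid_level_missing(0x0000000d, 0x0000000a, 0x80000008) is true, which is exactly the case where XSAVE gets cleared on a level-capped CPU.
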
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 549f2ada55f5..5deefae9064d 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -30,7 +30,7 @@
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
 	/* Unmask CPUID levels if masked: */
-	if (c->x86 == 6 && c->x86_model >= 15) {
+	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
 		u64 misc_enable;
 
 		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
@@ -63,6 +63,18 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
 	}
 
+	/*
+	 * There is a known erratum on Pentium III and Core Solo
+	 * and Core Duo CPUs.
+	 * " Page with PAT set to WC while associated MTRR is UC
+	 * may consolidate to UC "
+	 * Because of this erratum, it is better to stick with
+	 * setting WC in MTRR rather than using PAT on these CPUs.
+	 *
+	 * Enable PAT WC only on P4, Core 2 or later CPUs.
+	 */
+	if (c->x86 == 6 && c->x86_model < 15)
+		clear_cpu_cap(c, X86_FEATURE_PAT);
 }
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index a52703864a16..82801fd2e931 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -409,6 +409,8 @@ END(save_paranoid)
 ENTRY(ret_from_fork)
 	DEFAULT_FRAME
 
+	LOCK ; btr $TIF_FORK,TI_flags(%r8)
+
 	push kernel_eflags(%rip)
 	CFI_ADJUST_CFA_OFFSET 8
 	popf				# reset kernel eflags
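
The added instruction is an atomic clear of the TIF_FORK flag in the new task's thread_info, which %r8 points at on this path. Roughly the C-level equivalent, as a sketch (assuming the usual x86 bitops; this is not the code the kernel runs here):

	/* LOCK ; btr $TIF_FORK,TI_flags(%r8) acts like an atomic clear_bit()
	 * on the thread_info flags word of the freshly forked task.
	 */
	clear_bit(TIF_FORK, (unsigned long *)&ti->flags);

where ti stands for the child's struct thread_info pointer.
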
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 24c0e5cd71e3..722464c520cf 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -548,12 +548,8 @@ early_fault:
 	pushl %eax
 	pushl %edx		/* trapno */
 	pushl $fault_msg
-#ifdef CONFIG_EARLY_PRINTK
-	call early_printk
-#else
 	call printk
 #endif
-#endif
 	call dump_stack
 hlt_loop:
 	hlt
@@ -580,11 +576,10 @@ ignore_int:
 	pushl 32(%esp)
 	pushl 40(%esp)
 	pushl $int_msg
-#ifdef CONFIG_EARLY_PRINTK
-	call early_printk
-#else
 	call printk
-#endif
+
+	call dump_stack
+
 	addl $(5*4),%esp
 	popl %ds
 	popl %es
@@ -660,7 +655,7 @@ early_recursion_flag:
 	.long 0
 
 int_msg:
-	.asciz "Unknown interrupt or fault at EIP %p %p %p\n"
+	.asciz "Unknown interrupt or fault at: %p %p %p\n"
 
 fault_msg:
 /* fault info: */
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index e4d36bd56b62..bfb7d734062a 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -3465,40 +3465,6 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
 	return 0;
 }
 
-int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc)
-{
-	unsigned int irq;
-	int ret;
-	unsigned int irq_want;
-
-	irq_want = nr_irqs_gsi;
-	irq = create_irq_nr(irq_want);
-	if (irq == 0)
-		return -1;
-
-#ifdef CONFIG_INTR_REMAP
-	if (!intr_remapping_enabled)
-		goto no_ir;
-
-	ret = msi_alloc_irte(dev, irq, 1);
-	if (ret < 0)
-		goto error;
-no_ir:
-#endif
-	ret = setup_msi_irq(dev, msidesc, irq);
-	if (ret < 0) {
-		destroy_irq(irq);
-		return ret;
-	}
-	return 0;
-
-#ifdef CONFIG_INTR_REMAP
-error:
-	destroy_irq(irq);
-	return ret;
-#endif
-}
-
 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 {
 	unsigned int irq;
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 95777b0faa73..3a7c5a44082e 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -26,13 +26,3 @@ struct pv_lock_ops pv_lock_ops = {
 };
 EXPORT_SYMBOL(pv_lock_ops);
 
-void __init paravirt_use_bytelocks(void)
-{
-#ifdef CONFIG_SMP
-	pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
-	pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
-	pv_lock_ops.spin_lock = __byte_spin_lock;
-	pv_lock_ops.spin_trylock = __byte_spin_trylock;
-	pv_lock_ops.spin_unlock = __byte_spin_unlock;
-#endif
-}
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index e4c8fb608873..202514be5923 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -435,7 +435,6 @@ struct pv_mmu_ops pv_mmu_ops = {
 #endif	/* PAGETABLE_LEVELS >= 3 */
 
 	.pte_val = native_pte_val,
-	.pte_flags = native_pte_flags,
 	.pgd_val = native_pgd_val,
 
 	.make_pte = native_make_pte,
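
With the .pte_flags op gone from pv_mmu_ops, reading a pte's flag bits no longer needs its own paravirt hook; only the PFN part of a pte requires translation, so the flags can be masked out of the pte_val() result. A sketch of the simplified accessor (an assumption for illustration: PTE_FLAGS_MASK selects the non-PFN bits, as on x86):

	/* Sketch: flags are invariant under paravirt pte translation,
	 * so no dedicated pte_flags op is required.
	 */
	static inline pteval_t pte_flags(pte_t pte)
	{
		return pte_val(pte) & PTE_FLAGS_MASK;
	}
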
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index df0587f24c54..7fc78b019815 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -51,24 +51,24 @@
 #endif
 
 #define COPY(x)			{		\
-	err |= __get_user(regs->x, &sc->x);	\
+	get_user_ex(regs->x, &sc->x);		\
 }
 
 #define COPY_SEG(seg)		{		\
 	unsigned short tmp;			\
-	err |= __get_user(tmp, &sc->seg);	\
+	get_user_ex(tmp, &sc->seg);		\
 	regs->seg = tmp;			\
 }
 
 #define COPY_SEG_CPL3(seg)	{		\
 	unsigned short tmp;			\
-	err |= __get_user(tmp, &sc->seg);	\
+	get_user_ex(tmp, &sc->seg);		\
 	regs->seg = tmp | 3;			\
 }
 
 #define GET_SEG(seg)		{		\
 	unsigned short tmp;			\
-	err |= __get_user(tmp, &sc->seg);	\
+	get_user_ex(tmp, &sc->seg);		\
 	loadsegment(seg, tmp);			\
 }
 
@@ -83,45 +83,49 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
 	/* Always make any pending restarted system calls return -EINTR */
 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
+	get_user_try {
+
 #ifdef CONFIG_X86_32
 	GET_SEG(gs);
 	COPY_SEG(fs);
 	COPY_SEG(es);
 	COPY_SEG(ds);
 #endif /* CONFIG_X86_32 */
 
 	COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
 	COPY(dx); COPY(cx); COPY(ip);
 
 #ifdef CONFIG_X86_64
 	COPY(r8);
 	COPY(r9);
 	COPY(r10);
 	COPY(r11);
 	COPY(r12);
 	COPY(r13);
 	COPY(r14);
 	COPY(r15);
 #endif /* CONFIG_X86_64 */
 
 #ifdef CONFIG_X86_32
 	COPY_SEG_CPL3(cs);
 	COPY_SEG_CPL3(ss);
 #else /* !CONFIG_X86_32 */
 	/* Kernel saves and restores only the CS segment register on signals,
 	 * which is the bare minimum needed to allow mixed 32/64-bit code.
 	 * App's signal handler can save/restore other segments if needed. */
 	COPY_SEG_CPL3(cs);
 #endif /* CONFIG_X86_32 */
 
-	err |= __get_user(tmpflags, &sc->flags);
-	regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
-	regs->orig_ax = -1;		/* disable syscall checks */
+	get_user_ex(tmpflags, &sc->flags);
+	regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
+	regs->orig_ax = -1;		/* disable syscall checks */
+
+	get_user_ex(buf, &sc->fpstate);
+	err |= restore_i387_xstate(buf);
 
-	err |= __get_user(buf, &sc->fpstate);
-	err |= restore_i387_xstate(buf);
+	get_user_ex(*pax, &sc->ax);
+	} get_user_catch(err);
 
-	err |= __get_user(*pax, &sc->ax);
 	return err;
 }
 
@@ -131,57 +135,60 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
 {
 	int err = 0;
 
+	put_user_try {
+
 #ifdef CONFIG_X86_32
 	{
 		unsigned int tmp;
 
 		savesegment(gs, tmp);
-		err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
+		put_user_ex(tmp, (unsigned int __user *)&sc->gs);
 	}
-	err |= __put_user(regs->fs, (unsigned int __user *)&sc->fs);
-	err |= __put_user(regs->es, (unsigned int __user *)&sc->es);
-	err |= __put_user(regs->ds, (unsigned int __user *)&sc->ds);
+	put_user_ex(regs->fs, (unsigned int __user *)&sc->fs);
+	put_user_ex(regs->es, (unsigned int __user *)&sc->es);
+	put_user_ex(regs->ds, (unsigned int __user *)&sc->ds);
 #endif /* CONFIG_X86_32 */
 
-	err |= __put_user(regs->di, &sc->di);
-	err |= __put_user(regs->si, &sc->si);
-	err |= __put_user(regs->bp, &sc->bp);
-	err |= __put_user(regs->sp, &sc->sp);
-	err |= __put_user(regs->bx, &sc->bx);
-	err |= __put_user(regs->dx, &sc->dx);
-	err |= __put_user(regs->cx, &sc->cx);
-	err |= __put_user(regs->ax, &sc->ax);
+	put_user_ex(regs->di, &sc->di);
+	put_user_ex(regs->si, &sc->si);
+	put_user_ex(regs->bp, &sc->bp);
+	put_user_ex(regs->sp, &sc->sp);
+	put_user_ex(regs->bx, &sc->bx);
+	put_user_ex(regs->dx, &sc->dx);
+	put_user_ex(regs->cx, &sc->cx);
+	put_user_ex(regs->ax, &sc->ax);
 #ifdef CONFIG_X86_64
-	err |= __put_user(regs->r8, &sc->r8);
-	err |= __put_user(regs->r9, &sc->r9);
-	err |= __put_user(regs->r10, &sc->r10);
-	err |= __put_user(regs->r11, &sc->r11);
-	err |= __put_user(regs->r12, &sc->r12);
-	err |= __put_user(regs->r13, &sc->r13);
-	err |= __put_user(regs->r14, &sc->r14);
-	err |= __put_user(regs->r15, &sc->r15);
+	put_user_ex(regs->r8, &sc->r8);
+	put_user_ex(regs->r9, &sc->r9);
+	put_user_ex(regs->r10, &sc->r10);
+	put_user_ex(regs->r11, &sc->r11);
+	put_user_ex(regs->r12, &sc->r12);
+	put_user_ex(regs->r13, &sc->r13);
+	put_user_ex(regs->r14, &sc->r14);
+	put_user_ex(regs->r15, &sc->r15);
 #endif /* CONFIG_X86_64 */
 
-	err |= __put_user(current->thread.trap_no, &sc->trapno);
-	err |= __put_user(current->thread.error_code, &sc->err);
-	err |= __put_user(regs->ip, &sc->ip);
+	put_user_ex(current->thread.trap_no, &sc->trapno);
+	put_user_ex(current->thread.error_code, &sc->err);
+	put_user_ex(regs->ip, &sc->ip);
 #ifdef CONFIG_X86_32
-	err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs);
-	err |= __put_user(regs->flags, &sc->flags);
-	err |= __put_user(regs->sp, &sc->sp_at_signal);
-	err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);
+	put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
+	put_user_ex(regs->flags, &sc->flags);
+	put_user_ex(regs->sp, &sc->sp_at_signal);
+	put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);
 #else /* !CONFIG_X86_32 */
-	err |= __put_user(regs->flags, &sc->flags);
-	err |= __put_user(regs->cs, &sc->cs);
-	err |= __put_user(0, &sc->gs);
-	err |= __put_user(0, &sc->fs);
+	put_user_ex(regs->flags, &sc->flags);
+	put_user_ex(regs->cs, &sc->cs);
+	put_user_ex(0, &sc->gs);
+	put_user_ex(0, &sc->fs);
 #endif /* CONFIG_X86_32 */
 
-	err |= __put_user(fpstate, &sc->fpstate);
+	put_user_ex(fpstate, &sc->fpstate);
 
 	/* non-iBCS2 extensions.. */
-	err |= __put_user(mask, &sc->oldmask);
-	err |= __put_user(current->thread.cr2, &sc->cr2);
+	put_user_ex(mask, &sc->oldmask);
+	put_user_ex(current->thread.cr2, &sc->cr2);
+	} put_user_catch(err);
 
 	return err;
 }
@@ -336,43 +343,41 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
 		return -EFAULT;
 
-	err |= __put_user(sig, &frame->sig);
-	err |= __put_user(&frame->info, &frame->pinfo);
-	err |= __put_user(&frame->uc, &frame->puc);
-	err |= copy_siginfo_to_user(&frame->info, info);
-	if (err)
-		return -EFAULT;
-
-	/* Create the ucontext. */
-	if (cpu_has_xsave)
-		err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
-	else
-		err |= __put_user(0, &frame->uc.uc_flags);
-	err |= __put_user(0, &frame->uc.uc_link);
-	err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
-	err |= __put_user(sas_ss_flags(regs->sp),
-			  &frame->uc.uc_stack.ss_flags);
-	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
-	err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
-				regs, set->sig[0]);
-	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
-	if (err)
-		return -EFAULT;
-
-	/* Set up to return from userspace. */
-	restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
-	if (ka->sa.sa_flags & SA_RESTORER)
-		restorer = ka->sa.sa_restorer;
-	err |= __put_user(restorer, &frame->pretcode);
-
-	/*
-	 * This is movl $__NR_rt_sigreturn, %ax ; int $0x80
-	 *
-	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
-	 * reasons and because gdb uses it as a signature to notice
-	 * signal handler stack frames.
-	 */
-	err |= __put_user(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
+	put_user_try {
+		put_user_ex(sig, &frame->sig);
+		put_user_ex(&frame->info, &frame->pinfo);
+		put_user_ex(&frame->uc, &frame->puc);
+		err |= copy_siginfo_to_user(&frame->info, info);
+
+		/* Create the ucontext. */
+		if (cpu_has_xsave)
+			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
+		else
+			put_user_ex(0, &frame->uc.uc_flags);
+		put_user_ex(0, &frame->uc.uc_link);
+		put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+		put_user_ex(sas_ss_flags(regs->sp),
+			    &frame->uc.uc_stack.ss_flags);
+		put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+		err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
+					regs, set->sig[0]);
+		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+		/* Set up to return from userspace. */
+		restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
+		if (ka->sa.sa_flags & SA_RESTORER)
+			restorer = ka->sa.sa_restorer;
+		put_user_ex(restorer, &frame->pretcode);
+
+		/*
+		 * This is movl $__NR_rt_sigreturn, %ax ; int $0x80
+		 *
+		 * WE DO NOT USE IT ANY MORE! It's only left here for historical
+		 * reasons and because gdb uses it as a signature to notice
+		 * signal handler stack frames.
+		 */
+		put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
+	} put_user_catch(err);
 
 	if (err)
 		return -EFAULT;
@@ -436,28 +441,30 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		return -EFAULT;
 	}
 
-	/* Create the ucontext. */
-	if (cpu_has_xsave)
-		err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
-	else
-		err |= __put_user(0, &frame->uc.uc_flags);
-	err |= __put_user(0, &frame->uc.uc_link);
-	err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
-	err |= __put_user(sas_ss_flags(regs->sp),
-			  &frame->uc.uc_stack.ss_flags);
-	err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
-	err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
-	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
-
-	/* Set up to return from userspace. If provided, use a stub
-	   already in userspace. */
-	/* x86-64 should always use SA_RESTORER. */
-	if (ka->sa.sa_flags & SA_RESTORER) {
-		err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
-	} else {
-		/* could use a vstub here */
-		return -EFAULT;
-	}
+	put_user_try {
+		/* Create the ucontext. */
+		if (cpu_has_xsave)
+			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
+		else
+			put_user_ex(0, &frame->uc.uc_flags);
+		put_user_ex(0, &frame->uc.uc_link);
+		put_user_ex(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+		put_user_ex(sas_ss_flags(regs->sp),
+			    &frame->uc.uc_stack.ss_flags);
+		put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
+		err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
+		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+		/* Set up to return from userspace. If provided, use a stub
+		   already in userspace. */
+		/* x86-64 should always use SA_RESTORER. */
+		if (ka->sa.sa_flags & SA_RESTORER) {
+			put_user_ex(ka->sa.sa_restorer, &frame->pretcode);
+		} else {
+			/* could use a vstub here */
+			err |= -EFAULT;
+		}
+	} put_user_catch(err);
 
 	if (err)
 		return -EFAULT;
@@ -509,31 +516,41 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
 		  struct old_sigaction __user *oact)
 {
 	struct k_sigaction new_ka, old_ka;
-	int ret;
+	int ret = 0;
 
 	if (act) {
 		old_sigset_t mask;
 
-		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
-		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+		if (!access_ok(VERIFY_READ, act, sizeof(*act)))
 			return -EFAULT;
 
-		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
-		__get_user(mask, &act->sa_mask);
+		get_user_try {
+			get_user_ex(new_ka.sa.sa_handler, &act->sa_handler);
+			get_user_ex(new_ka.sa.sa_flags, &act->sa_flags);
+			get_user_ex(mask, &act->sa_mask);
+			get_user_ex(new_ka.sa.sa_restorer, &act->sa_restorer);
+		} get_user_catch(ret);
+
+		if (ret)
+			return -EFAULT;
 		siginitset(&new_ka.sa.sa_mask, mask);
 	}
 
 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
 
 	if (!ret && oact) {
-		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
-		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
 			return -EFAULT;
 
-		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+		put_user_try {
+			put_user_ex(old_ka.sa.sa_handler, &oact->sa_handler);
+			put_user_ex(old_ka.sa.sa_flags, &oact->sa_flags);
+			put_user_ex(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+			put_user_ex(old_ka.sa.sa_restorer, &oact->sa_restorer);
+		} put_user_catch(ret);
+
+		if (ret)
+			return -EFAULT;
 	}
 
 	return ret;
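
Throughout signal.c the patch replaces per-access "err |= __get_user()/__put_user()" sequences with get_user_try/put_user_try blocks, so a faulting access branches straight to the catch clause instead of threading an error flag through every line; only calls that return their own status (copy_siginfo_to_user(), setup_sigcontext(), restore_i387_xstate(), __copy_to_user()) still accumulate into err. A self-contained user-space model of the pattern (conceptual only: the kernel implements this with exception-table fixups, not setjmp/longjmp, and the real macro bodies differ):

	#include <setjmp.h>
	#include <stdio.h>

	#define EFAULT 14

	static jmp_buf uaccess_env;
	static int ptr_valid;			/* stand-in for the MMU's verdict */

	/* uaccess_try opens a region whose faults jump to the catch clause. */
	#define uaccess_try		if (!setjmp(uaccess_env))
	#define uaccess_catch(err)	else { (err) = -EFAULT; }

	#define get_user_ex(x, ptr)	do {		\
		if (!ptr_valid)				\
			longjmp(uaccess_env, 1);	\
		(x) = *(ptr);				\
	} while (0)

	int main(void)
	{
		int src = 42, dst = 0, err = 0;

		ptr_valid = 0;			/* simulate a faulting user pointer */
		uaccess_try {
			get_user_ex(dst, &src);	/* "faults": jumps to the catch */
		} uaccess_catch(err);

		printf("err = %d, dst = %d\n", err, dst);	/* err = -14, dst = 0 */
		return 0;
	}
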