Diffstat (limited to 'arch')
32 files changed, 207 insertions, 138 deletions
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index 0695be538de5..c3750c2c4113 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -2,8 +2,14 @@
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
+#include <linux/kprobes.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
 #include <asm/alternative.h>
 #include <asm/sections.h>
+#include <asm/pgtable.h>
+#include <asm/mce.h>
+#include <asm/nmi.h>
 
 #ifdef CONFIG_HOTPLUG_CPU
 static int smp_alt_once;
@@ -150,7 +156,7 @@ static void nop_out(void *insns, unsigned int len)
 		unsigned int noplen = len;
 		if (noplen > ASM_NOP_MAX)
 			noplen = ASM_NOP_MAX;
-		memcpy(insns, noptable[noplen], noplen);
+		text_poke(insns, noptable[noplen], noplen);
 		insns += noplen;
 		len -= noplen;
 	}
@@ -202,7 +208,7 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 			continue;
 		if (*ptr > text_end)
 			continue;
-		**ptr = 0xf0;	/* lock prefix */
+		text_poke(*ptr, ((unsigned char []){0xf0}), 1); /* add lock prefix */
 	};
 }
 
@@ -360,10 +366,6 @@ void apply_paravirt(struct paravirt_patch_site *start,
 		/* Pad the rest with nops */
 		nop_out(p->instr + used, p->len - used);
 	}
-
-	/* Sync to be conservative, in case we patched following
-	 * instructions */
-	sync_core();
 }
 extern struct paravirt_patch_site __start_parainstructions[],
 	__stop_parainstructions[];
@@ -373,6 +375,14 @@ void __init alternative_instructions(void)
 {
 	unsigned long flags;
 
+	/* The patching is not fully atomic, so try to avoid local interruptions
+	   that might execute the to be patched code.
+	   Other CPUs are not running. */
+	stop_nmi();
+#ifdef CONFIG_MCE
+	stop_mce();
+#endif
+
 	local_irq_save(flags);
 	apply_alternatives(__alt_instructions, __alt_instructions_end);
 
@@ -405,4 +415,37 @@ void __init alternative_instructions(void)
 #endif
 	apply_paravirt(__parainstructions, __parainstructions_end);
 	local_irq_restore(flags);
+
+	restart_nmi();
+#ifdef CONFIG_MCE
+	restart_mce();
+#endif
+}
+
+/*
+ * Warning:
+ * When you use this code to patch more than one byte of an instruction
+ * you need to make sure that other CPUs cannot execute this code in parallel.
+ * Also no thread must be currently preempted in the middle of these instructions.
+ * And on the local CPU you need to be protected again NMI or MCE handlers
+ * seeing an inconsistent instruction while you patch.
+ */
+void __kprobes text_poke(void *oaddr, unsigned char *opcode, int len)
+{
+	u8 *addr = oaddr;
+	if (!pte_write(*lookup_address((unsigned long)addr))) {
+		struct page *p[2] = { virt_to_page(addr), virt_to_page(addr+PAGE_SIZE) };
+		addr = vmap(p, 2, VM_MAP, PAGE_KERNEL);
+		if (!addr)
+			return;
+		addr += ((unsigned long)oaddr) % PAGE_SIZE;
+	}
+	memcpy(addr, opcode, len);
+	sync_core();
+	/* Not strictly needed, but can speed CPU recovery up. Ignore cross cacheline
+	   case. */
+	if (cpu_has_clflush)
+		asm("clflush (%0) " :: "r" (oaddr) : "memory");
+	if (addr != oaddr)
+		vunmap(addr);
 }
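The new text_poke() helper is what the rest of the series builds on. As a rough illustration of how a caller is expected to use it (hypothetical code, modelled on the arch_arm_kprobe()/arch_disarm_kprobe() changes further down; only text_poke() itself comes from this patch):

/* Illustrative only: save one instruction byte and replace it with an
 * INT3 breakpoint via text_poke(), then restore it later.  Per the
 * warning comment above, callers must keep other CPUs away from the
 * patched code and run with NMIs/MCEs suppressed. */
static unsigned char saved_byte;

static void example_arm(void *ip)
{
	saved_byte = *(unsigned char *)ip;
	text_poke(ip, ((unsigned char []){0xcc}), 1);	/* 0xcc = INT3 */
}

static void example_disarm(void *ip)
{
	text_poke(ip, &saved_byte, 1);
}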
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 815a5f0aa474..c7ba455d5ac7 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -231,6 +231,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 
 	switch (c->x86) {
 	case 15:
+	/* Use K8 tuning for Fam10h and Fam11h */
+	case 0x10:
+	case 0x11:
 		set_bit(X86_FEATURE_K8, c->x86_capability);
 		break;
 	case 6:
diff --git a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
index 194144539a6f..461dabc4e495 100644
--- a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
@@ -79,7 +79,7 @@
 #include <linux/smp.h>
 #include <linux/cpufreq.h>
 #include <linux/pci.h>
-#include <asm/processor.h>
+#include <asm/processor-cyrix.h>
 #include <asm/errno.h>
 
 /* PCI config registers, all at F0 */
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c
index e88d2fba156b..122d2d75aa9f 100644
--- a/arch/i386/kernel/cpu/cyrix.c
+++ b/arch/i386/kernel/cpu/cyrix.c
@@ -4,7 +4,7 @@
 #include <linux/pci.h>
 #include <asm/dma.h>
 #include <asm/io.h>
-#include <asm/processor.h>
+#include <asm/processor-cyrix.h>
 #include <asm/timer.h>
 #include <asm/pci-direct.h>
 #include <asm/tsc.h>
diff --git a/arch/i386/kernel/cpu/mcheck/mce.c b/arch/i386/kernel/cpu/mcheck/mce.c
index 56cd485b127c..34c781eddee4 100644
--- a/arch/i386/kernel/cpu/mcheck/mce.c
+++ b/arch/i386/kernel/cpu/mcheck/mce.c
@@ -60,6 +60,20 @@ void mcheck_init(struct cpuinfo_x86 *c)
 	}
 }
 
+static unsigned long old_cr4 __initdata;
+
+void __init stop_mce(void)
+{
+	old_cr4 = read_cr4();
+	clear_in_cr4(X86_CR4_MCE);
+}
+
+void __init restart_mce(void)
+{
+	if (old_cr4 & X86_CR4_MCE)
+		set_in_cr4(X86_CR4_MCE);
+}
+
 static int __init mcheck_disable(char *str)
 {
 	mce_disabled = 1;
diff --git a/arch/i386/kernel/cpu/mtrr/cyrix.c b/arch/i386/kernel/cpu/mtrr/cyrix.c
index 1001f1e0fe6d..2287d4863a8a 100644
--- a/arch/i386/kernel/cpu/mtrr/cyrix.c
+++ b/arch/i386/kernel/cpu/mtrr/cyrix.c
@@ -3,6 +3,7 @@
 #include <asm/mtrr.h>
 #include <asm/msr.h>
 #include <asm/io.h>
+#include <asm/processor-cyrix.h>
 #include "mtrr.h"
 
 int arr3_protected;
diff --git a/arch/i386/kernel/cpu/mtrr/state.c b/arch/i386/kernel/cpu/mtrr/state.c
index 7b39a2f954d9..c9014ca4a575 100644
--- a/arch/i386/kernel/cpu/mtrr/state.c
+++ b/arch/i386/kernel/cpu/mtrr/state.c
@@ -3,6 +3,7 @@
 #include <asm/io.h>
 #include <asm/mtrr.h>
 #include <asm/msr.h>
+#include <asm-i386/processor-cyrix.h>
 #include "mtrr.h"
 
 
diff --git a/arch/i386/kernel/cpu/perfctr-watchdog.c b/arch/i386/kernel/cpu/perfctr-watchdog.c
index 30b5e48aa76b..4be488e73bee 100644
--- a/arch/i386/kernel/cpu/perfctr-watchdog.c
+++ b/arch/i386/kernel/cpu/perfctr-watchdog.c
@@ -325,7 +325,7 @@ static struct wd_ops k7_wd_ops = {
 	.stop = single_msr_stop_watchdog,
 	.perfctr = MSR_K7_PERFCTR0,
 	.evntsel = MSR_K7_EVNTSEL0,
-	.checkbit = 1ULL<<63,
+	.checkbit = 1ULL<<47,
 };
 
 /* Intel Model 6 (PPro+,P2,P3,P-M,Core1) */
@@ -346,7 +346,9 @@ static int setup_p6_watchdog(unsigned nmi_hz)
 	perfctr_msr = MSR_P6_PERFCTR0;
 	evntsel_msr = MSR_P6_EVNTSEL0;
 
-	wrmsrl(perfctr_msr, 0UL);
+	/* KVM doesn't implement this MSR */
+	if (wrmsr_safe(perfctr_msr, 0, 0) < 0)
+		return 0;
 
 	evntsel = P6_EVNTSEL_INT
 		| P6_EVNTSEL_OS
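The wrmsr_safe() change above is a probe-before-use pattern: the MSR write is allowed to fault, and the fault comes back as a negative return value instead of killing the boot. A minimal sketch of the same idea (the helper name is illustrative; wrmsr_safe() is the existing kernel API the patch relies on):

/* Sketch: bail out of watchdog setup when the perfctr MSR is missing,
 * e.g. under a hypervisor such as KVM that does not implement it. */
static int perfctr_msr_usable(unsigned int msr)
{
	if (wrmsr_safe(msr, 0, 0) < 0)	/* #GP was trapped: MSR absent */
		return 0;
	return 1;			/* write succeeded: safe to program */
}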
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index dde828a333c3..448a50b1324c 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -35,6 +35,7 @@
 #include <asm/cacheflush.h>
 #include <asm/desc.h>
 #include <asm/uaccess.h>
+#include <asm/alternative.h>
 
 void jprobe_return_end(void);
 
@@ -169,16 +170,12 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 
 void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
-	*p->addr = BREAKPOINT_INSTRUCTION;
-	flush_icache_range((unsigned long) p->addr,
-			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
+	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
 }
 
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
-	*p->addr = p->opcode;
-	flush_icache_range((unsigned long) p->addr,
-			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
+	text_poke(p->addr, &p->opcode, 1);
 }
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 03b7f5584d71..99beac7f96ce 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -353,7 +353,7 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 	 * Take the local apic timer and PIT/HPET into account. We don't
 	 * know which one is active, when we have highres/dyntick on
 	 */
-	sum = per_cpu(irq_stat, cpu).apic_timer_irqs + kstat_irqs(0);
+	sum = per_cpu(irq_stat, cpu).apic_timer_irqs + kstat_cpu(cpu).irqs[0];
 
 	/* if the none of the timers isn't firing, this cpu isn't doing much */
 	if (!touched && last_irq_sums[cpu] == sum) {
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index 53f07a8275e3..79c167fcaee9 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -124,20 +124,28 @@ unsigned paravirt_patch_ignore(unsigned len)
 	return len;
 }
 
+struct branch {
+	unsigned char opcode;
+	u32 delta;
+} __attribute__((packed));
+
 unsigned paravirt_patch_call(void *target, u16 tgt_clobbers,
 			     void *site, u16 site_clobbers,
 			     unsigned len)
 {
 	unsigned char *call = site;
 	unsigned long delta = (unsigned long)target - (unsigned long)(call+5);
+	struct branch b;
 
 	if (tgt_clobbers & ~site_clobbers)
 		return len;	/* target would clobber too much for this site */
 	if (len < 5)
 		return len;	/* call too long for patch site */
 
-	*call++ = 0xe8;		/* call */
-	*(unsigned long *)call = delta;
+	b.opcode = 0xe8; /* call */
+	b.delta = delta;
+	BUILD_BUG_ON(sizeof(b) != 5);
+	text_poke(call, (unsigned char *)&b, 5);
 
 	return 5;
 }
@@ -150,8 +158,9 @@ unsigned paravirt_patch_jmp(void *target, void *site, unsigned len)
 	if (len < 5)
 		return len;	/* call too long for patch site */
 
-	*jmp++ = 0xe9;		/* jmp */
-	*(unsigned long *)jmp = delta;
+	b.opcode = 0xe9; /* jmp */
+	b.delta = delta;
+	text_poke(call, (unsigned char *)&b, 5);
 
 	return 5;
 }
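The packed struct branch exists so a 5-byte rel32 call/jmp can be assembled in a local buffer and written with a single text_poke() instead of byte-by-byte stores into live text. A small, self-contained user-space check of that encoding (everything here is illustrative; only the struct layout mirrors the patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct branch {
	unsigned char opcode;	/* 0xe8 = call, 0xe9 = jmp */
	uint32_t delta;		/* displacement relative to the end of the 5-byte insn */
} __attribute__((packed));

int main(void)
{
	unsigned char site[5];
	uintptr_t target = 0x08049000, from = (uintptr_t)site;
	struct branch b = { 0xe8, (uint32_t)(target - (from + 5)) };

	memcpy(site, &b, sizeof(b));	/* what text_poke(call, &b, 5) copies */
	printf("sizeof(struct branch) = %zu\n", sizeof(b));	/* prints 5 */
	return 0;
}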
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index d574e38f0f77..f5dd85656c18 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -199,6 +199,13 @@ asmlinkage int sys_sigreturn(unsigned long __unused)
 	return eax;
 
 badframe:
+	if (show_unhandled_signals && printk_ratelimit())
+		printk("%s%s[%d] bad frame in sigreturn frame:%p eip:%lx"
+		       " esp:%lx oeax:%lx\n",
+		       current->pid > 1 ? KERN_INFO : KERN_EMERG,
+		       current->comm, current->pid, frame, regs->eip,
+		       regs->esp, regs->orig_eax);
+
 	force_sig(SIGSEGV, current);
 	return 0;
 }
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 5910d3fac561..e4f61d1c6248 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -308,7 +308,7 @@ cpumask_t cpu_coregroup_map(int cpu)
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
 
-void set_cpu_sibling_map(int cpu)
+void __cpuinit set_cpu_sibling_map(int cpu)
 {
 	int i;
 	struct cpuinfo_x86 *c = cpu_data;
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 57772a18c394..cfffe3dd9e83 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -618,6 +618,13 @@ fastcall void __kprobes do_general_protection(struct pt_regs * regs,
 
 	current->thread.error_code = error_code;
 	current->thread.trap_no = 13;
+	if (show_unhandled_signals && unhandled_signal(current, SIGSEGV) &&
+	    printk_ratelimit())
+		printk(KERN_INFO
+		    "%s[%d] general protection eip:%lx esp:%lx error:%lx\n",
+		    current->comm, current->pid,
+		    regs->eip, regs->esp, error_code);
+
 	force_sig(SIGSEGV, current);
 	return;
 
@@ -768,6 +775,8 @@ static __kprobes void default_do_nmi(struct pt_regs * regs)
 		reassert_nmi();
 }
 
+static int ignore_nmis;
+
 fastcall __kprobes void do_nmi(struct pt_regs * regs, long error_code)
 {
 	int cpu;
@@ -778,11 +787,24 @@ fastcall __kprobes void do_nmi(struct pt_regs * regs, long error_code)
 
 	++nmi_count(cpu);
 
-	default_do_nmi(regs);
+	if (!ignore_nmis)
+		default_do_nmi(regs);
 
 	nmi_exit();
 }
 
+void stop_nmi(void)
+{
+	acpi_nmi_disable();
+	ignore_nmis++;
+}
+
+void restart_nmi(void)
+{
+	ignore_nmis--;
+	acpi_nmi_enable();
+}
+
 #ifdef CONFIG_KPROBES
 fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
 {
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index e92a10124935..01ffdd4964f0 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -283,6 +283,8 @@ static inline int vmalloc_fault(unsigned long address)
 	return 0;
 }
 
+int show_unhandled_signals = 1;
+
 /*
  * This routine handles page faults. It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -469,6 +471,14 @@ bad_area_nosemaphore:
 		if (is_prefetch(regs, address, error_code))
 			return;
 
+		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
+		    printk_ratelimit()) {
+			printk("%s%s[%d]: segfault at %08lx eip %08lx "
+			    "esp %08lx error %lx\n",
+			    tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
+			    tsk->comm, tsk->pid, address, regs->eip,
+			    regs->esp, error_code);
+		}
 		tsk->thread.cr2 = address;
 		/* Kernel addresses are always protection faults */
 		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index e1a9a805c445..c3b9905af2d5 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -800,17 +800,9 @@ void mark_rodata_ro(void)
 	unsigned long start = PFN_ALIGN(_text);
 	unsigned long size = PFN_ALIGN(_etext) - start;
 
-#ifndef CONFIG_KPROBES
-#ifdef CONFIG_HOTPLUG_CPU
-	/* It must still be possible to apply SMP alternatives. */
-	if (num_possible_cpus() <= 1)
-#endif
-	{
-		change_page_attr(virt_to_page(start),
-			 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
-		printk("Write protecting the kernel text: %luk\n", size >> 10);
-	}
-#endif
+	change_page_attr(virt_to_page(start),
+		 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
+	printk("Write protecting the kernel text: %luk\n", size >> 10);
 	start += size;
 	size = (unsigned long)__end_rodata - start;
 	change_page_attr(virt_to_page(start),
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index c2aaec5289dc..4100ddc52f02 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -941,13 +941,6 @@ static const struct file_operations spufs_signal1_nosched_fops = {
 	.mmap = spufs_signal1_mmap,
 };
 
-static const struct file_operations spufs_signal1_nosched_fops = {
-	.open = spufs_signal1_open,
-	.release = spufs_signal1_release,
-	.write = spufs_signal1_write,
-	.mmap = spufs_signal1_mmap,
-};
-
 static int spufs_signal2_open(struct inode *inode, struct file *file)
 {
 	struct spufs_inode_info *i = SPUFS_I(inode);
@@ -1083,13 +1076,6 @@ static const struct file_operations spufs_signal2_nosched_fops = {
 	.mmap = spufs_signal2_mmap,
 };
 
-static const struct file_operations spufs_signal2_nosched_fops = {
-	.open = spufs_signal2_open,
-	.release = spufs_signal2_release,
-	.write = spufs_signal2_write,
-	.mmap = spufs_signal2_mmap,
-};
-
 static void spufs_signal1_type_set(void *data, u64 val)
 {
 	struct spu_context *ctx = data;
diff --git a/arch/x86_64/boot/compressed/Makefile b/arch/x86_64/boot/compressed/Makefile
index c9f2da7496c1..877c0bdbbc67 100644
--- a/arch/x86_64/boot/compressed/Makefile
+++ b/arch/x86_64/boot/compressed/Makefile
@@ -3,8 +3,6 @@
 #
 # create a compressed vmlinux image from the original vmlinux
 #
-# Note all the files here are compiled/linked as 32bit executables.
-#
 
 targets := vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o
 
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index 941c84baecc8..e89abcdbdde8 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -25,7 +25,7 @@
  */
 
 	.text
-	.section .bootstrap.text
+	.section .text.head
 	.code64
 	.globl startup_64
 startup_64:
@@ -243,10 +243,16 @@ ENTRY(secondary_startup_64)
 	lretq
 
 	/* SMP bootup changes these two */
+#ifndef CONFIG_HOTPLUG_CPU
+	.pushsection .init.data
+#endif
 	.align 8
 	.globl initial_code
 initial_code:
 	.quad x86_64_start_kernel
+#ifndef CONFIG_HOTPLUG_CPU
+	.popsection
+#endif
 	.globl init_rsp
 init_rsp:
 	.quad init_thread_union+THREAD_SIZE-8
diff --git a/arch/x86_64/kernel/hpet.c b/arch/x86_64/kernel/hpet.c
index 636f4f9fc6bb..e2d1b912e154 100644
--- a/arch/x86_64/kernel/hpet.c
+++ b/arch/x86_64/kernel/hpet.c
@@ -133,7 +133,7 @@ struct clocksource clocksource_hpet = {
 	.vread = vread_hpet,
 };
 
-int hpet_arch_init(void)
+int __init hpet_arch_init(void)
 {
 	unsigned int id;
 	u64 tmp;
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index d4a0d0ac9935..a30e004682e2 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -39,9 +39,9 @@
 #include <linux/module.h>
 #include <linux/kdebug.h>
 
-#include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
+#include <asm/alternative.h>
 
 void jprobe_return_end(void);
 static void __kprobes arch_copy_kprobe(struct kprobe *p);
@@ -209,16 +209,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
 
 void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
-	*p->addr = BREAKPOINT_INSTRUCTION;
-	flush_icache_range((unsigned long) p->addr,
-			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
+	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
 }
 
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
-	*p->addr = p->opcode;
-	flush_icache_range((unsigned long) p->addr,
-			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
+	text_poke(p->addr, &p->opcode, 1);
 }
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 4d8450ee3635..a66d607f5b92 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -667,6 +667,20 @@ static struct miscdevice mce_log_device = {
 	&mce_chrdev_ops,
 };
 
+static unsigned long old_cr4 __initdata;
+
+void __init stop_mce(void)
+{
+	old_cr4 = read_cr4();
+	clear_in_cr4(X86_CR4_MCE);
+}
+
+void __init restart_mce(void)
+{
+	if (old_cr4 & X86_CR4_MCE)
+		set_in_cr4(X86_CR4_MCE);
+}
+
 /*
  * Old style boot options parsing. Only for compatibility.
  */
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index edbbc59b7523..cb8ee9d02f86 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -384,11 +384,14 @@ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 	return rc;
 }
 
+static unsigned ignore_nmis;
+
 asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
 {
 	nmi_enter();
 	add_pda(__nmi_count,1);
-	default_do_nmi(regs);
+	if (!ignore_nmis)
+		default_do_nmi(regs);
 	nmi_exit();
 }
 
@@ -401,6 +404,18 @@ int do_nmi_callback(struct pt_regs * regs, int cpu)
 	return 0;
 }
 
+void stop_nmi(void)
+{
+	acpi_nmi_disable();
+	ignore_nmis++;
+}
+
+void restart_nmi(void)
+{
+	ignore_nmis--;
+	acpi_nmi_enable();
+}
+
 #ifdef CONFIG_SYSCTL
 
 static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 92fade4a62cf..e7ac629d4c46 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -342,10 +342,10 @@ void __show_regs(struct pt_regs * regs)
 	rdmsrl(MSR_GS_BASE, gs);
 	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
 
-	asm("movq %%cr0, %0": "=r" (cr0));
-	asm("movq %%cr2, %0": "=r" (cr2));
-	asm("movq %%cr3, %0": "=r" (cr3));
-	asm("movq %%cr4, %0": "=r" (cr4));
+	cr0 = read_cr0();
+	cr2 = read_cr2();
+	cr3 = read_cr3();
+	cr4 = read_cr4();
 
 	printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
 	       fs,fsindex,gs,gsindex,shadowgs);
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 6fa0a302e2aa..af838f6b0b7f 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -608,6 +608,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	else
 		num_cache_leaves = 3;
 
+	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
+		set_bit(X86_FEATURE_K8, &c->x86_capability);
+
 	/* RDTSC can be speculated around */
 	clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
 
diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c
index 4886afcd6287..739175b01e06 100644
--- a/arch/x86_64/kernel/signal.c
+++ b/arch/x86_64/kernel/signal.c
@@ -487,7 +487,7 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
 void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
 {
 	struct task_struct *me = current;
-	if (exception_trace)
+	if (show_unhandled_signals && printk_ratelimit())
 		printk("%s[%d] bad frame in %s frame:%p rip:%lx rsp:%lx orax:%lx\n",
 	       me->comm,me->pid,where,frame,regs->rip,regs->rsp,regs->orig_rax);
 
diff --git a/arch/x86_64/kernel/suspend.c b/arch/x86_64/kernel/suspend.c
index 6a5a98f2a75c..ea83a9f91965 100644
--- a/arch/x86_64/kernel/suspend.c
+++ b/arch/x86_64/kernel/suspend.c
@@ -55,11 +55,11 @@ void __save_processor_state(struct saved_context *ctxt)
 	 * control registers
 	 */
 	rdmsrl(MSR_EFER, ctxt->efer);
-	asm volatile ("movq %%cr0, %0" : "=r" (ctxt->cr0));
-	asm volatile ("movq %%cr2, %0" : "=r" (ctxt->cr2));
-	asm volatile ("movq %%cr3, %0" : "=r" (ctxt->cr3));
-	asm volatile ("movq %%cr4, %0" : "=r" (ctxt->cr4));
-	asm volatile ("movq %%cr8, %0" : "=r" (ctxt->cr8));
+	ctxt->cr0 = read_cr0();
+	ctxt->cr2 = read_cr2();
+	ctxt->cr3 = read_cr3();
+	ctxt->cr4 = read_cr4();
+	ctxt->cr8 = read_cr8();
 }
 
 void save_processor_state(void)
@@ -81,11 +81,11 @@ void __restore_processor_state(struct saved_context *ctxt)
 	 * control registers
 	 */
 	wrmsrl(MSR_EFER, ctxt->efer);
-	asm volatile ("movq %0, %%cr8" :: "r" (ctxt->cr8));
-	asm volatile ("movq %0, %%cr4" :: "r" (ctxt->cr4));
-	asm volatile ("movq %0, %%cr3" :: "r" (ctxt->cr3));
-	asm volatile ("movq %0, %%cr2" :: "r" (ctxt->cr2));
-	asm volatile ("movq %0, %%cr0" :: "r" (ctxt->cr0));
+	write_cr8(ctxt->cr8);
+	write_cr4(ctxt->cr4);
+	write_cr3(ctxt->cr3);
+	write_cr2(ctxt->cr2);
+	write_cr0(ctxt->cr0);
 
 	/*
 	 * now restore the descriptor tables to their proper values
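The read_crN()/write_crN() conversions in process.c, suspend.c, mm/fault.c and mm/init.c replace open-coded inline asm with the existing accessor wrappers. Roughly, the wrappers do the same thing the removed lines did; a simplified sketch (the real definitions live in the x86_64 arch headers, not in this patch):

/* Simplified sketch of the accessor style being adopted above. */
static inline unsigned long sketch_read_cr4(void)
{
	unsigned long val;
	asm volatile("movq %%cr4,%0" : "=r" (val));
	return val;
}

static inline void sketch_write_cr4(unsigned long val)
{
	asm volatile("movq %0,%%cr4" :: "r" (val));
}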
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 8713ad4a4db1..03888420775d 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -584,7 +584,8 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
 		tsk->thread.error_code = error_code;
 		tsk->thread.trap_no = trapnr;
 
-		if (exception_trace && unhandled_signal(tsk, signr))
+		if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
+		    printk_ratelimit())
 			printk(KERN_INFO
 			       "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
 			       tsk->comm, tsk->pid, str,
@@ -688,7 +689,8 @@ asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
 	tsk->thread.error_code = error_code;
 	tsk->thread.trap_no = 13;
 
-	if (exception_trace && unhandled_signal(tsk, SIGSEGV))
+	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
+	    printk_ratelimit())
 		printk(KERN_INFO
 		       "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
 		       tsk->comm, tsk->pid,
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index e7a5eb6cd785..ba8ea97abd21 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -28,7 +28,7 @@ SECTIONS
   _text = .;			/* Text and read-only data */
   .text :  AT(ADDR(.text) - LOAD_OFFSET) {
 	/* First the code that has to be first for bootstrapping */
-	*(.bootstrap.text)
+	*(.text.head)
 	_stext = .;
 	/* Then the rest */
 	TEXT_TEXT
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 2074bddd4f04..327c9f2fa626 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -159,7 +159,7 @@ void dump_pagetable(unsigned long address)
 	pmd_t *pmd;
 	pte_t *pte;
 
-	asm("movq %%cr3,%0" : "=r" (pgd));
+	pgd = (pgd_t *)read_cr3();
 
 	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
 	pgd += pgd_index(address);
@@ -221,16 +221,6 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
 	return 0;
 }
 
-int unhandled_signal(struct task_struct *tsk, int sig)
-{
-	if (is_init(tsk))
-		return 1;
-	if (tsk->ptrace & PT_PTRACED)
-		return 0;
-	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
-		(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
-}
-
 static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
 				 unsigned long error_code)
 {
@@ -302,7 +292,7 @@ static int vmalloc_fault(unsigned long address)
 }
 
 static int page_fault_trace;
-int exception_trace = 1;
+int show_unhandled_signals = 1;
 
 /*
  * This routine handles page faults. It determines the address,
@@ -326,7 +316,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	prefetchw(&mm->mmap_sem);
 
 	/* get the address */
-	__asm__("movq %%cr2,%0":"=r" (address));
+	address = read_cr2();
 
 	info.si_code = SEGV_MAPERR;
 
@@ -494,7 +484,8 @@ bad_area_nosemaphore:
 		    (address >> 32))
 			return;
 
-		if (exception_trace && unhandled_signal(tsk, SIGSEGV)) {
+		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
+		    printk_ratelimit()) {
 			printk(
 		       "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
 					tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
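The fault.c, traps.c and signal.c hunks all converge on the same reporting pattern: one global show_unhandled_signals switch, an unhandled_signal() check so signals the process handles stay quiet, printk_ratelimit() so a crashing loop cannot flood the log, and KERN_EMERG only when the victim is init. Folded into one hypothetical helper for illustration (the helper name and its exact shape are not from the patch; the fields it prints mirror the x86_64 segfault message above):

static void report_user_segfault(struct task_struct *tsk, struct pt_regs *regs,
				 unsigned long address, unsigned long error_code)
{
	if (!show_unhandled_signals || !unhandled_signal(tsk, SIGSEGV))
		return;
	if (!printk_ratelimit())
		return;
	printk("%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
	       tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, tsk->pid, address, regs->rip, regs->rsp, error_code);
}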
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 381c2ecd407e..38f5d6368006 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -383,7 +383,7 @@ void __meminit init_memory_mapping(unsigned long start, unsigned long end)
 	}
 
 	if (!after_bootmem)
-		asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
+		mmu_cr4_features = read_cr4();
 	__flush_tlb_all();
 }
 
@@ -600,16 +600,6 @@ void mark_rodata_ro(void)
 {
 	unsigned long start = (unsigned long)_stext, end;
 
-#ifdef CONFIG_HOTPLUG_CPU
-	/* It must still be possible to apply SMP alternatives. */
-	if (num_possible_cpus() > 1)
-		start = (unsigned long)_etext;
-#endif
-
-#ifdef CONFIG_KPROBES
-	start = (unsigned long)__start_rodata;
-#endif
-
 	end = (unsigned long)__end_rodata;
 	start = (start + PAGE_SIZE - 1) & PAGE_MASK;
 	end &= PAGE_MASK;
@@ -697,39 +687,6 @@ int kern_addr_valid(unsigned long addr)
 	return pfn_valid(pte_pfn(*pte));
 }
 
-#ifdef CONFIG_SYSCTL
-#include <linux/sysctl.h>
-
-static ctl_table debug_table2[] = {
-	{
-		.ctl_name = 99,
-		.procname = "exception-trace",
-		.data = &exception_trace,
-		.maxlen = sizeof(int),
-		.mode = 0644,
-		.proc_handler = proc_dointvec
-	},
-	{}
-};
-
-static ctl_table debug_root_table2[] = {
-	{
-		.ctl_name = CTL_DEBUG,
-		.procname = "debug",
-		.mode = 0555,
-		.child = debug_table2
-	},
-	{}
-};
-
-static __init int x8664_sysctl_init(void)
-{
-	register_sysctl_table(debug_root_table2);
-	return 0;
-}
-__initcall(x8664_sysctl_init);
-#endif
-
 /* A pseudo VMA to allow ptrace access for the vsyscall page.  This only
    covers the 64bit vsyscall page now. 32bit has a real VMA now and does
    not need special handling anymore. */
@@ -767,7 +724,7 @@ int in_gate_area_no_task(unsigned long addr)
 	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
 }
 
-void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
+void * __init alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
 {
 	return __alloc_bootmem_core(pgdat->bdata, size,
 		SMP_CACHE_BYTES, (4UL*1024*1024*1024), 0);
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index 36377b6b8efe..7e161c698af4 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -13,7 +13,7 @@
 #include <asm/tlbflush.h>
 #include <asm/io.h>
 
-static inline pte_t *lookup_address(unsigned long address)
+pte_t *lookup_address(unsigned long address)
 {
 	pgd_t *pgd = pgd_offset_k(address);
 	pud_t *pud;