Diffstat (limited to 'arch/x86_64/kernel')
-rw-r--r--  arch/x86_64/kernel/aperture.c     |   4
-rw-r--r--  arch/x86_64/kernel/early_printk.c |  13
-rw-r--r--  arch/x86_64/kernel/head64.c       |   2
-rw-r--r--  arch/x86_64/kernel/i8259.c        |  15
-rw-r--r--  arch/x86_64/kernel/kprobes.c      | 190
-rw-r--r--  arch/x86_64/kernel/mpparse.c      |  25
-rw-r--r--  arch/x86_64/kernel/process.c      |  16
-rw-r--r--  arch/x86_64/kernel/setup.c        |  33
-rw-r--r--  arch/x86_64/kernel/signal.c       |  30
-rw-r--r--  arch/x86_64/kernel/time.c         |  42
-rw-r--r--  arch/x86_64/kernel/traps.c        |  22
11 files changed, 305 insertions(+), 87 deletions(-)
diff --git a/arch/x86_64/kernel/aperture.c b/arch/x86_64/kernel/aperture.c
index 504e63474993..c9a6b812e926 100644
--- a/arch/x86_64/kernel/aperture.c
+++ b/arch/x86_64/kernel/aperture.c
@@ -40,11 +40,7 @@ int fix_aperture __initdata = 1;
 
 static u32 __init allocate_aperture(void)
 {
-#ifdef CONFIG_DISCONTIGMEM
 	pg_data_t *nd0 = NODE_DATA(0);
-#else
-	pg_data_t *nd0 = &contig_page_data;
-#endif
 	u32 aper_size;
 	void *p;
 
diff --git a/arch/x86_64/kernel/early_printk.c b/arch/x86_64/kernel/early_printk.c
index e3a19e8ebbf8..9631c747c5e3 100644
--- a/arch/x86_64/kernel/early_printk.c
+++ b/arch/x86_64/kernel/early_printk.c
@@ -2,20 +2,24 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/string.h>
+#include <linux/tty.h>
 #include <asm/io.h>
 #include <asm/processor.h>
 
 /* Simple VGA output */
 
 #ifdef __i386__
+#include <asm/setup.h>
 #define VGABASE		(__ISA_IO_base + 0xb8000)
 #else
+#include <asm/bootsetup.h>
 #define VGABASE		((void __iomem *)0xffffffff800b8000UL)
 #endif
 
-#define MAX_YPOS	25
-#define MAX_XPOS	80
+#define MAX_YPOS	max_ypos
+#define MAX_XPOS	max_xpos
 
+static int max_ypos = 25, max_xpos = 80;
 static int current_ypos = 1, current_xpos = 0;
 
 static void early_vga_write(struct console *con, const char *str, unsigned n)
@@ -196,7 +200,10 @@ int __init setup_early_printk(char *opt)
 	} else if (!strncmp(buf, "ttyS", 4)) {
 		early_serial_init(buf);
 		early_console = &early_serial_console;
-	} else if (!strncmp(buf, "vga", 3)) {
+	} else if (!strncmp(buf, "vga", 3)
+		   && SCREEN_INFO.orig_video_isVGA == 1) {
+		max_xpos = SCREEN_INFO.orig_video_cols;
+		max_ypos = SCREEN_INFO.orig_video_lines;
 		early_console = &early_vga_console;
 	}
 	early_console_initialized = 1;
diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c
index 0f8c78dcd38c..cf6ab147a2a5 100644
--- a/arch/x86_64/kernel/head64.c
+++ b/arch/x86_64/kernel/head64.c
@@ -94,7 +94,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
 	s = strstr(saved_command_line, "earlyprintk=");
 	if (s != NULL)
 		setup_early_printk(s);
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
 	s = strstr(saved_command_line, "numa=");
 	if (s != NULL)
 		numa_setup(s+5);
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index 7873d9ba8814..19eafa0aa95c 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -157,14 +157,13 @@ static unsigned int startup_8259A_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type i8259A_irq_type = {
-	"XT-PIC",
-	startup_8259A_irq,
-	shutdown_8259A_irq,
-	enable_8259A_irq,
-	disable_8259A_irq,
-	mask_and_ack_8259A,
-	end_8259A_irq,
-	NULL
+	.typename = "XT-PIC",
+	.startup = startup_8259A_irq,
+	.shutdown = shutdown_8259A_irq,
+	.enable = enable_8259A_irq,
+	.disable = disable_8259A_irq,
+	.ack = mask_and_ack_8259A,
+	.end = end_8259A_irq,
 };
 
 /*
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index f77f8a0ff187..4e680f87a75f 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -27,6 +27,8 @@
  *		<prasanna@in.ibm.com> adapted for x86_64
  * 2005-Mar	Roland McGrath <roland@redhat.com>
  *		Fixed to handle %rip-relative addressing mode correctly.
+ * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
+ *		Added function return probes functionality
  */
 
 #include <linux/config.h>
@@ -37,18 +39,16 @@
 #include <linux/slab.h>
 #include <linux/preempt.h>
 #include <linux/moduleloader.h>
-
+#include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 #include <asm/kdebug.h>
 
 static DECLARE_MUTEX(kprobe_mutex);
 
-/* kprobe_status settings */
-#define KPROBE_HIT_ACTIVE	0x00000001
-#define KPROBE_HIT_SS		0x00000002
-
 static struct kprobe *current_kprobe;
 static unsigned long kprobe_status, kprobe_old_rflags, kprobe_saved_rflags;
+static struct kprobe *kprobe_prev;
+static unsigned long kprobe_status_prev, kprobe_old_rflags_prev, kprobe_saved_rflags_prev;
 static struct pt_regs jprobe_saved_regs;
 static long *jprobe_saved_rsp;
 static kprobe_opcode_t *get_insn_slot(void);
@@ -214,6 +214,21 @@ void arch_copy_kprobe(struct kprobe *p)
 		BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
 		*ripdisp = disp;
 	}
+	p->opcode = *p->addr;
+}
+
+void arch_arm_kprobe(struct kprobe *p)
+{
+	*p->addr = BREAKPOINT_INSTRUCTION;
+	flush_icache_range((unsigned long) p->addr,
+			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
+}
+
+void arch_disarm_kprobe(struct kprobe *p)
+{
+	*p->addr = p->opcode;
+	flush_icache_range((unsigned long) p->addr,
+			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
 }
 
 void arch_remove_kprobe(struct kprobe *p)
@@ -223,10 +238,29 @@ void arch_remove_kprobe(struct kprobe *p)
 	down(&kprobe_mutex);
 }
 
-static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
+static inline void save_previous_kprobe(void)
 {
-	*p->addr = p->opcode;
-	regs->rip = (unsigned long)p->addr;
+	kprobe_prev = current_kprobe;
+	kprobe_status_prev = kprobe_status;
+	kprobe_old_rflags_prev = kprobe_old_rflags;
+	kprobe_saved_rflags_prev = kprobe_saved_rflags;
+}
+
+static inline void restore_previous_kprobe(void)
+{
+	current_kprobe = kprobe_prev;
+	kprobe_status = kprobe_status_prev;
+	kprobe_old_rflags = kprobe_old_rflags_prev;
+	kprobe_saved_rflags = kprobe_saved_rflags_prev;
+}
+
+static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs)
+{
+	current_kprobe = p;
+	kprobe_saved_rflags = kprobe_old_rflags
+		= (regs->eflags & (TF_MASK | IF_MASK));
+	if (is_IF_modifier(p->ainsn.insn))
+		kprobe_saved_rflags &= ~IF_MASK;
 }
 
 static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
@@ -240,6 +274,50 @@ static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 	regs->rip = (unsigned long)p->ainsn.insn;
 }
 
+struct task_struct *arch_get_kprobe_task(void *ptr)
+{
+	return ((struct thread_info *) (((unsigned long) ptr) &
+					(~(THREAD_SIZE -1))))->task;
+}
+
+void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+{
+	unsigned long *sara = (unsigned long *)regs->rsp;
+	struct kretprobe_instance *ri;
+	static void *orig_ret_addr;
+
+	/*
+	 * Save the return address when the return probe hits
+	 * the first time, and use it to populate the (krprobe
+	 * instance)->ret_addr for subsequent return probes at
+	 * the same addrress since stack address would have
+	 * the kretprobe_trampoline by then.
+	 */
+	if (((void*) *sara) != kretprobe_trampoline)
+		orig_ret_addr = (void*) *sara;
+
+	if ((ri = get_free_rp_inst(rp)) != NULL) {
+		ri->rp = rp;
+		ri->stack_addr = sara;
+		ri->ret_addr = orig_ret_addr;
+		add_rp_inst(ri);
+		/* Replace the return addr with trampoline addr */
+		*sara = (unsigned long) &kretprobe_trampoline;
+	} else {
+		rp->nmissed++;
+	}
+}
+
+void arch_kprobe_flush_task(struct task_struct *tk)
+{
+	struct kretprobe_instance *ri;
+	while ((ri = get_rp_inst_tsk(tk)) != NULL) {
+		*((unsigned long *)(ri->stack_addr)) =
+					(unsigned long) ri->ret_addr;
+		recycle_rp_inst(ri);
+	}
+}
+
 /*
  * Interrupts are disabled on entry as trap3 is an interrupt gate and they
  * remain disabled thorough out this function.
@@ -264,9 +342,30 @@ int kprobe_handler(struct pt_regs *regs)
 			regs->eflags |= kprobe_saved_rflags;
 			unlock_kprobes();
 			goto no_kprobe;
+		} else if (kprobe_status == KPROBE_HIT_SSDONE) {
+			/* TODO: Provide re-entrancy from
+			 * post_kprobes_handler() and avoid exception
+			 * stack corruption while single-stepping on
+			 * the instruction of the new probe.
+			 */
+			arch_disarm_kprobe(p);
+			regs->rip = (unsigned long)p->addr;
+			ret = 1;
+		} else {
+			/* We have reentered the kprobe_handler(), since
+			 * another probe was hit while within the
+			 * handler. We here save the original kprobe
+			 * variables and just single step on instruction
+			 * of the new probe without calling any user
+			 * handlers.
+			 */
+			save_previous_kprobe();
+			set_current_kprobe(p, regs);
+			p->nmissed++;
+			prepare_singlestep(p, regs);
+			kprobe_status = KPROBE_REENTER;
+			return 1;
 		}
-		disarm_kprobe(p, regs);
-		ret = 1;
 	} else {
 		p = current_kprobe;
 		if (p->break_handler && p->break_handler(p, regs)) {
@@ -296,11 +395,7 @@ int kprobe_handler(struct pt_regs *regs)
 	}
 
 	kprobe_status = KPROBE_HIT_ACTIVE;
-	current_kprobe = p;
-	kprobe_saved_rflags = kprobe_old_rflags
-		= (regs->eflags & (TF_MASK | IF_MASK));
-	if (is_IF_modifier(p->ainsn.insn))
-		kprobe_saved_rflags &= ~IF_MASK;
+	set_current_kprobe(p, regs);
 
 	if (p->pre_handler && p->pre_handler(p, regs))
 		/* handler has already set things up, so skip ss setup */
@@ -317,6 +412,55 @@ no_kprobe:
 }
 
 /*
+ * For function-return probes, init_kprobes() establishes a probepoint
+ * here. When a retprobed function returns, this probe is hit and
+ * trampoline_probe_handler() runs, calling the kretprobe's handler.
+ */
+ void kretprobe_trampoline_holder(void)
+ {
+ 	asm volatile (  ".global kretprobe_trampoline\n"
+ 			"kretprobe_trampoline: \n"
+ 			"nop\n");
+ }
+
+/*
+ * Called when we hit the probe point at kretprobe_trampoline
+ */
+int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct task_struct *tsk;
+	struct kretprobe_instance *ri;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	unsigned long *sara = (unsigned long *)regs->rsp - 1;
+
+	tsk = arch_get_kprobe_task(sara);
+	head = kretprobe_inst_table_head(tsk);
+
+	hlist_for_each_entry(ri, node, head, hlist) {
+		if (ri->stack_addr == sara && ri->rp) {
+			if (ri->rp->handler)
+				ri->rp->handler(ri, regs);
+		}
+	}
+	return 0;
+}
+
+void trampoline_post_handler(struct kprobe *p, struct pt_regs *regs,
+						unsigned long flags)
+{
+	struct kretprobe_instance *ri;
+	/* RA already popped */
+	unsigned long *sara = ((unsigned long *)regs->rsp) - 1;
+
+	while ((ri = get_rp_inst(sara))) {
+		regs->rip = (unsigned long)ri->ret_addr;
+		recycle_rp_inst(ri);
+	}
+	regs->eflags &= ~TF_MASK;
+}
+
+/*
  * Called after single-stepping.  p->addr is the address of the
  * instruction whose first byte has been replaced by the "int 3"
  * instruction. To avoid the SMP problems that can occur when we
@@ -401,13 +545,23 @@ int post_kprobe_handler(struct pt_regs *regs)
 	if (!kprobe_running())
 		return 0;
 
-	if (current_kprobe->post_handler)
+	if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
+		kprobe_status = KPROBE_HIT_SSDONE;
 		current_kprobe->post_handler(current_kprobe, regs, 0);
+	}
 
-	resume_execution(current_kprobe, regs);
+	if (current_kprobe->post_handler != trampoline_post_handler)
+		resume_execution(current_kprobe, regs);
 	regs->eflags |= kprobe_saved_rflags;
 
-	unlock_kprobes();
+	/* Restore the original saved kprobes variables and continue. */
+	if (kprobe_status == KPROBE_REENTER) {
+		restore_previous_kprobe();
+		goto out;
+	} else {
+		unlock_kprobes();
+	}
+out:
 	preempt_enable_no_resched();
 
 	/*
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 61a63be6b294..9c5aa2a790c7 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -23,6 +23,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/mc146818rtc.h>
 #include <linux/acpi.h>
+#include <linux/module.h>
 
 #include <asm/smp.h>
 #include <asm/mtrr.h>
@@ -45,7 +46,8 @@ int acpi_found_madt;
 int apic_version [MAX_APICS];
 unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
 int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
-cpumask_t pci_bus_to_cpumask [256] = { [0 ... 255] = CPU_MASK_ALL };
+unsigned char pci_bus_to_node [256];
+EXPORT_SYMBOL(pci_bus_to_node);
 
 static int mp_current_pci_id = 0;
 /* I/O APIC entries */
@@ -904,11 +906,20 @@ void __init mp_config_acpi_legacy_irqs (void)
 	return;
 }
 
+#define MAX_GSI_NUM	4096
+
 int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
 {
 	int ioapic = -1;
 	int ioapic_pin = 0;
 	int idx, bit = 0;
+	static int pci_irq = 16;
+	/*
+	 * Mapping between Global System Interrupts, which
+	 * represent all possible interrupts, to the IRQs
+	 * assigned to actual devices.
+	 */
+	static int gsi_to_irq[MAX_GSI_NUM];
 
 	if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
 		return gsi;
@@ -943,11 +954,21 @@ int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
 	if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
 		Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
 			mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
-		return gsi;
+		return gsi_to_irq[gsi];
 	}
 
 	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
 
+	if (edge_level) {
+		/*
+		 * For PCI devices assign IRQs in order, avoiding gaps
+		 * due to unused I/O APIC pins.
+		 */
+		int irq = gsi;
+		gsi = pci_irq++;
+		gsi_to_irq[irq] = gsi;
+	}
+
 	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
 		edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
 		active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index dce8bab4306c..e59d1f9d6163 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -34,6 +34,7 @@
 #include <linux/ptrace.h>
 #include <linux/utsname.h>
 #include <linux/random.h>
+#include <linux/kprobes.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -293,6 +294,14 @@ void exit_thread(void)
 {
 	struct task_struct *me = current;
 	struct thread_struct *t = &me->thread;
+
+	/*
+	 * Remove function-return probe instances associated with this task
+	 * and put them back on the free list. Do not insert an exit probe for
+	 * this function, it will be disabled by kprobe_flush_task if you do.
+	 */
+	kprobe_flush_task(me);
+
 	if (me->thread.io_bitmap_ptr) {
 		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
 
@@ -312,6 +321,13 @@ void flush_thread(void)
 	struct task_struct *tsk = current;
 	struct thread_info *t = current_thread_info();
 
+	/*
+	 * Remove function-return probe instances associated with this task
+	 * and put them back on the free list. Do not insert an exit probe for
+	 * this function, it will be disabled by kprobe_flush_task if you do.
+	 */
+	kprobe_flush_task(tsk);
+
 	if (t->flags & _TIF_ABI_PENDING)
 		t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
 
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 99f038ede23c..000015dd5a8b 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -40,6 +40,8 @@
 #include <linux/acpi.h>
 #include <linux/kallsyms.h>
 #include <linux/edd.h>
+#include <linux/mmzone.h>
+
 #include <asm/mtrr.h>
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -348,7 +350,7 @@ static __init void parse_cmdline_early (char ** cmdline_p)
 		if (!memcmp(from, "mem=", 4))
 			parse_memopt(from+4, &from);
 
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
 		if (!memcmp(from, "numa=", 5))
 			numa_setup(from+5);
 #endif
@@ -377,17 +379,20 @@ static __init void parse_cmdline_early (char ** cmdline_p)
 	*cmdline_p = command_line;
 }
 
-#ifndef CONFIG_DISCONTIGMEM
-static void __init contig_initmem_init(void)
+#ifndef CONFIG_NUMA
+static void __init
+contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long bootmap_size, bootmap;
-	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
-	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
-	if (bootmap == -1L)
-		panic("Cannot find bootmem map of size %ld\n",bootmap_size);
-	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
-	e820_bootmem_free(&contig_page_data, 0, end_pfn << PAGE_SHIFT);
-	reserve_bootmem(bootmap, bootmap_size);
+
+	memory_present(0, start_pfn, end_pfn);
+	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
+	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
+	if (bootmap == -1L)
+		panic("Cannot find bootmem map of size %ld\n",bootmap_size);
+	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
+	e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
+	reserve_bootmem(bootmap, bootmap_size);
 }
 #endif
 
@@ -554,10 +559,10 @@ void __init setup_arch(char **cmdline_p)
 	acpi_numa_init();
 #endif
 
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
 	numa_initmem_init(0, end_pfn);
 #else
-	contig_initmem_init();
+	contig_initmem_init(0, end_pfn);
 #endif
 
 	/* Reserve direct mapping */
@@ -618,6 +623,8 @@ void __init setup_arch(char **cmdline_p)
 		}
 	}
 #endif
+
+	sparse_init();
 	paging_init();
 
 	check_ioapic();
diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c
index 429c0269dc4e..98b7ba95d581 100644
--- a/arch/x86_64/kernel/signal.c
+++ b/arch/x86_64/kernel/signal.c
@@ -34,9 +34,9 @@
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
-void ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 			sigset_t *set, struct pt_regs * regs);
-void ia32_setup_frame(int sig, struct k_sigaction *ka,
+int ia32_setup_frame(int sig, struct k_sigaction *ka,
 			sigset_t *set, struct pt_regs * regs);
 
 asmlinkage long
@@ -238,7 +238,7 @@ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
 	return (void __user *)round_down(rsp - size, 16);
 }
 
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 			   sigset_t *set, struct pt_regs * regs)
 {
 	struct rt_sigframe __user *frame;
@@ -327,20 +327,23 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		current->comm, current->pid, frame, regs->rip, frame->pretcode);
 #endif
 
-	return;
+	return 1;
 
 give_sigsegv:
 	force_sigsegv(sig, current);
+	return 0;
 }
 
 /*
  * OK, we're invoking a handler
  */
 
-static void
+static int
 handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
 		sigset_t *oldset, struct pt_regs *regs)
 {
+	int ret;
+
 #ifdef DEBUG_SIG
 	printk("handle_signal pid:%d sig:%lu rip:%lx rsp:%lx regs=%p\n",
 		current->pid, sig,
@@ -384,20 +387,22 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
 #ifdef CONFIG_IA32_EMULATION
 	if (test_thread_flag(TIF_IA32)) {
 		if (ka->sa.sa_flags & SA_SIGINFO)
-			ia32_setup_rt_frame(sig, ka, info, oldset, regs);
+			ret = ia32_setup_rt_frame(sig, ka, info, oldset, regs);
 		else
-			ia32_setup_frame(sig, ka, oldset, regs);
+			ret = ia32_setup_frame(sig, ka, oldset, regs);
 	} else
 #endif
-	setup_rt_frame(sig, ka, info, oldset, regs);
+	ret = setup_rt_frame(sig, ka, info, oldset, regs);
 
-	if (!(ka->sa.sa_flags & SA_NODEFER)) {
+	if (ret && !(ka->sa.sa_flags & SA_NODEFER)) {
 		spin_lock_irq(&current->sighand->siglock);
 		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
 		sigaddset(&current->blocked,sig);
 		recalc_sigpending();
 		spin_unlock_irq(&current->sighand->siglock);
 	}
+
+	return ret;
 }
 
 /*
@@ -417,7 +422,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
 	 * kernel mode. Just return without doing anything
 	 * if so.
 	 */
-	if ((regs->cs & 3) != 3)
+	if (!user_mode(regs))
 		return 1;
 
 	if (try_to_freeze(0))
@@ -434,11 +439,10 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
 		 * inside the kernel.
 		 */
 		if (current->thread.debugreg7)
-			asm volatile("movq %0,%%db7" : : "r" (current->thread.debugreg7));
+			set_debugreg(current->thread.debugreg7, 7);
 
 		/* Whee!  Actually deliver the signal.  */
-		handle_signal(signr, &info, &ka, oldset, regs);
-		return 1;
+		return handle_signal(signr, &info, &ka, oldset, regs);
 	}
 
  no_signal:
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index fb8c809b4cd9..66bf6ddeb0c3 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -64,6 +64,7 @@ static int notsc __initdata = 0;
 unsigned int cpu_khz;			/* TSC clocks / usec, not used here */
 static unsigned long hpet_period;	/* fsecs / HPET clock */
 unsigned long hpet_tick;		/* HPET clocks / interrupt */
+static int hpet_use_timer;
 unsigned long vxtime_hz = PIT_TICK_RATE;
 int report_lost_ticks;			/* command line option */
 unsigned long long monotonic_base;
@@ -105,7 +106,9 @@ static inline unsigned int do_gettimeoffset_tsc(void)
 
 static inline unsigned int do_gettimeoffset_hpet(void)
 {
-	return ((hpet_readl(HPET_COUNTER) - vxtime.last) * vxtime.quot) >> 32;
+	/* cap counter read to one tick to avoid inconsistencies */
+	unsigned long counter = hpet_readl(HPET_COUNTER) - vxtime.last;
+	return (min(counter,hpet_tick) * vxtime.quot) >> 32;
 }
 
 unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;
@@ -301,7 +304,7 @@ unsigned long long monotonic_clock(void)
 
 		last_offset = vxtime.last;
 		base = monotonic_base;
-		this_offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
+		this_offset = hpet_readl(HPET_COUNTER);
 
 	} while (read_seqretry(&xtime_lock, seq));
 	offset = (this_offset - last_offset);
@@ -377,7 +380,14 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 
 	write_seqlock(&xtime_lock);
 
-	if (vxtime.hpet_address) {
+	if (vxtime.hpet_address)
+		offset = hpet_readl(HPET_COUNTER);
+
+	if (hpet_use_timer) {
+		/* if we're using the hpet timer functionality,
+		 * we can more accurately know the counter value
+		 * when the timer interrupt occured.
+		 */
 		offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
 		delay = hpet_readl(HPET_COUNTER) - offset;
 	} else {
@@ -803,17 +813,18 @@ static int hpet_timer_stop_set_go(unsigned long tick)
  * Set up timer 0, as periodic with first interrupt to happen at hpet_tick,
  * and period also hpet_tick.
  */
-
+	if (hpet_use_timer) {
 	hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
 		    HPET_TN_32BIT, HPET_T0_CFG);
 	hpet_writel(hpet_tick, HPET_T0_CMP);
 	hpet_writel(hpet_tick, HPET_T0_CMP); /* AK: why twice? */
-
+		cfg |= HPET_CFG_LEGACY;
+	}
 /*
  * Go!
  */
 
-	cfg |= HPET_CFG_ENABLE | HPET_CFG_LEGACY;
+	cfg |= HPET_CFG_ENABLE;
 	hpet_writel(cfg, HPET_CFG);
 
 	return 0;
@@ -834,8 +845,7 @@ static int hpet_init(void)
 
 	id = hpet_readl(HPET_ID);
 
-	if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER) ||
-	    !(id & HPET_ID_LEGSUP))
+	if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER))
 		return -1;
 
 	hpet_period = hpet_readl(HPET_PERIOD);
@@ -845,6 +855,8 @@ static int hpet_init(void)
 	hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) /
 		hpet_period;
 
+	hpet_use_timer = (id & HPET_ID_LEGSUP);
+
 	return hpet_timer_stop_set_go(hpet_tick);
 }
 
@@ -901,9 +913,11 @@ void __init time_init(void)
 	set_normalized_timespec(&wall_to_monotonic,
 				-xtime.tv_sec, -xtime.tv_nsec);
 
-	if (!hpet_init()) {
+	if (!hpet_init())
 		vxtime_hz = (1000000000000000L + hpet_period / 2) /
 			hpet_period;
+
+	if (hpet_use_timer) {
 		cpu_khz = hpet_calibrate_tsc();
 		timename = "HPET";
 #ifdef CONFIG_X86_PM_TIMER
@@ -968,7 +982,7 @@ void __init time_init_gtod(void)
 	if (unsynchronized_tsc())
 		notsc = 1;
 	if (vxtime.hpet_address && notsc) {
-		timetype = "HPET";
+		timetype = hpet_use_timer ? "HPET" : "PIT/HPET";
 		vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
 		vxtime.mode = VXTIME_HPET;
 		do_gettimeoffset = do_gettimeoffset_hpet;
@@ -983,7 +997,7 @@ void __init time_init_gtod(void)
 		printk(KERN_INFO "Disabling vsyscall due to use of PM timer\n");
 #endif
 	} else {
-		timetype = vxtime.hpet_address ? "HPET/TSC" : "PIT/TSC";
+		timetype = hpet_use_timer ? "HPET/TSC" : "PIT/TSC";
 		vxtime.mode = VXTIME_TSC;
 	}
 
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 3dfec8fdabcd..121646fc43f6 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -274,7 +274,7 @@ EXPORT_SYMBOL(dump_stack);
 void show_registers(struct pt_regs *regs)
 {
 	int i;
-	int in_kernel = (regs->cs & 3) == 0;
+	int in_kernel = !user_mode(regs);
 	unsigned long rsp;
 	const int cpu = safe_smp_processor_id();
 	struct task_struct *cur = cpu_pda[cpu].pcurrent;
@@ -318,7 +318,7 @@ void handle_BUG(struct pt_regs *regs)
 	struct bug_frame f;
 	char tmp;
 
-	if (regs->cs & 3)
+	if (user_mode(regs))
 		return;
 	if (__copy_from_user(&f, (struct bug_frame *) regs->rip,
 			     sizeof(struct bug_frame)))
@@ -437,7 +437,7 @@ static void do_trap(int trapnr, int signr, char *str,
 	}
 #endif
 
-	if ((regs->cs & 3) != 0) {
+	if (user_mode(regs)) {
 		struct task_struct *tsk = current;
 
 		if (exception_trace && unhandled_signal(tsk, signr))
@@ -522,7 +522,7 @@ asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
 	}
 #endif
 
-	if ((regs->cs & 3)!=0) {
+	if (user_mode(regs)) {
 		struct task_struct *tsk = current;
 
 		if (exception_trace && unhandled_signal(tsk, SIGSEGV))
@@ -638,7 +638,7 @@ asmlinkage struct pt_regs *sync_regs(struct pt_regs *eregs)
 	if (eregs == (struct pt_regs *)eregs->rsp)
 		;
 	/* Exception from user space */
-	else if (eregs->cs & 3)
+	else if (user_mode(eregs))
 		regs = ((struct pt_regs *)current->thread.rsp0) - 1;
 	/* Exception from kernel and interrupts are enabled. Move to
 	   kernel process stack. */
@@ -669,7 +669,7 @@ asmlinkage void do_debug(struct pt_regs * regs, unsigned long error_code)
 	}
 #endif
 
-	asm("movq %%db6,%0" : "=r" (condition));
+	get_debugreg(condition, 6);
 
 	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
 						SIGTRAP) == NOTIFY_STOP)
@@ -697,7 +697,7 @@ asmlinkage void do_debug(struct pt_regs * regs, unsigned long error_code)
 	 * allowing programs to debug themselves without the ptrace()
 	 * interface.
 	 */
-	if ((regs->cs & 3) == 0)
+	if (!user_mode(regs))
 		goto clear_TF_reenable;
 	/*
 	 * Was the TF flag set by a debugger? If so, clear it now,
@@ -715,13 +715,13 @@ asmlinkage void do_debug(struct pt_regs * regs, unsigned long error_code)
 	info.si_signo = SIGTRAP;
 	info.si_errno = 0;
 	info.si_code = TRAP_BRKPT;
-	if ((regs->cs & 3) == 0)
+	if (!user_mode(regs))
 		goto clear_dr7;
 
 	info.si_addr = (void __user *)regs->rip;
 	force_sig_info(SIGTRAP, &info, tsk);
 clear_dr7:
-	asm volatile("movq %0,%%db7"::"r"(0UL));
+	set_debugreg(0UL, 7);
 	return;
 
 clear_TF_reenable:
@@ -756,7 +756,7 @@ asmlinkage void do_coprocessor_error(struct pt_regs *regs)
 	unsigned short cwd, swd;
 
 	conditional_sti(regs);
-	if ((regs->cs & 3) == 0 &&
+	if (!user_mode(regs) &&
 	    kernel_math_error(regs, "kernel x87 math error"))
 		return;
 
@@ -822,7 +822,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
 	unsigned short mxcsr;
 
 	conditional_sti(regs);
-	if ((regs->cs & 3) == 0 &&
+	if (!user_mode(regs) &&
 	    kernel_math_error(regs, "kernel simd math error"))
 		return;
 