Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--  arch/i386/kernel/apic.c               |   2
-rw-r--r--  arch/i386/kernel/cpu/common.c         |   2
-rw-r--r--  arch/i386/kernel/cpu/mtrr/main.c      |  23
-rw-r--r--  arch/i386/kernel/cpu/proc.c           |   2
-rw-r--r--  arch/i386/kernel/i386_ksyms.c         | 160
-rw-r--r--  arch/i386/kernel/i387.c               |   3
-rw-r--r--  arch/i386/kernel/io_apic.c            |  13
-rw-r--r--  arch/i386/kernel/kprobes.c            | 176
-rw-r--r--  arch/i386/kernel/mpparse.c            |  31
-rw-r--r--  arch/i386/kernel/nmi.c                |  24
-rw-r--r--  arch/i386/kernel/pci-dma.c            |   3
-rw-r--r--  arch/i386/kernel/process.c            |  36
-rw-r--r--  arch/i386/kernel/ptrace.c             |   2
-rw-r--r--  arch/i386/kernel/reboot.c             |   5
-rw-r--r--  arch/i386/kernel/setup.c              |  28
-rw-r--r--  arch/i386/kernel/signal.c             |  31
-rw-r--r--  arch/i386/kernel/smp.c                |   3
-rw-r--r--  arch/i386/kernel/smpboot.c            |  14
-rw-r--r--  arch/i386/kernel/time.c               |   6
-rw-r--r--  arch/i386/kernel/timers/common.c      |  12
-rw-r--r--  arch/i386/kernel/timers/timer.c       |   9
-rw-r--r--  arch/i386/kernel/timers/timer_hpet.c  |   3
-rw-r--r--  arch/i386/kernel/timers/timer_pm.c    |   1
-rw-r--r--  arch/i386/kernel/timers/timer_tsc.c   |   8
-rw-r--r--  arch/i386/kernel/traps.c              |  21
25 files changed, 376 insertions, 242 deletions
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index d509836b70c3..8d993fa71754 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -1133,7 +1133,7 @@ inline void smp_local_timer_interrupt(struct pt_regs * regs)
         }
 
 #ifdef CONFIG_SMP
-        update_process_times(user_mode(regs));
+        update_process_times(user_mode_vm(regs));
 #endif
 }
 
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index d199e525680a..b9954248d0aa 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -635,7 +635,7 @@ void __init cpu_init (void)
 
         /* Clear all 6 debug registers: */
 
-#define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) );
+#define CD(register) set_debugreg(0, register)
 
         CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
 
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index e1c2042b9b7e..d66b09e0c820 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -375,6 +375,19 @@ int mtrr_add_page(unsigned long base, unsigned long size,
         return error;
 }
 
+static int mtrr_check(unsigned long base, unsigned long size)
+{
+        if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
+                printk(KERN_WARNING
+                        "mtrr: size and base must be multiples of 4 kiB\n");
+                printk(KERN_DEBUG
+                        "mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
+                dump_stack();
+                return -1;
+        }
+        return 0;
+}
+
 /**
  *	mtrr_add - Add a memory type region
  *	@base: Physical base address of region
@@ -415,11 +428,8 @@ int
 mtrr_add(unsigned long base, unsigned long size, unsigned int type,
          char increment)
 {
-        if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
-                printk(KERN_WARNING "mtrr: size and base must be multiples of 4 kiB\n");
-                printk(KERN_DEBUG "mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
+        if (mtrr_check(base, size))
                 return -EINVAL;
-        }
         return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
                              increment);
 }
@@ -511,11 +521,8 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 int
 mtrr_del(int reg, unsigned long base, unsigned long size)
 {
-        if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
-                printk(KERN_INFO "mtrr: size and base must be multiples of 4 kiB\n");
-                printk(KERN_DEBUG "mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
+        if (mtrr_check(base, size))
                 return -EINVAL;
-        }
         return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
 }
 
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index 7323c19f354e..8bd77d948a84 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -86,7 +86,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                 seq_printf(m, "stepping\t: unknown\n");
 
         if ( cpu_has(c, X86_FEATURE_TSC) ) {
-                seq_printf(m, "cpu MHz\t\t: %lu.%03lu\n",
+                seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
                         cpu_khz / 1000, (cpu_khz % 1000));
         }
 
diff --git a/arch/i386/kernel/i386_ksyms.c b/arch/i386/kernel/i386_ksyms.c
index 903190a4b3ff..180f070d03cb 100644
--- a/arch/i386/kernel/i386_ksyms.c
+++ b/arch/i386/kernel/i386_ksyms.c
@@ -1,97 +1,17 @@
 #include <linux/config.h>
 #include <linux/module.h>
-#include <linux/smp.h>
-#include <linux/user.h>
-#include <linux/elfcore.h>
-#include <linux/mca.h>
-#include <linux/sched.h>
-#include <linux/in6.h>
-#include <linux/interrupt.h>
-#include <linux/smp_lock.h>
-#include <linux/pm.h>
-#include <linux/pci.h>
-#include <linux/apm_bios.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/tty.h>
-#include <linux/highmem.h>
-#include <linux/time.h>
-
-#include <asm/semaphore.h>
-#include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/uaccess.h>
 #include <asm/checksum.h>
-#include <asm/io.h>
-#include <asm/delay.h>
-#include <asm/irq.h>
-#include <asm/mmx.h>
 #include <asm/desc.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-#include <asm/nmi.h>
-#include <asm/ist.h>
-#include <asm/kdebug.h>
-
-extern void dump_thread(struct pt_regs *, struct user *);
-extern spinlock_t rtc_lock;
 
 /* This is definitely a GPL-only symbol */
 EXPORT_SYMBOL_GPL(cpu_gdt_table);
 
-#if defined(CONFIG_APM_MODULE)
-extern void machine_real_restart(unsigned char *, int);
-EXPORT_SYMBOL(machine_real_restart);
-extern void default_idle(void);
-EXPORT_SYMBOL(default_idle);
-#endif
-
-#ifdef CONFIG_SMP
-extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
-extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
-#endif
-
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
-extern struct drive_info_struct drive_info;
-EXPORT_SYMBOL(drive_info);
-#endif
-
-extern unsigned long cpu_khz;
-extern unsigned long get_cmos_time(void);
-
-/* platform dependent support */
-EXPORT_SYMBOL(boot_cpu_data);
-#ifdef CONFIG_DISCONTIGMEM
-EXPORT_SYMBOL(node_data);
-EXPORT_SYMBOL(physnode_map);
-#endif
-#ifdef CONFIG_X86_NUMAQ
-EXPORT_SYMBOL(xquad_portio);
-#endif
-EXPORT_SYMBOL(dump_thread);
-EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL_GPL(kernel_fpu_begin);
-EXPORT_SYMBOL(__ioremap);
-EXPORT_SYMBOL(ioremap_nocache);
-EXPORT_SYMBOL(iounmap);
-EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(pm_idle);
-EXPORT_SYMBOL(pm_power_off);
-EXPORT_SYMBOL(get_cmos_time);
-EXPORT_SYMBOL(cpu_khz);
-EXPORT_SYMBOL(apm_info);
-
 EXPORT_SYMBOL(__down_failed);
 EXPORT_SYMBOL(__down_failed_interruptible);
 EXPORT_SYMBOL(__down_failed_trylock);
 EXPORT_SYMBOL(__up_wakeup);
 /* Networking helper routines. */
 EXPORT_SYMBOL(csum_partial_copy_generic);
-/* Delay loops */
-EXPORT_SYMBOL(__ndelay);
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__delay);
-EXPORT_SYMBOL(__const_udelay);
 
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
@@ -105,87 +25,11 @@ EXPORT_SYMBOL(__put_user_8);
 EXPORT_SYMBOL(strpbrk);
 EXPORT_SYMBOL(strstr);
 
-EXPORT_SYMBOL(strncpy_from_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(clear_user);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__copy_from_user_ll);
-EXPORT_SYMBOL(__copy_to_user_ll);
-EXPORT_SYMBOL(strnlen_user);
-
-EXPORT_SYMBOL(dma_alloc_coherent);
-EXPORT_SYMBOL(dma_free_coherent);
-
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL(pci_mem_start);
-#endif
-
-#ifdef CONFIG_PCI_BIOS
-EXPORT_SYMBOL(pcibios_set_irq_routing);
-EXPORT_SYMBOL(pcibios_get_irq_routing_table);
-#endif
-
-#ifdef CONFIG_X86_USE_3DNOW
-EXPORT_SYMBOL(_mmx_memcpy);
-EXPORT_SYMBOL(mmx_clear_page);
-EXPORT_SYMBOL(mmx_copy_page);
-#endif
-
-#ifdef CONFIG_X86_HT
-EXPORT_SYMBOL(smp_num_siblings);
-EXPORT_SYMBOL(cpu_sibling_map);
-#endif
-
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(cpu_data);
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_callout_map);
+extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
+extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
 EXPORT_SYMBOL(__write_lock_failed);
 EXPORT_SYMBOL(__read_lock_failed);
-
-/* Global SMP stuff */
-EXPORT_SYMBOL(smp_call_function);
-
-/* TLB flushing */
-EXPORT_SYMBOL(flush_tlb_page);
-#endif
-
-#ifdef CONFIG_X86_IO_APIC
-EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
-#endif
-
-#ifdef CONFIG_MCA
-EXPORT_SYMBOL(machine_id);
-#endif
-
-#ifdef CONFIG_VT
-EXPORT_SYMBOL(screen_info);
-#endif
-
-EXPORT_SYMBOL(get_wchan);
-
-EXPORT_SYMBOL(rtc_lock);
-
-EXPORT_SYMBOL_GPL(set_nmi_callback);
-EXPORT_SYMBOL_GPL(unset_nmi_callback);
-
-EXPORT_SYMBOL(register_die_notifier);
-#ifdef CONFIG_HAVE_DEC_LOCK
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
-
-EXPORT_SYMBOL(__PAGE_KERNEL);
-
-#ifdef CONFIG_HIGHMEM
-EXPORT_SYMBOL(kmap);
-EXPORT_SYMBOL(kunmap);
-EXPORT_SYMBOL(kmap_atomic);
-EXPORT_SYMBOL(kunmap_atomic);
-EXPORT_SYMBOL(kmap_atomic_to_page);
-#endif
-
-#if defined(CONFIG_X86_SPEEDSTEP_SMI) || defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
-EXPORT_SYMBOL(ist_info);
 #endif
 
 EXPORT_SYMBOL(csum_partial);
diff --git a/arch/i386/kernel/i387.c b/arch/i386/kernel/i387.c
index c55e037f08f7..b817168d9c62 100644
--- a/arch/i386/kernel/i387.c
+++ b/arch/i386/kernel/i387.c
@@ -10,6 +10,7 @@
 
 #include <linux/config.h>
 #include <linux/sched.h>
+#include <linux/module.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
 #include <asm/math_emu.h>
@@ -79,6 +80,7 @@ void kernel_fpu_begin(void)
         }
         clts();
 }
+EXPORT_SYMBOL_GPL(kernel_fpu_begin);
 
 void restore_fpu( struct task_struct *tsk )
 {
@@ -526,6 +528,7 @@ int dump_fpu( struct pt_regs *regs, struct user_i387_struct *fpu )
 
         return fpvalid;
 }
+EXPORT_SYMBOL(dump_fpu);
 
 int dump_task_fpu(struct task_struct *tsk, struct user_i387_struct *fpu)
 {
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 7a324e8b86f9..08540bc4ba3e 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -31,7 +31,7 @@
 #include <linux/mc146818rtc.h>
 #include <linux/compiler.h>
 #include <linux/acpi.h>
-
+#include <linux/module.h>
 #include <linux/sysdev.h>
 #include <asm/io.h>
 #include <asm/smp.h>
@@ -812,6 +812,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
         }
         return best_guess;
 }
+EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
 
 /*
  * This function currently is only a helper for the i386 smp boot process where
@@ -1659,6 +1660,12 @@ static void __init setup_ioapic_ids_from_mpc(void)
         unsigned long flags;
 
         /*
+         * Don't check I/O APIC IDs for xAPIC systems.  They have
+         * no meaning without the serial APIC bus.
+         */
+        if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 < 15))
+                return;
+        /*
          * This is broken; anything with a real cpu count has to
          * circumvent this idiocy regardless.
          */
@@ -1684,10 +1691,6 @@ static void __init setup_ioapic_ids_from_mpc(void)
                 mp_ioapics[apic].mpc_apicid = reg_00.bits.ID;
         }
 
-        /* Don't check I/O APIC IDs for some xAPIC systems.  They have
-         * no meaning without the serial APIC bus. */
-        if (NO_IOAPIC_CHECK)
-                continue;
         /*
          * Sanity check, is the ID really free? Every APIC in a
          * system must have a unique ID or we get lots of nice
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 59ff9b455069..3762f6b35ab2 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -23,6 +23,9 @@
 *              Rusty Russell).
 * 2004-July   Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *              interface to access function arguments.
+ * 2005-May    Hien Nguyen <hien@us.ibm.com>, Jim Keniston
+ *              <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
+ *              <prasanna@in.ibm.com> added function-return probes.
 */

#include <linux/config.h>
@@ -30,15 +33,14 @@
 #include <linux/ptrace.h>
 #include <linux/spinlock.h>
 #include <linux/preempt.h>
+#include <asm/cacheflush.h>
 #include <asm/kdebug.h>
 #include <asm/desc.h>
 
-/* kprobe_status settings */
-#define KPROBE_HIT_ACTIVE	0x00000001
-#define KPROBE_HIT_SS		0x00000002
-
 static struct kprobe *current_kprobe;
 static unsigned long kprobe_status, kprobe_old_eflags, kprobe_saved_eflags;
+static struct kprobe *kprobe_prev;
+static unsigned long kprobe_status_prev, kprobe_old_eflags_prev, kprobe_saved_eflags_prev;
 static struct pt_regs jprobe_saved_regs;
 static long *jprobe_saved_esp;
 /* copy of the kernel stack at the probe fire time */
@@ -68,16 +70,50 @@ int arch_prepare_kprobe(struct kprobe *p)
 void arch_copy_kprobe(struct kprobe *p)
 {
         memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+        p->opcode = *p->addr;
 }
 
-void arch_remove_kprobe(struct kprobe *p)
+void arch_arm_kprobe(struct kprobe *p)
 {
+        *p->addr = BREAKPOINT_INSTRUCTION;
+        flush_icache_range((unsigned long) p->addr,
+                           (unsigned long) p->addr + sizeof(kprobe_opcode_t));
 }
 
-static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
+void arch_disarm_kprobe(struct kprobe *p)
 {
         *p->addr = p->opcode;
-        regs->eip = (unsigned long)p->addr;
+        flush_icache_range((unsigned long) p->addr,
+                           (unsigned long) p->addr + sizeof(kprobe_opcode_t));
+}
+
+void arch_remove_kprobe(struct kprobe *p)
+{
+}
+
+static inline void save_previous_kprobe(void)
+{
+        kprobe_prev = current_kprobe;
+        kprobe_status_prev = kprobe_status;
+        kprobe_old_eflags_prev = kprobe_old_eflags;
+        kprobe_saved_eflags_prev = kprobe_saved_eflags;
+}
+
+static inline void restore_previous_kprobe(void)
+{
+        current_kprobe = kprobe_prev;
+        kprobe_status = kprobe_status_prev;
+        kprobe_old_eflags = kprobe_old_eflags_prev;
+        kprobe_saved_eflags = kprobe_saved_eflags_prev;
+}
+
+static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs)
+{
+        current_kprobe = p;
+        kprobe_saved_eflags = kprobe_old_eflags
+                = (regs->eflags & (TF_MASK | IF_MASK));
+        if (is_IF_modifier(p->opcode))
+                kprobe_saved_eflags &= ~IF_MASK;
 }
 
 static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
@@ -91,6 +127,50 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
         regs->eip = (unsigned long)&p->ainsn.insn;
 }
 
+struct task_struct *arch_get_kprobe_task(void *ptr)
+{
+        return ((struct thread_info *) (((unsigned long) ptr) &
+                                        (~(THREAD_SIZE -1))))->task;
+}
+
+void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+{
+        unsigned long *sara = (unsigned long *)&regs->esp;
+        struct kretprobe_instance *ri;
+        static void *orig_ret_addr;
+
+        /*
+         * Save the return address when the return probe hits
+         * the first time, and use it to populate the (krprobe
+         * instance)->ret_addr for subsequent return probes at
+         * the same addrress since stack address would have
+         * the kretprobe_trampoline by then.
+         */
+        if (((void*) *sara) != kretprobe_trampoline)
+                orig_ret_addr = (void*) *sara;
+
+        if ((ri = get_free_rp_inst(rp)) != NULL) {
+                ri->rp = rp;
+                ri->stack_addr = sara;
+                ri->ret_addr = orig_ret_addr;
+                add_rp_inst(ri);
+                /* Replace the return addr with trampoline addr */
+                *sara = (unsigned long) &kretprobe_trampoline;
+        } else {
+                rp->nmissed++;
+        }
+}
+
+void arch_kprobe_flush_task(struct task_struct *tk)
+{
+        struct kretprobe_instance *ri;
+        while ((ri = get_rp_inst_tsk(tk)) != NULL) {
+                *((unsigned long *)(ri->stack_addr)) =
+                        (unsigned long) ri->ret_addr;
+                recycle_rp_inst(ri);
+        }
+}
+
 /*
  * Interrupts are disabled on entry as trap3 is an interrupt gate and they
  * remain disabled thorough out this function.
@@ -127,8 +207,18 @@ static int kprobe_handler(struct pt_regs *regs)
                         unlock_kprobes();
                         goto no_kprobe;
                 }
-                disarm_kprobe(p, regs);
-                ret = 1;
+                /* We have reentered the kprobe_handler(), since
+                 * another probe was hit while within the handler.
+                 * We here save the original kprobes variables and
+                 * just single step on the instruction of the new probe
+                 * without calling any user handlers.
+                 */
+                save_previous_kprobe();
+                set_current_kprobe(p, regs);
+                p->nmissed++;
+                prepare_singlestep(p, regs);
+                kprobe_status = KPROBE_REENTER;
+                return 1;
         } else {
                 p = current_kprobe;
                 if (p->break_handler && p->break_handler(p, regs)) {
@@ -163,11 +253,7 @@ static int kprobe_handler(struct pt_regs *regs)
         }
 
         kprobe_status = KPROBE_HIT_ACTIVE;
-        current_kprobe = p;
-        kprobe_saved_eflags = kprobe_old_eflags
-                = (regs->eflags & (TF_MASK | IF_MASK));
-        if (is_IF_modifier(p->opcode))
-                kprobe_saved_eflags &= ~IF_MASK;
+        set_current_kprobe(p, regs);
 
         if (p->pre_handler && p->pre_handler(p, regs))
                 /* handler has already set things up, so skip ss setup */
@@ -184,6 +270,55 @@ no_kprobe:
 }
 
 /*
+ * For function-return probes, init_kprobes() establishes a probepoint
+ * here. When a retprobed function returns, this probe is hit and
+ * trampoline_probe_handler() runs, calling the kretprobe's handler.
+ */
+ void kretprobe_trampoline_holder(void)
+ {
+        asm volatile (  ".global kretprobe_trampoline\n"
+                        "kretprobe_trampoline: \n"
+                        "nop\n");
+ }
+
+/*
+ * Called when we hit the probe point at kretprobe_trampoline
+ */
+int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+        struct task_struct *tsk;
+        struct kretprobe_instance *ri;
+        struct hlist_head *head;
+        struct hlist_node *node;
+        unsigned long *sara = ((unsigned long *) &regs->esp) - 1;
+
+        tsk = arch_get_kprobe_task(sara);
+        head = kretprobe_inst_table_head(tsk);
+
+        hlist_for_each_entry(ri, node, head, hlist) {
+                if (ri->stack_addr == sara && ri->rp) {
+                        if (ri->rp->handler)
+                                ri->rp->handler(ri, regs);
+                }
+        }
+        return 0;
+}
+
+void trampoline_post_handler(struct kprobe *p, struct pt_regs *regs,
+                                                unsigned long flags)
+{
+        struct kretprobe_instance *ri;
+        /* RA already popped */
+        unsigned long *sara = ((unsigned long *)&regs->esp) - 1;
+
+        while ((ri = get_rp_inst(sara))) {
+                regs->eip = (unsigned long)ri->ret_addr;
+                recycle_rp_inst(ri);
+        }
+        regs->eflags &= ~TF_MASK;
+}
+
+/*
  * Called after single-stepping.  p->addr is the address of the
  * instruction whose first byte has been replaced by the "int 3"
  * instruction.  To avoid the SMP problems that can occur when we
@@ -263,13 +398,22 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
         if (!kprobe_running())
                 return 0;
 
-        if (current_kprobe->post_handler)
+        if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
+                kprobe_status = KPROBE_HIT_SSDONE;
                 current_kprobe->post_handler(current_kprobe, regs, 0);
+        }
 
-        resume_execution(current_kprobe, regs);
+        if (current_kprobe->post_handler != trampoline_post_handler)
+                resume_execution(current_kprobe, regs);
         regs->eflags |= kprobe_saved_eflags;
 
+        /*Restore back the original saved kprobes variables and continue. */
+        if (kprobe_status == KPROBE_REENTER) {
+                restore_previous_kprobe();
+                goto out;
+        }
         unlock_kprobes();
+out:
         preempt_enable_no_resched();
 
         /*
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 1347ab4939e7..383a11600d2c 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -914,7 +914,10 @@ void __init mp_register_ioapic (
         mp_ioapics[idx].mpc_apicaddr = address;
 
         set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
-        mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id);
+        if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 < 15))
+                mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id);
+        else
+                mp_ioapics[idx].mpc_apicid = id;
         mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
 
         /*
@@ -1055,11 +1058,20 @@ void __init mp_config_acpi_legacy_irqs (void)
         }
 }
 
+#define MAX_GSI_NUM     4096
+
 int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
 {
         int ioapic = -1;
         int ioapic_pin = 0;
         int idx, bit = 0;
+        static int pci_irq = 16;
+        /*
+         * Mapping between Global System Interrups, which
+         * represent all possible interrupts, and IRQs
+         * assigned to actual devices.
+         */
+        static int gsi_to_irq[MAX_GSI_NUM];
 
 #ifdef CONFIG_ACPI_BUS
         /* Don't set up the ACPI SCI because it's already set up */
@@ -1094,11 +1106,26 @@ int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
         if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
                 Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
                         mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
-                return gsi;
+                return gsi_to_irq[gsi];
         }
 
         mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
 
+        if (edge_level) {
+                /*
+                 * For PCI devices assign IRQs in order, avoiding gaps
+                 * due to unused I/O APIC pins.
+                 */
+                int irq = gsi;
+                if (gsi < MAX_GSI_NUM) {
+                        gsi = pci_irq++;
+                        gsi_to_irq[irq] = gsi;
+                } else {
+                        printk(KERN_ERR "GSI %u is too high\n", gsi);
+                        return gsi;
+                }
+        }
+
         io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
                 edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
                 active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 2c0ee9c2d020..da6c46d667cb 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -28,8 +28,7 @@
 #include <linux/sysctl.h>
 
 #include <asm/smp.h>
-#include <asm/mtrr.h>
-#include <asm/mpspec.h>
+#include <asm/div64.h>
 #include <asm/nmi.h>
 
 #include "mach_traps.h"
@@ -324,6 +323,16 @@ static void clear_msr_range(unsigned int base, unsigned int n)
                 wrmsr(base+i, 0, 0);
 }
 
+static inline void write_watchdog_counter(const char *descr)
+{
+        u64 count = (u64)cpu_khz * 1000;
+
+        do_div(count, nmi_hz);
+        if(descr)
+                Dprintk("setting %s to -0x%08Lx\n", descr, count);
+        wrmsrl(nmi_perfctr_msr, 0 - count);
+}
+
 static void setup_k7_watchdog(void)
 {
         unsigned int evntsel;
@@ -339,8 +348,7 @@ static void setup_k7_watchdog(void)
                 | K7_NMI_EVENT;
 
         wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
-        Dprintk("setting K7_PERFCTR0 to %08lx\n", -(cpu_khz/nmi_hz*1000));
-        wrmsr(MSR_K7_PERFCTR0, -(cpu_khz/nmi_hz*1000), -1);
+        write_watchdog_counter("K7_PERFCTR0");
         apic_write(APIC_LVTPC, APIC_DM_NMI);
         evntsel |= K7_EVNTSEL_ENABLE;
         wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
@@ -361,8 +369,7 @@ static void setup_p6_watchdog(void)
                 | P6_NMI_EVENT;
 
         wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
-        Dprintk("setting P6_PERFCTR0 to %08lx\n", -(cpu_khz/nmi_hz*1000));
-        wrmsr(MSR_P6_PERFCTR0, -(cpu_khz/nmi_hz*1000), 0);
+        write_watchdog_counter("P6_PERFCTR0");
         apic_write(APIC_LVTPC, APIC_DM_NMI);
         evntsel |= P6_EVNTSEL0_ENABLE;
         wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
@@ -402,8 +409,7 @@ static int setup_p4_watchdog(void)
 
         wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
         wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
-        Dprintk("setting P4_IQ_COUNTER0 to 0x%08lx\n", -(cpu_khz/nmi_hz*1000));
-        wrmsr(MSR_P4_IQ_COUNTER0, -(cpu_khz/nmi_hz*1000), -1);
+        write_watchdog_counter("P4_IQ_COUNTER0");
         apic_write(APIC_LVTPC, APIC_DM_NMI);
         wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
         return 1;
@@ -518,7 +524,7 @@ void nmi_watchdog_tick (struct pt_regs * regs)
                          * other P6 variant */
                         apic_write(APIC_LVTPC, APIC_DM_NMI);
                 }
-                wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1);
+                write_watchdog_counter(NULL);
         }
 }
 
diff --git a/arch/i386/kernel/pci-dma.c b/arch/i386/kernel/pci-dma.c
index 4de2e03c7b45..1e51427cc9eb 100644
--- a/arch/i386/kernel/pci-dma.c
+++ b/arch/i386/kernel/pci-dma.c
@@ -11,6 +11,7 @@
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <linux/pci.h>
+#include <linux/module.h>
 #include <asm/io.h>
 
 struct dma_coherent_mem {
@@ -54,6 +55,7 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
         }
         return ret;
 }
+EXPORT_SYMBOL(dma_alloc_coherent);
 
 void dma_free_coherent(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
@@ -68,6 +70,7 @@ void dma_free_coherent(struct device *dev, size_t size,
         } else
                 free_pages((unsigned long)vaddr, order);
 }
+EXPORT_SYMBOL(dma_free_coherent);
 
 int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                                 dma_addr_t device_addr, size_t size, int flags)
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 96e3ea6b17c7..aea2ce1145df 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -37,6 +37,7 @@
 #include <linux/kallsyms.h>
 #include <linux/ptrace.h>
 #include <linux/random.h>
+#include <linux/kprobes.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -73,6 +74,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
  * Powermanagement idle function, if any..
  */
 void (*pm_idle)(void);
+EXPORT_SYMBOL(pm_idle);
 static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 
 void disable_hlt(void)
@@ -105,6 +107,9 @@ void default_idle(void)
                         cpu_relax();
         }
 }
+#ifdef CONFIG_APM_MODULE
+EXPORT_SYMBOL(default_idle);
+#endif
 
 /*
  * On SMP it's slightly faster (but much more power-consuming!)
@@ -262,7 +267,7 @@ void show_regs(struct pt_regs * regs)
         printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
         print_symbol("EIP is at %s\n", regs->eip);
 
-        if (regs->xcs & 3)
+        if (user_mode(regs))
                 printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
         printk(" EFLAGS: %08lx    %s  (%s)\n",
                regs->eflags, print_tainted(), system_utsname.release);
@@ -325,6 +330,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
         /* Ok, create the new process.. */
         return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
 }
+EXPORT_SYMBOL(kernel_thread);
 
 /*
  * Free current thread data structures etc..
@@ -334,6 +340,13 @@ void exit_thread(void)
         struct task_struct *tsk = current;
         struct thread_struct *t = &tsk->thread;
 
+        /*
+         * Remove function-return probe instances associated with this task
+         * and put them back on the free list. Do not insert an exit probe for
+         * this function, it will be disabled by kprobe_flush_task if you do.
+         */
+        kprobe_flush_task(tsk);
+
         /* The process may have allocated an io port bitmap... nuke it. */
         if (unlikely(NULL != t->io_bitmap_ptr)) {
                 int cpu = get_cpu();
@@ -357,6 +370,13 @@ void flush_thread(void)
 {
         struct task_struct *tsk = current;
 
+        /*
+         * Remove function-return probe instances associated with this task
+         * and put them back on the free list. Do not insert an exit probe for
+         * this function, it will be disabled by kprobe_flush_task if you do.
+         */
+        kprobe_flush_task(tsk);
+
         memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
         memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
         /*
@@ -508,6 +528,7 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
 
         dump->u_fpvalid = dump_fpu (regs, &dump->i387);
 }
+EXPORT_SYMBOL(dump_thread);
 
 /*
  * Capture the user space registers if the task is not running (in user space)
@@ -627,13 +648,13 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
          * Now maybe reload the debug registers
          */
         if (unlikely(next->debugreg[7])) {
-                loaddebug(next, 0);
-                loaddebug(next, 1);
-                loaddebug(next, 2);
-                loaddebug(next, 3);
+                set_debugreg(current->thread.debugreg[0], 0);
+                set_debugreg(current->thread.debugreg[1], 1);
+                set_debugreg(current->thread.debugreg[2], 2);
+                set_debugreg(current->thread.debugreg[3], 3);
                 /* no 4 and 5 */
-                loaddebug(next, 6);
-                loaddebug(next, 7);
+                set_debugreg(current->thread.debugreg[6], 6);
+                set_debugreg(current->thread.debugreg[7], 7);
         }
 
         if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
@@ -731,6 +752,7 @@ unsigned long get_wchan(struct task_struct *p)
         } while (count++ < 16);
         return 0;
 }
+EXPORT_SYMBOL(get_wchan);
 
 /*
  * sys_alloc_thread_area: get a yet unused TLS descriptor index.
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index e34f651fa13c..0da59b42843c 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -668,7 +668,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
         info.si_code = TRAP_BRKPT;
 
         /* User-mode eip? */
-        info.si_addr = user_mode(regs) ? (void __user *) regs->eip : NULL;
+        info.si_addr = user_mode_vm(regs) ? (void __user *) regs->eip : NULL;
 
         /* Send us the fakey SIGTRAP */
         force_sig_info(SIGTRAP, &info, tsk);
diff --git a/arch/i386/kernel/reboot.c b/arch/i386/kernel/reboot.c
index 6dc27eb70ee7..db912209a8d3 100644
--- a/arch/i386/kernel/reboot.c
+++ b/arch/i386/kernel/reboot.c
@@ -2,6 +2,7 @@
  *  linux/arch/i386/kernel/reboot.c
  */
 
+#include <linux/config.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/delay.h>
@@ -19,6 +20,7 @@
  * Power off function, if any
  */
 void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
 
 static int reboot_mode;
 static int reboot_thru_bios;
@@ -295,6 +297,9 @@ void machine_real_restart(unsigned char *code, int length)
                                 :
                                 : "i" ((void *) (0x1000 - sizeof (real_mode_switch) - 100)));
 }
+#ifdef CONFIG_APM_MODULE
+EXPORT_SYMBOL(machine_real_restart);
+#endif
 
 void machine_restart(char * __unused)
 {
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 2bfbddebdbf8..30406fd0b64c 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -23,8 +23,10 @@
  * This file handles the architecture-dependent parts of initialization
  */
 
+#include <linux/config.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/mmzone.h>
 #include <linux/tty.h>
 #include <linux/ioport.h>
 #include <linux/acpi.h>
@@ -73,6 +75,7 @@ EXPORT_SYMBOL(efi_enabled);
 struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
 /* common cpu data for all cpus */
 struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
+EXPORT_SYMBOL(boot_cpu_data);
 
 unsigned long mmu_cr4_features;
 
@@ -90,12 +93,18 @@ extern acpi_interrupt_flags acpi_sci_flags;
 
 /* for MCA, but anyone else can use it if they want */
 unsigned int machine_id;
+#ifdef CONFIG_MCA
+EXPORT_SYMBOL(machine_id);
+#endif
 unsigned int machine_submodel_id;
 unsigned int BIOS_revision;
 unsigned int mca_pentium_flag;
 
 /* For PCI or other memory-mapped resources */
 unsigned long pci_mem_start = 0x10000000;
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL(pci_mem_start);
+#endif
 
 /* Boot loader ID as an integer, for the benefit of proc_dointvec */
 int bootloader_type;
@@ -107,14 +116,26 @@ static unsigned int highmem_pages = -1;
  * Setup options
  */
 struct drive_info_struct { char dummy[32]; } drive_info;
+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || \
+    defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
+EXPORT_SYMBOL(drive_info);
+#endif
 struct screen_info screen_info;
+#ifdef CONFIG_VT
+EXPORT_SYMBOL(screen_info);
+#endif
 struct apm_info apm_info;
+EXPORT_SYMBOL(apm_info);
 struct sys_desc_table_struct {
         unsigned short length;
         unsigned char table[0];
 };
 struct edid_info edid_info;
 struct ist_info ist_info;
+#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
+    defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
+EXPORT_SYMBOL(ist_info);
+#endif
 struct e820map e820;
 
 extern void early_cpu_init(void);
@@ -1022,7 +1043,7 @@ static void __init reserve_ebda_region(void)
                 reserve_bootmem(addr, PAGE_SIZE);
 }
 
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_NEED_MULTIPLE_NODES
 void __init setup_bootmem_allocator(void);
 static unsigned long __init setup_memory(void)
 {
@@ -1072,9 +1093,9 @@ void __init zone_sizes_init(void)
         free_area_init(zones_size);
 }
 #else
-extern unsigned long setup_memory(void);
+extern unsigned long __init setup_memory(void);
 extern void zone_sizes_init(void);
-#endif /* !CONFIG_DISCONTIGMEM */
+#endif /* !CONFIG_NEED_MULTIPLE_NODES */
 
 void __init setup_bootmem_allocator(void)
 {
@@ -1475,6 +1496,7 @@ void __init setup_arch(char **cmdline_p)
 #endif
         paging_init();
         remapped_pgdat_init();
+        sparse_init();
         zone_sizes_init();
 
         /*
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index ea46d028af08..b9b8f4e20fad 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -346,8 +346,8 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
 extern void __user __kernel_sigreturn;
 extern void __user __kernel_rt_sigreturn;
 
-static void setup_frame(int sig, struct k_sigaction *ka,
+static int setup_frame(int sig, struct k_sigaction *ka,
                         sigset_t *set, struct pt_regs * regs)
 {
         void __user *restorer;
         struct sigframe __user *frame;
@@ -429,13 +429,14 @@ static void setup_frame(int sig, struct k_sigaction *ka,
                 current->comm, current->pid, frame, regs->eip, frame->pretcode);
 #endif
 
-        return;
+        return 1;
 
 give_sigsegv:
         force_sigsegv(sig, current);
+        return 0;
 }
 
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                            sigset_t *set, struct pt_regs * regs)
 {
         void __user *restorer;
@@ -522,20 +523,23 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                 current->comm, current->pid, frame, regs->eip, frame->pretcode);
 #endif
 
-        return;
+        return 1;
 
 give_sigsegv:
         force_sigsegv(sig, current);
+        return 0;
 }
 
 /*
  * OK, we're invoking a handler
  */
 
-static void
+static int
 handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
               sigset_t *oldset, struct pt_regs * regs)
 {
+        int ret;
+
         /* Are we from a system call? */
         if (regs->orig_eax >= 0) {
                 /* If so, check system call restarting.. */
@@ -569,17 +573,19 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
 
         /* Set up the stack frame */
         if (ka->sa.sa_flags & SA_SIGINFO)
-                setup_rt_frame(sig, ka, info, oldset, regs);
+                ret = setup_rt_frame(sig, ka, info, oldset, regs);
         else
-                setup_frame(sig, ka, oldset, regs);
+                ret = setup_frame(sig, ka, oldset, regs);
 
-        if (!(ka->sa.sa_flags & SA_NODEFER)) {
+        if (ret && !(ka->sa.sa_flags & SA_NODEFER)) {
                 spin_lock_irq(&current->sighand->siglock);
                 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
                 sigaddset(&current->blocked,sig);
                 recalc_sigpending();
                 spin_unlock_irq(&current->sighand->siglock);
         }
+
+        return ret;
 }
 
 /*
@@ -599,7 +605,7 @@ int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
          * kernel mode. Just return without doing anything
          * if so.
          */
-        if ((regs->xcs & 3) != 3)
+        if (!user_mode(regs))
                 return 1;
 
         if (current->flags & PF_FREEZE) {
@@ -618,12 +624,11 @@ int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
                  * inside the kernel.
                  */
                 if (unlikely(current->thread.debugreg[7])) {
-                        loaddebug(&current->thread, 7);
+                        set_debugreg(current->thread.debugreg[7], 7);
                 }
 
                 /* Whee!  Actually deliver the signal.  */
-                handle_signal(signr, &info, &ka, oldset, regs);
-                return 1;
+                return handle_signal(signr, &info, &ka, oldset, regs);
         }
 
  no_signal:
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 6223c33ac91c..68be7d0c7238 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -19,6 +19,7 @@
 #include <linux/mc146818rtc.h>
 #include <linux/cache.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
 
 #include <asm/mtrr.h>
 #include <asm/tlbflush.h>
@@ -452,6 +453,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 
         preempt_enable();
 }
+EXPORT_SYMBOL(flush_tlb_page);
 
 static void do_flush_tlb_all(void* info)
 {
@@ -547,6 +549,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
 
         return 0;
 }
+EXPORT_SYMBOL(smp_call_function);
 
 static void stop_this_cpu (void * dummy)
 {
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index bc1bb6919e6a..c20d96d5c15c 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -60,6 +60,9 @@ static int __initdata smp_b_stepping;
 
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
+#ifdef CONFIG_X86_HT
+EXPORT_SYMBOL(smp_num_siblings);
+#endif
 int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
 EXPORT_SYMBOL(phys_proc_id);
 int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
@@ -67,13 +70,16 @@ EXPORT_SYMBOL(cpu_core_id);
 
 /* bitmap of online cpus */
 cpumask_t cpu_online_map;
+EXPORT_SYMBOL(cpu_online_map);
 
 cpumask_t cpu_callin_map;
 cpumask_t cpu_callout_map;
+EXPORT_SYMBOL(cpu_callout_map);
 static cpumask_t smp_commenced_mask;
 
 /* Per CPU bogomips and other parameters */
 struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
+EXPORT_SYMBOL(cpu_data);
 
 u8 x86_cpu_to_apicid[NR_CPUS] =
                         { [0 ... NR_CPUS-1] = 0xff };
@@ -199,7 +205,7 @@ static void __init synchronize_tsc_bp (void)
         unsigned long long t0;
         unsigned long long sum, avg;
         long long delta;
-        unsigned long one_usec;
+        unsigned int one_usec;
         int buggy = 0;
 
         printk(KERN_INFO "checking TSC synchronization across %u CPUs: ", num_booting_cpus());
@@ -885,8 +891,14 @@ static void smp_tune_scheduling (void)
 static int boot_cpu_logical_apicid;
 /* Where the IO area was mapped on multiquad, always 0 otherwise */
 void *xquad_portio;
+#ifdef CONFIG_X86_NUMAQ
+EXPORT_SYMBOL(xquad_portio);
+#endif
 
 cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
+#ifdef CONFIG_X86_HT
+EXPORT_SYMBOL(cpu_sibling_map);
+#endif
 cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
 EXPORT_SYMBOL(cpu_core_map);
 
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index a0dcb7c87c30..e68d9fdb0759 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -77,11 +77,13 @@ u64 jiffies_64 = INITIAL_JIFFIES;
 
 EXPORT_SYMBOL(jiffies_64);
 
-unsigned long cpu_khz;  /* Detected as we calibrate the TSC */
+unsigned int cpu_khz;   /* Detected as we calibrate the TSC */
+EXPORT_SYMBOL(cpu_khz);
 
 extern unsigned long wall_jiffies;
 
 DEFINE_SPINLOCK(rtc_lock);
+EXPORT_SYMBOL(rtc_lock);
 
 DEFINE_SPINLOCK(i8253_lock);
 EXPORT_SYMBOL(i8253_lock);
@@ -324,6 +326,8 @@ unsigned long get_cmos_time(void)
 
         return retval;
 }
+EXPORT_SYMBOL(get_cmos_time);
+
 static void sync_cmos_clock(unsigned long dummy);
 
 static struct timer_list sync_cmos_timer =
diff --git a/arch/i386/kernel/timers/common.c b/arch/i386/kernel/timers/common.c
index 8e201219f525..37353bd31803 100644
--- a/arch/i386/kernel/timers/common.c
+++ b/arch/i386/kernel/timers/common.c
@@ -139,6 +139,15 @@ bad_calibration:
 }
 #endif
 
+
+unsigned long read_timer_tsc(void)
+{
+        unsigned long retval;
+        rdtscl(retval);
+        return retval;
+}
+
+
 /* calculate cpu_khz */
 void init_cpu_khz(void)
 {
@@ -154,7 +163,8 @@ void init_cpu_khz(void)
                         :"=a" (cpu_khz), "=d" (edx)
                         :"r" (tsc_quotient),
                         "0" (eax), "1" (edx));
-                        printk("Detected %lu.%03lu MHz processor.\n", cpu_khz / 1000, cpu_khz % 1000);
+                        printk("Detected %u.%03u MHz processor.\n",
+                                cpu_khz / 1000, cpu_khz % 1000);
                 }
         }
 }
diff --git a/arch/i386/kernel/timers/timer.c b/arch/i386/kernel/timers/timer.c
index a3d6a288088b..7e39ed8e33f8 100644
--- a/arch/i386/kernel/timers/timer.c
+++ b/arch/i386/kernel/timers/timer.c
@@ -64,3 +64,12 @@ struct timer_opts* __init select_timer(void)
         panic("select_timer: Cannot find a suitable timer\n");
         return NULL;
 }
+
+int read_current_timer(unsigned long *timer_val)
+{
+        if (cur_timer->read_timer) {
+                *timer_val = cur_timer->read_timer();
+                return 0;
+        }
+        return -1;
+}
diff --git a/arch/i386/kernel/timers/timer_hpet.c b/arch/i386/kernel/timers/timer_hpet.c
index f778f471a09a..d766e0963ac1 100644
--- a/arch/i386/kernel/timers/timer_hpet.c
+++ b/arch/i386/kernel/timers/timer_hpet.c
@@ -158,7 +158,7 @@ static int __init init_hpet(char* override)
                 {       unsigned long eax=0, edx=1000;
                         ASM_DIV64_REG(cpu_khz, edx, tsc_quotient,
                                         eax, edx);
-                        printk("Detected %lu.%03lu MHz processor.\n",
+                        printk("Detected %u.%03u MHz processor.\n",
                                 cpu_khz / 1000, cpu_khz % 1000);
                 }
                 set_cyc2ns_scale(cpu_khz/1000);
@@ -186,6 +186,7 @@ static struct timer_opts timer_hpet = {
         .get_offset =           get_offset_hpet,
         .monotonic_clock =      monotonic_clock_hpet,
         .delay =                delay_hpet,
+        .read_timer =           read_timer_tsc,
 };
 
 struct init_timer_opts __initdata timer_hpet_init = {
diff --git a/arch/i386/kernel/timers/timer_pm.c b/arch/i386/kernel/timers/timer_pm.c
index d77f22030fe6..4ef20e663498 100644
--- a/arch/i386/kernel/timers/timer_pm.c
+++ b/arch/i386/kernel/timers/timer_pm.c
@@ -246,6 +246,7 @@ static struct timer_opts timer_pmtmr = {
         .get_offset =           get_offset_pmtmr,
         .monotonic_clock =      monotonic_clock_pmtmr,
         .delay =                delay_pmtmr,
+        .read_timer =           read_timer_tsc,
 };
 
 struct init_timer_opts __initdata timer_pmtmr_init = {
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index 180444d87824..54c36b182021 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -256,7 +256,7 @@ static unsigned long loops_per_jiffy_ref = 0;
 
 #ifndef CONFIG_SMP
 static unsigned long fast_gettimeoffset_ref = 0;
-static unsigned long cpu_khz_ref = 0;
+static unsigned int cpu_khz_ref = 0;
 #endif
 
 static int
@@ -323,7 +323,7 @@ static inline void cpufreq_delayed_get(void) { return; }
 int recalibrate_cpu_khz(void)
 {
 #ifndef CONFIG_SMP
-        unsigned long cpu_khz_old = cpu_khz;
+        unsigned int cpu_khz_old = cpu_khz;
 
         if (cpu_has_tsc) {
                 init_cpu_khz();
@@ -534,7 +534,8 @@ static int __init init_tsc(char* override)
                                 :"=a" (cpu_khz), "=d" (edx)
                                 :"r" (tsc_quotient),
                                 "0" (eax), "1" (edx));
-                        printk("Detected %lu.%03lu MHz processor.\n", cpu_khz / 1000, cpu_khz % 1000);
+                        printk("Detected %u.%03u MHz processor.\n",
+                                cpu_khz / 1000, cpu_khz % 1000);
                 }
         set_cyc2ns_scale(cpu_khz/1000);
         return 0;
@@ -572,6 +573,7 @@ static struct timer_opts timer_tsc = {
         .get_offset =           get_offset_tsc,
         .monotonic_clock =      monotonic_clock_tsc,
         .delay =                delay_tsc,
+        .read_timer =           read_timer_tsc,
 };
 
 struct init_timer_opts __initdata timer_tsc_init = {
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 83c579e82a81..e4d4e2162c7a 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -104,6 +104,7 @@ int register_die_notifier(struct notifier_block *nb)
         spin_unlock_irqrestore(&die_notifier_lock, flags);
         return err;
 }
+EXPORT_SYMBOL(register_die_notifier);
 
 static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
 {
@@ -209,7 +210,7 @@ void show_registers(struct pt_regs *regs)
 
         esp = (unsigned long) (&regs->esp);
         ss = __KERNEL_DS;
-        if (regs->xcs & 3) {
+        if (user_mode(regs)) {
                 in_kernel = 0;
                 esp = regs->esp;
                 ss = regs->xss & 0xffff;
@@ -265,7 +266,7 @@ static void handle_BUG(struct pt_regs *regs)
         char c;
         unsigned long eip;
 
-        if (regs->xcs & 3)
+        if (user_mode(regs))
                 goto no_bug;            /* Not in kernel */
 
         eip = regs->eip;
@@ -353,7 +354,7 @@ void die(const char * str, struct pt_regs * regs, long err)
 
 static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
 {
-        if (!(regs->eflags & VM_MASK) && !(3 & regs->xcs))
+        if (!user_mode_vm(regs))
                 die(str, regs, err);
 }
 
@@ -366,7 +367,7 @@ static void do_trap(int trapnr, int signr, char *str, int vm86,
                 goto trap_signal;
         }
 
-        if (!(regs->xcs & 3))
+        if (!user_mode(regs))
                 goto kernel_trap;
 
         trap_signal: {
@@ -488,7 +489,7 @@ fastcall void do_general_protection(struct pt_regs * regs, long error_code)
         if (regs->eflags & VM_MASK)
                 goto gp_in_vm86;
 
-        if (!(regs->xcs & 3))
+        if (!user_mode(regs))
                 goto gp_in_kernel;
 
         current->thread.error_code = error_code;
@@ -636,11 +637,13 @@ void set_nmi_callback(nmi_callback_t callback)
 {
         nmi_callback = callback;
 }
+EXPORT_SYMBOL_GPL(set_nmi_callback);
 
 void unset_nmi_callback(void)
 {
         nmi_callback = dummy_nmi_callback;
 }
+EXPORT_SYMBOL_GPL(unset_nmi_callback);
 
 #ifdef CONFIG_KPROBES
 fastcall void do_int3(struct pt_regs *regs, long error_code)
@@ -682,7 +685,7 @@ fastcall void do_debug(struct pt_regs * regs, long error_code)
         unsigned int condition;
         struct task_struct *tsk = current;
 
-        __asm__ __volatile__("movl %%db6,%0" : "=r" (condition));
+        get_debugreg(condition, 6);
 
         if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
                                         SIGTRAP) == NOTIFY_STOP)
@@ -713,7 +716,7 @@ fastcall void do_debug(struct pt_regs * regs, long error_code)
                  * check for kernel mode by just checking the CPL
                  * of CS.
                  */
-                if ((regs->xcs & 3) == 0)
+                if (!user_mode(regs))
                         goto clear_TF_reenable;
         }
 
@@ -724,9 +727,7 @@ fastcall void do_debug(struct pt_regs * regs, long error_code)
          * the signal is delivered.
          */
 clear_dr7:
-        __asm__("movl %0,%%db7"
-                : /* no output */
-                : "r" (0));
+        set_debugreg(0, 7);
         return;
 
 debug_vm86: