Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--  arch/i386/kernel/Makefile                   3
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c      2
-rw-r--r--  arch/i386/kernel/cpu/mcheck/mce.h           2
-rw-r--r--  arch/i386/kernel/crash.c                    2
-rw-r--r--  arch/i386/kernel/entry.S                    2
-rw-r--r--  arch/i386/kernel/ioport.c                   1
-rw-r--r--  arch/i386/kernel/kprobes.c                  9
-rw-r--r--  arch/i386/kernel/machine_kexec.c           13
-rw-r--r--  arch/i386/kernel/nmi.c                      1
-rw-r--r--  arch/i386/kernel/process.c                 50
-rw-r--r--  arch/i386/kernel/ptrace.c                   5
-rw-r--r--  arch/i386/kernel/setup.c                    7
-rw-r--r--  arch/i386/kernel/smpboot.c                 62
-rw-r--r--  arch/i386/kernel/time.c                     7
-rw-r--r--  arch/i386/kernel/traps.c                   73
-rw-r--r--  arch/i386/kernel/vsyscall.lds.S             1
16 files changed, 134 insertions, 106 deletions
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 1b452a1665c4..ab98fc21a541 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -59,7 +59,8 @@ quiet_cmd_syscall = SYSCALL $@
 
 export CPPFLAGS_vsyscall.lds += -P -C -U$(ARCH)
 
-vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1
+vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1 \
+		 $(call ld-option, -Wl$(comma)--hash-style=sysv)
 SYSCFLAGS_vsyscall-sysenter.so = $(vsyscall-flags)
 SYSCFLAGS_vsyscall-int80.so = $(vsyscall-flags)
 
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index e9f0b928b0a9..5c43be47587f 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -759,7 +759,7 @@ static int __cpuinit cache_sysfs_init(void)
 	if (num_cache_leaves == 0)
 		return 0;
 
-	register_cpu_notifier(&cacheinfo_cpu_notifier);
+	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
 
 	for_each_online_cpu(i) {
 		cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE,
diff --git a/arch/i386/kernel/cpu/mcheck/mce.h b/arch/i386/kernel/cpu/mcheck/mce.h
index dc2416dfef15..84fd4cf7d0fb 100644
--- a/arch/i386/kernel/cpu/mcheck/mce.h
+++ b/arch/i386/kernel/cpu/mcheck/mce.h
@@ -9,6 +9,6 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c);
 /* Call the installed machine check handler for this CPU setup. */
 extern fastcall void (*machine_check_vector)(struct pt_regs *, long error_code);
 
-extern int mce_disabled __initdata;
+extern int mce_disabled;
 extern int nr_mce_banks;
 
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index 48f0f62f781c..5b96f038367f 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -90,7 +90,7 @@ static void crash_save_self(struct pt_regs *regs)
 	crash_save_this_cpu(regs, cpu);
 }
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 static atomic_t waiting_for_crash_ipi;
 
 static int crash_nmi_callback(struct pt_regs *regs, int cpu)
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index d9a260f2efb4..37a7d2eaf4a0 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -204,7 +204,7 @@ VM_MASK = 0x00020000
 ENTRY(ret_from_fork)
 	CFI_STARTPROC
 	pushl %eax
-	CFI_ADJUST_CFA_OFFSET -4
+	CFI_ADJUST_CFA_OFFSET 4
 	call schedule_tail
 	GET_THREAD_INFO(%ebp)
 	popl %eax
diff --git a/arch/i386/kernel/ioport.c b/arch/i386/kernel/ioport.c
index 79026f026b85..498e8bc197d5 100644
--- a/arch/i386/kernel/ioport.c
+++ b/arch/i386/kernel/ioport.c
@@ -79,6 +79,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
 
 		memset(bitmap, 0xff, IO_BITMAP_BYTES);
 		t->io_bitmap_ptr = bitmap;
+		set_thread_flag(TIF_IO_BITMAP);
 	}
 
 	/*
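
The ioport.c hunk above (together with the process.c changes further down) makes I/O-bitmap ownership visible as a per-task thread flag instead of a pointer test. Below is a minimal standalone model of that flag pattern — the helpers are simplified stand-ins for the kernel's atomic bitops on thread_info->flags, and the bit number is illustrative only:

    #include <stdio.h>

    #define TIF_IO_BITMAP 17  /* illustrative bit number, not the kernel's */

    static unsigned long thread_flags;  /* stand-in for thread_info->flags */

    static void set_thread_flag(int f)   { thread_flags |= 1UL << f; }
    static void clear_thread_flag(int f) { thread_flags &= ~(1UL << f); }
    static int  test_thread_flag(int f)  { return !!(thread_flags & (1UL << f)); }

    int main(void)
    {
            set_thread_flag(TIF_IO_BITMAP);       /* sys_ioperm() allocated a bitmap */
            if (test_thread_flag(TIF_IO_BITMAP))  /* cheap test in exit_thread() etc. */
                    printf("task owns an I/O bitmap\n");
            clear_thread_flag(TIF_IO_BITMAP);     /* bitmap freed again */
            return 0;
    }
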
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index de2e16e561c0..afe6505ca0b3 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -256,11 +256,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 	int ret = 0;
 	kprobe_opcode_t *addr;
 	struct kprobe_ctlblk *kcb;
-#ifdef CONFIG_PREEMPT
-	unsigned pre_preempt_count = preempt_count();
-#else
-	unsigned pre_preempt_count = 1;
-#endif
 
 	addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
 
@@ -338,13 +333,15 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 		return 1;
 
 ss_probe:
-	if (pre_preempt_count && p->ainsn.boostable == 1 && !p->post_handler){
+#ifndef CONFIG_PREEMPT
+	if (p->ainsn.boostable == 1 && !p->post_handler){
 		/* Boost up -- we can execute copied instructions directly */
 		reset_current_kprobe();
 		regs->eip = (unsigned long)p->ainsn.insn;
 		preempt_enable_no_resched();
 		return 1;
 	}
+#endif
 	prepare_singlestep(p, regs);
 	kcb->kprobe_status = KPROBE_HIT_SS;
 	return 1;
diff --git a/arch/i386/kernel/machine_kexec.c b/arch/i386/kernel/machine_kexec.c
index 511abe52a94e..6b1ae6ba76f0 100644
--- a/arch/i386/kernel/machine_kexec.c
+++ b/arch/i386/kernel/machine_kexec.c
@@ -189,14 +189,11 @@ NORET_TYPE void machine_kexec(struct kimage *image)
 	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
 						relocate_new_kernel_size);
 
-	/* The segment registers are funny things, they are
-	 * automatically loaded from a table, in memory wherever you
-	 * set them to a specific selector, but this table is never
-	 * accessed again you set the segment to a different selector.
-	 *
-	 * The more common model is are caches where the behide
-	 * the scenes work is done, but is also dropped at arbitrary
-	 * times.
+	/* The segment registers are funny things, they have both a
+	 * visible and an invisible part.  Whenever the visible part is
+	 * set to a specific selector, the invisible part is loaded
+	 * with from a table in memory.  At no other time is the
+	 * descriptor table in memory accessed.
 	 *
 	 * I take advantage of this here by force loading the
 	 * segments, before I zap the gdt with an invalid value.
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 2dd928a84645..acb351478e42 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -575,6 +575,7 @@ void touch_nmi_watchdog (void)
 	 */
 	touch_softlockup_watchdog();
 }
+EXPORT_SYMBOL(touch_nmi_watchdog);
 
 extern void die_nmi(struct pt_regs *, const char *msg);
 
580 581
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 94e2c87edeaa..8657c739656a 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -359,16 +359,16 @@ EXPORT_SYMBOL(kernel_thread);
  */
 void exit_thread(void)
 {
-	struct task_struct *tsk = current;
-	struct thread_struct *t = &tsk->thread;
-
 	/* The process may have allocated an io port bitmap... nuke it. */
-	if (unlikely(NULL != t->io_bitmap_ptr)) {
+	if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
+		struct task_struct *tsk = current;
+		struct thread_struct *t = &tsk->thread;
 		int cpu = get_cpu();
 		struct tss_struct *tss = &per_cpu(init_tss, cpu);
 
 		kfree(t->io_bitmap_ptr);
 		t->io_bitmap_ptr = NULL;
+		clear_thread_flag(TIF_IO_BITMAP);
 		/*
 		 * Careful, clear this in the TSS too:
 		 */
@@ -387,6 +387,7 @@ void flush_thread(void)
 
 	memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
 	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
+	clear_tsk_thread_flag(tsk, TIF_DEBUG);
 	/*
 	 * Forget coprocessor state..
 	 */
@@ -431,7 +432,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
 	savesegment(gs,p->thread.gs);
 
 	tsk = current;
-	if (unlikely(NULL != tsk->thread.io_bitmap_ptr)) {
+	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
 		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
 		if (!p->thread.io_bitmap_ptr) {
 			p->thread.io_bitmap_max = 0;
@@ -439,6 +440,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
 		}
 		memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
 			IO_BITMAP_BYTES);
+		set_tsk_thread_flag(p, TIF_IO_BITMAP);
 	}
 
 	/*
@@ -533,10 +535,24 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 	return 1;
 }
 
-static inline void
-handle_io_bitmap(struct thread_struct *next, struct tss_struct *tss)
+static noinline void __switch_to_xtra(struct task_struct *next_p,
+				      struct tss_struct *tss)
 {
-	if (!next->io_bitmap_ptr) {
+	struct thread_struct *next;
+
+	next = &next_p->thread;
+
+	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
+		set_debugreg(next->debugreg[0], 0);
+		set_debugreg(next->debugreg[1], 1);
+		set_debugreg(next->debugreg[2], 2);
+		set_debugreg(next->debugreg[3], 3);
+		/* no 4 and 5 */
+		set_debugreg(next->debugreg[6], 6);
+		set_debugreg(next->debugreg[7], 7);
+	}
+
+	if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
 		/*
 		 * Disable the bitmap via an invalid offset. We still cache
 		 * the previous bitmap owner and the IO bitmap contents:
@@ -544,6 +560,7 @@ handle_io_bitmap(struct thread_struct *next, struct tss_struct *tss)
 		tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
 		return;
 	}
+
 	if (likely(next == tss->io_bitmap_owner)) {
 		/*
 		 * Previous owner of the bitmap (hence the bitmap content)
@@ -671,20 +688,11 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	set_iopl_mask(next->iopl);
 
 	/*
-	 * Now maybe reload the debug registers
+	 * Now maybe handle debug registers and/or IO bitmaps
 	 */
-	if (unlikely(next->debugreg[7])) {
-		set_debugreg(next->debugreg[0], 0);
-		set_debugreg(next->debugreg[1], 1);
-		set_debugreg(next->debugreg[2], 2);
-		set_debugreg(next->debugreg[3], 3);
-		/* no 4 and 5 */
-		set_debugreg(next->debugreg[6], 6);
-		set_debugreg(next->debugreg[7], 7);
-	}
-
-	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
-		handle_io_bitmap(next, tss);
+	if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
+	    || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
+		__switch_to_xtra(next_p, tss);
 
 	disable_tsc(prev_p, next_p);
 
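
The process.c rework above collects every rarely-needed piece of context-switch work (debug registers, I/O bitmap) behind a single flags test on the fast path, with a noinline slow-path helper. A rough userspace sketch of that shape — the flag bits and the _TIF_WORK_CTXSW-style mask here are invented to mirror the idea, not copied from the kernel headers:

    #include <stdio.h>

    #define TIF_DEBUG       1
    #define TIF_IO_BITMAP   2
    #define _TIF_DEBUG      (1UL << TIF_DEBUG)
    #define _TIF_IO_BITMAP  (1UL << TIF_IO_BITMAP)
    /* one mask covering everything the switch path might have to do */
    #define _TIF_WORK_CTXSW (_TIF_DEBUG | _TIF_IO_BITMAP)

    struct task { unsigned long flags; const char *name; };

    /* noinline keeps the cold path out of the hot switch code (GCC syntax) */
    static void __attribute__((noinline)) switch_to_xtra(struct task *next)
    {
            if (next->flags & _TIF_DEBUG)
                    printf("%s: reload debug registers\n", next->name);
            if (!(next->flags & _TIF_IO_BITMAP))
                    printf("%s: disable TSS io bitmap\n", next->name);
    }

    static void context_switch(struct task *prev, struct task *next)
    {
            /* fast path: one test covers all the rare cases */
            if ((next->flags & _TIF_WORK_CTXSW) || (prev->flags & _TIF_IO_BITMAP))
                    switch_to_xtra(next);
    }

    int main(void)
    {
            struct task a = { _TIF_DEBUG, "a" }, b = { 0, "b" };
            context_switch(&b, &a);
            return 0;
    }
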
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index fd7eaf7866e0..d3db03f4085d 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -468,8 +468,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 			for(i=0; i<4; i++)
 				if ((0x5f54 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
 					goto out_tsk;
+			if (data)
+				set_tsk_thread_flag(child, TIF_DEBUG);
+			else
+				clear_tsk_thread_flag(child, TIF_DEBUG);
 		}
-
 		addr -= (long) &dummy->u_debugreg;
 		addr = addr >> 2;
 		child->thread.debugreg[addr] = data;
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 08c00d20f162..f1682206d304 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -26,7 +26,7 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
-#include <linux/tty.h>
+#include <linux/screen_info.h>
 #include <linux/ioport.h>
 #include <linux/acpi.h>
 #include <linux/apm_bios.h>
@@ -1327,7 +1327,10 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat
 		res->start = e820.map[i].addr;
 		res->end = res->start + e820.map[i].size - 1;
 		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-		request_resource(&iomem_resource, res);
+		if (request_resource(&iomem_resource, res)) {
+			kfree(res);
+			continue;
+		}
 		if (e820.map[i].type == E820_RAM) {
 			/*
 			 * We don't know which RAM region contains kernel data,
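
The setup.c hunk stops ignoring the return value of request_resource(): on a conflict the freshly allocated resource is now freed and the e820 entry skipped. The same check/free/continue shape, in a self-contained sketch (register_thing() is a made-up stand-in, not a kernel API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct thing { char name[32]; };

    static int register_thing(struct thing *t)
    {
            return strcmp(t->name, "conflict") == 0 ? -1 : 0;  /* fake conflict */
    }

    int main(void)
    {
            const char *names[] = { "ram0", "conflict", "ram1" };
            for (size_t i = 0; i < 3; i++) {
                    struct thing *t = calloc(1, sizeof(*t));
                    if (!t)
                            return 1;
                    snprintf(t->name, sizeof(t->name), "%s", names[i]);
                    if (register_thing(t)) {   /* mirrors: if (request_resource(...)) */
                            free(t);           /*          kfree(res); continue;      */
                            continue;
                    }
                    printf("registered %s\n", t->name);
            }
            return 0;
    }
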
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 6f5fea05f1d7..f948419c888a 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -212,14 +212,20 @@ valid_k7:
  * then we print a warning if not, and always resync.
  */
 
-static atomic_t tsc_start_flag = ATOMIC_INIT(0);
-static atomic_t tsc_count_start = ATOMIC_INIT(0);
-static atomic_t tsc_count_stop = ATOMIC_INIT(0);
-static unsigned long long tsc_values[NR_CPUS];
+static struct {
+	atomic_t start_flag;
+	atomic_t count_start;
+	atomic_t count_stop;
+	unsigned long long values[NR_CPUS];
+} tsc __initdata = {
+	.start_flag = ATOMIC_INIT(0),
+	.count_start = ATOMIC_INIT(0),
+	.count_stop = ATOMIC_INIT(0),
+};
 
 #define NR_LOOPS 5
 
-static void __init synchronize_tsc_bp (void)
+static void __init synchronize_tsc_bp(void)
 {
 	int i;
 	unsigned long long t0;
@@ -233,7 +239,7 @@ static void __init synchronize_tsc_bp (void)
 	/* convert from kcyc/sec to cyc/usec */
 	one_usec = cpu_khz / 1000;
 
-	atomic_set(&tsc_start_flag, 1);
+	atomic_set(&tsc.start_flag, 1);
 	wmb();
 
 	/*
@@ -250,16 +256,16 @@ static void __init synchronize_tsc_bp (void)
 		/*
 		 * all APs synchronize but they loop on '== num_cpus'
 		 */
-		while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
+		while (atomic_read(&tsc.count_start) != num_booting_cpus()-1)
 			cpu_relax();
-		atomic_set(&tsc_count_stop, 0);
+		atomic_set(&tsc.count_stop, 0);
 		wmb();
 		/*
 		 * this lets the APs save their current TSC:
 		 */
-		atomic_inc(&tsc_count_start);
+		atomic_inc(&tsc.count_start);
 
-		rdtscll(tsc_values[smp_processor_id()]);
+		rdtscll(tsc.values[smp_processor_id()]);
 		/*
 		 * We clear the TSC in the last loop:
 		 */
@@ -269,56 +275,54 @@ static void __init synchronize_tsc_bp (void)
 		/*
 		 * Wait for all APs to leave the synchronization point:
 		 */
-		while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
+		while (atomic_read(&tsc.count_stop) != num_booting_cpus()-1)
 			cpu_relax();
-		atomic_set(&tsc_count_start, 0);
+		atomic_set(&tsc.count_start, 0);
 		wmb();
-		atomic_inc(&tsc_count_stop);
+		atomic_inc(&tsc.count_stop);
 	}
 
 	sum = 0;
 	for (i = 0; i < NR_CPUS; i++) {
 		if (cpu_isset(i, cpu_callout_map)) {
-			t0 = tsc_values[i];
+			t0 = tsc.values[i];
 			sum += t0;
 		}
 	}
 	avg = sum;
 	do_div(avg, num_booting_cpus());
 
-	sum = 0;
 	for (i = 0; i < NR_CPUS; i++) {
 		if (!cpu_isset(i, cpu_callout_map))
 			continue;
-		delta = tsc_values[i] - avg;
+		delta = tsc.values[i] - avg;
 		if (delta < 0)
 			delta = -delta;
 		/*
 		 * We report bigger than 2 microseconds clock differences.
 		 */
 		if (delta > 2*one_usec) {
-			long realdelta;
+			long long realdelta;
+
 			if (!buggy) {
 				buggy = 1;
 				printk("\n");
 			}
 			realdelta = delta;
 			do_div(realdelta, one_usec);
-			if (tsc_values[i] < avg)
+			if (tsc.values[i] < avg)
 				realdelta = -realdelta;
 
-			if (realdelta > 0)
-				printk(KERN_INFO "CPU#%d had %ld usecs TSC "
+			if (realdelta)
+				printk(KERN_INFO "CPU#%d had %Ld usecs TSC "
 					"skew, fixed it up.\n", i, realdelta);
 		}
-
-		sum += delta;
 	}
 	if (!buggy)
 		printk("passed.\n");
 }
 
-static void __init synchronize_tsc_ap (void)
+static void __init synchronize_tsc_ap(void)
 {
 	int i;
 
@@ -327,20 +331,20 @@ static void __init synchronize_tsc_ap (void)
 	 * this gets called, so we first wait for the BP to
 	 * finish SMP initialization:
 	 */
-	while (!atomic_read(&tsc_start_flag))
+	while (!atomic_read(&tsc.start_flag))
 		cpu_relax();
 
 	for (i = 0; i < NR_LOOPS; i++) {
-		atomic_inc(&tsc_count_start);
-		while (atomic_read(&tsc_count_start) != num_booting_cpus())
+		atomic_inc(&tsc.count_start);
+		while (atomic_read(&tsc.count_start) != num_booting_cpus())
 			cpu_relax();
 
-		rdtscll(tsc_values[smp_processor_id()]);
+		rdtscll(tsc.values[smp_processor_id()]);
 		if (i == NR_LOOPS-1)
 			write_tsc(0, 0);
 
-		atomic_inc(&tsc_count_stop);
-		while (atomic_read(&tsc_count_stop) != num_booting_cpus())
+		atomic_inc(&tsc.count_stop);
+		while (atomic_read(&tsc.count_stop) != num_booting_cpus())
 			cpu_relax();
 	}
 }
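
The smpboot.c change folds four file-scope TSC-sync variables into one struct tagged __initdata, so the whole block — including the NR_CPUS-sized array — lands in .init.data and is reclaimed when init memory is freed after boot. A toy illustration of the grouping; the section attribute is GCC syntax shown for flavor, and nothing reclaims it in a userspace build:

    #define __initdata __attribute__((section(".init.data")))

    struct tsc_sync {
            int start_flag;               /* atomic_t in the real code */
            int count_start;
            int count_stop;
            unsigned long long values[8]; /* NR_CPUS-sized in the real code */
    };

    static struct tsc_sync tsc __initdata;

    int main(void)
    {
            return tsc.start_flag;        /* reference it so it is emitted */
    }
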
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 316421a7f56f..edd00f6cee37 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -135,7 +135,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 {
 	unsigned long pc = instruction_pointer(regs);
 
-	if (in_lock_functions(pc))
+	if (!user_mode_vm(regs) && in_lock_functions(pc))
 		return *(unsigned long *)(regs->ebp + 4);
 
 	return pc;
@@ -206,15 +206,16 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 unsigned long get_cmos_time(void)
 {
 	unsigned long retval;
+	unsigned long flags;
 
-	spin_lock(&rtc_lock);
+	spin_lock_irqsave(&rtc_lock, flags);
 
 	if (efi_enabled)
 		retval = efi_get_time();
 	else
 		retval = mach_get_cmos_time();
 
-	spin_unlock(&rtc_lock);
+	spin_unlock_irqrestore(&rtc_lock, flags);
 
 	return retval;
 }
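
get_cmos_time() switching from spin_lock() to spin_lock_irqsave() matters because rtc_lock can also be taken from interrupt context: if an interrupt fires on the same CPU while the lock is held, the handler would spin on it forever. A userspace analogue using signal masking in place of local interrupt disabling (sigprocmask() playing the role of local_irq_save()/local_irq_restore()):

    #include <signal.h>
    #include <stdio.h>

    static volatile sig_atomic_t locked;  /* toy lock, single-threaded demo */

    static void handler(int sig)
    {
            (void)sig;
            /* would spin forever here if we arrived while 'locked' was set */
            if (!locked)
                    printf("handler: lock was free, safe\n");
    }

    int main(void)
    {
            sigset_t block, saved;
            signal(SIGALRM, handler);

            sigemptyset(&block);
            sigaddset(&block, SIGALRM);
            sigprocmask(SIG_BLOCK, &block, &saved);  /* ~ local_irq_save(flags) */
            locked = 1;
            raise(SIGALRM);                          /* "interrupt": stays pending */
            locked = 0;
            sigprocmask(SIG_SETMASK, &saved, NULL);  /* ~ local_irq_restore(flags) */
            /* pending SIGALRM is delivered here, after the lock is released */
            return 0;
    }
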
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 2bf8b55b91f8..0d4005dc06c5 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -100,13 +100,13 @@ int register_die_notifier(struct notifier_block *nb)
 	vmalloc_sync_all();
 	return atomic_notifier_chain_register(&i386die_chain, nb);
 }
-EXPORT_SYMBOL(register_die_notifier);
+EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */
 
 int unregister_die_notifier(struct notifier_block *nb)
 {
 	return atomic_notifier_chain_unregister(&i386die_chain, nb);
 }
-EXPORT_SYMBOL(unregister_die_notifier);
+EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
 
 static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
 {
@@ -187,10 +187,21 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		if (unwind_init_blocked(&info, task) == 0)
 			unw_ret = show_trace_unwind(&info, log_lvl);
 	}
-	if (unw_ret > 0) {
-		if (call_trace > 0)
+	if (unw_ret > 0 && !arch_unw_user_mode(&info)) {
+#ifdef CONFIG_STACK_UNWIND
+		print_symbol("DWARF2 unwinder stuck at %s\n",
+			     UNW_PC(&info));
+		if (call_trace == 1) {
+			printk("Leftover inexact backtrace:\n");
+			if (UNW_SP(&info))
+				stack = (void *)UNW_SP(&info);
+		} else if (call_trace > 1)
 			return;
-		printk("%sLegacy call trace:\n", log_lvl);
+		else
+			printk("Full inexact backtrace again:\n");
+#else
+		printk("Inexact backtrace:\n");
+#endif
 	}
 }
 
@@ -324,35 +335,35 @@ void show_registers(struct pt_regs *regs)
 
 static void handle_BUG(struct pt_regs *regs)
 {
+	unsigned long eip = regs->eip;
 	unsigned short ud2;
-	unsigned short line;
-	char *file;
-	char c;
-	unsigned long eip;
-
-	eip = regs->eip;
 
 	if (eip < PAGE_OFFSET)
-		goto no_bug;
+		return;
 	if (__get_user(ud2, (unsigned short __user *)eip))
-		goto no_bug;
+		return;
 	if (ud2 != 0x0b0f)
-		goto no_bug;
-	if (__get_user(line, (unsigned short __user *)(eip + 2)))
-		goto bug;
-	if (__get_user(file, (char * __user *)(eip + 4)) ||
-		(unsigned long)file < PAGE_OFFSET || __get_user(c, file))
-		file = "<bad filename>";
+		return;
 
 	printk(KERN_EMERG "------------[ cut here ]------------\n");
-	printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
 
-no_bug:
-	return;
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+	do {
+		unsigned short line;
+		char *file;
+		char c;
+
+		if (__get_user(line, (unsigned short __user *)(eip + 2)))
+			break;
+		if (__get_user(file, (char * __user *)(eip + 4)) ||
+		    (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
+			file = "<bad filename>";
 
-	/* Here we know it was a BUG but file-n-line is unavailable */
-bug:
-	printk(KERN_EMERG "Kernel BUG\n");
+		printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
+		return;
+	} while (0);
+#endif
+	printk(KERN_EMERG "Kernel BUG at [verbose debug info unavailable]\n");
 }
 
 /* This is gone through when something in the kernel
@@ -442,11 +453,9 @@ void die(const char * str, struct pt_regs * regs, long err)
 	if (in_interrupt())
 		panic("Fatal exception in interrupt");
 
-	if (panic_on_oops) {
-		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
-		ssleep(5);
-		panic("Fatal exception");
-	}
+	if (panic_on_oops)
+		panic("Fatal exception: panic_on_oops");
+
 	oops_exit();
 	do_exit(SIGSEGV);
 }
@@ -1238,8 +1247,10 @@ static int __init call_trace_setup(char *s)
 		call_trace = -1;
 	else if (strcmp(s, "both") == 0)
 		call_trace = 0;
-	else if (strcmp(s, "new") == 0)
+	else if (strcmp(s, "newfallback") == 0)
 		call_trace = 1;
+	else if (strcmp(s, "new") == 0)
+		call_trace = 2;
 	return 1;
 }
 __setup("call_trace=", call_trace_setup);
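
The handle_BUG() rewrite in the traps.c diff above uses a do { ... } while (0) block so the CONFIG_DEBUG_BUGVERBOSE path can bail out with break and fall through to the generic message. A minimal model of that control flow, with a made-up fetch() standing in for the __get_user() probes:

    #include <stdio.h>

    static int fetch(int ok, int *out) { *out = 42; return ok ? 0 : -1; }

    static void report(int have_info)
    {
            do {
                    int line;
                    if (fetch(have_info, &line))
                            break;                  /* like __get_user() failing */
                    printf("kernel BUG at file:%d!\n", line);
                    return;                         /* verbose path done */
            } while (0);
            printf("Kernel BUG at [verbose debug info unavailable]\n");
    }

    int main(void)
    {
            report(1);
            report(0);
            return 0;
    }
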
diff --git a/arch/i386/kernel/vsyscall.lds.S b/arch/i386/kernel/vsyscall.lds.S
index e26975fc68b6..f66cd11adb72 100644
--- a/arch/i386/kernel/vsyscall.lds.S
+++ b/arch/i386/kernel/vsyscall.lds.S
@@ -10,6 +10,7 @@ SECTIONS
   . = VDSO_PRELINK + SIZEOF_HEADERS;
 
   .hash           : { *(.hash) }		:text
+  .gnu.hash       : { *(.gnu.hash) }
   .dynsym         : { *(.dynsym) }
   .dynstr         : { *(.dynstr) }
   .gnu.version    : { *(.gnu.version) }