Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/Kconfig                            |   2
-rw-r--r--  arch/i386/kernel/Makefile                    |   3
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/Kconfig         |   3
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c  |   3
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/longhaul.c      | 221
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c       |   2
-rw-r--r--  arch/i386/kernel/cpu/mcheck/mce.h            |   2
-rw-r--r--  arch/i386/kernel/entry.S                     |   2
-rw-r--r--  arch/i386/kernel/kprobes.c                   |   9
-rw-r--r--  arch/i386/kernel/machine_kexec.c             |  13
-rw-r--r--  arch/i386/kernel/nmi.c                       |   1
-rw-r--r--  arch/i386/kernel/process.c                   |   4
-rw-r--r--  arch/i386/kernel/smpboot.c                   |  62
-rw-r--r--  arch/i386/kernel/time.c                      |   2
-rw-r--r--  arch/i386/kernel/traps.c                     |  29
-rw-r--r--  arch/i386/kernel/vsyscall.lds.S              |   1
16 files changed, 204 insertions(+), 155 deletions(-)
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index daa75ce4b777..f71fb4a029cb 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -672,7 +672,7 @@ config MTRR
 	  See <file:Documentation/mtrr.txt> for more information.
 
 config EFI
-	bool "Boot from EFI support (EXPERIMENTAL)"
+	bool "Boot from EFI support"
 	depends on ACPI
 	default n
 	---help---
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 1b452a1665c4..ab98fc21a541 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -59,7 +59,8 @@ quiet_cmd_syscall = SYSCALL $@
 
 export CPPFLAGS_vsyscall.lds += -P -C -U$(ARCH)
 
-vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1
+vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1 \
+		 $(call ld-option, -Wl$(comma)--hash-style=sysv)
SYSCFLAGS_vsyscall-sysenter.so = $(vsyscall-flags)
SYSCFLAGS_vsyscall-int80.so = $(vsyscall-flags)
 
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index e44a4c6a4fe5..ccc1edff5c97 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -96,6 +96,7 @@ config X86_POWERNOW_K8_ACPI
 
 config X86_GX_SUSPMOD
 	tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
+	depends on PCI
 	help
 	  This add the CPUFreq driver for NatSemi Geode processors which
 	  support suspend modulation.
@@ -202,7 +203,7 @@ config X86_LONGRUN
 config X86_LONGHAUL
 	tristate "VIA Cyrix III Longhaul"
 	select CPU_FREQ_TABLE
-	depends on BROKEN
+	depends on ACPI_PROCESSOR
 	help
 	  This adds the CPUFreq driver for VIA Samuel/CyrixIII,
 	  VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 567b39bea07e..efb41e81351c 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -384,8 +384,7 @@ static int acpi_cpufreq_early_init_acpi(void)
 	}
 
 	/* Do initialization in ACPI core */
-	acpi_processor_preregister_performance(acpi_perf_data);
-	return 0;
+	return acpi_processor_preregister_performance(acpi_perf_data);
 }
 
 static int
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index 146f607e9c44..4f2c3aeef724 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -29,11 +29,13 @@
 #include <linux/cpufreq.h>
 #include <linux/slab.h>
 #include <linux/string.h>
-#include <linux/pci.h>
 
 #include <asm/msr.h>
 #include <asm/timex.h>
 #include <asm/io.h>
+#include <asm/acpi.h>
+#include <linux/acpi.h>
+#include <acpi/processor.h>
 
 #include "longhaul.h"
 
@@ -56,6 +58,8 @@ static int minvid, maxvid;
 static unsigned int minmult, maxmult;
 static int can_scale_voltage;
 static int vrmrev;
+static struct acpi_processor *pr = NULL;
+static struct acpi_processor_cx *cx = NULL;
 
 /* Module parameters */
 static int dont_scale_voltage;
@@ -118,84 +122,65 @@ static int longhaul_get_cpu_mult(void)
 	return eblcr_table[invalue];
 }
 
+/* For processor with BCR2 MSR */
 
-static void do_powersaver(union msr_longhaul *longhaul,
-			unsigned int clock_ratio_index)
+static void do_longhaul1(int cx_address, unsigned int clock_ratio_index)
 {
-	struct pci_dev *dev;
-	unsigned long flags;
-	unsigned int tmp_mask;
-	int version;
-	int i;
-	u16 pci_cmd;
-	u16 cmd_state[64];
+	union msr_bcr2 bcr2;
+	u32 t;
 
-	switch (cpu_model) {
-	case CPU_EZRA_T:
-		version = 3;
-		break;
-	case CPU_NEHEMIAH:
-		version = 0xf;
-		break;
-	default:
-		return;
-	}
+	rdmsrl(MSR_VIA_BCR2, bcr2.val);
+	/* Enable software clock multiplier */
+	bcr2.bits.ESOFTBF = 1;
+	bcr2.bits.CLOCKMUL = clock_ratio_index;
 
-	rdmsrl(MSR_VIA_LONGHAUL, longhaul->val);
-	longhaul->bits.SoftBusRatio = clock_ratio_index & 0xf;
-	longhaul->bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
-	longhaul->bits.EnableSoftBusRatio = 1;
-	longhaul->bits.RevisionKey = 0;
+	/* Sync to timer tick */
+	safe_halt();
+	ACPI_FLUSH_CPU_CACHE();
+	/* Change frequency on next halt or sleep */
+	wrmsrl(MSR_VIA_BCR2, bcr2.val);
+	/* Invoke C3 */
+	inb(cx_address);
+	/* Dummy op - must do something useless after P_LVL3 read */
+	t = inl(acpi_fadt.xpm_tmr_blk.address);
+
+	/* Disable software clock multiplier */
+	local_irq_disable();
+	rdmsrl(MSR_VIA_BCR2, bcr2.val);
+	bcr2.bits.ESOFTBF = 0;
+	wrmsrl(MSR_VIA_BCR2, bcr2.val);
+}
 
-	preempt_disable();
-	local_irq_save(flags);
+/* For processor with Longhaul MSR */
 
-	/*
-	 * get current pci bus master state for all devices
-	 * and clear bus master bit
-	 */
-	dev = NULL;
-	i = 0;
-	do {
-		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
-		if (dev != NULL) {
-			pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
-			cmd_state[i++] = pci_cmd;
-			pci_cmd &= ~PCI_COMMAND_MASTER;
-			pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
-		}
-	} while (dev != NULL);
+static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
+{
+	union msr_longhaul longhaul;
+	u32 t;
 
-	tmp_mask=inb(0x21);	/* works on C3. save mask. */
-	outb(0xFE,0x21);	/* TMR0 only */
-	outb(0xFF,0x80);	/* delay */
+	rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+	longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
+	longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf;
+	longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
+	longhaul.bits.EnableSoftBusRatio = 1;
 
+	/* Sync to timer tick */
 	safe_halt();
-	wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
-	halt();
-
+	ACPI_FLUSH_CPU_CACHE();
+	/* Change frequency on next halt or sleep */
+	wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+	/* Invoke C3 */
+	inb(cx_address);
+	/* Dummy op - must do something useless after P_LVL3 read */
+	t = inl(acpi_fadt.xpm_tmr_blk.address);
+
+	/* Disable bus ratio bit */
 	local_irq_disable();
-
-	outb(tmp_mask,0x21);	/* restore mask */
-
-	/* restore pci bus master state for all devices */
-	dev = NULL;
-	i = 0;
-	do {
-		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
-		if (dev != NULL) {
-			pci_cmd = cmd_state[i++];
-			pci_write_config_byte(dev, PCI_COMMAND, pci_cmd);
-		}
-	} while (dev != NULL);
-	local_irq_restore(flags);
-	preempt_enable();
-
-	/* disable bus ratio bit */
-	rdmsrl(MSR_VIA_LONGHAUL, longhaul->val);
-	longhaul->bits.EnableSoftBusRatio = 0;
-	longhaul->bits.RevisionKey = version;
-	wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
+	longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
+	longhaul.bits.EnableSoftBusRatio = 0;
+	longhaul.bits.EnableSoftBSEL = 0;
+	longhaul.bits.EnableSoftVID = 0;
+	wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
 }
 
 /**
@@ -209,9 +194,9 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
 {
 	int speed, mult;
 	struct cpufreq_freqs freqs;
-	union msr_longhaul longhaul;
-	union msr_bcr2 bcr2;
 	static unsigned int old_ratio=-1;
+	unsigned long flags;
+	unsigned int pic1_mask, pic2_mask;
 
 	if (old_ratio == clock_ratio_index)
 		return;
@@ -234,6 +219,20 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
 	dprintk ("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
 			fsb, mult/10, mult%10, print_speed(speed/1000));
 
+	preempt_disable();
+	local_irq_save(flags);
+
+	pic2_mask = inb(0xA1);
+	pic1_mask = inb(0x21);	/* works on C3. save mask. */
+	outb(0xFF,0xA1);	/* Overkill */
+	outb(0xFE,0x21);	/* TMR0 only */
+
+	/* Disable bus master arbitration */
+	if (pr->flags.bm_check) {
+		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1,
+				  ACPI_MTX_DO_NOT_LOCK);
+	}
+
 	switch (longhaul_version) {
 
 	/*
@@ -245,20 +244,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
 	 */
 	case TYPE_LONGHAUL_V1:
 	case TYPE_LONGHAUL_V2:
-		rdmsrl (MSR_VIA_BCR2, bcr2.val);
-		/* Enable software clock multiplier */
-		bcr2.bits.ESOFTBF = 1;
-		bcr2.bits.CLOCKMUL = clock_ratio_index;
-		local_irq_disable();
-		wrmsrl (MSR_VIA_BCR2, bcr2.val);
-		safe_halt();
-
-		/* Disable software clock multiplier */
-		rdmsrl (MSR_VIA_BCR2, bcr2.val);
-		bcr2.bits.ESOFTBF = 0;
-		local_irq_disable();
-		wrmsrl (MSR_VIA_BCR2, bcr2.val);
-		local_irq_enable();
+		do_longhaul1(cx->address, clock_ratio_index);
 		break;
 
 	/*
@@ -273,10 +259,22 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
 	 * to work in practice.
 	 */
 	case TYPE_POWERSAVER:
-		do_powersaver(&longhaul, clock_ratio_index);
+		do_powersaver(cx->address, clock_ratio_index);
 		break;
 	}
 
+	/* Enable bus master arbitration */
+	if (pr->flags.bm_check) {
+		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0,
+				  ACPI_MTX_DO_NOT_LOCK);
+	}
+
+	outb(pic2_mask,0xA1);	/* restore mask */
+	outb(pic1_mask,0x21);
+
+	local_irq_restore(flags);
+	preempt_enable();
+
 	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 }
 
@@ -324,9 +322,11 @@ static int guess_fsb(void)
 static int __init longhaul_get_ranges(void)
 {
 	unsigned long invalue;
-	unsigned int multipliers[32]= {
-		50,30,40,100,55,35,45,95,90,70,80,60,120,75,85,65,
-		-1,110,120,-1,135,115,125,105,130,150,160,140,-1,155,-1,145 };
+	unsigned int ezra_t_multipliers[32]= {
+		90,  30,  40, 100,  55,  35,  45,  95,
+		50,  70,  80,  60, 120,  75,  85,  65,
+		-1, 110, 120,  -1, 135, 115, 125, 105,
+		130, 150, 160, 140,  -1, 155,  -1, 145 };
 	unsigned int j, k = 0;
 	union msr_longhaul longhaul;
 	unsigned long lo, hi;
@@ -355,13 +355,13 @@ static int __init longhaul_get_ranges(void)
 		invalue = longhaul.bits.MaxMHzBR;
 		if (longhaul.bits.MaxMHzBR4)
 			invalue += 16;
-		maxmult=multipliers[invalue];
+		maxmult=ezra_t_multipliers[invalue];
 
 		invalue = longhaul.bits.MinMHzBR;
 		if (longhaul.bits.MinMHzBR4 == 1)
 			minmult = 30;
 		else
-			minmult = multipliers[invalue];
+			minmult = ezra_t_multipliers[invalue];
 		fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB];
 		break;
 	}
@@ -527,6 +527,18 @@ static unsigned int longhaul_get(unsigned int cpu)
 	return calc_speed(longhaul_get_cpu_mult());
 }
 
+static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
+					  u32 nesting_level,
+					  void *context, void **return_value)
+{
+	struct acpi_device *d;
+
+	if ( acpi_bus_get_device(obj_handle, &d) ) {
+		return 0;
+	}
+	*return_value = (void *)acpi_driver_data(d);
+	return 1;
+}
 
 static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 {
@@ -534,6 +546,15 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 	char *cpuname=NULL;
 	int ret;
 
+	/* Check ACPI support for C3 state */
+	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
+			    &longhaul_walk_callback, NULL, (void *)&pr);
+	if (pr == NULL) goto err_acpi;
+
+	cx = &pr->power.states[ACPI_STATE_C3];
+	if (cx->address == 0 || cx->latency > 1000) goto err_acpi;
+
+	/* Now check what we have on this motherboard */
 	switch (c->x86_model) {
 	case 6:
 		cpu_model = CPU_SAMUEL;
@@ -634,6 +655,10 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 	cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu);
 
 	return 0;
+
+err_acpi:
+	printk(KERN_ERR PFX "No ACPI support for CPU frequency changes.\n");
+	return -ENODEV;
 }
 
 static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy)
@@ -666,6 +691,18 @@ static int __init longhaul_init(void)
 	if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6)
 		return -ENODEV;
 
+#ifdef CONFIG_SMP
+	if (num_online_cpus() > 1) {
+		printk(KERN_ERR PFX "More than 1 CPU detected, longhaul disabled.\n");
+		return -ENODEV;
+	}
+#endif
+#ifdef CONFIG_X86_IO_APIC
+	if (cpu_has_apic) {
+		printk(KERN_ERR PFX "APIC detected. Longhaul is currently broken in this configuration.\n");
+		return -ENODEV;
+	}
+#endif
 	switch (c->x86_model) {
 	case 6 ... 9:
 		return cpufreq_register_driver(&longhaul_driver);
@@ -699,6 +736,6 @@ MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>");
 MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors.");
 MODULE_LICENSE ("GPL");
 
-module_init(longhaul_init);
+late_initcall(longhaul_init);
 module_exit(longhaul_exit);
 
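The longhaul rewrite above reduces the transition to one fixed sequence: quiesce the machine (mask the PICs, disable bus-master arbitration), arm the new ratio in an MSR, halt to sync with the timer tick, and let a read of the ACPI C3 port latch the change. Below is a minimal user-space sketch of that ordering only; the MSR/port helpers are hypothetical stubs (the kernel uses rdmsrl()/wrmsrl() and inb()/inl()), and the bit positions are illustrative, not VIA's real register layout.

#include <stdint.h>
#include <stdio.h>

/* Stubs standing in for the real MSR and port-I/O primitives. */
static uint64_t fake_msr;
static void msr_read(uint64_t *v)  { *v = fake_msr; }
static void msr_write(uint64_t v)  { fake_msr = v; printf("MSR <- %#llx\n", (unsigned long long)v); }
static void enter_c3(void)         { printf("inb(P_LVL3): CPU enters C3, new ratio latches\n"); }
static void dummy_timer_read(void) { printf("inl(PM timer): dummy op after P_LVL3 read\n"); }

/* Sketch of the do_powersaver() ordering: arm the soft bus ratio in the
 * MSR so the change applies on the next deep sleep, then enter C3 to
 * actually perform the switch, and finally disarm the soft ratio. */
static void set_ratio(unsigned int ratio)
{
	uint64_t val;

	msr_read(&val);
	val = (val & ~0x1fULL) | (ratio & 0x1f); /* SoftBusRatio[4:0] (illustrative) */
	val |= 1ULL << 5;                        /* EnableSoftBusRatio (illustrative) */
	msr_write(val);                          /* takes effect on next halt/sleep */
	enter_c3();                              /* frequency switches here */
	dummy_timer_read();
	val &= ~(1ULL << 5);                     /* disarm the soft ratio again */
	msr_write(val);
}

int main(void)
{
	set_ratio(0x0b);
	return 0;
}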
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index e9f0b928b0a9..5c43be47587f 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -759,7 +759,7 @@ static int __cpuinit cache_sysfs_init(void)
 	if (num_cache_leaves == 0)
 		return 0;
 
-	register_cpu_notifier(&cacheinfo_cpu_notifier);
+	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
 
 	for_each_online_cpu(i) {
 		cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE,
diff --git a/arch/i386/kernel/cpu/mcheck/mce.h b/arch/i386/kernel/cpu/mcheck/mce.h
index dc2416dfef15..84fd4cf7d0fb 100644
--- a/arch/i386/kernel/cpu/mcheck/mce.h
+++ b/arch/i386/kernel/cpu/mcheck/mce.h
@@ -9,6 +9,6 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c);
 /* Call the installed machine check handler for this CPU setup. */
 extern fastcall void (*machine_check_vector)(struct pt_regs *, long error_code);
 
-extern int mce_disabled __initdata;
+extern int mce_disabled;
 extern int nr_mce_banks;
 
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index d9a260f2efb4..37a7d2eaf4a0 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -204,7 +204,7 @@ VM_MASK = 0x00020000
 ENTRY(ret_from_fork)
 	CFI_STARTPROC
 	pushl %eax
-	CFI_ADJUST_CFA_OFFSET -4
+	CFI_ADJUST_CFA_OFFSET 4
 	call schedule_tail
 	GET_THREAD_INFO(%ebp)
 	popl %eax
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index de2e16e561c0..afe6505ca0b3 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -256,11 +256,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 	int ret = 0;
 	kprobe_opcode_t *addr;
 	struct kprobe_ctlblk *kcb;
-#ifdef CONFIG_PREEMPT
-	unsigned pre_preempt_count = preempt_count();
-#else
-	unsigned pre_preempt_count = 1;
-#endif
 
 	addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
 
@@ -338,13 +333,15 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 		return 1;
 
 ss_probe:
-	if (pre_preempt_count && p->ainsn.boostable == 1 && !p->post_handler){
+#ifndef CONFIG_PREEMPT
+	if (p->ainsn.boostable == 1 && !p->post_handler){
 		/* Boost up -- we can execute copied instructions directly */
 		reset_current_kprobe();
 		regs->eip = (unsigned long)p->ainsn.insn;
 		preempt_enable_no_resched();
 		return 1;
 	}
+#endif
 	prepare_singlestep(p, regs);
 	kcb->kprobe_status = KPROBE_HIT_SS;
 	return 1;
diff --git a/arch/i386/kernel/machine_kexec.c b/arch/i386/kernel/machine_kexec.c
index 511abe52a94e..6b1ae6ba76f0 100644
--- a/arch/i386/kernel/machine_kexec.c
+++ b/arch/i386/kernel/machine_kexec.c
@@ -189,14 +189,11 @@ NORET_TYPE void machine_kexec(struct kimage *image)
 	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
 				relocate_new_kernel_size);
 
-	/* The segment registers are funny things, they are
-	 * automatically loaded from a table, in memory wherever you
-	 * set them to a specific selector, but this table is never
-	 * accessed again you set the segment to a different selector.
-	 *
-	 * The more common model is are caches where the behide
-	 * the scenes work is done, but is also dropped at arbitrary
-	 * times.
+	/* The segment registers are funny things, they have both a
+	 * visible and an invisible part.  Whenever the visible part is
+	 * set to a specific selector, the invisible part is loaded
+	 * from a table in memory.  At no other time is the
+	 * descriptor table in memory accessed.
 	 *
 	 * I take advantage of this here by force loading the
 	 * segments, before I zap the gdt with an invalid value.
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 2dd928a84645..acb351478e42 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -575,6 +575,7 @@ void touch_nmi_watchdog (void)
 	 */
 	touch_softlockup_watchdog();
 }
+EXPORT_SYMBOL(touch_nmi_watchdog);
 
 extern void die_nmi(struct pt_regs *, const char *msg);
 
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 923bb292f47f..8657c739656a 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -690,8 +690,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	/*
 	 * Now maybe handle debug registers and/or IO bitmaps
 	 */
-	if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
-	    || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP))
+	if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
+	    || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
 		__switch_to_xtra(next_p, tss);
 
 	disable_tsc(prev_p, next_p);
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 6f5fea05f1d7..f948419c888a 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -212,14 +212,20 @@ valid_k7:
  * then we print a warning if not, and always resync.
  */
 
-static atomic_t tsc_start_flag = ATOMIC_INIT(0);
-static atomic_t tsc_count_start = ATOMIC_INIT(0);
-static atomic_t tsc_count_stop = ATOMIC_INIT(0);
-static unsigned long long tsc_values[NR_CPUS];
+static struct {
+	atomic_t start_flag;
+	atomic_t count_start;
+	atomic_t count_stop;
+	unsigned long long values[NR_CPUS];
+} tsc __initdata = {
+	.start_flag = ATOMIC_INIT(0),
+	.count_start = ATOMIC_INIT(0),
+	.count_stop = ATOMIC_INIT(0),
+};
 
 #define NR_LOOPS 5
 
-static void __init synchronize_tsc_bp (void)
+static void __init synchronize_tsc_bp(void)
 {
 	int i;
 	unsigned long long t0;
@@ -233,7 +239,7 @@ static void __init synchronize_tsc_bp (void)
 	/* convert from kcyc/sec to cyc/usec */
 	one_usec = cpu_khz / 1000;
 
-	atomic_set(&tsc_start_flag, 1);
+	atomic_set(&tsc.start_flag, 1);
 	wmb();
 
 	/*
@@ -250,16 +256,16 @@ static void __init synchronize_tsc_bp (void)
 		/*
 		 * all APs synchronize but they loop on '== num_cpus'
 		 */
-		while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
+		while (atomic_read(&tsc.count_start) != num_booting_cpus()-1)
 			cpu_relax();
-		atomic_set(&tsc_count_stop, 0);
+		atomic_set(&tsc.count_stop, 0);
 		wmb();
 		/*
 		 * this lets the APs save their current TSC:
 		 */
-		atomic_inc(&tsc_count_start);
+		atomic_inc(&tsc.count_start);
 
-		rdtscll(tsc_values[smp_processor_id()]);
+		rdtscll(tsc.values[smp_processor_id()]);
 		/*
 		 * We clear the TSC in the last loop:
 		 */
@@ -269,56 +275,54 @@ static void __init synchronize_tsc_bp (void)
 		/*
 		 * Wait for all APs to leave the synchronization point:
 		 */
-		while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
+		while (atomic_read(&tsc.count_stop) != num_booting_cpus()-1)
 			cpu_relax();
-		atomic_set(&tsc_count_start, 0);
+		atomic_set(&tsc.count_start, 0);
 		wmb();
-		atomic_inc(&tsc_count_stop);
+		atomic_inc(&tsc.count_stop);
 	}
 
 	sum = 0;
 	for (i = 0; i < NR_CPUS; i++) {
 		if (cpu_isset(i, cpu_callout_map)) {
-			t0 = tsc_values[i];
+			t0 = tsc.values[i];
 			sum += t0;
 		}
 	}
 	avg = sum;
 	do_div(avg, num_booting_cpus());
 
-	sum = 0;
 	for (i = 0; i < NR_CPUS; i++) {
 		if (!cpu_isset(i, cpu_callout_map))
 			continue;
-		delta = tsc_values[i] - avg;
+		delta = tsc.values[i] - avg;
 		if (delta < 0)
 			delta = -delta;
 		/*
 		 * We report bigger than 2 microseconds clock differences.
 		 */
 		if (delta > 2*one_usec) {
-			long realdelta;
+			long long realdelta;
+
 			if (!buggy) {
 				buggy = 1;
 				printk("\n");
 			}
 			realdelta = delta;
 			do_div(realdelta, one_usec);
-			if (tsc_values[i] < avg)
+			if (tsc.values[i] < avg)
 				realdelta = -realdelta;
 
-			if (realdelta > 0)
-				printk(KERN_INFO "CPU#%d had %ld usecs TSC "
+			if (realdelta)
+				printk(KERN_INFO "CPU#%d had %Ld usecs TSC "
 					"skew, fixed it up.\n", i, realdelta);
 		}
-
-		sum += delta;
 	}
 	if (!buggy)
 		printk("passed.\n");
 }
 
-static void __init synchronize_tsc_ap (void)
+static void __init synchronize_tsc_ap(void)
 {
 	int i;
 
@@ -327,20 +331,20 @@ static void __init synchronize_tsc_ap (void)
 	 * this gets called, so we first wait for the BP to
 	 * finish SMP initialization:
 	 */
-	while (!atomic_read(&tsc_start_flag))
+	while (!atomic_read(&tsc.start_flag))
 		cpu_relax();
 
 	for (i = 0; i < NR_LOOPS; i++) {
-		atomic_inc(&tsc_count_start);
-		while (atomic_read(&tsc_count_start) != num_booting_cpus())
+		atomic_inc(&tsc.count_start);
+		while (atomic_read(&tsc.count_start) != num_booting_cpus())
 			cpu_relax();
 
-		rdtscll(tsc_values[smp_processor_id()]);
+		rdtscll(tsc.values[smp_processor_id()]);
 		if (i == NR_LOOPS-1)
 			write_tsc(0, 0);
 
-		atomic_inc(&tsc_count_stop);
-		while (atomic_read(&tsc_count_stop) != num_booting_cpus())
+		atomic_inc(&tsc.count_stop);
+		while (atomic_read(&tsc.count_stop) != num_booting_cpus())
 			cpu_relax();
 	}
 }
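The tsc struct collected above is just a pair of counters driving a two-phase rendezvous: every CPU increments count_start, spins until all have checked in, samples its clock at (roughly) the same instant, then repeats the handshake on count_stop. The same pattern in portable C11 threads, as a sketch (NCPUS and the CLOCK_MONOTONIC read are placeholders for the per-CPU TSC sampling; compile with cc -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define NCPUS 4

static atomic_int count_start, count_stop;
static long long values[NCPUS];

static long long now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static void *sync_thread(void *arg)
{
	int id = (int)(long)arg;

	/* Phase 1: check in, then spin until every thread has arrived. */
	atomic_fetch_add(&count_start, 1);
	while (atomic_load(&count_start) != NCPUS)
		;
	values[id] = now_ns();	/* the kernel samples the TSC here */

	/* Phase 2: the same rendezvous on the way out. */
	atomic_fetch_add(&count_stop, 1);
	while (atomic_load(&count_stop) != NCPUS)
		;
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];
	for (long i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, sync_thread, (void *)i);
	for (int i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	for (int i = 0; i < NCPUS; i++)
		printf("cpu%d: %lld\n", i, values[i]);
	return 0;
}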
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 8705c0f05788..edd00f6cee37 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -135,7 +135,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 {
 	unsigned long pc = instruction_pointer(regs);
 
-	if (in_lock_functions(pc))
+	if (!user_mode_vm(regs) && in_lock_functions(pc))
 		return *(unsigned long *)(regs->ebp + 4);
 
 	return pc;
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 313ac1f7dc5a..0d4005dc06c5 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -187,10 +187,21 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		if (unwind_init_blocked(&info, task) == 0)
 			unw_ret = show_trace_unwind(&info, log_lvl);
 	}
-	if (unw_ret > 0) {
-		if (call_trace > 0)
+	if (unw_ret > 0 && !arch_unw_user_mode(&info)) {
+#ifdef CONFIG_STACK_UNWIND
+		print_symbol("DWARF2 unwinder stuck at %s\n",
+			     UNW_PC(&info));
+		if (call_trace == 1) {
+			printk("Leftover inexact backtrace:\n");
+			if (UNW_SP(&info))
+				stack = (void *)UNW_SP(&info);
+		} else if (call_trace > 1)
 			return;
-		printk("%sLegacy call trace:\n", log_lvl);
+		else
+			printk("Full inexact backtrace again:\n");
+#else
+		printk("Inexact backtrace:\n");
+#endif
 	}
 }
196 207
@@ -442,11 +453,9 @@ void die(const char * str, struct pt_regs * regs, long err)
 	if (in_interrupt())
 		panic("Fatal exception in interrupt");
 
-	if (panic_on_oops) {
-		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
-		ssleep(5);
-		panic("Fatal exception");
-	}
+	if (panic_on_oops)
+		panic("Fatal exception: panic_on_oops");
+
 	oops_exit();
 	do_exit(SIGSEGV);
 }
@@ -1238,8 +1247,10 @@ static int __init call_trace_setup(char *s)
 		call_trace = -1;
 	else if (strcmp(s, "both") == 0)
 		call_trace = 0;
-	else if (strcmp(s, "new") == 0)
+	else if (strcmp(s, "newfallback") == 0)
 		call_trace = 1;
+	else if (strcmp(s, "new") == 0)
+		call_trace = 2;
 	return 1;
 }
 __setup("call_trace=", call_trace_setup);
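Note that strcmp() returns 0 when the strings match, which is why the garbled "== 2" in the posted hunk has been restored to "== 0" above. A stand-alone sketch of the same strcmp-chain option parser, with parse_call_trace() as a hypothetical user-space stand-in for the __setup handler:

#include <stdio.h>
#include <string.h>

static int call_trace = 1;

/* Mirror of call_trace_setup(): map a mode string to a call_trace value. */
static int parse_call_trace(const char *s)
{
	if (strcmp(s, "old") == 0)
		call_trace = -1;
	else if (strcmp(s, "both") == 0)
		call_trace = 0;
	else if (strcmp(s, "newfallback") == 0)
		call_trace = 1;
	else if (strcmp(s, "new") == 0)	/* strcmp() == 0 means "equal" */
		call_trace = 2;
	return 1;
}

int main(void)
{
	parse_call_trace("new");
	printf("call_trace = %d\n", call_trace);
	return 0;
}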
diff --git a/arch/i386/kernel/vsyscall.lds.S b/arch/i386/kernel/vsyscall.lds.S
index e26975fc68b6..f66cd11adb72 100644
--- a/arch/i386/kernel/vsyscall.lds.S
+++ b/arch/i386/kernel/vsyscall.lds.S
@@ -10,6 +10,7 @@ SECTIONS
   . = VDSO_PRELINK + SIZEOF_HEADERS;
 
   .hash           : { *(.hash) }		:text
+  .gnu.hash       : { *(.gnu.hash) }
   .dynsym         : { *(.dynsym) }
   .dynstr         : { *(.dynstr) }
   .gnu.version    : { *(.gnu.version) }