author    Linus Torvalds <torvalds@g5.osdl.org>  2006-06-26 13:51:09 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-06-26 13:51:09 -0400
commit    81a07d7588d376c530d006e24d7981304ce96e16 (patch)
tree      1608e094c88b9702c86cf2e6f65339aab9ea3f3f /arch/i386
parent    8871e73fdbde07d0a41393f7ee30787b65387b36 (diff)
parent    8501a2fbe762b21d2504ed3aca3b52be61b5e6e4 (diff)
Merge branch 'x86-64'
* x86-64: (83 commits)
  [PATCH] x86_64: x86_64 stack usage debugging
  [PATCH] x86_64: (resend) x86_64 stack overflow debugging
  [PATCH] x86_64: msi_apic.c build fix
  [PATCH] x86_64: i386/x86-64 Add nmi watchdog support for new Intel CPUs
  [PATCH] x86_64: Avoid broadcasting NMI IPIs
  [PATCH] x86_64: fix apic error on bootup
  [PATCH] x86_64: enlarge window for stack growth
  [PATCH] x86_64: Minor string functions optimizations
  [PATCH] x86_64: Move export symbols to their C functions
  [PATCH] x86_64: Standardize i386/x86_64 handling of NMI_VECTOR
  [PATCH] x86_64: Fix modular pc speaker
  [PATCH] x86_64: remove sys32_ni_syscall()
  [PATCH] x86_64: Do not use -ffunction-sections for modules
  [PATCH] x86_64: Add cpu_relax to apic_wait_icr_idle
  [PATCH] x86_64: adjust kstack_depth_to_print default
  [PATCH] i386/x86-64: adjust /proc/interrupts column headings
  [PATCH] x86_64: Fix race in cpu_local_* on preemptible kernels
  [PATCH] x86_64: Fix fast check in safe_smp_processor_id
  [PATCH] x86_64: x86_64 setup.c - printing cmp related boottime information
  [PATCH] i386/x86-64/ia64: Move polling flag into thread_info_status
  ...

Manual resolve of trivial conflict in arch/i386/kernel/Makefile
Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/Kconfig                        |  13
-rw-r--r--  arch/i386/boot/Makefile                  |   9
-rw-r--r--  arch/i386/boot/compressed/misc.c         |  32
-rw-r--r--  arch/i386/kernel/Makefile                |   4
-rw-r--r--  arch/i386/kernel/alternative.c           | 118
-rw-r--r--  arch/i386/kernel/apic.c                  |  16
-rw-r--r--  arch/i386/kernel/apm.c                   |   6
-rw-r--r--  arch/i386/kernel/cpu/amd.c               |  16
-rw-r--r--  arch/i386/kernel/cpu/intel.c             |   6
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c   | 113
-rw-r--r--  arch/i386/kernel/crash.c                 |   7
-rw-r--r--  arch/i386/kernel/entry.S                 | 263
-rw-r--r--  arch/i386/kernel/io_apic.c               |  49
-rw-r--r--  arch/i386/kernel/irq.c                   |   2
-rw-r--r--  arch/i386/kernel/nmi.c                   |  72
-rw-r--r--  arch/i386/kernel/process.c               |   8
-rw-r--r--  arch/i386/kernel/smp.c                   |  12
-rw-r--r--  arch/i386/kernel/smpboot.c               |   1
-rw-r--r--  arch/i386/kernel/traps.c                 |  70
-rw-r--r--  arch/i386/kernel/vmlinux.lds.S           |   9
-rw-r--r--  arch/i386/oprofile/op_model_athlon.c     |   1
-rw-r--r--  arch/i386/oprofile/op_model_p4.c         |   1
-rw-r--r--  arch/i386/oprofile/op_model_ppro.c       |   1
23 files changed, 689 insertions(+), 140 deletions(-)
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 374fb50608a0..f3eaf22f273d 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -328,6 +328,15 @@ config X86_MCE_P4THERMAL
 	  Enabling this feature will cause a message to be printed when the P4
 	  enters thermal throttling.
 
+config VM86
+	default y
+	bool "Enable VM86 support" if EMBEDDED
+	help
+	  This option is required by programs like DOSEMU to run 16-bit legacy
+	  code on X86 processors. It also may be needed by software like
+	  XFree86 to initialize some video cards via BIOS. Disabling this
+	  option saves about 6k.
+
 config TOSHIBA
 	tristate "Toshiba Laptop support"
 	---help---
@@ -1068,6 +1077,10 @@ config SCx200HR_TIMER
 	  processor goes idle (as is done by the scheduler).  The
 	  other workaround is idle=poll boot option.
 
+config K8_NB
+	def_bool y
+	depends on AGP_AMD64
+
 source "drivers/pcmcia/Kconfig"
 
 source "drivers/pci/hotplug/Kconfig"
diff --git a/arch/i386/boot/Makefile b/arch/i386/boot/Makefile
index 33e55476381b..e97946626064 100644
--- a/arch/i386/boot/Makefile
+++ b/arch/i386/boot/Makefile
@@ -109,8 +109,13 @@ fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf
 isoimage: $(BOOTIMAGE)
 	-rm -rf $(obj)/isoimage
 	mkdir $(obj)/isoimage
-	cp `echo /usr/lib*/syslinux/isolinux.bin | awk '{ print $1; }'` \
-		$(obj)/isoimage
+	for i in lib lib64 share end ; do \
+		if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
+			cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
+			break ; \
+		fi ; \
+		if [ $$i = end ] ; then exit 1 ; fi ; \
+	done
 	cp $(BOOTIMAGE) $(obj)/isoimage/linux
 	echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg
 	if [ -f '$(FDINITRD)' ] ; then \
diff --git a/arch/i386/boot/compressed/misc.c b/arch/i386/boot/compressed/misc.c
index f19f3a7492a5..b2ccd543410d 100644
--- a/arch/i386/boot/compressed/misc.c
+++ b/arch/i386/boot/compressed/misc.c
@@ -24,14 +24,6 @@
 
 #undef memset
 #undef memcpy
-
-/*
- * Why do we do this? Don't ask me..
- *
- * Incomprehensible are the ways of bootloaders.
- */
-static void* memset(void *, int, size_t);
-static void* memcpy(void *, __const void *, size_t);
 #define memzero(s, n)	memset ((s), 0, (n))
 
 typedef unsigned char  uch;
@@ -93,7 +85,7 @@ static unsigned char *real_mode; /* Pointer to real-mode data */
 #endif
 #define RM_SCREEN_INFO (*(struct screen_info *)(real_mode+0))
 
-extern char input_data[];
+extern unsigned char input_data[];
 extern int input_len;
 
 static long bytes_out = 0;
@@ -103,6 +95,9 @@ static unsigned long output_ptr = 0;
 static void *malloc(int size);
 static void free(void *where);
 
+static void *memset(void *s, int c, unsigned n);
+static void *memcpy(void *dest, const void *src, unsigned n);
+
 static void putstr(const char *);
 
 extern int end;
@@ -205,7 +200,7 @@ static void putstr(const char *s)
 	outb_p(0xff & (pos >> 1), vidport+1);
 }
 
-static void* memset(void* s, int c, size_t n)
+static void* memset(void* s, int c, unsigned n)
 {
 	int i;
 	char *ss = (char*)s;
@@ -214,14 +209,13 @@ static void* memset(void* s, int c, size_t n)
 	return s;
 }
 
-static void* memcpy(void* __dest, __const void* __src,
-			    size_t __n)
+static void* memcpy(void* dest, const void* src, unsigned n)
 {
 	int i;
-	char *d = (char *)__dest, *s = (char *)__src;
+	char *d = (char *)dest, *s = (char *)src;
 
-	for (i=0;i<__n;i++) d[i] = s[i];
-	return __dest;
+	for (i=0;i<n;i++) d[i] = s[i];
+	return dest;
 }
 
 /* ===========================================================================
@@ -309,7 +303,7 @@ static void setup_normal_output_buffer(void)
 #else
 	if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory");
 #endif
-	output_data = (char *)__PHYSICAL_START; /* Normally Points to 1M */
+	output_data = (unsigned char *)__PHYSICAL_START; /* Normally Points to 1M */
 	free_mem_end_ptr = (long)real_mode;
 }
 
@@ -324,11 +318,9 @@ static void setup_output_buffer_if_we_run_high(struct moveparams *mv)
 #ifdef STANDARD_MEMORY_BIOS_CALL
 	if (RM_EXT_MEM_K < (3*1024)) error("Less than 4MB of memory");
 #else
-	if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) <
-		(3*1024))
-		error("Less than 4MB of memory");
+	if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory");
 #endif
-	mv->low_buffer_start = output_data = (char *)LOW_BUFFER_START;
+	mv->low_buffer_start = output_data = (unsigned char *)LOW_BUFFER_START;
 	low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX
 	  ? LOW_BUFFER_MAX : (unsigned int)real_mode) & ~0xfff;
 	low_buffer_size = low_buffer_end - LOW_BUFFER_START;
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 0fac85df64f1..5e70c2fb273a 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_DOUBLEFAULT) += doublefault.o
 obj-$(CONFIG_VM86)		+= vm86.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 obj-$(CONFIG_HPET_TIMER)	+= hpet.o
+obj-$(CONFIG_K8_NB)		+= k8.o
 
 EXTRA_AFLAGS   := -traditional
 
@@ -76,3 +77,6 @@ SYSCFLAGS_vsyscall-syms.o = -r
 $(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
 			$(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE
 	$(call if_changed,syscall)
+
+k8-y                      += ../../x86_64/kernel/k8.o
+
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index 5cbd6f99fb2a..50eb0e03777e 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -4,27 +4,41 @@
 #include <asm/alternative.h>
 #include <asm/sections.h>
 
-#define DEBUG 0
-#if DEBUG
-# define DPRINTK(fmt, args...) printk(fmt, args)
-#else
-# define DPRINTK(fmt, args...)
-#endif
+static int no_replacement = 0;
+static int smp_alt_once = 0;
+static int debug_alternative = 0;
+
+static int __init noreplacement_setup(char *s)
+{
+	no_replacement = 1;
+	return 1;
+}
+static int __init bootonly(char *str)
+{
+	smp_alt_once = 1;
+	return 1;
+}
+static int __init debug_alt(char *str)
+{
+	debug_alternative = 1;
+	return 1;
+}
 
+__setup("noreplacement", noreplacement_setup);
+__setup("smp-alt-boot", bootonly);
+__setup("debug-alternative", debug_alt);
+
+#define DPRINTK(fmt, args...) if (debug_alternative) \
+	printk(KERN_DEBUG fmt, args)
+
+#ifdef GENERIC_NOP1
 /* Use inline assembly to define this because the nops are defined
    as inline assembly strings in the include files and we cannot
    get them easily into strings. */
 asm("\t.data\nintelnops: "
 	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
 	GENERIC_NOP7 GENERIC_NOP8);
-asm("\t.data\nk8nops: "
-	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
-	K8_NOP7 K8_NOP8);
-asm("\t.data\nk7nops: "
-	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
-	K7_NOP7 K7_NOP8);
-
-extern unsigned char intelnops[], k8nops[], k7nops[];
+extern unsigned char intelnops[];
 static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
 	NULL,
 	intelnops,
@@ -36,6 +50,13 @@ static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
 	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
 	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
 };
+#endif
+
+#ifdef K8_NOP1
+asm("\t.data\nk8nops: "
+	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
+	K8_NOP7 K8_NOP8);
+extern unsigned char k8nops[];
 static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
 	NULL,
 	k8nops,
@@ -47,6 +68,13 @@ static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
 	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
 	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
 };
+#endif
+
+#ifdef K7_NOP1
+asm("\t.data\nk7nops: "
+	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
+	K7_NOP7 K7_NOP8);
+extern unsigned char k7nops[];
 static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
 	NULL,
 	k7nops,
@@ -58,6 +86,18 @@ static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
 	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
 	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
 };
+#endif
+
+#ifdef CONFIG_X86_64
+
+extern char __vsyscall_0;
+static inline unsigned char** find_nop_table(void)
+{
+	return k8_nops;
+}
+
+#else /* CONFIG_X86_64 */
+
 static struct nop {
 	int cpuid;
 	unsigned char **noptable;
@@ -67,14 +107,6 @@ static struct nop {
 	{ -1, NULL }
 };
 
-
-extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
-extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
-extern u8 *__smp_locks[], *__smp_locks_end[];
-
-extern u8 __smp_alt_begin[], __smp_alt_end[];
-
-
 static unsigned char** find_nop_table(void)
 {
 	unsigned char **noptable = intel_nops;
@@ -89,6 +121,14 @@ static unsigned char** find_nop_table(void)
 	return noptable;
 }
 
+#endif /* CONFIG_X86_64 */
+
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
+extern u8 *__smp_locks[], *__smp_locks_end[];
+
+extern u8 __smp_alt_begin[], __smp_alt_end[];
+
 /* Replace instructions with better alternatives for this CPU type.
    This runs before SMP is initialized to avoid SMP problems with
    self modifying code. This implies that asymmetric systems where
@@ -99,6 +139,7 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
 {
 	unsigned char **noptable = find_nop_table();
 	struct alt_instr *a;
+	u8 *instr;
 	int diff, i, k;
 
 	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
@@ -106,7 +147,16 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
 		BUG_ON(a->replacementlen > a->instrlen);
 		if (!boot_cpu_has(a->cpuid))
 			continue;
-		memcpy(a->instr, a->replacement, a->replacementlen);
+		instr = a->instr;
+#ifdef CONFIG_X86_64
+		/* vsyscall code is not mapped yet. resolve it manually. */
+		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
+			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
+			DPRINTK("%s: vsyscall fixup: %p => %p\n",
+				__FUNCTION__, a->instr, instr);
+		}
+#endif
+		memcpy(instr, a->replacement, a->replacementlen);
 		diff = a->instrlen - a->replacementlen;
 		/* Pad the rest with nops */
 		for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
@@ -186,14 +236,6 @@ struct smp_alt_module {
 static LIST_HEAD(smp_alt_modules);
 static DEFINE_SPINLOCK(smp_alt);
 
-static int smp_alt_once = 0;
-static int __init bootonly(char *str)
-{
-	smp_alt_once = 1;
-	return 1;
-}
-__setup("smp-alt-boot", bootonly);
-
 void alternatives_smp_module_add(struct module *mod, char *name,
 				 void *locks, void *locks_end,
 				 void *text,  void *text_end)
@@ -201,6 +243,9 @@ void alternatives_smp_module_add(struct module *mod, char *name,
 	struct smp_alt_module *smp;
 	unsigned long flags;
 
+	if (no_replacement)
+		return;
+
 	if (smp_alt_once) {
 		if (boot_cpu_has(X86_FEATURE_UP))
 			alternatives_smp_unlock(locks, locks_end,
@@ -235,7 +280,7 @@ void alternatives_smp_module_del(struct module *mod)
 	struct smp_alt_module *item;
 	unsigned long flags;
 
-	if (smp_alt_once)
+	if (no_replacement || smp_alt_once)
 		return;
 
 	spin_lock_irqsave(&smp_alt, flags);
@@ -256,7 +301,7 @@ void alternatives_smp_switch(int smp)
 	struct smp_alt_module *mod;
 	unsigned long flags;
 
-	if (smp_alt_once)
+	if (no_replacement || smp_alt_once)
 		return;
 	BUG_ON(!smp && (num_online_cpus() > 1));
 
@@ -285,6 +330,13 @@ void alternatives_smp_switch(int smp)
 
 void __init alternative_instructions(void)
 {
+	if (no_replacement) {
+		printk(KERN_INFO "(SMP-)alternatives turned off\n");
+		free_init_pages("SMP alternatives",
+				(unsigned long)__smp_alt_begin,
+				(unsigned long)__smp_alt_end);
+		return;
+	}
 	apply_alternatives(__alt_instructions, __alt_instructions_end);
 
 	/* switch to patch-once-at-boottime-only mode and free the
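The loop at the heart of apply_alternatives() above copies the replacement instructions and then pads the leftover bytes with the largest NOPs the per-CPU table offers. A rough stand-alone C sketch of that padding strategy (illustrative names and dummy 0x90 NOPs, not the kernel API):

#include <stdio.h>
#include <string.h>

#define ASM_NOP_MAX 8

/* nopbuf[k] stands in for a k-byte NOP sequence (all 0x90 here). */
static unsigned char nopbuf[ASM_NOP_MAX + 1][ASM_NOP_MAX];

static void patch(unsigned char *instr, int instrlen,
                  const unsigned char *repl, int repllen)
{
	memcpy(instr, repl, repllen);
	int diff = instrlen - repllen;
	/* pad the tail with the largest NOPs available, as in the hunk above */
	for (int i = repllen; diff > 0;) {
		int k = diff > ASM_NOP_MAX ? ASM_NOP_MAX : diff;
		memcpy(instr + i, nopbuf[k], k);
		diff -= k;
		i += k;
	}
}

int main(void)
{
	memset(nopbuf, 0x90, sizeof(nopbuf));
	unsigned char code[10] = {0};
	unsigned char repl[3] = {1, 2, 3};
	patch(code, sizeof(code), repl, sizeof(repl));
	for (int i = 0; i < 10; i++)
		printf("%02x ", code[i]);
	printf("\n");	/* prints: 01 02 03 90 90 90 90 90 90 90 */
	return 0;
}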
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 5ab59c12335b..7ce09492fc0c 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -36,6 +36,7 @@
 #include <asm/arch_hooks.h>
 #include <asm/hpet.h>
 #include <asm/i8253.h>
+#include <asm/nmi.h>
 
 #include <mach_apic.h>
 #include <mach_apicdef.h>
@@ -156,7 +157,7 @@ void clear_local_APIC(void)
 	maxlvt = get_maxlvt();
 
 	/*
-	 * Masking an LVT entry on a P6 can trigger a local APIC error
+	 * Masking an LVT entry can trigger a local APIC error
 	 * if the vector is zero. Mask LVTERR first to prevent this.
 	 */
 	if (maxlvt >= 3) {
@@ -1117,7 +1118,18 @@ void disable_APIC_timer(void)
 		unsigned long v;
 
 		v = apic_read(APIC_LVTT);
-		apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED);
+		/*
+		 * When an illegal vector value (0-15) is written to an LVT
+		 * entry and delivery mode is Fixed, the APIC may signal an
+		 * illegal vector error, without regard to whether the mask
+		 * bit is set or whether an interrupt is actually seen on input.
+		 *
+		 * Boot sequence might call this function when the LVTT has
+		 * '0' vector value. So make sure vector field is set to
+		 * valid value.
+		 */
+		v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
+		apic_write_around(APIC_LVTT, v);
 	}
 }
 
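The new disable_APIC_timer() comment reduces to one bit operation: keep a legal vector in bits 0-7 of the LVT while setting the mask bit. A toy illustration; the constant values below match my reading of the i386 headers of this era and should be treated as assumptions:

#include <stdio.h>
#include <stdint.h>

#define APIC_LVT_MASKED    (1u << 16)	/* assumed: mask bit of an LVT entry */
#define LOCAL_TIMER_VECTOR 0xefu	/* assumed: any vector >= 16 is legal */

int main(void)
{
	uint32_t v = 0;			/* LVTT as a boot path might leave it */
	v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
	printf("LVTT = %#x (vector %u, masked = %u)\n",
	       (unsigned)v, (unsigned)(v & 0xff), !!(v & APIC_LVT_MASKED));
	return 0;
}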
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index 9e819eb68229..7c5729d1fd06 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -764,9 +764,9 @@ static int apm_do_idle(void)
 	int idled = 0;
 	int polling;
 
-	polling = test_thread_flag(TIF_POLLING_NRFLAG);
+	polling = !!(current_thread_info()->status & TS_POLLING);
 	if (polling) {
-		clear_thread_flag(TIF_POLLING_NRFLAG);
+		current_thread_info()->status &= ~TS_POLLING;
 		smp_mb__after_clear_bit();
 	}
 	if (!need_resched()) {
@@ -774,7 +774,7 @@ static int apm_do_idle(void)
 		ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax);
 	}
 	if (polling)
-		set_thread_flag(TIF_POLLING_NRFLAG);
+		current_thread_info()->status |= TS_POLLING;
 
 	if (!idled)
 		return 0;
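The apm_do_idle() hunk belongs to the series that moves the polling flag out of the atomic TIF bits into thread_info->status, which only the owning CPU writes. A minimal user-space model of the save/clear/restore handshake (the TS_POLLING value is an assumption from contemporary i386 headers):

#include <stdio.h>

#define TS_POLLING 0x0002	/* assumed: "in idle loop, not sleeping" */

struct thread_info { unsigned long status; };

int main(void)
{
	struct thread_info ti = { .status = TS_POLLING };

	int polling = !!(ti.status & TS_POLLING);
	if (polling)
		ti.status &= ~TS_POLLING;	/* stop advertising before the BIOS call */

	/* ... the APM BIOS idle call would sit here ... */

	if (polling)
		ti.status |= TS_POLLING;	/* restore on the way out */

	printf("status = %#lx\n", ti.status);
	return 0;
}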
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 786d1a57048b..fd0457c9c827 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -224,15 +224,17 @@ static void __init init_amd(struct cpuinfo_x86 *c)
 
 #ifdef CONFIG_X86_HT
 	/*
-	 * On an AMD dual core setup the lower bits of the APIC id
-	 * distinguish the cores.  Assumes number of cores is a power
-	 * of two.
+	 * On an AMD multi core setup the lower bits of the APIC id
+	 * distinguish the cores.
 	 */
 	if (c->x86_max_cores > 1) {
 		int cpu = smp_processor_id();
-		unsigned bits = 0;
-		while ((1 << bits) < c->x86_max_cores)
-			bits++;
+		unsigned bits = (cpuid_ecx(0x80000008) >> 12) & 0xf;
+
+		if (bits == 0) {
+			while ((1 << bits) < c->x86_max_cores)
+				bits++;
+		}
 		cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1);
 		phys_proc_id[cpu] >>= bits;
 		printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
@@ -240,6 +242,8 @@ static void __init init_amd(struct cpuinfo_x86 *c)
 	}
 #endif
 
+	if (cpuid_eax(0x80000000) >= 0x80000006)
+		num_cache_leaves = 3;
 }
 
 static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
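The init_amd() change reads the APIC-ID core-bit width from CPUID 0x80000008 ECX[15:12] and only falls back to log2(x86_max_cores) when that field reads zero, so core counts no longer need to be powers of two. A rough user-space rendering of just that computation:

#include <stdio.h>

/* bits used to distinguish cores inside one physical package */
static unsigned core_bits(unsigned ecx_80000008, unsigned max_cores)
{
	unsigned bits = (ecx_80000008 >> 12) & 0xf;

	if (bits == 0)				/* older CPUs report nothing here */
		while ((1u << bits) < max_cores)
			bits++;
	return bits;
}

int main(void)
{
	printf("%u\n", core_bits(0, 2));	/* fallback path -> 1 */
	printf("%u\n", core_bits(2u << 12, 4));	/* field present -> 2 */
	return 0;
}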
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
index 5386b29bb5a5..10afc645c540 100644
--- a/arch/i386/kernel/cpu/intel.c
+++ b/arch/i386/kernel/cpu/intel.c
@@ -122,6 +122,12 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 
 	select_idle_routine(c);
 	l2 = init_intel_cacheinfo(c);
+	if (c->cpuid_level > 9 ) {
+		unsigned eax = cpuid_eax(10);
+		/* Check for version and the number of counters */
+		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
+			set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
+	}
 
 	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
 	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
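The init_intel() hunk detects architectural perfmon by decoding CPUID leaf 10 (0xA): EAX[7:0] carries the version and EAX[15:8] the number of general-purpose counters. A sketch of the same decode; the sample inputs are made up:

#include <stdio.h>

static int has_arch_perfmon(unsigned cpuid10_eax)
{
	unsigned version  = cpuid10_eax & 0xff;		/* EAX[7:0]  */
	unsigned counters = (cpuid10_eax >> 8) & 0xff;	/* EAX[15:8] */

	return version && counters > 1;
}

int main(void)
{
	printf("%d\n", has_arch_perfmon(0x0201));	/* v1, 2 counters -> 1 */
	printf("%d\n", has_arch_perfmon(0x0100));	/* version 0      -> 0 */
	return 0;
}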
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index c8547a6fa7e6..6c37b4fd8ce2 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -4,6 +4,7 @@
  *	Changes:
  *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
  *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
+ *	Andi Kleen		: CPUID4 emulation on AMD.
  */
 
 #include <linux/init.h>
@@ -130,25 +131,111 @@ struct _cpuid4_info {
 	cpumask_t shared_cpu_map;
 };
 
-static unsigned short		num_cache_leaves;
+unsigned short			num_cache_leaves;
+
+/* AMD doesn't have CPUID4. Emulate it here to report the same
+   information to the user. This makes some assumptions about the machine:
+   No L3, L2 not shared, no SMT etc. that is currently true on AMD CPUs.
+
+   In theory the TLBs could be reported as fake type (they are in "dummy").
+   Maybe later */
+union l1_cache {
+	struct {
+		unsigned line_size : 8;
+		unsigned lines_per_tag : 8;
+		unsigned assoc : 8;
+		unsigned size_in_kb : 8;
+	};
+	unsigned val;
+};
+
+union l2_cache {
+	struct {
+		unsigned line_size : 8;
+		unsigned lines_per_tag : 4;
+		unsigned assoc : 4;
+		unsigned size_in_kb : 16;
+	};
+	unsigned val;
+};
+
+static unsigned short assocs[] = {
+	[1] = 1, [2] = 2, [4] = 4, [6] = 8,
+	[8] = 16,
+	[0xf] = 0xffff // ??
+	};
+static unsigned char levels[] = { 1, 1, 2 };
+static unsigned char types[] = { 1, 2, 3 };
+
+static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
+		       union _cpuid4_leaf_ebx *ebx,
+		       union _cpuid4_leaf_ecx *ecx)
+{
+	unsigned dummy;
+	unsigned line_size, lines_per_tag, assoc, size_in_kb;
+	union l1_cache l1i, l1d;
+	union l2_cache l2;
+
+	eax->full = 0;
+	ebx->full = 0;
+	ecx->full = 0;
+
+	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
+	cpuid(0x80000006, &dummy, &dummy, &l2.val, &dummy);
+
+	if (leaf > 2 || !l1d.val || !l1i.val || !l2.val)
+		return;
+
+	eax->split.is_self_initializing = 1;
+	eax->split.type = types[leaf];
+	eax->split.level = levels[leaf];
+	eax->split.num_threads_sharing = 0;
+	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
+
+	if (leaf <= 1) {
+		union l1_cache *l1 = leaf == 0 ? &l1d : &l1i;
+		assoc = l1->assoc;
+		line_size = l1->line_size;
+		lines_per_tag = l1->lines_per_tag;
+		size_in_kb = l1->size_in_kb;
+	} else {
+		assoc = l2.assoc;
+		line_size = l2.line_size;
+		lines_per_tag = l2.lines_per_tag;
+		/* cpu_data has errata corrections for K7 applied */
+		size_in_kb = current_cpu_data.x86_cache_size;
+	}
+
+	if (assoc == 0xf)
+		eax->split.is_fully_associative = 1;
+	ebx->split.coherency_line_size = line_size - 1;
+	ebx->split.ways_of_associativity = assocs[assoc] - 1;
+	ebx->split.physical_line_partition = lines_per_tag - 1;
+	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
+		(ebx->split.ways_of_associativity + 1) - 1;
+}
 
 static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
 {
-	unsigned int		eax, ebx, ecx, edx;
-	union _cpuid4_leaf_eax	cache_eax;
+	union _cpuid4_leaf_eax	eax;
+	union _cpuid4_leaf_ebx	ebx;
+	union _cpuid4_leaf_ecx	ecx;
+	unsigned		edx;
 
-	cpuid_count(4, index, &eax, &ebx, &ecx, &edx);
-	cache_eax.full = eax;
-	if (cache_eax.split.type == CACHE_TYPE_NULL)
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		amd_cpuid4(index, &eax, &ebx, &ecx);
+	else
+		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
+	if (eax.split.type == CACHE_TYPE_NULL)
 		return -EIO; /* better error ? */
 
-	this_leaf->eax.full = eax;
-	this_leaf->ebx.full = ebx;
-	this_leaf->ecx.full = ecx;
-	this_leaf->size = (this_leaf->ecx.split.number_of_sets + 1) *
-		(this_leaf->ebx.split.coherency_line_size + 1) *
-		(this_leaf->ebx.split.physical_line_partition + 1) *
-		(this_leaf->ebx.split.ways_of_associativity + 1);
+	this_leaf->eax = eax;
+	this_leaf->ebx = ebx;
+	this_leaf->ecx = ecx;
+	this_leaf->size = (ecx.split.number_of_sets + 1) *
+		(ebx.split.coherency_line_size + 1) *
+		(ebx.split.physical_line_partition + 1) *
+		(ebx.split.ways_of_associativity + 1);
 	return 0;
 }
 
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index 21dc1bbb8067..0c88d3ec8c18 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -120,14 +120,9 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
 	return 1;
 }
 
-/*
- * By using the NMI code instead of a vector we just sneak thru the
- * word generator coming out with just what we want.  AND it does
- * not matter if clustered_apic_mode is set or not.
- */
 static void smp_send_nmi_allbutself(void)
 {
-	send_IPI_allbutself(APIC_DM_NMI);
+	send_IPI_allbutself(NMI_VECTOR);
 }
 
 static void nmi_shootdown_cpus(void)
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index cfc683f153b9..e6e4506e749a 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -48,6 +48,7 @@
 #include <asm/smp.h>
 #include <asm/page.h>
 #include <asm/desc.h>
+#include <asm/dwarf2.h>
 #include "irq_vectors.h"
 
 #define nr_syscalls ((syscall_table_size)/4)
@@ -85,31 +86,67 @@ VM_MASK = 0x00020000
 #define SAVE_ALL \
 	cld; \
 	pushl %es; \
+	CFI_ADJUST_CFA_OFFSET 4;\
+	/*CFI_REL_OFFSET es, 0;*/\
 	pushl %ds; \
+	CFI_ADJUST_CFA_OFFSET 4;\
+	/*CFI_REL_OFFSET ds, 0;*/\
 	pushl %eax; \
+	CFI_ADJUST_CFA_OFFSET 4;\
+	CFI_REL_OFFSET eax, 0;\
 	pushl %ebp; \
+	CFI_ADJUST_CFA_OFFSET 4;\
+	CFI_REL_OFFSET ebp, 0;\
 	pushl %edi; \
+	CFI_ADJUST_CFA_OFFSET 4;\
+	CFI_REL_OFFSET edi, 0;\
 	pushl %esi; \
+	CFI_ADJUST_CFA_OFFSET 4;\
+	CFI_REL_OFFSET esi, 0;\
 	pushl %edx; \
+	CFI_ADJUST_CFA_OFFSET 4;\
+	CFI_REL_OFFSET edx, 0;\
 	pushl %ecx; \
+	CFI_ADJUST_CFA_OFFSET 4;\
+	CFI_REL_OFFSET ecx, 0;\
 	pushl %ebx; \
+	CFI_ADJUST_CFA_OFFSET 4;\
+	CFI_REL_OFFSET ebx, 0;\
 	movl $(__USER_DS), %edx; \
 	movl %edx, %ds; \
 	movl %edx, %es;
 
 #define RESTORE_INT_REGS \
 	popl %ebx;	\
+	CFI_ADJUST_CFA_OFFSET -4;\
+	CFI_RESTORE ebx;\
 	popl %ecx;	\
+	CFI_ADJUST_CFA_OFFSET -4;\
+	CFI_RESTORE ecx;\
 	popl %edx;	\
+	CFI_ADJUST_CFA_OFFSET -4;\
+	CFI_RESTORE edx;\
 	popl %esi;	\
+	CFI_ADJUST_CFA_OFFSET -4;\
+	CFI_RESTORE esi;\
 	popl %edi;	\
+	CFI_ADJUST_CFA_OFFSET -4;\
+	CFI_RESTORE edi;\
 	popl %ebp;	\
-	popl %eax
+	CFI_ADJUST_CFA_OFFSET -4;\
+	CFI_RESTORE ebp;\
+	popl %eax;	\
+	CFI_ADJUST_CFA_OFFSET -4;\
+	CFI_RESTORE eax
 
 #define RESTORE_REGS	\
 	RESTORE_INT_REGS; \
 1:	popl %ds;	\
+	CFI_ADJUST_CFA_OFFSET -4;\
+	/*CFI_RESTORE ds;*/\
 2:	popl %es;	\
+	CFI_ADJUST_CFA_OFFSET -4;\
+	/*CFI_RESTORE es;*/\
 .section .fixup,"ax";	\
 3:	movl $0,(%esp);	\
 	jmp 1b;		\
@@ -122,13 +159,43 @@ VM_MASK = 0x00020000
 	.long 2b,4b;	\
 .previous
 
+#define RING0_INT_FRAME \
+	CFI_STARTPROC simple;\
+	CFI_DEF_CFA esp, 3*4;\
+	/*CFI_OFFSET cs, -2*4;*/\
+	CFI_OFFSET eip, -3*4
+
+#define RING0_EC_FRAME \
+	CFI_STARTPROC simple;\
+	CFI_DEF_CFA esp, 4*4;\
+	/*CFI_OFFSET cs, -2*4;*/\
+	CFI_OFFSET eip, -3*4
+
+#define RING0_PTREGS_FRAME \
+	CFI_STARTPROC simple;\
+	CFI_DEF_CFA esp, OLDESP-EBX;\
+	/*CFI_OFFSET cs, CS-OLDESP;*/\
+	CFI_OFFSET eip, EIP-OLDESP;\
+	/*CFI_OFFSET es, ES-OLDESP;*/\
+	/*CFI_OFFSET ds, DS-OLDESP;*/\
+	CFI_OFFSET eax, EAX-OLDESP;\
+	CFI_OFFSET ebp, EBP-OLDESP;\
+	CFI_OFFSET edi, EDI-OLDESP;\
+	CFI_OFFSET esi, ESI-OLDESP;\
+	CFI_OFFSET edx, EDX-OLDESP;\
+	CFI_OFFSET ecx, ECX-OLDESP;\
+	CFI_OFFSET ebx, EBX-OLDESP
 
 ENTRY(ret_from_fork)
+	CFI_STARTPROC
 	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
 	call schedule_tail
 	GET_THREAD_INFO(%ebp)
 	popl %eax
+	CFI_ADJUST_CFA_OFFSET -4
 	jmp syscall_exit
+	CFI_ENDPROC
 
 /*
  * Return to user mode is not as complex as all this looks,
@@ -139,6 +206,7 @@ ENTRY(ret_from_fork)
 
 	# userspace resumption stub bypassing syscall exit tracing
 	ALIGN
+	RING0_PTREGS_FRAME
 ret_from_exception:
 	preempt_stop
 ret_from_intr:
@@ -171,20 +239,33 @@ need_resched:
 	call preempt_schedule_irq
 	jmp need_resched
 #endif
+	CFI_ENDPROC
 
 /* SYSENTER_RETURN points to after the "sysenter" instruction in
    the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */
 
 	# sysenter call handler stub
 ENTRY(sysenter_entry)
+	CFI_STARTPROC simple
+	CFI_DEF_CFA esp, 0
+	CFI_REGISTER esp, ebp
 	movl TSS_sysenter_esp0(%esp),%esp
 sysenter_past_esp:
 	sti
 	pushl $(__USER_DS)
+	CFI_ADJUST_CFA_OFFSET 4
+	/*CFI_REL_OFFSET ss, 0*/
 	pushl %ebp
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET esp, 0
 	pushfl
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl $(__USER_CS)
+	CFI_ADJUST_CFA_OFFSET 4
+	/*CFI_REL_OFFSET cs, 0*/
 	pushl $SYSENTER_RETURN
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET eip, 0
 
 /*
  * Load the potential sixth argument from user stack.
@@ -199,6 +280,7 @@ sysenter_past_esp:
 .previous
 
 	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
 
@@ -219,11 +301,14 @@ sysenter_past_esp:
 	xorl %ebp,%ebp
 	sti
 	sysexit
+	CFI_ENDPROC
 
 
 	# system call handler stub
 ENTRY(system_call)
+	RING0_INT_FRAME			# can't unwind into user space anyway
 	pushl %eax			# save orig_eax
+	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
 	testl $TF_MASK,EFLAGS(%esp)
@@ -256,10 +341,12 @@ restore_all:
 	movb CS(%esp), %al
 	andl $(VM_MASK | (4 << 8) | 3), %eax
 	cmpl $((4 << 8) | 3), %eax
+	CFI_REMEMBER_STATE
 	je ldt_ss			# returning to user-space with LDT SS
 restore_nocheck:
 	RESTORE_REGS
 	addl $4, %esp
+	CFI_ADJUST_CFA_OFFSET -4
 1:	iret
 .section .fixup,"ax"
 iret_exc:
@@ -273,6 +360,7 @@ iret_exc:
 	.long 1b,iret_exc
 .previous
 
+	CFI_RESTORE_STATE
 ldt_ss:
 	larl OLDSS(%esp), %eax
 	jnz restore_nocheck
@@ -285,11 +373,13 @@ ldt_ss:
 	 * CPUs, which we can try to work around to make
 	 * dosemu and wine happy. */
 	subl $8, %esp		# reserve space for switch16 pointer
+	CFI_ADJUST_CFA_OFFSET 8
 	cli
 	movl %esp, %eax
 	/* Set up the 16bit stack frame with switch32 pointer on top,
 	 * and a switch16 pointer on top of the current frame. */
 	call setup_x86_bogus_stack
+	CFI_ADJUST_CFA_OFFSET -8	# frame has moved
 	RESTORE_REGS
 	lss 20+4(%esp), %esp	# switch to 16bit stack
 1:	iret
@@ -297,9 +387,11 @@ ldt_ss:
 	.align 4
 	.long 1b,iret_exc
 .previous
+	CFI_ENDPROC
 
 	# perform work that needs to be done immediately before resumption
 	ALIGN
+	RING0_PTREGS_FRAME		# can't unwind into user space anyway
 work_pending:
 	testb $_TIF_NEED_RESCHED, %cl
 	jz work_notifysig
@@ -329,8 +421,10 @@ work_notifysig:			# deal with pending signals and
 work_notifysig_v86:
 #ifdef CONFIG_VM86
 	pushl %ecx			# save ti_flags for do_notify_resume
+	CFI_ADJUST_CFA_OFFSET 4
 	call save_v86_state		# %eax contains pt_regs pointer
 	popl %ecx
+	CFI_ADJUST_CFA_OFFSET -4
 	movl %eax, %esp
 	xorl %edx, %edx
 	call do_notify_resume
@@ -363,19 +457,21 @@ syscall_exit_work:
 	movl $1, %edx
 	call do_syscall_trace
 	jmp resume_userspace
+	CFI_ENDPROC
 
-	ALIGN
+	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
 	pushl %eax			# save orig_eax
+	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
 	movl $-EFAULT,EAX(%esp)
 	jmp resume_userspace
 
-	ALIGN
 syscall_badsys:
 	movl $-ENOSYS,EAX(%esp)
 	jmp resume_userspace
+	CFI_ENDPROC
 
 #define FIXUP_ESPFIX_STACK \
 	movl %esp, %eax; \
@@ -387,16 +483,21 @@ syscall_badsys:
 	movl %eax, %esp;
 #define UNWIND_ESPFIX_STACK \
 	pushl %eax; \
+	CFI_ADJUST_CFA_OFFSET 4; \
 	movl %ss, %eax; \
 	/* see if on 16bit stack */ \
 	cmpw $__ESPFIX_SS, %ax; \
-	jne 28f; \
-	movl $__KERNEL_DS, %edx; \
-	movl %edx, %ds; \
-	movl %edx, %es; \
+	je 28f; \
+27:	popl %eax; \
+	CFI_ADJUST_CFA_OFFSET -4; \
+.section .fixup,"ax"; \
+28:	movl $__KERNEL_DS, %eax; \
+	movl %eax, %ds; \
+	movl %eax, %es; \
 	/* switch to 32bit stack */ \
-	FIXUP_ESPFIX_STACK \
-28:	popl %eax;
+	FIXUP_ESPFIX_STACK; \
+	jmp 27b; \
+.previous
 
 /*
  * Build the entry stubs and pointer table with
@@ -408,9 +509,14 @@ ENTRY(interrupt)
 
 vector=0
 ENTRY(irq_entries_start)
+	RING0_INT_FRAME
 .rept NR_IRQS
 	ALIGN
+	.if vector
+	CFI_ADJUST_CFA_OFFSET -4
+	.endif
 1:	pushl $vector-256
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp common_interrupt
 .data
 	.long 1b
@@ -424,60 +530,99 @@ common_interrupt:
 	movl %esp,%eax
 	call do_IRQ
 	jmp ret_from_intr
+	CFI_ENDPROC
 
 #define BUILD_INTERRUPT(name, nr)	\
 ENTRY(name)				\
+	RING0_INT_FRAME;		\
 	pushl $nr-256;			\
-	SAVE_ALL			\
+	CFI_ADJUST_CFA_OFFSET 4;	\
+	SAVE_ALL;			\
 	movl %esp,%eax;			\
 	call smp_/**/name;		\
-	jmp ret_from_intr;
+	jmp ret_from_intr;		\
+	CFI_ENDPROC
 
 /* The include is where all of the SMP etc. interrupts come from */
 #include "entry_arch.h"
 
 ENTRY(divide_error)
+	RING0_INT_FRAME
 	pushl $0			# no error code
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl $do_divide_error
+	CFI_ADJUST_CFA_OFFSET 4
 	ALIGN
 error_code:
 	pushl %ds
+	CFI_ADJUST_CFA_OFFSET 4
+	/*CFI_REL_OFFSET ds, 0*/
 	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET eax, 0
 	xorl %eax, %eax
 	pushl %ebp
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET ebp, 0
 	pushl %edi
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET edi, 0
 	pushl %esi
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET esi, 0
 	pushl %edx
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET edx, 0
 	decl %eax			# eax = -1
 	pushl %ecx
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET ecx, 0
 	pushl %ebx
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET ebx, 0
 	cld
 	pushl %es
+	CFI_ADJUST_CFA_OFFSET 4
+	/*CFI_REL_OFFSET es, 0*/
 	UNWIND_ESPFIX_STACK
 	popl %ecx
+	CFI_ADJUST_CFA_OFFSET -4
+	/*CFI_REGISTER es, ecx*/
 	movl ES(%esp), %edi		# get the function address
 	movl ORIG_EAX(%esp), %edx	# get the error code
 	movl %eax, ORIG_EAX(%esp)
 	movl %ecx, ES(%esp)
+	/*CFI_REL_OFFSET es, ES*/
 	movl $(__USER_DS), %ecx
 	movl %ecx, %ds
 	movl %ecx, %es
 	movl %esp,%eax			# pt_regs pointer
 	call *%edi
 	jmp ret_from_exception
+	CFI_ENDPROC
 
 ENTRY(coprocessor_error)
+	RING0_INT_FRAME
 	pushl $0
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl $do_coprocessor_error
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 
 ENTRY(simd_coprocessor_error)
+	RING0_INT_FRAME
 	pushl $0
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl $do_simd_coprocessor_error
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 
 ENTRY(device_not_available)
+	RING0_INT_FRAME
 	pushl $-1			# mark this as an int
+	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
 	movl %cr0, %eax
 	testl $0x4, %eax		# EM (math emulation bit)
@@ -487,9 +632,12 @@ ENTRY(device_not_available)
 	jmp ret_from_exception
 device_not_available_emulate:
 	pushl $0			# temporary storage for ORIG_EIP
+	CFI_ADJUST_CFA_OFFSET 4
 	call math_emulate
 	addl $4, %esp
+	CFI_ADJUST_CFA_OFFSET -4
 	jmp ret_from_exception
+	CFI_ENDPROC
 
 /*
  * Debug traps and NMI can happen at the one SYSENTER instruction
@@ -514,16 +662,19 @@ label: \
 	pushl $sysenter_past_esp
 
 KPROBE_ENTRY(debug)
+	RING0_INT_FRAME
 	cmpl $sysenter_entry,(%esp)
 	jne debug_stack_correct
 	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
 debug_stack_correct:
 	pushl $-1			# mark this as an int
+	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
 	xorl %edx,%edx			# error code 0
 	movl %esp,%eax			# pt_regs pointer
 	call do_debug
 	jmp ret_from_exception
+	CFI_ENDPROC
 	.previous .text
 /*
  * NMI is doubly nasty. It can happen _while_ we're handling
@@ -534,14 +685,18 @@ debug_stack_correct:
  * fault happened on the sysenter path.
  */
 ENTRY(nmi)
+	RING0_INT_FRAME
 	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
 	movl %ss, %eax
 	cmpw $__ESPFIX_SS, %ax
 	popl %eax
+	CFI_ADJUST_CFA_OFFSET -4
 	je nmi_16bit_stack
 	cmpl $sysenter_entry,(%esp)
 	je nmi_stack_fixup
 	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
 	movl %esp,%eax
 	/* Do not access memory above the end of our stack page,
 	 * it might not exist.
@@ -549,16 +704,19 @@ ENTRY(nmi)
 	andl $(THREAD_SIZE-1),%eax
 	cmpl $(THREAD_SIZE-20),%eax
 	popl %eax
+	CFI_ADJUST_CFA_OFFSET -4
 	jae nmi_stack_correct
 	cmpl $sysenter_entry,12(%esp)
 	je nmi_debug_stack_check
 nmi_stack_correct:
 	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
 	xorl %edx,%edx		# zero error code
 	movl %esp,%eax		# pt_regs pointer
 	call do_nmi
 	jmp restore_all
+	CFI_ENDPROC
 
 nmi_stack_fixup:
 	FIX_STACK(12,nmi_stack_correct, 1)
@@ -574,94 +732,177 @@ nmi_debug_stack_check:
 	jmp nmi_stack_correct
 
 nmi_16bit_stack:
+	RING0_INT_FRAME
 	/* create the pointer to lss back */
 	pushl %ss
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl %esp
+	CFI_ADJUST_CFA_OFFSET 4
 	movzwl %sp, %esp
 	addw $4, (%esp)
 	/* copy the iret frame of 12 bytes */
 	.rept 3
 	pushl 16(%esp)
+	CFI_ADJUST_CFA_OFFSET 4
 	.endr
 	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
 	FIXUP_ESPFIX_STACK		# %eax == %esp
+	CFI_ADJUST_CFA_OFFSET -20	# the frame has now moved
 	xorl %edx,%edx			# zero error code
 	call do_nmi
 	RESTORE_REGS
 	lss 12+4(%esp), %esp		# back to 16bit stack
 1:	iret
+	CFI_ENDPROC
 .section __ex_table,"a"
 	.align 4
 	.long 1b,iret_exc
 .previous
 
 KPROBE_ENTRY(int3)
+	RING0_INT_FRAME
 	pushl $-1			# mark this as an int
+	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
 	xorl %edx,%edx		# zero error code
 	movl %esp,%eax		# pt_regs pointer
 	call do_int3
 	jmp ret_from_exception
+	CFI_ENDPROC
 	.previous .text
 
 ENTRY(overflow)
+	RING0_INT_FRAME
 	pushl $0
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl $do_overflow
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 
 ENTRY(bounds)
+	RING0_INT_FRAME
 	pushl $0
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl $do_bounds
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 
 ENTRY(invalid_op)
+	RING0_INT_FRAME
 	pushl $0
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl $do_invalid_op
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 
 ENTRY(coprocessor_segment_overrun)
+	RING0_INT_FRAME
 	pushl $0
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl $do_coprocessor_segment_overrun
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 
 ENTRY(invalid_TSS)
+	RING0_EC_FRAME
 	pushl $do_invalid_TSS
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 
 ENTRY(segment_not_present)
+	RING0_EC_FRAME
 	pushl $do_segment_not_present
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 
 ENTRY(stack_segment)
+	RING0_EC_FRAME
 	pushl $do_stack_segment
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 
 KPROBE_ENTRY(general_protection)
+	RING0_EC_FRAME
 	pushl $do_general_protection
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 	.previous .text
 
 ENTRY(alignment_check)
+	RING0_EC_FRAME
 	pushl $do_alignment_check
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 
 KPROBE_ENTRY(page_fault)
+	RING0_EC_FRAME
 	pushl $do_page_fault
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 	.previous .text
 
 #ifdef CONFIG_X86_MCE
 ENTRY(machine_check)
+	RING0_INT_FRAME
 	pushl $0
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl machine_check_vector
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 #endif
 
 ENTRY(spurious_interrupt_bug)
+	RING0_INT_FRAME
 	pushl $0
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl $do_spurious_interrupt_bug
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
+
+#ifdef CONFIG_STACK_UNWIND
+ENTRY(arch_unwind_init_running)
+	CFI_STARTPROC
+	movl	4(%esp), %edx
+	movl	(%esp), %ecx
+	leal	4(%esp), %eax
+	movl	%ebx, EBX(%edx)
+	xorl	%ebx, %ebx
+	movl	%ebx, ECX(%edx)
+	movl	%ebx, EDX(%edx)
+	movl	%esi, ESI(%edx)
+	movl	%edi, EDI(%edx)
+	movl	%ebp, EBP(%edx)
+	movl	%ebx, EAX(%edx)
+	movl	$__USER_DS, DS(%edx)
+	movl	$__USER_DS, ES(%edx)
+	movl	%ebx, ORIG_EAX(%edx)
+	movl	%ecx, EIP(%edx)
+	movl	12(%esp), %ecx
+	movl	$__KERNEL_CS, CS(%edx)
+	movl	%ebx, EFLAGS(%edx)
+	movl	%eax, OLDESP(%edx)
+	movl	8(%esp), %eax
+	movl	%ecx, 8(%esp)
+	movl	EBX(%edx), %ebx
+	movl	$__KERNEL_DS, OLDSS(%edx)
+	jmpl	*%eax
+	CFI_ENDPROC
+ENDPROC(arch_unwind_init_running)
+#endif
 
 .section .rodata,"a"
 #include "syscall_table.S"
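Each pushl/popl in the annotated entry.S now carries a CFI_ADJUST_CFA_OFFSET delta so an unwinder can recompute the canonical frame address at any instruction. This is not kernel code, just a toy model of the bookkeeping those annotations encode:

#include <stdio.h>

int main(void)
{
	/* three pushes then three pops, read as a CFI annotation stream */
	int deltas[] = { +4, +4, +4, -4, -4, -4 };

	/* RING0_INT_FRAME starts at esp + 3*4: eip, cs and eflags are
	 * already on the stack when a ring-0 interrupt entry runs */
	int cfa = 3 * 4;

	for (unsigned i = 0; i < sizeof(deltas) / sizeof(deltas[0]); i++) {
		cfa += deltas[i];
		printf("insn %u: CFA = esp + %d\n", i, cfa);
	}
	return 0;
}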
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index a62df3e764c5..72ae414e4d49 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -38,6 +38,7 @@
38#include <asm/desc.h> 38#include <asm/desc.h>
39#include <asm/timer.h> 39#include <asm/timer.h>
40#include <asm/i8259.h> 40#include <asm/i8259.h>
41#include <asm/nmi.h>
41 42
42#include <mach_apic.h> 43#include <mach_apic.h>
43 44
@@ -50,6 +51,7 @@ atomic_t irq_mis_count;
50static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; 51static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
51 52
52static DEFINE_SPINLOCK(ioapic_lock); 53static DEFINE_SPINLOCK(ioapic_lock);
54static DEFINE_SPINLOCK(vector_lock);
53 55
54int timer_over_8254 __initdata = 1; 56int timer_over_8254 __initdata = 1;
55 57
@@ -1161,10 +1163,17 @@ u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
1161int assign_irq_vector(int irq) 1163int assign_irq_vector(int irq)
1162{ 1164{
1163 static int current_vector = FIRST_DEVICE_VECTOR, offset = 0; 1165 static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
1166 unsigned long flags;
1167 int vector;
1168
1169 BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
1164 1170
1165 BUG_ON(irq >= NR_IRQ_VECTORS); 1171 spin_lock_irqsave(&vector_lock, flags);
1166 if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) 1172
1173 if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
1174 spin_unlock_irqrestore(&vector_lock, flags);
1167 return IO_APIC_VECTOR(irq); 1175 return IO_APIC_VECTOR(irq);
1176 }
1168next: 1177next:
1169 current_vector += 8; 1178 current_vector += 8;
1170 if (current_vector == SYSCALL_VECTOR) 1179 if (current_vector == SYSCALL_VECTOR)
@@ -1172,16 +1181,21 @@ next:
1172 1181
1173 if (current_vector >= FIRST_SYSTEM_VECTOR) { 1182 if (current_vector >= FIRST_SYSTEM_VECTOR) {
1174 offset++; 1183 offset++;
1175 if (!(offset%8)) 1184 if (!(offset%8)) {
1185 spin_unlock_irqrestore(&vector_lock, flags);
1176 return -ENOSPC; 1186 return -ENOSPC;
1187 }
1177 current_vector = FIRST_DEVICE_VECTOR + offset; 1188 current_vector = FIRST_DEVICE_VECTOR + offset;
1178 } 1189 }
1179 1190
1180 vector_irq[current_vector] = irq; 1191 vector = current_vector;
1192 vector_irq[vector] = irq;
1181 if (irq != AUTO_ASSIGN) 1193 if (irq != AUTO_ASSIGN)
1182 IO_APIC_VECTOR(irq) = current_vector; 1194 IO_APIC_VECTOR(irq) = vector;
1183 1195
1184 return current_vector; 1196 spin_unlock_irqrestore(&vector_lock, flags);
1197
1198 return vector;
1185} 1199}
1186 1200
1187static struct hw_interrupt_type ioapic_level_type; 1201static struct hw_interrupt_type ioapic_level_type;
@@ -1193,21 +1207,14 @@ static struct hw_interrupt_type ioapic_edge_type;
 
 static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
 {
-	if (use_pci_vector() && !platform_legacy_irq(irq)) {
-		if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
-				trigger == IOAPIC_LEVEL)
-			irq_desc[vector].handler = &ioapic_level_type;
-		else
-			irq_desc[vector].handler = &ioapic_edge_type;
-		set_intr_gate(vector, interrupt[vector]);
-	} else {
-		if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
-				trigger == IOAPIC_LEVEL)
-			irq_desc[irq].handler = &ioapic_level_type;
-		else
-			irq_desc[irq].handler = &ioapic_edge_type;
-		set_intr_gate(vector, interrupt[irq]);
-	}
+	unsigned idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
+
+	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
+			trigger == IOAPIC_LEVEL)
+		irq_desc[idx].handler = &ioapic_level_type;
+	else
+		irq_desc[idx].handler = &ioapic_edge_type;
+	set_intr_gate(vector, interrupt[idx]);
 }
 
 static void __init setup_IO_APIC_irqs(void)
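The ioapic_register_intr() rewrite folds two identical if/else ladders into one by computing the only thing that differed, the irq_desc[] index, up front. The general shape of that refactor as a standalone sketch (all names hypothetical):

typedef void (*handler_t)(int);

static void handle_level(int irq) { (void)irq; /* ... */ }
static void handle_edge(int irq)  { (void)irq; /* ... */ }

/* Pick the table index once; keep a single copy of the trigger logic. */
static void register_handler(handler_t table[], int irq, int vector,
			     int index_by_vector, int is_level)
{
	unsigned idx = index_by_vector ? (unsigned)vector : (unsigned)irq;

	table[idx] = is_level ? handle_level : handle_edge;
}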
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 49ce4c31b713..061533e0cb5e 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -227,7 +227,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	if (i == 0) {
 		seq_printf(p, " ");
 		for_each_online_cpu(j)
-			seq_printf(p, "CPU%d ",j);
+			seq_printf(p, "CPU%-8d",j);
 		seq_putc(p, '\n');
 	}
 
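The "%-8d" change gives every per-CPU heading in /proc/interrupts a fixed, left-justified field, so the count columns stay aligned regardless of how many digits the CPU number has. A minimal userspace demonstration:

#include <stdio.h>

int main(void)
{
	/* "CPU%-8d" left-justifies the number in an 8-column field,
	 * so each heading is the same width: "CPU0       CPU1       ..." */
	for (int j = 0; j < 3; j++)
		printf("CPU%-8d", j);
	printf("\n");
	return 0;
}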
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index d43b498ec745..a76e93146585 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -14,21 +14,17 @@
  */
 
 #include <linux/config.h>
-#include <linux/mm.h>
 #include <linux/delay.h>
-#include <linux/bootmem.h>
-#include <linux/smp_lock.h>
 #include <linux/interrupt.h>
-#include <linux/mc146818rtc.h>
-#include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/nmi.h>
 #include <linux/sysdev.h>
 #include <linux/sysctl.h>
+#include <linux/percpu.h>
 
 #include <asm/smp.h>
-#include <asm/div64.h>
 #include <asm/nmi.h>
+#include <asm/intel_arch_perfmon.h>
 
 #include "mach_traps.h"
 
@@ -100,6 +96,9 @@ int nmi_active;
 	(P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT|	\
 	 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
 
+#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
+#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
+
 #ifdef CONFIG_SMP
 /* The performance counters used by NMI_LOCAL_APIC don't trigger when
  * the CPU is idle. To make sure the NMI watchdog really ticks on all
@@ -212,6 +211,8 @@ static int __init setup_nmi_watchdog(char *str)
 
 __setup("nmi_watchdog=", setup_nmi_watchdog);
 
+static void disable_intel_arch_watchdog(void);
+
 static void disable_lapic_nmi_watchdog(void)
 {
 	if (nmi_active <= 0)
@@ -221,6 +222,10 @@ static void disable_lapic_nmi_watchdog(void)
 		wrmsr(MSR_K7_EVNTSEL0, 0, 0);
 		break;
 	case X86_VENDOR_INTEL:
+		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+			disable_intel_arch_watchdog();
+			break;
+		}
 		switch (boot_cpu_data.x86) {
 		case 6:
 			if (boot_cpu_data.x86_model > 0xd)
@@ -449,6 +454,53 @@ static int setup_p4_watchdog(void)
 	return 1;
 }
 
+static void disable_intel_arch_watchdog(void)
+{
+	unsigned ebx;
+
+	/*
+	 * Check whether the Architectural PerfMon supports
+	 * Unhalted Core Cycles Event or not.
+	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
+	 */
+	ebx = cpuid_ebx(10);
+	if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
+		wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
+}
+
+static int setup_intel_arch_watchdog(void)
+{
+	unsigned int evntsel;
+	unsigned ebx;
+
+	/*
+	 * Check whether the Architectural PerfMon supports
+	 * Unhalted Core Cycles Event or not.
+	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
+	 */
+	ebx = cpuid_ebx(10);
+	if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
+		return 0;
+
+	nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
+
+	clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
+	clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);
+
+	evntsel = ARCH_PERFMON_EVENTSEL_INT
+		| ARCH_PERFMON_EVENTSEL_OS
+		| ARCH_PERFMON_EVENTSEL_USR
+		| ARCH_PERFMON_NMI_EVENT_SEL
+		| ARCH_PERFMON_NMI_EVENT_UMASK;
+
+	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
+	write_watchdog_counter("INTEL_ARCH_PERFCTR0");
+	apic_write(APIC_LVTPC, APIC_DM_NMI);
+	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
+	return 1;
+}
+
 void setup_apic_nmi_watchdog (void)
 {
 	switch (boot_cpu_data.x86_vendor) {
@@ -458,6 +510,11 @@ void setup_apic_nmi_watchdog (void)
 		setup_k7_watchdog();
 		break;
 	case X86_VENDOR_INTEL:
+		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+			if (!setup_intel_arch_watchdog())
+				return;
+			break;
+		}
 		switch (boot_cpu_data.x86) {
 		case 6:
 			if (boot_cpu_data.x86_model > 0xd)
@@ -561,7 +618,8 @@ void nmi_watchdog_tick (struct pt_regs * regs)
 		wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
 		apic_write(APIC_LVTPC, APIC_DM_NMI);
 	}
-	else if (nmi_perfctr_msr == MSR_P6_PERFCTR0) {
+	else if (nmi_perfctr_msr == MSR_P6_PERFCTR0 ||
+		 nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
 		/* Only P6 based Pentium M need to re-unmask
 		 * the apic vector but it doesn't hurt
 		 * other P6 variant */
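The architectural-perfmon watchdog keys off CPUID leaf 0xA: bit 0 of EBX is set when the Unhalted Core Cycles event is NOT available, which is why both new functions test for the bit being clear. The same probe, as a runnable userspace sketch using GCC's <cpuid.h> on an x86 build:

#include <stdio.h>
#include <cpuid.h>

#define UNHALTED_CORE_CYCLES_UNAVAILABLE (1u << 0)	/* CPUID.0AH:EBX bit 0 */

int main(void)
{
	unsigned eax, ebx, ecx, edx;

	if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x0a not supported");
		return 1;
	}
	/* Bit set   -> event NOT available on this CPU.
	 * Bit clear -> the NMI watchdog may program it. */
	printf("Unhalted Core Cycles event %savailable\n",
	       (ebx & UNHALTED_CORE_CYCLES_UNAVAILABLE) ? "not " : "");
	return 0;
}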
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 6259afea46d1..6946b06e2784 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -102,7 +102,7 @@ void default_idle(void)
 	local_irq_enable();
 
 	if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
-		clear_thread_flag(TIF_POLLING_NRFLAG);
+		current_thread_info()->status &= ~TS_POLLING;
 		smp_mb__after_clear_bit();
 		while (!need_resched()) {
 			local_irq_disable();
@@ -111,7 +111,7 @@ void default_idle(void)
 			else
 				local_irq_enable();
 		}
-		set_thread_flag(TIF_POLLING_NRFLAG);
+		current_thread_info()->status |= TS_POLLING;
 	} else {
 		while (!need_resched())
 			cpu_relax();
@@ -174,7 +174,7 @@ void cpu_idle(void)
 {
 	int cpu = smp_processor_id();
 
-	set_thread_flag(TIF_POLLING_NRFLAG);
+	current_thread_info()->status |= TS_POLLING;
 
 	/* endless idle loop with no priority at all */
 	while (1) {
@@ -312,7 +312,7 @@ void show_regs(struct pt_regs * regs)
 	cr3 = read_cr3();
 	cr4 = read_cr4_safe();
 	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
-	show_trace(NULL, &regs->esp);
+	show_trace(NULL, regs, &regs->esp);
 }
 
 /*
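Moving the polling flag from the TIF_POLLING_NRFLAG thread flag into thread_info->status puts it in a word written only by its owning task, but the wakeup handshake is unchanged: the idler clears its polling bit, issues a full barrier, and only then re-checks need_resched() before halting, so a remote waker that sees polling already clear knows it must send a reschedule IPI. The ordering restated as a self-contained C11 sketch (userspace stand-ins; names hypothetical):

#include <stdatomic.h>

static atomic_int polling = 1;		/* idler is watching the flag */
static atomic_int need_resched_flag;

/* Idle side: stop advertising "polling" before the final recheck. */
static void idle_wait(void)
{
	atomic_store(&polling, 0);
	atomic_thread_fence(memory_order_seq_cst);  /* smp_mb__after_clear_bit() */
	while (!atomic_load(&need_resched_flag))
		;	/* the real code halts here and waits for an IPI */
	atomic_store(&polling, 1);
}

/* Waker side: only send the (expensive) IPI if the idler stopped polling. */
static int waker_needs_ipi(void)
{
	atomic_store(&need_resched_flag, 1);
	atomic_thread_fence(memory_order_seq_cst);
	return !atomic_load(&polling);
}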
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index d134e9643a58..c10789d7a9d3 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -114,7 +114,17 @@ DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_m
 
 static inline int __prepare_ICR (unsigned int shortcut, int vector)
 {
-	return APIC_DM_FIXED | shortcut | vector | APIC_DEST_LOGICAL;
+	unsigned int icr = shortcut | APIC_DEST_LOGICAL;
+
+	switch (vector) {
+	default:
+		icr |= APIC_DM_FIXED | vector;
+		break;
+	case NMI_VECTOR:
+		icr |= APIC_DM_NMI;
+		break;
+	}
+	return icr;
 }
 
 static inline int __prepare_ICR2 (unsigned int mask)
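__prepare_ICR now special-cases NMI_VECTOR: an NMI IPI is requested through the delivery-mode bits of the ICR, not through the vector field, so encoding it as a fixed-vector interrupt would be wrong. A standalone sketch of the encoding (the constant values restate what I believe the i386 <asm/apicdef.h> of this era defines; treat them as assumptions):

/* ICR bit fields (assumed values) */
#define APIC_DEST_LOGICAL	0x00800
#define APIC_DM_FIXED		0x00000
#define APIC_DM_NMI		0x00400
#define NMI_VECTOR		0x02

static unsigned int prepare_icr(unsigned int shortcut, int vector)
{
	unsigned int icr = shortcut | APIC_DEST_LOGICAL;

	if (vector == NMI_VECTOR)
		icr |= APIC_DM_NMI;		/* delivery mode carries the request */
	else
		icr |= APIC_DM_FIXED | vector;	/* ordinary vectored interrupt */
	return icr;
}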
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index bd0ca5c9f053..bce5470ecb42 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -52,6 +52,7 @@
 #include <asm/tlbflush.h>
 #include <asm/desc.h>
 #include <asm/arch_hooks.h>
+#include <asm/nmi.h>
 
 #include <mach_apic.h>
 #include <mach_wakecpu.h>
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index dcc14477af1f..78464097470a 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -28,6 +28,7 @@
 #include <linux/utsname.h>
 #include <linux/kprobes.h>
 #include <linux/kexec.h>
+#include <linux/unwind.h>
 
 #ifdef CONFIG_EISA
 #include <linux/ioport.h>
@@ -47,7 +48,7 @@
 #include <asm/desc.h>
 #include <asm/i387.h>
 #include <asm/nmi.h>
-
+#include <asm/unwind.h>
 #include <asm/smp.h>
 #include <asm/arch_hooks.h>
 #include <asm/kdebug.h>
@@ -92,6 +93,7 @@ asmlinkage void spurious_interrupt_bug(void);
 asmlinkage void machine_check(void);
 
 static int kstack_depth_to_print = 24;
+static int call_trace = 1;
 ATOMIC_NOTIFIER_HEAD(i386die_chain);
 
 int register_die_notifier(struct notifier_block *nb)
@@ -170,7 +172,23 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
 	return ebp;
 }
 
-static void show_trace_log_lvl(struct task_struct *task,
+static asmlinkage int show_trace_unwind(struct unwind_frame_info *info, void *log_lvl)
+{
+	int n = 0;
+	int printed = 0; /* nr of entries already printed on current line */
+
+	while (unwind(info) == 0 && UNW_PC(info)) {
+		++n;
+		printed = print_addr_and_symbol(UNW_PC(info), log_lvl, printed);
+		if (arch_unw_user_mode(info))
+			break;
+	}
+	if (printed)
+		printk("\n");
+	return n;
+}
+
+static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 			       unsigned long *stack, char *log_lvl)
 {
 	unsigned long ebp;
@@ -178,6 +196,26 @@ static void show_trace_log_lvl(struct task_struct *task,
 	if (!task)
 		task = current;
 
+	if (call_trace >= 0) {
+		int unw_ret = 0;
+		struct unwind_frame_info info;
+
+		if (regs) {
+			if (unwind_init_frame_info(&info, task, regs) == 0)
+				unw_ret = show_trace_unwind(&info, log_lvl);
+		} else if (task == current)
+			unw_ret = unwind_init_running(&info, show_trace_unwind, log_lvl);
+		else {
+			if (unwind_init_blocked(&info, task) == 0)
+				unw_ret = show_trace_unwind(&info, log_lvl);
+		}
+		if (unw_ret > 0) {
+			if (call_trace > 0)
+				return;
+			printk("%sLegacy call trace:\n", log_lvl);
+		}
+	}
+
 	if (task == current) {
 		/* Grab ebp right from our regs */
 		asm ("movl %%ebp, %0" : "=r" (ebp) : );
@@ -198,13 +236,13 @@ static void show_trace_log_lvl(struct task_struct *task,
 	}
 }
 
-void show_trace(struct task_struct *task, unsigned long * stack)
+void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long * stack)
 {
-	show_trace_log_lvl(task, stack, "");
+	show_trace_log_lvl(task, regs, stack, "");
 }
 
-static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp,
-			       char *log_lvl)
+static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
+			       unsigned long *esp, char *log_lvl)
 {
 	unsigned long *stack;
 	int i;
@@ -225,13 +263,13 @@ static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp,
 		printk("%08lx ", *stack++);
 	}
 	printk("\n%sCall Trace:\n", log_lvl);
-	show_trace_log_lvl(task, esp, log_lvl);
+	show_trace_log_lvl(task, regs, esp, log_lvl);
 }
 
 void show_stack(struct task_struct *task, unsigned long *esp)
 {
 	printk(" ");
 	show_stack_log_lvl(task, NULL, esp, "");
 }
 
 /*
@@ -241,7 +279,7 @@ void dump_stack(void)
 {
 	unsigned long stack;
 
-	show_trace(current, &stack);
+	show_trace(current, NULL, &stack);
 }
 
 EXPORT_SYMBOL(dump_stack);
@@ -285,7 +323,7 @@ void show_registers(struct pt_regs *regs)
 		u8 __user *eip;
 
 		printk("\n" KERN_EMERG "Stack: ");
-		show_stack_log_lvl(NULL, (unsigned long *)esp, KERN_EMERG);
+		show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);
 
 		printk(KERN_EMERG "Code: ");
 
@@ -1215,3 +1253,15 @@ static int __init kstack_setup(char *s)
 	return 1;
 }
 __setup("kstack=", kstack_setup);
+
+static int __init call_trace_setup(char *s)
+{
+	if (strcmp(s, "old") == 0)
+		call_trace = -1;
+	else if (strcmp(s, "both") == 0)
+		call_trace = 0;
+	else if (strcmp(s, "new") == 0)
+		call_trace = 1;
+	return 1;
+}
+__setup("call_trace=", call_trace_setup);
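The call_trace= boot parameter gives the new DWARF unwinder an escape hatch: -1 ("old") skips it entirely, 0 ("both") prints the unwound trace and then falls through to the EBP-chain walker under a "Legacy call trace:" header, and 1 ("new", the default) suppresses the legacy walk whenever the unwinder produced at least one entry. The dispatch in show_trace_log_lvl() above, condensed into a standalone sketch with hypothetical names:

#include <stdio.h>

/* call_trace: -1 = old only, 0 = both, 1 = new only (default) */
static void print_trace(int call_trace, int (*unwind_entries)(void),
			void (*legacy_walk)(void))
{
	if (call_trace >= 0) {			/* "new" or "both" */
		int n = unwind_entries();	/* DWARF-based unwind */

		if (n > 0 && call_trace > 0)
			return;			/* "new": unwinder sufficed */
		if (n > 0)
			puts("Legacy call trace:");	/* "both" */
	}
	legacy_walk();				/* EBP frame-pointer chain */
}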
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index 7512f39c9f25..2d4f1386e2b1 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -71,6 +71,15 @@ SECTIONS
   .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { *(.data.read_mostly) }
   _edata = .;			/* End of data section */
 
+#ifdef CONFIG_STACK_UNWIND
+  . = ALIGN(4);
+  .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) {
+	__start_unwind = .;
+	*(.eh_frame)
+	__end_unwind = .;
+  }
+#endif
+
   . = ALIGN(THREAD_SIZE);	/* init_task */
   .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
 	*(.data.init_task)
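The __start_unwind/__end_unwind pair is the standard linker-script idiom for letting C code find a section's extent: the symbols are defined at the edges of .eh_frame and appear to C as addresses rather than variables. Typical consumption looks like the sketch below; the registration helper is illustrative only (the actual in-kernel consumer is the unwinder's init code):

/* Provided by vmlinux.lds.S: addresses delimiting the .eh_frame payload. */
extern const char __start_unwind[], __end_unwind[];

/* Illustrative stand-in for however the unwinder ingests the tables. */
void register_unwind_table(const void *table, unsigned long size);

static void unwind_tables_init(void)
{
	const void *table = __start_unwind;
	unsigned long size = __end_unwind - __start_unwind;

	register_unwind_table(table, size);	/* hand over the DWARF CFI region */
}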
diff --git a/arch/i386/oprofile/op_model_athlon.c b/arch/i386/oprofile/op_model_athlon.c
index 3ad9a72a5036..693bdea4a52b 100644
--- a/arch/i386/oprofile/op_model_athlon.c
+++ b/arch/i386/oprofile/op_model_athlon.c
@@ -13,6 +13,7 @@
 #include <linux/oprofile.h>
 #include <asm/ptrace.h>
 #include <asm/msr.h>
+#include <asm/nmi.h>
 
 #include "op_x86_model.h"
 #include "op_counter.h"
diff --git a/arch/i386/oprofile/op_model_p4.c b/arch/i386/oprofile/op_model_p4.c
index ac8a066035c2..7c61d357b82b 100644
--- a/arch/i386/oprofile/op_model_p4.c
+++ b/arch/i386/oprofile/op_model_p4.c
@@ -14,6 +14,7 @@
 #include <asm/ptrace.h>
 #include <asm/fixmap.h>
 #include <asm/apic.h>
+#include <asm/nmi.h>
 
 #include "op_x86_model.h"
 #include "op_counter.h"
diff --git a/arch/i386/oprofile/op_model_ppro.c b/arch/i386/oprofile/op_model_ppro.c
index d719015fc044..5c3ab4b027ad 100644
--- a/arch/i386/oprofile/op_model_ppro.c
+++ b/arch/i386/oprofile/op_model_ppro.c
@@ -14,6 +14,7 @@
 #include <asm/ptrace.h>
 #include <asm/msr.h>
 #include <asm/apic.h>
+#include <asm/nmi.h>
 
 #include "op_x86_model.h"
 #include "op_counter.h"