Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/alternative.h                  2
-rw-r--r--  include/asm-i386/apic.h                        12
-rw-r--r--  include/asm-i386/cpufeature.h                   1
-rw-r--r--  include/asm-i386/delay.h                        2
-rw-r--r--  include/asm-i386/dwarf2.h                      54
-rw-r--r--  include/asm-i386/hw_irq.h                       2
-rw-r--r--  include/asm-i386/intel_arch_perfmon.h          19
-rw-r--r--  include/asm-i386/k8.h                           1
-rw-r--r--  include/asm-i386/kdebug.h                       2
-rw-r--r--  include/asm-i386/kprobes.h                      1
-rw-r--r--  include/asm-i386/local.h                       26
-rw-r--r--  include/asm-i386/mach-default/mach_ipi.h        7
-rw-r--r--  include/asm-i386/mach-default/mach_timer.h      4
-rw-r--r--  include/asm-i386/mach-summit/mach_mpparse.h     3
-rw-r--r--  include/asm-i386/nmi.h                         28
-rw-r--r--  include/asm-i386/processor.h                    3
-rw-r--r--  include/asm-i386/system.h                       2
-rw-r--r--  include/asm-i386/thread_info.h                  7
-rw-r--r--  include/asm-i386/timer.h                       57
-rw-r--r--  include/asm-i386/timex.h                       34
-rw-r--r--  include/asm-i386/tsc.h                         49
-rw-r--r--  include/asm-i386/unwind.h                      98
22 files changed, 291 insertions(+), 123 deletions(-)
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
index d79e9ee10fd7..c61bd1a17f37 100644
--- a/include/asm-i386/alternative.h
+++ b/include/asm-i386/alternative.h
@@ -5,6 +5,8 @@
 
 #include <asm/types.h>
 
+#include <linux/types.h>
+
 struct alt_instr {
 	u8 *instr; 		/* original instruction */
 	u8 *replacement;
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index 1d8362cb2c5d..2c1e371cebb6 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -111,24 +111,12 @@ extern void init_apic_mappings (void);
 extern void smp_local_timer_interrupt (struct pt_regs * regs);
 extern void setup_boot_APIC_clock (void);
 extern void setup_secondary_APIC_clock (void);
-extern void setup_apic_nmi_watchdog (void);
-extern int reserve_lapic_nmi(void);
-extern void release_lapic_nmi(void);
-extern void disable_timer_nmi_watchdog(void);
-extern void enable_timer_nmi_watchdog(void);
-extern void nmi_watchdog_tick (struct pt_regs * regs);
 extern int APIC_init_uniprocessor (void);
 extern void disable_APIC_timer(void);
 extern void enable_APIC_timer(void);
 
 extern void enable_NMI_through_LVT0 (void * dummy);
 
-extern unsigned int nmi_watchdog;
-#define NMI_NONE	0
-#define NMI_IO_APIC	1
-#define NMI_LOCAL_APIC	2
-#define NMI_INVALID	3
-
 extern int disable_timer_pin_1;
 
 void smp_send_timer_broadcast_ipi(struct pt_regs *regs);
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index 3ecedbafa8ce..d314ebb3d59e 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -72,6 +72,7 @@
 #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
 #define X86_FEATURE_UP		(3*32+ 9) /* smp kernel running on up */
 #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
+#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-i386/delay.h b/include/asm-i386/delay.h
index 456db8501c09..b1c7650dc7b9 100644
--- a/include/asm-i386/delay.h
+++ b/include/asm-i386/delay.h
@@ -23,4 +23,6 @@ extern void __delay(unsigned long loops);
 	((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
 	__ndelay(n))
 
+void use_tsc_delay(void);
+
 #endif /* defined(_I386_DELAY_H) */
diff --git a/include/asm-i386/dwarf2.h b/include/asm-i386/dwarf2.h
new file mode 100644
index 000000000000..2280f6272f80
--- /dev/null
+++ b/include/asm-i386/dwarf2.h
@@ -0,0 +1,54 @@
+#ifndef _DWARF2_H
+#define _DWARF2_H
+
+#include <linux/config.h>
+
+#ifndef __ASSEMBLY__
+#warning "asm/dwarf2.h should be only included in pure assembly files"
+#endif
+
+/*
+   Macros for dwarf2 CFI unwind table entries.
+   See "as.info" for details on these pseudo ops. Unfortunately
+   they are only supported in very new binutils, so define them
+   away for older version.
+ */
+
+#ifdef CONFIG_UNWIND_INFO
+
+#define CFI_STARTPROC .cfi_startproc
+#define CFI_ENDPROC .cfi_endproc
+#define CFI_DEF_CFA .cfi_def_cfa
+#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
+#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
+#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
+#define CFI_OFFSET .cfi_offset
+#define CFI_REL_OFFSET .cfi_rel_offset
+#define CFI_REGISTER .cfi_register
+#define CFI_RESTORE .cfi_restore
+#define CFI_REMEMBER_STATE .cfi_remember_state
+#define CFI_RESTORE_STATE .cfi_restore_state
+
+#else
+
+/* Due to the structure of pre-exisiting code, don't use assembler line
+   comment character # to ignore the arguments. Instead, use a dummy macro. */
+.macro ignore a=0, b=0, c=0, d=0
+.endm
+
+#define CFI_STARTPROC ignore
+#define CFI_ENDPROC ignore
+#define CFI_DEF_CFA ignore
+#define CFI_DEF_CFA_REGISTER ignore
+#define CFI_DEF_CFA_OFFSET ignore
+#define CFI_ADJUST_CFA_OFFSET ignore
+#define CFI_OFFSET ignore
+#define CFI_REL_OFFSET ignore
+#define CFI_REGISTER ignore
+#define CFI_RESTORE ignore
+#define CFI_REMEMBER_STATE ignore
+#define CFI_RESTORE_STATE ignore
+
+#endif
+
+#endif
diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h
index 95d3fd090298..a4c0a5a9ffd8 100644
--- a/include/asm-i386/hw_irq.h
+++ b/include/asm-i386/hw_irq.h
@@ -19,6 +19,8 @@
 
 struct hw_interrupt_type;
 
+#define NMI_VECTOR		0x02
+
 /*
  * Various low-level irq details needed by irq.c, process.c,
  * time.c, io_apic.c and smp.c
diff --git a/include/asm-i386/intel_arch_perfmon.h b/include/asm-i386/intel_arch_perfmon.h
new file mode 100644
index 000000000000..134ea9cc5283
--- /dev/null
+++ b/include/asm-i386/intel_arch_perfmon.h
@@ -0,0 +1,19 @@
+#ifndef X86_INTEL_ARCH_PERFMON_H
+#define X86_INTEL_ARCH_PERFMON_H 1
+
+#define MSR_ARCH_PERFMON_PERFCTR0		0xc1
+#define MSR_ARCH_PERFMON_PERFCTR1		0xc2
+
+#define MSR_ARCH_PERFMON_EVENTSEL0		0x186
+#define MSR_ARCH_PERFMON_EVENTSEL1		0x187
+
+#define ARCH_PERFMON_EVENTSEL0_ENABLE	(1 << 22)
+#define ARCH_PERFMON_EVENTSEL_INT	(1 << 20)
+#define ARCH_PERFMON_EVENTSEL_OS	(1 << 17)
+#define ARCH_PERFMON_EVENTSEL_USR	(1 << 16)
+
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL	(0x3c)
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK	(0x00 << 8)
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT	(1 << 0)
+
+#endif	/* X86_INTEL_ARCH_PERFMON_H */
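
The new intel_arch_perfmon.h only carries MSR numbers and event-select bit positions; the code that actually programs them (the perfctr-based NMI watchdog, oprofile) lives elsewhere in this series. As a rough, userspace-only sketch of how the bits compose for the architectural "unhalted core cycles" event: the constants below are copied from the header, but the composition and the printed value are illustrative assumptions, not code from this patch.

/*
 * Hypothetical sketch: build an event-select word for the architectural
 * "unhalted core cycles" event from the bit definitions above.  How the
 * kernel writes MSR_ARCH_PERFMON_EVENTSEL0 is not shown in this diff.
 */
#include <stdio.h>

#define ARCH_PERFMON_EVENTSEL0_ENABLE			(1 << 22)
#define ARCH_PERFMON_EVENTSEL_INT			(1 << 20)
#define ARCH_PERFMON_EVENTSEL_OS			(1 << 17)
#define ARCH_PERFMON_EVENTSEL_USR			(1 << 16)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		(0x3c)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)

int main(void)
{
	unsigned int evntsel = ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
			     | ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
			     | ARCH_PERFMON_EVENTSEL_USR
			     | ARCH_PERFMON_EVENTSEL_OS
			     | ARCH_PERFMON_EVENTSEL_INT
			     | ARCH_PERFMON_EVENTSEL0_ENABLE;

	printf("EVENTSEL0 = 0x%08x\n", evntsel);	/* prints 0x0053003c */
	return 0;
}
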
diff --git a/include/asm-i386/k8.h b/include/asm-i386/k8.h
new file mode 100644
index 000000000000..dfd88a6e6040
--- /dev/null
+++ b/include/asm-i386/k8.h
@@ -0,0 +1 @@
+#include <asm-x86_64/k8.h>
diff --git a/include/asm-i386/kdebug.h b/include/asm-i386/kdebug.h
index 96d0828ce096..d18cdb9fc9a6 100644
--- a/include/asm-i386/kdebug.h
+++ b/include/asm-i386/kdebug.h
@@ -19,6 +19,8 @@ struct die_args {
 
 extern int register_die_notifier(struct notifier_block *);
 extern int unregister_die_notifier(struct notifier_block *);
+extern int register_page_fault_notifier(struct notifier_block *);
+extern int unregister_page_fault_notifier(struct notifier_block *);
 extern struct atomic_notifier_head i386die_chain;
 
 
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
index 57d157c5cf89..0730a20f6db8 100644
--- a/include/asm-i386/kprobes.h
+++ b/include/asm-i386/kprobes.h
@@ -44,6 +44,7 @@ typedef u8 kprobe_opcode_t;
 
 #define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)pentry
 #define ARCH_SUPPORTS_KRETPROBES
+#define ARCH_INACTIVE_KPROBE_COUNT 0
 
 void arch_remove_kprobe(struct kprobe *p);
 void kretprobe_trampoline(void);
diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h
index e67fa08260fe..3b4998c51d08 100644
--- a/include/asm-i386/local.h
+++ b/include/asm-i386/local.h
@@ -55,12 +55,26 @@ static __inline__ void local_sub(long i, local_t *v)
  * much more efficient than these naive implementations. Note they take
  * a variable, not an address.
  */
-#define cpu_local_read(v)	local_read(&__get_cpu_var(v))
-#define cpu_local_set(v, i)	local_set(&__get_cpu_var(v), (i))
-#define cpu_local_inc(v)	local_inc(&__get_cpu_var(v))
-#define cpu_local_dec(v)	local_dec(&__get_cpu_var(v))
-#define cpu_local_add(i, v)	local_add((i), &__get_cpu_var(v))
-#define cpu_local_sub(i, v)	local_sub((i), &__get_cpu_var(v))
+
+/* Need to disable preemption for the cpu local counters otherwise we could
+   still access a variable of a previous CPU in a non atomic way. */
+#define cpu_local_wrap_v(v)		\
+	({ local_t res__;		\
+	   preempt_disable(); 		\
+	   res__ = (v);			\
+	   preempt_enable();		\
+	   res__; })
+#define cpu_local_wrap(v)		\
+	({ preempt_disable();		\
+	   v;				\
+	   preempt_enable(); })		\
+
+#define cpu_local_read(v)    cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
+#define cpu_local_set(v, i)  cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
+#define cpu_local_inc(v)     cpu_local_wrap(local_inc(&__get_cpu_var(v)))
+#define cpu_local_dec(v)     cpu_local_wrap(local_dec(&__get_cpu_var(v)))
+#define cpu_local_add(i, v)  cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
+#define cpu_local_sub(i, v)  cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
 
 #define __cpu_local_inc(v)	cpu_local_inc(v)
 #define __cpu_local_dec(v)	cpu_local_dec(v)
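
The rewritten cpu_local_*() macros rely on GNU statement expressions so the preempt_disable()/preempt_enable() pair can bracket the access while the read variant still yields a value. A minimal userspace sketch of that pattern, with preempt_disable()/preempt_enable() stubbed out and local_t simplified to a plain counter (both are assumptions for illustration, not kernel code):

#include <stdio.h>

static int preempt_count;
#define preempt_disable()	(preempt_count++)	/* stub */
#define preempt_enable()	(preempt_count--)	/* stub */

typedef struct { long counter; } local_t;		/* simplified */
#define local_read(l)		((l)->counter)

/* GNU statement expression: disable "preemption", evaluate, return value. */
#define cpu_local_wrap_v(v)		\
	({ long res__;			\
	   preempt_disable();		\
	   res__ = (v);			\
	   preempt_enable();		\
	   res__; })

int main(void)
{
	local_t stat = { .counter = 42 };
	long val = cpu_local_wrap_v(local_read(&stat));

	printf("read %ld, preempt_count back to %d\n", val, preempt_count);
	return 0;
}
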
diff --git a/include/asm-i386/mach-default/mach_ipi.h b/include/asm-i386/mach-default/mach_ipi.h
index a1d0072e36bc..0dba244c86db 100644
--- a/include/asm-i386/mach-default/mach_ipi.h
+++ b/include/asm-i386/mach-default/mach_ipi.h
@@ -1,6 +1,9 @@
 #ifndef __ASM_MACH_IPI_H
 #define __ASM_MACH_IPI_H
 
+/* Avoid include hell */
+#define NMI_VECTOR 0x02
+
 void send_IPI_mask_bitmask(cpumask_t mask, int vector);
 void __send_IPI_shortcut(unsigned int shortcut, int vector);
 
@@ -13,7 +16,7 @@ static inline void send_IPI_mask(cpumask_t mask, int vector)
 
 static inline void __local_send_IPI_allbutself(int vector)
 {
-	if (no_broadcast) {
+	if (no_broadcast || vector == NMI_VECTOR) {
 		cpumask_t mask = cpu_online_map;
 
 		cpu_clear(smp_processor_id(), mask);
@@ -24,7 +27,7 @@ static inline void __local_send_IPI_allbutself(int vector)
 
 static inline void __local_send_IPI_all(int vector)
 {
-	if (no_broadcast)
+	if (no_broadcast || vector == NMI_VECTOR)
 		send_IPI_mask(cpu_online_map, vector);
 	else
 		__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
diff --git a/include/asm-i386/mach-default/mach_timer.h b/include/asm-i386/mach-default/mach_timer.h
index 4b9703bb0288..807992fd4171 100644
--- a/include/asm-i386/mach-default/mach_timer.h
+++ b/include/asm-i386/mach-default/mach_timer.h
@@ -15,7 +15,9 @@
 #ifndef _MACH_TIMER_H
 #define _MACH_TIMER_H
 
-#define CALIBRATE_LATCH	(5 * LATCH)
+#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
+#define CALIBRATE_LATCH	\
+	((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
 
 static inline void mach_prepare_counter(void)
 {
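
The new CALIBRATE_LATCH converts a calibration window given in milliseconds into PIT ticks, rounding to the nearest tick; the old 5 * LATCH value was five timer ticks, so its length depended on HZ. A small sketch of the arithmetic, assuming the standard PC value CLOCK_TICK_RATE = 1193182 Hz (the i8254 input clock; the AMD Elan case in timex.h differs):

#include <stdio.h>

#define CLOCK_TICK_RATE		1193182		/* assumed: standard PC PIT clock */
#define CALIBRATE_TIME_MSEC	30
#define CALIBRATE_LATCH	\
	((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)

int main(void)
{
	/* 1193182 * 30 / 1000, rounded: about 35795 PIT ticks == 30 ms */
	printf("CALIBRATE_LATCH = %d ticks\n", CALIBRATE_LATCH);
	return 0;
}
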
diff --git a/include/asm-i386/mach-summit/mach_mpparse.h b/include/asm-i386/mach-summit/mach_mpparse.h
index 1cce2b924a80..94268399170d 100644
--- a/include/asm-i386/mach-summit/mach_mpparse.h
+++ b/include/asm-i386/mach-summit/mach_mpparse.h
@@ -2,6 +2,7 @@
 #define __ASM_MACH_MPPARSE_H
 
 #include <mach_apic.h>
+#include <asm/tsc.h>
 
 extern int use_cyclone;
 
@@ -29,6 +30,7 @@ static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
 	    (!strncmp(productid, "VIGIL SMP", 9)
 	     || !strncmp(productid, "EXA", 3)
 	     || !strncmp(productid, "RUTHLESS SMP", 12))){
+		mark_tsc_unstable();
 		use_cyclone = 1; /*enable cyclone-timer*/
 		setup_summit();
 		return 1;
@@ -42,6 +44,7 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	if (!strncmp(oem_id, "IBM", 3) &&
 	    (!strncmp(oem_table_id, "SERVIGIL", 8)
 	     || !strncmp(oem_table_id, "EXA", 3))){
+		mark_tsc_unstable();
 		use_cyclone = 1; /*enable cyclone-timer*/
 		setup_summit();
 		return 1;
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
index 21f16638fc61..67d994799999 100644
--- a/include/asm-i386/nmi.h
+++ b/include/asm-i386/nmi.h
@@ -5,24 +5,38 @@
 #define ASM_NMI_H
 
 #include <linux/pm.h>
 
 struct pt_regs;
 
 typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
 
 /**
  * set_nmi_callback
  *
  * Set a handler for an NMI. Only one handler may be
  * set. Return 1 if the NMI was handled.
  */
 void set_nmi_callback(nmi_callback_t callback);
 
 /**
  * unset_nmi_callback
  *
  * Remove the handler previously set.
  */
 void unset_nmi_callback(void);
 
+extern void setup_apic_nmi_watchdog (void);
+extern int reserve_lapic_nmi(void);
+extern void release_lapic_nmi(void);
+extern void disable_timer_nmi_watchdog(void);
+extern void enable_timer_nmi_watchdog(void);
+extern void nmi_watchdog_tick (struct pt_regs * regs);
+
+extern unsigned int nmi_watchdog;
+#define NMI_DEFAULT	-1
+#define NMI_NONE	0
+#define NMI_IO_APIC	1
+#define NMI_LOCAL_APIC	2
+#define NMI_INVALID	3
+
 #endif /* ASM_NMI_H */
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 0c83cf12eec9..55ea992da329 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -112,6 +112,7 @@ extern char ignore_fpu_irq;
 extern void identify_cpu(struct cpuinfo_x86 *);
 extern void print_cpu_info(struct cpuinfo_x86 *);
 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
+extern unsigned short num_cache_leaves;
 
 #ifdef CONFIG_X86_HT
 extern void detect_ht(struct cpuinfo_x86 *c);
@@ -554,7 +555,7 @@ extern void prepare_to_copy(struct task_struct *tsk);
 extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 
 extern unsigned long thread_saved_pc(struct task_struct *tsk);
-void show_trace(struct task_struct *task, unsigned long *stack);
+void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack);
 
 unsigned long get_wchan(struct task_struct *p);
 
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 0249f912a29c..cab0180567f9 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -427,7 +427,7 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
  * does not enforce ordering, since there is no data dependency between
  * the read of "a" and the read of "b". Therefore, on some CPUs, such
  * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like thiswhere there are no data dependencies.
+ * in cases like this where there are no data dependencies.
 **/
 
 #define read_barrier_depends()	do { } while(0)
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 8420ed12491e..fdbc7f422ea5 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -140,8 +140,7 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SECCOMP		8	/* secure computing */
 #define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal() */
-#define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_MEMDIE		17
+#define TIF_MEMDIE		16
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
@@ -153,7 +152,6 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
-#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK \
@@ -170,6 +168,9 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
  * have to worry about atomic accesses.
  */
 #define TS_USEDFPU		0x0001	/* FPU was used by this task this quantum (SMP) */
+#define TS_POLLING		0x0002	/* True if in idle loop and not sleeping */
+
+#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
 
 #endif /* __KERNEL__ */
 
diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h
index aed16437479d..d0ebd05f8516 100644
--- a/include/asm-i386/timer.h
+++ b/include/asm-i386/timer.h
@@ -3,68 +3,11 @@
 #include <linux/init.h>
 #include <linux/pm.h>
 
-/**
- * struct timer_ops - used to define a timer source
- *
- * @name: name of the timer.
- * @init: Probes and initializes the timer. Takes clock= override
- *        string as an argument. Returns 0 on success, anything else
- *        on failure.
- * @mark_offset: called by the timer interrupt.
- * @get_offset:  called by gettimeofday(). Returns the number of microseconds
- *               since the last timer interupt.
- * @monotonic_clock: returns the number of nanoseconds since the init of the
- *                   timer.
- * @delay: delays this many clock cycles.
- */
-struct timer_opts {
-	char* name;
-	void (*mark_offset)(void);
-	unsigned long (*get_offset)(void);
-	unsigned long long (*monotonic_clock)(void);
-	void (*delay)(unsigned long);
-	unsigned long (*read_timer)(void);
-	int (*suspend)(pm_message_t state);
-	int (*resume)(void);
-};
-
-struct init_timer_opts {
-	int (*init)(char *override);
-	struct timer_opts *opts;
-};
-
 #define TICK_SIZE (tick_nsec / 1000)
-
-extern struct timer_opts* __init select_timer(void);
-extern void clock_fallback(void);
 void setup_pit_timer(void);
-
 /* Modifiers for buggy PIT handling */
-
 extern int pit_latch_buggy;
-
-extern struct timer_opts *cur_timer;
 extern int timer_ack;
-
-/* list of externed timers */
-extern struct timer_opts timer_none;
-extern struct timer_opts timer_pit;
-extern struct init_timer_opts timer_pit_init;
-extern struct init_timer_opts timer_tsc_init;
-#ifdef CONFIG_X86_CYCLONE_TIMER
-extern struct init_timer_opts timer_cyclone_init;
-#endif
-
-extern unsigned long calibrate_tsc(void);
-extern unsigned long read_timer_tsc(void);
-extern void init_cpu_khz(void);
 extern int recalibrate_cpu_khz(void);
-#ifdef CONFIG_HPET_TIMER
-extern struct init_timer_opts timer_hpet_init;
-extern unsigned long calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr);
-#endif
 
-#ifdef CONFIG_X86_PM_TIMER
-extern struct init_timer_opts timer_pmtmr_init;
-#endif
 #endif
diff --git a/include/asm-i386/timex.h b/include/asm-i386/timex.h
index d434984303ca..3666044409f0 100644
--- a/include/asm-i386/timex.h
+++ b/include/asm-i386/timex.h
@@ -7,6 +7,7 @@
 #define _ASMi386_TIMEX_H
 
 #include <asm/processor.h>
+#include <asm/tsc.h>
 
 #ifdef CONFIG_X86_ELAN
 # define CLOCK_TICK_RATE 1189200 /* AMD Elan has different frequency! */
@@ -15,39 +16,6 @@
 #endif
 
 
-/*
- * Standard way to access the cycle counter on i586+ CPUs.
- * Currently only used on SMP.
- *
- * If you really have a SMP machine with i486 chips or older,
- * compile for that, and this will just always return zero.
- * That's ok, it just means that the nicer scheduling heuristics
- * won't work for you.
- *
- * We only use the low 32 bits, and we'd simply better make sure
- * that we reschedule before that wraps. Scheduling at least every
- * four billion cycles just basically sounds like a good idea,
- * regardless of how fast the machine is.
- */
-typedef unsigned long long cycles_t;
-
-static inline cycles_t get_cycles (void)
-{
-	unsigned long long ret=0;
-
-#ifndef CONFIG_X86_TSC
-	if (!cpu_has_tsc)
-		return 0;
-#endif
-
-#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
-	rdtscll(ret);
-#endif
-	return ret;
-}
-
-extern unsigned int cpu_khz;
-
 extern int read_current_timer(unsigned long *timer_value);
 #define ARCH_HAS_READ_CURRENT_TIMER	1
 
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h
new file mode 100644
index 000000000000..97b828ce31e0
--- /dev/null
+++ b/include/asm-i386/tsc.h
@@ -0,0 +1,49 @@
+/*
+ * linux/include/asm-i386/tsc.h
+ *
+ * i386 TSC related functions
+ */
+#ifndef _ASM_i386_TSC_H
+#define _ASM_i386_TSC_H
+
+#include <linux/config.h>
+#include <asm/processor.h>
+
+/*
+ * Standard way to access the cycle counter on i586+ CPUs.
+ * Currently only used on SMP.
+ *
+ * If you really have a SMP machine with i486 chips or older,
+ * compile for that, and this will just always return zero.
+ * That's ok, it just means that the nicer scheduling heuristics
+ * won't work for you.
+ *
+ * We only use the low 32 bits, and we'd simply better make sure
+ * that we reschedule before that wraps. Scheduling at least every
+ * four billion cycles just basically sounds like a good idea,
+ * regardless of how fast the machine is.
+ */
+typedef unsigned long long cycles_t;
+
+extern unsigned int cpu_khz;
+extern unsigned int tsc_khz;
+
+static inline cycles_t get_cycles(void)
+{
+	unsigned long long ret = 0;
+
+#ifndef CONFIG_X86_TSC
+	if (!cpu_has_tsc)
+		return 0;
+#endif
+
+#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
+	rdtscll(ret);
+#endif
+	return ret;
+}
+
+extern void tsc_init(void);
+extern void mark_tsc_unstable(void);
+
+#endif
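
get_cycles() above is kernel code built on rdtscll() and the CONFIG_X86_TSC / cpu_has_tsc checks. For illustration only, a userspace analogue that reads the TSC directly with the rdtsc instruction might look like this (x86-only, GCC inline asm; none of the kernel's feature checks are reproduced):

#include <stdio.h>

typedef unsigned long long cycles_t;

/* Userspace stand-in for get_cycles(): read the 64-bit TSC via rdtsc. */
static inline cycles_t get_cycles(void)
{
	unsigned int lo, hi;

	__asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
	return ((cycles_t)hi << 32) | lo;
}

int main(void)
{
	cycles_t start = get_cycles();

	for (volatile int i = 0; i < 1000000; i++)
		;	/* busy loop to have something to measure */

	printf("loop took %llu cycles\n", get_cycles() - start);
	return 0;
}
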
diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h
new file mode 100644
index 000000000000..d480f2e38215
--- /dev/null
+++ b/include/asm-i386/unwind.h
@@ -0,0 +1,98 @@
+#ifndef _ASM_I386_UNWIND_H
+#define _ASM_I386_UNWIND_H
+
+/*
+ * Copyright (C) 2002-2006 Novell, Inc.
+ *	Jan Beulich <jbeulich@novell.com>
+ * This code is released under version 2 of the GNU GPL.
+ */
+
+#ifdef CONFIG_STACK_UNWIND
+
+#include <linux/sched.h>
+#include <asm/fixmap.h>
+#include <asm/ptrace.h>
+#include <asm/uaccess.h>
+
+struct unwind_frame_info
+{
+	struct pt_regs regs;
+	struct task_struct *task;
+};
+
+#define UNW_PC(frame)        (frame)->regs.eip
+#define UNW_SP(frame)        (frame)->regs.esp
+#ifdef CONFIG_FRAME_POINTER
+#define UNW_FP(frame)        (frame)->regs.ebp
+#define FRAME_RETADDR_OFFSET 4
+#define FRAME_LINK_OFFSET    0
+#define STACK_BOTTOM(tsk)    STACK_LIMIT((tsk)->thread.esp0)
+#define STACK_TOP(tsk)       ((tsk)->thread.esp0)
+#endif
+#define STACK_LIMIT(ptr)     (((ptr) - 1) & ~(THREAD_SIZE - 1))
+
+#define UNW_REGISTER_INFO \
+	PTREGS_INFO(eax), \
+	PTREGS_INFO(ecx), \
+	PTREGS_INFO(edx), \
+	PTREGS_INFO(ebx), \
+	PTREGS_INFO(esp), \
+	PTREGS_INFO(ebp), \
+	PTREGS_INFO(esi), \
+	PTREGS_INFO(edi), \
+	PTREGS_INFO(eip)
+
+static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
+                                            /*const*/ struct pt_regs *regs)
+{
+	if (user_mode_vm(regs))
+		info->regs = *regs;
+	else {
+		memcpy(&info->regs, regs, offsetof(struct pt_regs, esp));
+		info->regs.esp = (unsigned long)&regs->esp;
+		info->regs.xss = __KERNEL_DS;
+	}
+}
+
+static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
+{
+	memset(&info->regs, 0, sizeof(info->regs));
+	info->regs.eip = info->task->thread.eip;
+	info->regs.xcs = __KERNEL_CS;
+	__get_user(info->regs.ebp, (long *)info->task->thread.esp);
+	info->regs.esp = info->task->thread.esp;
+	info->regs.xss = __KERNEL_DS;
+	info->regs.xds = __USER_DS;
+	info->regs.xes = __USER_DS;
+}
+
+extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *,
+				asmlinkage int (*callback)(struct unwind_frame_info *,
+				                           void *arg),
+				void *arg);
+
+static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
+{
+#if 0 /* This can only work when selector register and EFLAGS saves/restores
+	 are properly annotated (and tracked in UNW_REGISTER_INFO). */
+	return user_mode_vm(&info->regs);
+#else
+	return info->regs.eip < PAGE_OFFSET
+	       || (info->regs.eip >= __fix_to_virt(FIX_VSYSCALL)
+	           && info->regs.eip < __fix_to_virt(FIX_VSYSCALL) + PAGE_SIZE)
+	       || info->regs.esp < PAGE_OFFSET;
+#endif
+}
+
+#else
+
+#define UNW_PC(frame) ((void)(frame), 0)
+
+static inline int arch_unw_user_mode(const void *info)
+{
+	return 0;
+}
+
+#endif
+
+#endif /* _ASM_I386_UNWIND_H */
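
arch_unw_user_mode() falls back to a simple address-range test: an eip or esp below PAGE_OFFSET (or an eip inside the vsyscall fixmap page) is treated as user mode. A userspace sketch of just the PAGE_OFFSET part, assuming the default i386 3G/1G split where PAGE_OFFSET is 0xc0000000 (it is configurable) and omitting the vsyscall window check:

#include <stdio.h>

#define PAGE_OFFSET	0xc0000000UL	/* assumed default 3G/1G split */

/* Simplified version of the fallback check in arch_unw_user_mode(). */
static int unw_user_mode(unsigned long eip, unsigned long esp)
{
	return eip < PAGE_OFFSET || esp < PAGE_OFFSET;
}

int main(void)
{
	/* typical user-space text/stack addresses -> user mode (1) */
	printf("eip=0x08048000: %d\n", unw_user_mode(0x08048000UL, 0xbfff0000UL));
	/* kernel text and stack addresses -> kernel mode (0) */
	printf("eip=0xc0100000: %d\n", unw_user_mode(0xc0100000UL, 0xc0200000UL));
	return 0;
}
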