Diffstat (limited to 'include')
-rw-r--r--  include/asm-i386/alternative.h | 2
-rw-r--r--  include/asm-i386/apic.h | 12
-rw-r--r--  include/asm-i386/cpufeature.h | 1
-rw-r--r--  include/asm-i386/dwarf2.h | 54
-rw-r--r--  include/asm-i386/hw_irq.h | 2
-rw-r--r--  include/asm-i386/intel_arch_perfmon.h | 19
-rw-r--r--  include/asm-i386/k8.h | 1
-rw-r--r--  include/asm-i386/local.h | 26
-rw-r--r--  include/asm-i386/mach-default/mach_ipi.h | 7
-rw-r--r--  include/asm-i386/nmi.h | 28
-rw-r--r--  include/asm-i386/processor.h | 3
-rw-r--r--  include/asm-i386/thread_info.h | 7
-rw-r--r--  include/asm-i386/unwind.h | 98
-rw-r--r--  include/asm-ia64/thread_info.h | 5
-rw-r--r--  include/asm-x86_64/alternative.h | 146
-rw-r--r--  include/asm-x86_64/apic.h | 26
-rw-r--r--  include/asm-x86_64/atomic.h | 42
-rw-r--r--  include/asm-x86_64/bitops.h | 7
-rw-r--r--  include/asm-x86_64/calgary.h | 66
-rw-r--r--  include/asm-x86_64/cpufeature.h | 3
-rw-r--r--  include/asm-x86_64/dma-mapping.h | 17
-rw-r--r--  include/asm-x86_64/dma.h | 2
-rw-r--r--  include/asm-x86_64/gart-mapping.h | 16
-rw-r--r--  include/asm-x86_64/hpet.h | 2
-rw-r--r--  include/asm-x86_64/hw_irq.h | 2
-rw-r--r--  include/asm-x86_64/ia32_unistd.h | 308
-rw-r--r--  include/asm-x86_64/intel_arch_perfmon.h | 19
-rw-r--r--  include/asm-x86_64/k8.h | 14
-rw-r--r--  include/asm-x86_64/local.h | 26
-rw-r--r--  include/asm-x86_64/mce.h | 13
-rw-r--r--  include/asm-x86_64/mutex.h | 4
-rw-r--r--  include/asm-x86_64/nmi.h | 30
-rw-r--r--  include/asm-x86_64/pci.h | 4
-rw-r--r--  include/asm-x86_64/pgtable.h | 6
-rw-r--r--  include/asm-x86_64/processor.h | 5
-rw-r--r--  include/asm-x86_64/proto.h | 15
-rw-r--r--  include/asm-x86_64/rwlock.h | 8
-rw-r--r--  include/asm-x86_64/semaphore.h | 8
-rw-r--r--  include/asm-x86_64/smp.h | 2
-rw-r--r--  include/asm-x86_64/spinlock.h | 10
-rw-r--r--  include/asm-x86_64/string.h | 3
-rw-r--r--  include/asm-x86_64/system.h | 86
-rw-r--r--  include/asm-x86_64/tce.h | 47
-rw-r--r--  include/asm-x86_64/thread_info.h | 19
-rw-r--r--  include/asm-x86_64/topology.h | 8
-rw-r--r--  include/asm-x86_64/unwind.h | 106
-rw-r--r--  include/linux/bitmap.h | 5
-rw-r--r--  include/linux/compat.h | 2
-rw-r--r--  include/linux/kernel.h | 7
-rw-r--r--  include/linux/module.h | 3
-rw-r--r--  include/linux/sysctl.h | 2
-rw-r--r--  include/linux/time.h | 11
-rw-r--r--  include/linux/unwind.h | 127
53 files changed, 953 insertions, 539 deletions
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
index d79e9ee10fd7..c61bd1a17f37 100644
--- a/include/asm-i386/alternative.h
+++ b/include/asm-i386/alternative.h
@@ -5,6 +5,8 @@
 
 #include <asm/types.h>
 
+#include <linux/types.h>
+
 struct alt_instr {
 	u8 *instr;		/* original instruction */
 	u8 *replacement;
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index 1d8362cb2c5d..2c1e371cebb6 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -111,24 +111,12 @@ extern void init_apic_mappings (void);
 extern void smp_local_timer_interrupt (struct pt_regs * regs);
 extern void setup_boot_APIC_clock (void);
 extern void setup_secondary_APIC_clock (void);
-extern void setup_apic_nmi_watchdog (void);
-extern int reserve_lapic_nmi(void);
-extern void release_lapic_nmi(void);
-extern void disable_timer_nmi_watchdog(void);
-extern void enable_timer_nmi_watchdog(void);
-extern void nmi_watchdog_tick (struct pt_regs * regs);
 extern int APIC_init_uniprocessor (void);
 extern void disable_APIC_timer(void);
 extern void enable_APIC_timer(void);
 
 extern void enable_NMI_through_LVT0 (void * dummy);
 
-extern unsigned int nmi_watchdog;
-#define NMI_NONE	0
-#define NMI_IO_APIC	1
-#define NMI_LOCAL_APIC	2
-#define NMI_INVALID	3
-
 extern int disable_timer_pin_1;
 
 void smp_send_timer_broadcast_ipi(struct pt_regs *regs);
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index 3ecedbafa8ce..d314ebb3d59e 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -72,6 +72,7 @@
 #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
 #define X86_FEATURE_UP		(3*32+ 9) /* smp kernel running on up */
 #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
+#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-i386/dwarf2.h b/include/asm-i386/dwarf2.h
new file mode 100644
index 000000000000..2280f6272f80
--- /dev/null
+++ b/include/asm-i386/dwarf2.h
@@ -0,0 +1,54 @@
1#ifndef _DWARF2_H
2#define _DWARF2_H
3
4#include <linux/config.h>
5
6#ifndef __ASSEMBLY__
7#warning "asm/dwarf2.h should be only included in pure assembly files"
8#endif
9
10/*
11 Macros for dwarf2 CFI unwind table entries.
12 See "as.info" for details on these pseudo ops. Unfortunately
13 they are only supported in very new binutils, so define them
14 away for older version.
15 */
16
17#ifdef CONFIG_UNWIND_INFO
18
19#define CFI_STARTPROC .cfi_startproc
20#define CFI_ENDPROC .cfi_endproc
21#define CFI_DEF_CFA .cfi_def_cfa
22#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
23#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
24#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
25#define CFI_OFFSET .cfi_offset
26#define CFI_REL_OFFSET .cfi_rel_offset
27#define CFI_REGISTER .cfi_register
28#define CFI_RESTORE .cfi_restore
29#define CFI_REMEMBER_STATE .cfi_remember_state
30#define CFI_RESTORE_STATE .cfi_restore_state
31
32#else
33
34/* Due to the structure of pre-exisiting code, don't use assembler line
35 comment character # to ignore the arguments. Instead, use a dummy macro. */
36.macro ignore a=0, b=0, c=0, d=0
37.endm
38
39#define CFI_STARTPROC ignore
40#define CFI_ENDPROC ignore
41#define CFI_DEF_CFA ignore
42#define CFI_DEF_CFA_REGISTER ignore
43#define CFI_DEF_CFA_OFFSET ignore
44#define CFI_ADJUST_CFA_OFFSET ignore
45#define CFI_OFFSET ignore
46#define CFI_REL_OFFSET ignore
47#define CFI_REGISTER ignore
48#define CFI_RESTORE ignore
49#define CFI_REMEMBER_STATE ignore
50#define CFI_RESTORE_STATE ignore
51
52#endif
53
54#endif
diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h
index 95d3fd090298..a4c0a5a9ffd8 100644
--- a/include/asm-i386/hw_irq.h
+++ b/include/asm-i386/hw_irq.h
@@ -19,6 +19,8 @@
 
 struct hw_interrupt_type;
 
+#define NMI_VECTOR		0x02
+
 /*
  * Various low-level irq details needed by irq.c, process.c,
  * time.c, io_apic.c and smp.c
diff --git a/include/asm-i386/intel_arch_perfmon.h b/include/asm-i386/intel_arch_perfmon.h
new file mode 100644
index 000000000000..134ea9cc5283
--- /dev/null
+++ b/include/asm-i386/intel_arch_perfmon.h
@@ -0,0 +1,19 @@
1#ifndef X86_INTEL_ARCH_PERFMON_H
2#define X86_INTEL_ARCH_PERFMON_H 1
3
4#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
5#define MSR_ARCH_PERFMON_PERFCTR1 0xc2
6
7#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
8#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
9
10#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22)
11#define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
12#define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
13#define ARCH_PERFMON_EVENTSEL_USR (1 << 16)
14
15#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c)
16#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
17#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0)
18
19#endif /* X86_INTEL_ARCH_PERFMON_H */
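
The MSR and event-select constants above are what a perfctr-based user such as the NMI watchdog programs into counter 0. As a rough sketch (not part of this patch; it assumes the wrmsr() helper from <asm/msr.h>, and real users would first check X86_FEATURE_ARCH_PERFMON and the CPUID 0xa event-availability bits), counting unhalted core cycles with an interrupt on overflow looks roughly like this:

	/* Sketch only: program architectural event 0x3c ("unhalted core
	 * cycles") on counter 0. */
	static void arch_perfmon_count_cycles(void)
	{
		unsigned int evntsel;

		wrmsr(MSR_ARCH_PERFMON_PERFCTR0, 0, 0);		/* clear counter */

		evntsel = ARCH_PERFMON_EVENTSEL_INT		/* irq on overflow */
			| ARCH_PERFMON_EVENTSEL_OS		/* count kernel mode */
			| ARCH_PERFMON_EVENTSEL_USR		/* count user mode */
			| ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
			| ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK;

		wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
		evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);	/* start counting */
	}
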
diff --git a/include/asm-i386/k8.h b/include/asm-i386/k8.h
new file mode 100644
index 000000000000..dfd88a6e6040
--- /dev/null
+++ b/include/asm-i386/k8.h
@@ -0,0 +1 @@
#include <asm-x86_64/k8.h>
diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h
index e67fa08260fe..3b4998c51d08 100644
--- a/include/asm-i386/local.h
+++ b/include/asm-i386/local.h
@@ -55,12 +55,26 @@ static __inline__ void local_sub(long i, local_t *v)
  * much more efficient than these naive implementations. Note they take
  * a variable, not an address.
  */
-#define cpu_local_read(v)	local_read(&__get_cpu_var(v))
-#define cpu_local_set(v, i)	local_set(&__get_cpu_var(v), (i))
-#define cpu_local_inc(v)	local_inc(&__get_cpu_var(v))
-#define cpu_local_dec(v)	local_dec(&__get_cpu_var(v))
-#define cpu_local_add(i, v)	local_add((i), &__get_cpu_var(v))
-#define cpu_local_sub(i, v)	local_sub((i), &__get_cpu_var(v))
+
+/* Need to disable preemption for the cpu local counters otherwise we could
+   still access a variable of a previous CPU in a non atomic way. */
+#define cpu_local_wrap_v(v)		\
+	({ local_t res__;		\
+	preempt_disable(); 		\
+	res__ = (v);			\
+	preempt_enable();		\
+	res__; })
+#define cpu_local_wrap(v)		\
+	({ preempt_disable();		\
+	v;				\
+	preempt_enable(); })		\
+
+#define cpu_local_read(v)    cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
+#define cpu_local_set(v, i)  cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
+#define cpu_local_inc(v)     cpu_local_wrap(local_inc(&__get_cpu_var(v)))
+#define cpu_local_dec(v)     cpu_local_wrap(local_dec(&__get_cpu_var(v)))
+#define cpu_local_add(i, v)  cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
+#define cpu_local_sub(i, v)  cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
 
 #define __cpu_local_inc(v)	cpu_local_inc(v)
 #define __cpu_local_dec(v)	cpu_local_dec(v)
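
With the wrappers above, a cpu_local_* operation can no longer be preempted and migrated between looking up the per-CPU variable and updating it. A minimal usage sketch (the counter name is hypothetical; it assumes DEFINE_PER_CPU from <linux/percpu.h> and the LOCAL_INIT() initializer that accompanies local_t):

	/* Sketch only: a per-CPU event counter bumped from process context. */
	static DEFINE_PER_CPU(local_t, my_events) = LOCAL_INIT(0);

	static void note_event(void)
	{
		/* expands to preempt_disable(); local_inc(...); preempt_enable(); */
		cpu_local_inc(my_events);
	}
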
diff --git a/include/asm-i386/mach-default/mach_ipi.h b/include/asm-i386/mach-default/mach_ipi.h
index a1d0072e36bc..0dba244c86db 100644
--- a/include/asm-i386/mach-default/mach_ipi.h
+++ b/include/asm-i386/mach-default/mach_ipi.h
@@ -1,6 +1,9 @@
 #ifndef __ASM_MACH_IPI_H
 #define __ASM_MACH_IPI_H
 
+/* Avoid include hell */
+#define NMI_VECTOR 0x02
+
 void send_IPI_mask_bitmask(cpumask_t mask, int vector);
 void __send_IPI_shortcut(unsigned int shortcut, int vector);
 
@@ -13,7 +16,7 @@ static inline void send_IPI_mask(cpumask_t mask, int vector)
 
 static inline void __local_send_IPI_allbutself(int vector)
 {
-	if (no_broadcast) {
+	if (no_broadcast || vector == NMI_VECTOR) {
 		cpumask_t mask = cpu_online_map;
 
 		cpu_clear(smp_processor_id(), mask);
@@ -24,7 +27,7 @@ static inline void __local_send_IPI_allbutself(int vector)
 
 static inline void __local_send_IPI_all(int vector)
 {
-	if (no_broadcast)
+	if (no_broadcast || vector == NMI_VECTOR)
 		send_IPI_mask(cpu_online_map, vector);
 	else
 		__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
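
With the checks above, NMI_VECTOR is always sent via the cpumask path rather than the APIC shortcut broadcast. A sketch (not in the patch, wrapper name hypothetical) of how a caller would broadcast an NMI with these helpers:

	/* Sketch only: NMI all other online CPUs, e.g. before dumping state. */
	static void nmi_other_cpus(void)
	{
		__local_send_IPI_allbutself(NMI_VECTOR);
	}
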
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
index 21f16638fc61..67d994799999 100644
--- a/include/asm-i386/nmi.h
+++ b/include/asm-i386/nmi.h
@@ -5,24 +5,38 @@
 #define ASM_NMI_H
 
 #include <linux/pm.h>
 
 struct pt_regs;
 
 typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
 
 /**
  * set_nmi_callback
  *
  * Set a handler for an NMI. Only one handler may be
  * set. Return 1 if the NMI was handled.
  */
 void set_nmi_callback(nmi_callback_t callback);
 
 /**
  * unset_nmi_callback
  *
  * Remove the handler previously set.
  */
 void unset_nmi_callback(void);
 
+extern void setup_apic_nmi_watchdog (void);
+extern int reserve_lapic_nmi(void);
+extern void release_lapic_nmi(void);
+extern void disable_timer_nmi_watchdog(void);
+extern void enable_timer_nmi_watchdog(void);
+extern void nmi_watchdog_tick (struct pt_regs * regs);
+
+extern unsigned int nmi_watchdog;
+#define NMI_DEFAULT	-1
+#define NMI_NONE	0
+#define NMI_IO_APIC	1
+#define NMI_LOCAL_APIC	2
+#define NMI_INVALID	3
+
 #endif /* ASM_NMI_H */
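
The watchdog declarations now sit next to the NMI callback API they compete with. A sketch (not part of the patch; handler and init names are hypothetical, and it assumes reserve_lapic_nmi() reports contention with a negative return, as its int return type suggests) of claiming the LAPIC NMI and installing a handler:

	/* Sketch only: take over the NMI for a debugging aid. */
	static int my_nmi(struct pt_regs *regs, int cpu)
	{
		/* inspect regs, kick hardware, ... */
		return 1;			/* 1 == handled */
	}

	static int __init my_nmi_setup(void)
	{
		if (reserve_lapic_nmi() < 0)
			return -EBUSY;		/* NMI watchdog owns the LAPIC NMI */
		set_nmi_callback(my_nmi);
		return 0;
	}
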
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 0c83cf12eec9..55ea992da329 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -112,6 +112,7 @@ extern char ignore_fpu_irq;
 extern void identify_cpu(struct cpuinfo_x86 *);
 extern void print_cpu_info(struct cpuinfo_x86 *);
 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
+extern unsigned short num_cache_leaves;
 
 #ifdef CONFIG_X86_HT
 extern void detect_ht(struct cpuinfo_x86 *c);
@@ -554,7 +555,7 @@ extern void prepare_to_copy(struct task_struct *tsk);
 extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 
 extern unsigned long thread_saved_pc(struct task_struct *tsk);
-void show_trace(struct task_struct *task, unsigned long *stack);
+void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack);
 
 unsigned long get_wchan(struct task_struct *p);
 
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 8420ed12491e..fdbc7f422ea5 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -140,8 +140,7 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SECCOMP		8	/* secure computing */
 #define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal() */
-#define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_MEMDIE		17
+#define TIF_MEMDIE		16
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
@@ -153,7 +152,6 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
-#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK \
@@ -170,6 +168,9 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
  * have to worry about atomic accesses.
  */
 #define TS_USEDFPU		0x0001	/* FPU was used by this task this quantum (SMP) */
+#define TS_POLLING		0x0002	/* True if in idle loop and not sleeping */
+
+#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
 
 #endif /* __KERNEL__ */
 
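
TIF_POLLING_NRFLAG was an atomic TIF_ bit; moving it into the synchronous ->status word as TS_POLLING means only the owning task writes it, so the idle loop can use plain stores while the scheduler still uses tsk_is_polling() to decide whether a resched IPI is needed. The pattern, sketched (not the exact kernel idle code):

	/* Sketch only: poll-idle style loop after the TS_POLLING conversion. */
	static void poll_for_resched(void)
	{
		current_thread_info()->status |= TS_POLLING;
		smp_mb();			/* see need_resched() promptly */
		while (!need_resched())
			cpu_relax();
		current_thread_info()->status &= ~TS_POLLING;
	}
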
diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h
new file mode 100644
index 000000000000..d480f2e38215
--- /dev/null
+++ b/include/asm-i386/unwind.h
@@ -0,0 +1,98 @@
1#ifndef _ASM_I386_UNWIND_H
2#define _ASM_I386_UNWIND_H
3
4/*
5 * Copyright (C) 2002-2006 Novell, Inc.
6 * Jan Beulich <jbeulich@novell.com>
7 * This code is released under version 2 of the GNU GPL.
8 */
9
10#ifdef CONFIG_STACK_UNWIND
11
12#include <linux/sched.h>
13#include <asm/fixmap.h>
14#include <asm/ptrace.h>
15#include <asm/uaccess.h>
16
17struct unwind_frame_info
18{
19 struct pt_regs regs;
20 struct task_struct *task;
21};
22
23#define UNW_PC(frame) (frame)->regs.eip
24#define UNW_SP(frame) (frame)->regs.esp
25#ifdef CONFIG_FRAME_POINTER
26#define UNW_FP(frame) (frame)->regs.ebp
27#define FRAME_RETADDR_OFFSET 4
28#define FRAME_LINK_OFFSET 0
29#define STACK_BOTTOM(tsk) STACK_LIMIT((tsk)->thread.esp0)
30#define STACK_TOP(tsk) ((tsk)->thread.esp0)
31#endif
32#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1))
33
34#define UNW_REGISTER_INFO \
35 PTREGS_INFO(eax), \
36 PTREGS_INFO(ecx), \
37 PTREGS_INFO(edx), \
38 PTREGS_INFO(ebx), \
39 PTREGS_INFO(esp), \
40 PTREGS_INFO(ebp), \
41 PTREGS_INFO(esi), \
42 PTREGS_INFO(edi), \
43 PTREGS_INFO(eip)
44
45static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
46 /*const*/ struct pt_regs *regs)
47{
48 if (user_mode_vm(regs))
49 info->regs = *regs;
50 else {
51 memcpy(&info->regs, regs, offsetof(struct pt_regs, esp));
52 info->regs.esp = (unsigned long)&regs->esp;
53 info->regs.xss = __KERNEL_DS;
54 }
55}
56
57static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
58{
59 memset(&info->regs, 0, sizeof(info->regs));
60 info->regs.eip = info->task->thread.eip;
61 info->regs.xcs = __KERNEL_CS;
62 __get_user(info->regs.ebp, (long *)info->task->thread.esp);
63 info->regs.esp = info->task->thread.esp;
64 info->regs.xss = __KERNEL_DS;
65 info->regs.xds = __USER_DS;
66 info->regs.xes = __USER_DS;
67}
68
69extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *,
70 asmlinkage int (*callback)(struct unwind_frame_info *,
71 void *arg),
72 void *arg);
73
74static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
75{
76#if 0 /* This can only work when selector register and EFLAGS saves/restores
77 are properly annotated (and tracked in UNW_REGISTER_INFO). */
78 return user_mode_vm(&info->regs);
79#else
80 return info->regs.eip < PAGE_OFFSET
81 || (info->regs.eip >= __fix_to_virt(FIX_VSYSCALL)
82 && info->regs.eip < __fix_to_virt(FIX_VSYSCALL) + PAGE_SIZE)
83 || info->regs.esp < PAGE_OFFSET;
84#endif
85}
86
87#else
88
89#define UNW_PC(frame) ((void)(frame), 0)
90
91static inline int arch_unw_user_mode(const void *info)
92{
93 return 0;
94}
95
96#endif
97
98#endif /* _ASM_I386_UNWIND_H */
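
A sketch (not part of the patch; function name hypothetical) of seeding a frame from a trapping context with the helpers above and reporting the starting point. The dwarf2 walker itself lives in <linux/unwind.h>, which this series also adds but which is not shown in this section:

	/* Sketch only: initialise an unwind frame and report where it starts. */
	static void report_unwind_start(struct task_struct *task,
					struct pt_regs *regs)
	{
		struct unwind_frame_info info;

		info.task = task;
		arch_unw_init_frame_info(&info, regs);
		printk(KERN_DEBUG "unwind: pc=%08lx sp=%08lx %s\n",
		       UNW_PC(&info), UNW_SP(&info),
		       arch_unw_user_mode(&info) ? "user" : "kernel");
	}
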
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index e5392c4d30c6..8bc9869e5765 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -27,6 +27,7 @@ struct thread_info {
 	__u32 flags;			/* thread_info flags (see TIF_*) */
 	__u32 cpu;			/* current CPU */
 	__u32 last_cpu;			/* Last CPU thread ran on */
+	__u32 status;			/* Thread synchronous flags */
 	mm_segment_t addr_limit;	/* user-level address space limit */
 	int preempt_count;		/* 0=premptable, <0=BUG; will also serve as bh-counter */
 	struct restart_block restart_block;
@@ -103,4 +104,8 @@ struct thread_info {
 /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
 #define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
 
+#define TS_POLLING		1	/* true if in idle loop and not sleeping */
+
+#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
+
 #endif /* _ASM_IA64_THREAD_INFO_H */
diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h
new file mode 100644
index 000000000000..387c8f66af7d
--- /dev/null
+++ b/include/asm-x86_64/alternative.h
@@ -0,0 +1,146 @@
1#ifndef _X86_64_ALTERNATIVE_H
2#define _X86_64_ALTERNATIVE_H
3
4#ifdef __KERNEL__
5
6#include <linux/types.h>
7
8struct alt_instr {
9 u8 *instr; /* original instruction */
10 u8 *replacement;
11 u8 cpuid; /* cpuid bit set for replacement */
12 u8 instrlen; /* length of original instruction */
13 u8 replacementlen; /* length of new instruction, <= instrlen */
14 u8 pad[5];
15};
16
17extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
18
19struct module;
20extern void alternatives_smp_module_add(struct module *mod, char *name,
21 void *locks, void *locks_end,
22 void *text, void *text_end);
23extern void alternatives_smp_module_del(struct module *mod);
24extern void alternatives_smp_switch(int smp);
25
26#endif
27
28/*
29 * Alternative instructions for different CPU types or capabilities.
30 *
31 * This allows to use optimized instructions even on generic binary
32 * kernels.
33 *
34 * length of oldinstr must be longer or equal the length of newinstr
35 * It can be padded with nops as needed.
36 *
37 * For non barrier like inlines please define new variants
38 * without volatile and memory clobber.
39 */
40#define alternative(oldinstr, newinstr, feature) \
41 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
42 ".section .altinstructions,\"a\"\n" \
43 " .align 8\n" \
44 " .quad 661b\n" /* label */ \
45 " .quad 663f\n" /* new instruction */ \
46 " .byte %c0\n" /* feature bit */ \
47 " .byte 662b-661b\n" /* sourcelen */ \
48 " .byte 664f-663f\n" /* replacementlen */ \
49 ".previous\n" \
50 ".section .altinstr_replacement,\"ax\"\n" \
51 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
52 ".previous" :: "i" (feature) : "memory")
53
54/*
55 * Alternative inline assembly with input.
56 *
57 * Pecularities:
58 * No memory clobber here.
59 * Argument numbers start with 1.
60 * Best is to use constraints that are fixed size (like (%1) ... "r")
61 * If you use variable sized constraints like "m" or "g" in the
62 * replacement make sure to pad to the worst case length.
63 */
64#define alternative_input(oldinstr, newinstr, feature, input...) \
65 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
66 ".section .altinstructions,\"a\"\n" \
67 " .align 8\n" \
68 " .quad 661b\n" /* label */ \
69 " .quad 663f\n" /* new instruction */ \
70 " .byte %c0\n" /* feature bit */ \
71 " .byte 662b-661b\n" /* sourcelen */ \
72 " .byte 664f-663f\n" /* replacementlen */ \
73 ".previous\n" \
74 ".section .altinstr_replacement,\"ax\"\n" \
75 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
76 ".previous" :: "i" (feature), ##input)
77
78/* Like alternative_input, but with a single output argument */
79#define alternative_io(oldinstr, newinstr, feature, output, input...) \
80 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
81 ".section .altinstructions,\"a\"\n" \
82 " .align 8\n" \
83 " .quad 661b\n" /* label */ \
84 " .quad 663f\n" /* new instruction */ \
85 " .byte %c[feat]\n" /* feature bit */ \
86 " .byte 662b-661b\n" /* sourcelen */ \
87 " .byte 664f-663f\n" /* replacementlen */ \
88 ".previous\n" \
89 ".section .altinstr_replacement,\"ax\"\n" \
90 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
91 ".previous" : output : [feat] "i" (feature), ##input)
92
93/*
94 * Alternative inline assembly for SMP.
95 *
96 * alternative_smp() takes two versions (SMP first, UP second) and is
97 * for more complex stuff such as spinlocks.
98 *
99 * The LOCK_PREFIX macro defined here replaces the LOCK and
100 * LOCK_PREFIX macros used everywhere in the source tree.
101 *
102 * SMP alternatives use the same data structures as the other
103 * alternatives and the X86_FEATURE_UP flag to indicate the case of a
104 * UP system running a SMP kernel. The existing apply_alternatives()
105 * works fine for patching a SMP kernel for UP.
106 *
107 * The SMP alternative tables can be kept after boot and contain both
108 * UP and SMP versions of the instructions to allow switching back to
109 * SMP at runtime, when hotplugging in a new CPU, which is especially
110 * useful in virtualized environments.
111 *
112 * The very common lock prefix is handled as special case in a
113 * separate table which is a pure address list without replacement ptr
114 * and size information. That keeps the table sizes small.
115 */
116
117#ifdef CONFIG_SMP
118#define alternative_smp(smpinstr, upinstr, args...) \
119 asm volatile ("661:\n\t" smpinstr "\n662:\n" \
120 ".section .smp_altinstructions,\"a\"\n" \
121 " .align 8\n" \
122 " .quad 661b\n" /* label */ \
123 " .quad 663f\n" /* new instruction */ \
124 " .byte 0x66\n" /* X86_FEATURE_UP */ \
125 " .byte 662b-661b\n" /* sourcelen */ \
126 " .byte 664f-663f\n" /* replacementlen */ \
127 ".previous\n" \
128 ".section .smp_altinstr_replacement,\"awx\"\n" \
129 "663:\n\t" upinstr "\n" /* replacement */ \
130 "664:\n\t.fill 662b-661b,1,0x42\n" /* space for original */ \
131 ".previous" : args)
132
133#define LOCK_PREFIX \
134 ".section .smp_locks,\"a\"\n" \
135 " .align 8\n" \
136 " .quad 661f\n" /* address */ \
137 ".previous\n" \
138 "661:\n\tlock; "
139
140#else /* ! CONFIG_SMP */
141#define alternative_smp(smpinstr, upinstr, args...) \
142 asm volatile (upinstr : args)
143#define LOCK_PREFIX ""
144#endif
145
146#endif /* _X86_64_ALTERNATIVE_H */
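
The usual consumer of alternative() is a barrier- or prefetch-style helper that wants a better instruction when the CPU advertises the feature. A sketch (not from this patch, in the style of the i386 mb() definition; the helper name is hypothetical):

	/* Sketch only: a full barrier patched to MFENCE on SSE2-capable CPUs.
	 * The replacement (3 bytes) must not be longer than the original. */
	static inline void my_mb(void)
	{
		alternative("lock; addl $0,0(%%rsp)", "mfence", X86_FEATURE_XMM2);
	}
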
diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
index a731be2204d2..9c96a0a8d1bd 100644
--- a/include/asm-x86_64/apic.h
+++ b/include/asm-x86_64/apic.h
@@ -49,7 +49,8 @@ static __inline unsigned int apic_read(unsigned long reg)
 
 static __inline__ void apic_wait_icr_idle(void)
 {
-	while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY );
+	while (apic_read( APIC_ICR ) & APIC_ICR_BUSY)
+		cpu_relax();
 }
 
 static inline void ack_APIC_irq(void)
@@ -79,30 +80,23 @@ extern void init_apic_mappings (void);
 extern void smp_local_timer_interrupt (struct pt_regs * regs);
 extern void setup_boot_APIC_clock (void);
 extern void setup_secondary_APIC_clock (void);
-extern void setup_apic_nmi_watchdog (void);
-extern int reserve_lapic_nmi(void);
-extern void release_lapic_nmi(void);
-extern void disable_timer_nmi_watchdog(void);
-extern void enable_timer_nmi_watchdog(void);
-extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
 extern int APIC_init_uniprocessor (void);
 extern void disable_APIC_timer(void);
 extern void enable_APIC_timer(void);
 extern void clustered_apic_check(void);
 
-extern void nmi_watchdog_default(void);
-extern int setup_nmi_watchdog(char *);
+extern void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector,
+				   unsigned char msg_type, unsigned char mask);
 
-extern unsigned int nmi_watchdog;
-#define NMI_DEFAULT	-1
-#define NMI_NONE	0
-#define NMI_IO_APIC	1
-#define NMI_LOCAL_APIC	2
-#define NMI_INVALID	3
+#define K8_APIC_EXT_LVT_BASE		0x500
+#define K8_APIC_EXT_INT_MSG_FIX		0x0
+#define K8_APIC_EXT_INT_MSG_SMI		0x2
+#define K8_APIC_EXT_INT_MSG_NMI		0x4
+#define K8_APIC_EXT_INT_MSG_EXT		0x7
+#define K8_APIC_EXT_LVT_ENTRY_THRESHOLD	0
 
 extern int disable_timer_pin_1;
 
-extern void setup_threshold_lvt(unsigned long lvt_off);
 
 void smp_send_timer_broadcast_ipi(void);
 void switch_APIC_timer_to_ipi(void *cpumask);
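
setup_threshold_lvt() is replaced by the more general setup_APIC_extened_lvt() (spelling as in the header) plus the K8 extended-LVT constants. A hedged sketch of how the MCE threshold code might use it; THRESHOLD_APIC_VECTOR is assumed to be the vector that driver already uses and the wrapper name is hypothetical:

	/* Sketch only: point the K8 threshold entry of the extended LVT at the
	 * threshold interrupt vector, with fixed delivery and unmasked. */
	static void route_threshold_lvt(void)
	{
		setup_APIC_extened_lvt(K8_APIC_EXT_LVT_ENTRY_THRESHOLD,
				       THRESHOLD_APIC_VECTOR,
				       K8_APIC_EXT_INT_MSG_FIX, 0);
	}
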
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index bd3fa67ed835..007e88d6d43f 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -1,7 +1,7 @@
1#ifndef __ARCH_X86_64_ATOMIC__ 1#ifndef __ARCH_X86_64_ATOMIC__
2#define __ARCH_X86_64_ATOMIC__ 2#define __ARCH_X86_64_ATOMIC__
3 3
4#include <asm/types.h> 4#include <asm/alternative.h>
5 5
6/* atomic_t should be 32 bit signed type */ 6/* atomic_t should be 32 bit signed type */
7 7
@@ -52,7 +52,7 @@ typedef struct { volatile int counter; } atomic_t;
52static __inline__ void atomic_add(int i, atomic_t *v) 52static __inline__ void atomic_add(int i, atomic_t *v)
53{ 53{
54 __asm__ __volatile__( 54 __asm__ __volatile__(
55 LOCK "addl %1,%0" 55 LOCK_PREFIX "addl %1,%0"
56 :"=m" (v->counter) 56 :"=m" (v->counter)
57 :"ir" (i), "m" (v->counter)); 57 :"ir" (i), "m" (v->counter));
58} 58}
@@ -67,7 +67,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
67static __inline__ void atomic_sub(int i, atomic_t *v) 67static __inline__ void atomic_sub(int i, atomic_t *v)
68{ 68{
69 __asm__ __volatile__( 69 __asm__ __volatile__(
70 LOCK "subl %1,%0" 70 LOCK_PREFIX "subl %1,%0"
71 :"=m" (v->counter) 71 :"=m" (v->counter)
72 :"ir" (i), "m" (v->counter)); 72 :"ir" (i), "m" (v->counter));
73} 73}
@@ -86,7 +86,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
86 unsigned char c; 86 unsigned char c;
87 87
88 __asm__ __volatile__( 88 __asm__ __volatile__(
89 LOCK "subl %2,%0; sete %1" 89 LOCK_PREFIX "subl %2,%0; sete %1"
90 :"=m" (v->counter), "=qm" (c) 90 :"=m" (v->counter), "=qm" (c)
91 :"ir" (i), "m" (v->counter) : "memory"); 91 :"ir" (i), "m" (v->counter) : "memory");
92 return c; 92 return c;
@@ -101,7 +101,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
101static __inline__ void atomic_inc(atomic_t *v) 101static __inline__ void atomic_inc(atomic_t *v)
102{ 102{
103 __asm__ __volatile__( 103 __asm__ __volatile__(
104 LOCK "incl %0" 104 LOCK_PREFIX "incl %0"
105 :"=m" (v->counter) 105 :"=m" (v->counter)
106 :"m" (v->counter)); 106 :"m" (v->counter));
107} 107}
@@ -115,7 +115,7 @@ static __inline__ void atomic_inc(atomic_t *v)
115static __inline__ void atomic_dec(atomic_t *v) 115static __inline__ void atomic_dec(atomic_t *v)
116{ 116{
117 __asm__ __volatile__( 117 __asm__ __volatile__(
118 LOCK "decl %0" 118 LOCK_PREFIX "decl %0"
119 :"=m" (v->counter) 119 :"=m" (v->counter)
120 :"m" (v->counter)); 120 :"m" (v->counter));
121} 121}
@@ -133,7 +133,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
133 unsigned char c; 133 unsigned char c;
134 134
135 __asm__ __volatile__( 135 __asm__ __volatile__(
136 LOCK "decl %0; sete %1" 136 LOCK_PREFIX "decl %0; sete %1"
137 :"=m" (v->counter), "=qm" (c) 137 :"=m" (v->counter), "=qm" (c)
138 :"m" (v->counter) : "memory"); 138 :"m" (v->counter) : "memory");
139 return c != 0; 139 return c != 0;
@@ -152,7 +152,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
152 unsigned char c; 152 unsigned char c;
153 153
154 __asm__ __volatile__( 154 __asm__ __volatile__(
155 LOCK "incl %0; sete %1" 155 LOCK_PREFIX "incl %0; sete %1"
156 :"=m" (v->counter), "=qm" (c) 156 :"=m" (v->counter), "=qm" (c)
157 :"m" (v->counter) : "memory"); 157 :"m" (v->counter) : "memory");
158 return c != 0; 158 return c != 0;
@@ -172,7 +172,7 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
172 unsigned char c; 172 unsigned char c;
173 173
174 __asm__ __volatile__( 174 __asm__ __volatile__(
175 LOCK "addl %2,%0; sets %1" 175 LOCK_PREFIX "addl %2,%0; sets %1"
176 :"=m" (v->counter), "=qm" (c) 176 :"=m" (v->counter), "=qm" (c)
177 :"ir" (i), "m" (v->counter) : "memory"); 177 :"ir" (i), "m" (v->counter) : "memory");
178 return c; 178 return c;
@@ -189,7 +189,7 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
189{ 189{
190 int __i = i; 190 int __i = i;
191 __asm__ __volatile__( 191 __asm__ __volatile__(
192 LOCK "xaddl %0, %1;" 192 LOCK_PREFIX "xaddl %0, %1;"
193 :"=r"(i) 193 :"=r"(i)
194 :"m"(v->counter), "0"(i)); 194 :"m"(v->counter), "0"(i));
195 return i + __i; 195 return i + __i;
@@ -237,7 +237,7 @@ typedef struct { volatile long counter; } atomic64_t;
237static __inline__ void atomic64_add(long i, atomic64_t *v) 237static __inline__ void atomic64_add(long i, atomic64_t *v)
238{ 238{
239 __asm__ __volatile__( 239 __asm__ __volatile__(
240 LOCK "addq %1,%0" 240 LOCK_PREFIX "addq %1,%0"
241 :"=m" (v->counter) 241 :"=m" (v->counter)
242 :"ir" (i), "m" (v->counter)); 242 :"ir" (i), "m" (v->counter));
243} 243}
@@ -252,7 +252,7 @@ static __inline__ void atomic64_add(long i, atomic64_t *v)
252static __inline__ void atomic64_sub(long i, atomic64_t *v) 252static __inline__ void atomic64_sub(long i, atomic64_t *v)
253{ 253{
254 __asm__ __volatile__( 254 __asm__ __volatile__(
255 LOCK "subq %1,%0" 255 LOCK_PREFIX "subq %1,%0"
256 :"=m" (v->counter) 256 :"=m" (v->counter)
257 :"ir" (i), "m" (v->counter)); 257 :"ir" (i), "m" (v->counter));
258} 258}
@@ -271,7 +271,7 @@ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
271 unsigned char c; 271 unsigned char c;
272 272
273 __asm__ __volatile__( 273 __asm__ __volatile__(
274 LOCK "subq %2,%0; sete %1" 274 LOCK_PREFIX "subq %2,%0; sete %1"
275 :"=m" (v->counter), "=qm" (c) 275 :"=m" (v->counter), "=qm" (c)
276 :"ir" (i), "m" (v->counter) : "memory"); 276 :"ir" (i), "m" (v->counter) : "memory");
277 return c; 277 return c;
@@ -286,7 +286,7 @@ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
286static __inline__ void atomic64_inc(atomic64_t *v) 286static __inline__ void atomic64_inc(atomic64_t *v)
287{ 287{
288 __asm__ __volatile__( 288 __asm__ __volatile__(
289 LOCK "incq %0" 289 LOCK_PREFIX "incq %0"
290 :"=m" (v->counter) 290 :"=m" (v->counter)
291 :"m" (v->counter)); 291 :"m" (v->counter));
292} 292}
@@ -300,7 +300,7 @@ static __inline__ void atomic64_inc(atomic64_t *v)
300static __inline__ void atomic64_dec(atomic64_t *v) 300static __inline__ void atomic64_dec(atomic64_t *v)
301{ 301{
302 __asm__ __volatile__( 302 __asm__ __volatile__(
303 LOCK "decq %0" 303 LOCK_PREFIX "decq %0"
304 :"=m" (v->counter) 304 :"=m" (v->counter)
305 :"m" (v->counter)); 305 :"m" (v->counter));
306} 306}
@@ -318,7 +318,7 @@ static __inline__ int atomic64_dec_and_test(atomic64_t *v)
318 unsigned char c; 318 unsigned char c;
319 319
320 __asm__ __volatile__( 320 __asm__ __volatile__(
321 LOCK "decq %0; sete %1" 321 LOCK_PREFIX "decq %0; sete %1"
322 :"=m" (v->counter), "=qm" (c) 322 :"=m" (v->counter), "=qm" (c)
323 :"m" (v->counter) : "memory"); 323 :"m" (v->counter) : "memory");
324 return c != 0; 324 return c != 0;
@@ -337,7 +337,7 @@ static __inline__ int atomic64_inc_and_test(atomic64_t *v)
337 unsigned char c; 337 unsigned char c;
338 338
339 __asm__ __volatile__( 339 __asm__ __volatile__(
340 LOCK "incq %0; sete %1" 340 LOCK_PREFIX "incq %0; sete %1"
341 :"=m" (v->counter), "=qm" (c) 341 :"=m" (v->counter), "=qm" (c)
342 :"m" (v->counter) : "memory"); 342 :"m" (v->counter) : "memory");
343 return c != 0; 343 return c != 0;
@@ -357,7 +357,7 @@ static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
357 unsigned char c; 357 unsigned char c;
358 358
359 __asm__ __volatile__( 359 __asm__ __volatile__(
360 LOCK "addq %2,%0; sets %1" 360 LOCK_PREFIX "addq %2,%0; sets %1"
361 :"=m" (v->counter), "=qm" (c) 361 :"=m" (v->counter), "=qm" (c)
362 :"ir" (i), "m" (v->counter) : "memory"); 362 :"ir" (i), "m" (v->counter) : "memory");
363 return c; 363 return c;
@@ -374,7 +374,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t *v)
374{ 374{
375 long __i = i; 375 long __i = i;
376 __asm__ __volatile__( 376 __asm__ __volatile__(
377 LOCK "xaddq %0, %1;" 377 LOCK_PREFIX "xaddq %0, %1;"
378 :"=r"(i) 378 :"=r"(i)
379 :"m"(v->counter), "0"(i)); 379 :"m"(v->counter), "0"(i));
380 return i + __i; 380 return i + __i;
@@ -418,11 +418,11 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
418 418
419/* These are x86-specific, used by some header files */ 419/* These are x86-specific, used by some header files */
420#define atomic_clear_mask(mask, addr) \ 420#define atomic_clear_mask(mask, addr) \
421__asm__ __volatile__(LOCK "andl %0,%1" \ 421__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
422: : "r" (~(mask)),"m" (*addr) : "memory") 422: : "r" (~(mask)),"m" (*addr) : "memory")
423 423
424#define atomic_set_mask(mask, addr) \ 424#define atomic_set_mask(mask, addr) \
425__asm__ __volatile__(LOCK "orl %0,%1" \ 425__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
426: : "r" ((unsigned)mask),"m" (*(addr)) : "memory") 426: : "r" ((unsigned)mask),"m" (*(addr)) : "memory")
427 427
428/* Atomic operations are already serializing on x86 */ 428/* Atomic operations are already serializing on x86 */
diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h
index e9bf933d25d0..f7ba57b1cc08 100644
--- a/include/asm-x86_64/bitops.h
+++ b/include/asm-x86_64/bitops.h
@@ -5,12 +5,7 @@
  * Copyright 1992, Linus Torvalds.
  */
 
-
-#ifdef CONFIG_SMP
-#define LOCK_PREFIX "lock ; "
-#else
-#define LOCK_PREFIX ""
-#endif
+#include <asm/alternative.h>
 
 #define ADDR (*(volatile long *) addr)
 
diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h
new file mode 100644
index 000000000000..6e1654f30986
--- /dev/null
+++ b/include/asm-x86_64/calgary.h
@@ -0,0 +1,66 @@
1/*
2 * Derived from include/asm-powerpc/iommu.h
3 *
4 * Copyright (C) 2006 Jon Mason <jdmason@us.ibm.com>, IBM Corporation
5 * Copyright (C) 2006 Muli Ben-Yehuda <muli@il.ibm.com>, IBM Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#ifndef _ASM_X86_64_CALGARY_H
23#define _ASM_X86_64_CALGARY_H
24
25#include <linux/config.h>
26#include <linux/spinlock.h>
27#include <linux/device.h>
28#include <linux/dma-mapping.h>
29#include <asm/types.h>
30
31struct iommu_table {
32 unsigned long it_base; /* mapped address of tce table */
33 unsigned long it_hint; /* Hint for next alloc */
34 unsigned long *it_map; /* A simple allocation bitmap for now */
35 spinlock_t it_lock; /* Protects it_map */
36 unsigned int it_size; /* Size of iommu table in entries */
37 unsigned char it_busno; /* Bus number this table belongs to */
38 void __iomem *bbar;
39 u64 tar_val;
40 struct timer_list watchdog_timer;
41};
42
43#define TCE_TABLE_SIZE_UNSPECIFIED ~0
44#define TCE_TABLE_SIZE_64K 0
45#define TCE_TABLE_SIZE_128K 1
46#define TCE_TABLE_SIZE_256K 2
47#define TCE_TABLE_SIZE_512K 3
48#define TCE_TABLE_SIZE_1M 4
49#define TCE_TABLE_SIZE_2M 5
50#define TCE_TABLE_SIZE_4M 6
51#define TCE_TABLE_SIZE_8M 7
52
53#ifdef CONFIG_CALGARY_IOMMU
54extern int calgary_iommu_init(void);
55extern void detect_calgary(void);
56#else
57static inline int calgary_iommu_init(void) { return 1; }
58static inline void detect_calgary(void) { return; }
59#endif
60
61static inline unsigned int bus_to_phb(unsigned char busno)
62{
63 return ((busno % 15 == 0) ? 0 : busno / 2 + 1);
64}
65
66#endif /* _ASM_X86_64_CALGARY_H */
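
bus_to_phb() folds the Calgary bus numbering back onto host-bridge indices; a few worked values (sketch, not part of the patch) make the formula easier to read:

	/* bus_to_phb(0)  == 0   (busno % 15 == 0 case)
	 * bus_to_phb(1)  == 1   (1/2 + 1)
	 * bus_to_phb(2)  == 2   (2/2 + 1)
	 * bus_to_phb(3)  == 2   (3/2 + 1, integer division)
	 * bus_to_phb(15) == 0   (15 % 15 == 0) */
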
diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h
index 662964b74e34..ee792faaca01 100644
--- a/include/asm-x86_64/cpufeature.h
+++ b/include/asm-x86_64/cpufeature.h
@@ -46,6 +46,7 @@
 #define X86_FEATURE_SYSCALL	(1*32+11) /* SYSCALL/SYSRET */
 #define X86_FEATURE_MMXEXT	(1*32+22) /* AMD MMX extensions */
 #define X86_FEATURE_FXSR_OPT	(1*32+25) /* FXSR optimizations */
+#define X86_FEATURE_RDTSCP	(1*32+27) /* RDTSCP */
 #define X86_FEATURE_LM		(1*32+29) /* Long Mode (x86-64) */
 #define X86_FEATURE_3DNOWEXT	(1*32+30) /* AMD 3DNow! extensions */
 #define X86_FEATURE_3DNOW	(1*32+31) /* 3DNow! */
@@ -65,6 +66,8 @@
 #define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */
 #define X86_FEATURE_SYNC_RDTSC	(3*32+6)  /* RDTSC syncs CPU core */
 #define X86_FEATURE_FXSAVE_LEAK (3*32+7)  /* FIP/FOP/FDP leaks through FXSAVE */
+#define X86_FEATURE_UP		(3*32+8)  /* SMP kernel running on UP */
+#define X86_FEATURE_ARCH_PERFMON (3*32+9) /* Intel Architectural PerfMon */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
index 498f66df36b9..b6da83dcc7a6 100644
--- a/include/asm-x86_64/dma-mapping.h
+++ b/include/asm-x86_64/dma-mapping.h
@@ -55,6 +55,13 @@ extern dma_addr_t bad_dma_address;
55extern struct dma_mapping_ops* dma_ops; 55extern struct dma_mapping_ops* dma_ops;
56extern int iommu_merge; 56extern int iommu_merge;
57 57
58static inline int valid_dma_direction(int dma_direction)
59{
60 return ((dma_direction == DMA_BIDIRECTIONAL) ||
61 (dma_direction == DMA_TO_DEVICE) ||
62 (dma_direction == DMA_FROM_DEVICE));
63}
64
58static inline int dma_mapping_error(dma_addr_t dma_addr) 65static inline int dma_mapping_error(dma_addr_t dma_addr)
59{ 66{
60 if (dma_ops->mapping_error) 67 if (dma_ops->mapping_error)
@@ -72,6 +79,7 @@ static inline dma_addr_t
72dma_map_single(struct device *hwdev, void *ptr, size_t size, 79dma_map_single(struct device *hwdev, void *ptr, size_t size,
73 int direction) 80 int direction)
74{ 81{
82 BUG_ON(!valid_dma_direction(direction));
75 return dma_ops->map_single(hwdev, ptr, size, direction); 83 return dma_ops->map_single(hwdev, ptr, size, direction);
76} 84}
77 85
@@ -79,6 +87,7 @@ static inline void
79dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size, 87dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
80 int direction) 88 int direction)
81{ 89{
90 BUG_ON(!valid_dma_direction(direction));
82 dma_ops->unmap_single(dev, addr, size, direction); 91 dma_ops->unmap_single(dev, addr, size, direction);
83} 92}
84 93
@@ -91,6 +100,7 @@ static inline void
91dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, 100dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
92 size_t size, int direction) 101 size_t size, int direction)
93{ 102{
103 BUG_ON(!valid_dma_direction(direction));
94 if (dma_ops->sync_single_for_cpu) 104 if (dma_ops->sync_single_for_cpu)
95 dma_ops->sync_single_for_cpu(hwdev, dma_handle, size, 105 dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
96 direction); 106 direction);
@@ -101,6 +111,7 @@ static inline void
101dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, 111dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
102 size_t size, int direction) 112 size_t size, int direction)
103{ 113{
114 BUG_ON(!valid_dma_direction(direction));
104 if (dma_ops->sync_single_for_device) 115 if (dma_ops->sync_single_for_device)
105 dma_ops->sync_single_for_device(hwdev, dma_handle, size, 116 dma_ops->sync_single_for_device(hwdev, dma_handle, size,
106 direction); 117 direction);
@@ -111,6 +122,7 @@ static inline void
111dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, 122dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
112 unsigned long offset, size_t size, int direction) 123 unsigned long offset, size_t size, int direction)
113{ 124{
125 BUG_ON(!valid_dma_direction(direction));
114 if (dma_ops->sync_single_range_for_cpu) { 126 if (dma_ops->sync_single_range_for_cpu) {
115 dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction); 127 dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
116 } 128 }
@@ -122,6 +134,7 @@ static inline void
122dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle, 134dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
123 unsigned long offset, size_t size, int direction) 135 unsigned long offset, size_t size, int direction)
124{ 136{
137 BUG_ON(!valid_dma_direction(direction));
125 if (dma_ops->sync_single_range_for_device) 138 if (dma_ops->sync_single_range_for_device)
126 dma_ops->sync_single_range_for_device(hwdev, dma_handle, 139 dma_ops->sync_single_range_for_device(hwdev, dma_handle,
127 offset, size, direction); 140 offset, size, direction);
@@ -133,6 +146,7 @@ static inline void
133dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, 146dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
134 int nelems, int direction) 147 int nelems, int direction)
135{ 148{
149 BUG_ON(!valid_dma_direction(direction));
136 if (dma_ops->sync_sg_for_cpu) 150 if (dma_ops->sync_sg_for_cpu)
137 dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction); 151 dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
138 flush_write_buffers(); 152 flush_write_buffers();
@@ -142,6 +156,7 @@ static inline void
142dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, 156dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
143 int nelems, int direction) 157 int nelems, int direction)
144{ 158{
159 BUG_ON(!valid_dma_direction(direction));
145 if (dma_ops->sync_sg_for_device) { 160 if (dma_ops->sync_sg_for_device) {
146 dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction); 161 dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
147 } 162 }
@@ -152,6 +167,7 @@ dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
152static inline int 167static inline int
153dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction) 168dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
154{ 169{
170 BUG_ON(!valid_dma_direction(direction));
155 return dma_ops->map_sg(hwdev, sg, nents, direction); 171 return dma_ops->map_sg(hwdev, sg, nents, direction);
156} 172}
157 173
@@ -159,6 +175,7 @@ static inline void
159dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, 175dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
160 int direction) 176 int direction)
161{ 177{
178 BUG_ON(!valid_dma_direction(direction));
162 dma_ops->unmap_sg(hwdev, sg, nents, direction); 179 dma_ops->unmap_sg(hwdev, sg, nents, direction);
163} 180}
164 181
diff --git a/include/asm-x86_64/dma.h b/include/asm-x86_64/dma.h
index c556208d3dd7..a37c16f06289 100644
--- a/include/asm-x86_64/dma.h
+++ b/include/asm-x86_64/dma.h
@@ -1,4 +1,4 @@
-/* $Id: dma.h,v 1.1.1.1 2001/04/19 20:00:38 ak Exp $
+/*
  * linux/include/asm/dma.h: Defines for using and allocating dma channels.
  * Written by Hennus Bergman, 1992.
  * High DMA channel support & info by Hannu Savolainen
diff --git a/include/asm-x86_64/gart-mapping.h b/include/asm-x86_64/gart-mapping.h
deleted file mode 100644
index ada497b0b55b..000000000000
--- a/include/asm-x86_64/gart-mapping.h
+++ /dev/null
@@ -1,16 +0,0 @@
1#ifndef _X8664_GART_MAPPING_H
2#define _X8664_GART_MAPPING_H 1
3
4#include <linux/types.h>
5#include <asm/types.h>
6
7struct device;
8
9extern void*
10gart_alloc_coherent(struct device *dev, size_t size,
11 dma_addr_t *dma_handle, gfp_t gfp);
12
13extern int
14gart_dma_supported(struct device *hwdev, u64 mask);
15
16#endif /* _X8664_GART_MAPPING_H */
diff --git a/include/asm-x86_64/hpet.h b/include/asm-x86_64/hpet.h
index 18ff7ee9e774..b39098408b69 100644
--- a/include/asm-x86_64/hpet.h
+++ b/include/asm-x86_64/hpet.h
@@ -55,7 +55,7 @@
 
 extern int is_hpet_enabled(void);
 extern int hpet_rtc_timer_init(void);
-extern int oem_force_hpet_timer(void);
+extern int apic_is_clustered_box(void);
 
 extern int hpet_use_timer;
 
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h
index 3de96fd86a70..1b2ac55d3204 100644
--- a/include/asm-x86_64/hw_irq.h
+++ b/include/asm-x86_64/hw_irq.h
@@ -12,8 +12,6 @@
  * <tomsoft@informatik.tu-chemnitz.de>
  *
  * hacked by Andi Kleen for x86-64.
- *
- * $Id: hw_irq.h,v 1.24 2001/09/14 20:55:03 vojtech Exp $
  */
 
 #ifndef __ASSEMBLY__
diff --git a/include/asm-x86_64/ia32_unistd.h b/include/asm-x86_64/ia32_unistd.h
index b4f4b172b15a..5b52ce507338 100644
--- a/include/asm-x86_64/ia32_unistd.h
+++ b/include/asm-x86_64/ia32_unistd.h
@@ -4,317 +4,15 @@
4/* 4/*
5 * This file contains the system call numbers of the ia32 port, 5 * This file contains the system call numbers of the ia32 port,
6 * this is for the kernel only. 6 * this is for the kernel only.
7 * Only add syscalls here where some part of the kernel needs to know
8 * the number. This should be otherwise in sync with asm-i386/unistd.h. -AK
7 */ 9 */
8 10
9#define __NR_ia32_restart_syscall 0 11#define __NR_ia32_restart_syscall 0
10#define __NR_ia32_exit 1 12#define __NR_ia32_exit 1
11#define __NR_ia32_fork 2
12#define __NR_ia32_read 3 13#define __NR_ia32_read 3
13#define __NR_ia32_write 4 14#define __NR_ia32_write 4
14#define __NR_ia32_open 5 15#define __NR_ia32_sigreturn 119
15#define __NR_ia32_close 6
16#define __NR_ia32_waitpid 7
17#define __NR_ia32_creat 8
18#define __NR_ia32_link 9
19#define __NR_ia32_unlink 10
20#define __NR_ia32_execve 11
21#define __NR_ia32_chdir 12
22#define __NR_ia32_time 13
23#define __NR_ia32_mknod 14
24#define __NR_ia32_chmod 15
25#define __NR_ia32_lchown 16
26#define __NR_ia32_break 17
27#define __NR_ia32_oldstat 18
28#define __NR_ia32_lseek 19
29#define __NR_ia32_getpid 20
30#define __NR_ia32_mount 21
31#define __NR_ia32_umount 22
32#define __NR_ia32_setuid 23
33#define __NR_ia32_getuid 24
34#define __NR_ia32_stime 25
35#define __NR_ia32_ptrace 26
36#define __NR_ia32_alarm 27
37#define __NR_ia32_oldfstat 28
38#define __NR_ia32_pause 29
39#define __NR_ia32_utime 30
40#define __NR_ia32_stty 31
41#define __NR_ia32_gtty 32
42#define __NR_ia32_access 33
43#define __NR_ia32_nice 34
44#define __NR_ia32_ftime 35
45#define __NR_ia32_sync 36
46#define __NR_ia32_kill 37
47#define __NR_ia32_rename 38
48#define __NR_ia32_mkdir 39
49#define __NR_ia32_rmdir 40
50#define __NR_ia32_dup 41
51#define __NR_ia32_pipe 42
52#define __NR_ia32_times 43
53#define __NR_ia32_prof 44
54#define __NR_ia32_brk 45
55#define __NR_ia32_setgid 46
56#define __NR_ia32_getgid 47
57#define __NR_ia32_signal 48
58#define __NR_ia32_geteuid 49
59#define __NR_ia32_getegid 50
60#define __NR_ia32_acct 51
61#define __NR_ia32_umount2 52
62#define __NR_ia32_lock 53
63#define __NR_ia32_ioctl 54
64#define __NR_ia32_fcntl 55
65#define __NR_ia32_mpx 56
66#define __NR_ia32_setpgid 57
67#define __NR_ia32_ulimit 58
68#define __NR_ia32_oldolduname 59
69#define __NR_ia32_umask 60
70#define __NR_ia32_chroot 61
71#define __NR_ia32_ustat 62
72#define __NR_ia32_dup2 63
73#define __NR_ia32_getppid 64
74#define __NR_ia32_getpgrp 65
75#define __NR_ia32_setsid 66
76#define __NR_ia32_sigaction 67
77#define __NR_ia32_sgetmask 68
78#define __NR_ia32_ssetmask 69
79#define __NR_ia32_setreuid 70
80#define __NR_ia32_setregid 71
81#define __NR_ia32_sigsuspend 72
82#define __NR_ia32_sigpending 73
83#define __NR_ia32_sethostname 74
84#define __NR_ia32_setrlimit 75
85#define __NR_ia32_getrlimit 76 /* Back compatible 2Gig limited rlimit */
86#define __NR_ia32_getrusage 77
87#define __NR_ia32_gettimeofday 78
88#define __NR_ia32_settimeofday 79
89#define __NR_ia32_getgroups 80
90#define __NR_ia32_setgroups 81
91#define __NR_ia32_select 82
92#define __NR_ia32_symlink 83
93#define __NR_ia32_oldlstat 84
94#define __NR_ia32_readlink 85
95#define __NR_ia32_uselib 86
96#define __NR_ia32_swapon 87
97#define __NR_ia32_reboot 88
98#define __NR_ia32_readdir 89
99#define __NR_ia32_mmap 90
100#define __NR_ia32_munmap 91
101#define __NR_ia32_truncate 92
102#define __NR_ia32_ftruncate 93
103#define __NR_ia32_fchmod 94
104#define __NR_ia32_fchown 95
105#define __NR_ia32_getpriority 96
106#define __NR_ia32_setpriority 97
107#define __NR_ia32_profil 98
108#define __NR_ia32_statfs 99
109#define __NR_ia32_fstatfs 100
110#define __NR_ia32_ioperm 101
111#define __NR_ia32_socketcall 102
112#define __NR_ia32_syslog 103
113#define __NR_ia32_setitimer 104
114#define __NR_ia32_getitimer 105
115#define __NR_ia32_stat 106
116#define __NR_ia32_lstat 107
117#define __NR_ia32_fstat 108
118#define __NR_ia32_olduname 109
119#define __NR_ia32_iopl 110
120#define __NR_ia32_vhangup 111
121#define __NR_ia32_idle 112
122#define __NR_ia32_vm86old 113
123#define __NR_ia32_wait4 114
124#define __NR_ia32_swapoff 115
125#define __NR_ia32_sysinfo 116
126#define __NR_ia32_ipc 117
127#define __NR_ia32_fsync 118
128#define __NR_ia32_sigreturn 119
129#define __NR_ia32_clone 120
130#define __NR_ia32_setdomainname 121
131#define __NR_ia32_uname 122
132#define __NR_ia32_modify_ldt 123
133#define __NR_ia32_adjtimex 124
134#define __NR_ia32_mprotect 125
135#define __NR_ia32_sigprocmask 126
136#define __NR_ia32_create_module 127
137#define __NR_ia32_init_module 128
138#define __NR_ia32_delete_module 129
139#define __NR_ia32_get_kernel_syms 130
140#define __NR_ia32_quotactl 131
141#define __NR_ia32_getpgid 132
142#define __NR_ia32_fchdir 133
143#define __NR_ia32_bdflush 134
144#define __NR_ia32_sysfs 135
145#define __NR_ia32_personality 136
146#define __NR_ia32_afs_syscall 137 /* Syscall for Andrew File System */
147#define __NR_ia32_setfsuid 138
148#define __NR_ia32_setfsgid 139
149#define __NR_ia32__llseek 140
150#define __NR_ia32_getdents 141
151#define __NR_ia32__newselect 142
152#define __NR_ia32_flock 143
153#define __NR_ia32_msync 144
154#define __NR_ia32_readv 145
155#define __NR_ia32_writev 146
156#define __NR_ia32_getsid 147
157#define __NR_ia32_fdatasync 148
158#define __NR_ia32__sysctl 149
159#define __NR_ia32_mlock 150
160#define __NR_ia32_munlock 151
161#define __NR_ia32_mlockall 152
162#define __NR_ia32_munlockall 153
163#define __NR_ia32_sched_setparam 154
164#define __NR_ia32_sched_getparam 155
165#define __NR_ia32_sched_setscheduler 156
166#define __NR_ia32_sched_getscheduler 157
167#define __NR_ia32_sched_yield 158
168#define __NR_ia32_sched_get_priority_max 159
169#define __NR_ia32_sched_get_priority_min 160
170#define __NR_ia32_sched_rr_get_interval 161
171#define __NR_ia32_nanosleep 162
172#define __NR_ia32_mremap 163
173#define __NR_ia32_setresuid 164
174#define __NR_ia32_getresuid 165
175#define __NR_ia32_vm86 166
176#define __NR_ia32_query_module 167
177#define __NR_ia32_poll 168
178#define __NR_ia32_nfsservctl 169
179#define __NR_ia32_setresgid 170
180#define __NR_ia32_getresgid 171
181#define __NR_ia32_prctl 172
182#define __NR_ia32_rt_sigreturn 173 16#define __NR_ia32_rt_sigreturn 173
183#define __NR_ia32_rt_sigaction 174
184#define __NR_ia32_rt_sigprocmask 175
185#define __NR_ia32_rt_sigpending 176
186#define __NR_ia32_rt_sigtimedwait 177
187#define __NR_ia32_rt_sigqueueinfo 178
188#define __NR_ia32_rt_sigsuspend 179
189#define __NR_ia32_pread 180
190#define __NR_ia32_pwrite 181
191#define __NR_ia32_chown 182
192#define __NR_ia32_getcwd 183
193#define __NR_ia32_capget 184
194#define __NR_ia32_capset 185
195#define __NR_ia32_sigaltstack 186
196#define __NR_ia32_sendfile 187
197#define __NR_ia32_getpmsg 188 /* some people actually want streams */
198#define __NR_ia32_putpmsg 189 /* some people actually want streams */
199#define __NR_ia32_vfork 190
200#define __NR_ia32_ugetrlimit 191 /* SuS compliant getrlimit */
201#define __NR_ia32_mmap2 192
202#define __NR_ia32_truncate64 193
203#define __NR_ia32_ftruncate64 194
204#define __NR_ia32_stat64 195
205#define __NR_ia32_lstat64 196
206#define __NR_ia32_fstat64 197
207#define __NR_ia32_lchown32 198
208#define __NR_ia32_getuid32 199
209#define __NR_ia32_getgid32 200
210#define __NR_ia32_geteuid32 201
211#define __NR_ia32_getegid32 202
212#define __NR_ia32_setreuid32 203
213#define __NR_ia32_setregid32 204
214#define __NR_ia32_getgroups32 205
215#define __NR_ia32_setgroups32 206
216#define __NR_ia32_fchown32 207
217#define __NR_ia32_setresuid32 208
218#define __NR_ia32_getresuid32 209
219#define __NR_ia32_setresgid32 210
220#define __NR_ia32_getresgid32 211
221#define __NR_ia32_chown32 212
222#define __NR_ia32_setuid32 213
223#define __NR_ia32_setgid32 214
224#define __NR_ia32_setfsuid32 215
225#define __NR_ia32_setfsgid32 216
226#define __NR_ia32_pivot_root 217
227#define __NR_ia32_mincore 218
228#define __NR_ia32_madvise 219
229#define __NR_ia32_madvise1 219 /* delete when C lib stub is removed */
230#define __NR_ia32_getdents64 220
231#define __NR_ia32_fcntl64 221
232#define __NR_ia32_tuxcall 222
233#define __NR_ia32_security 223
234#define __NR_ia32_gettid 224
235#define __NR_ia32_readahead 225
236#define __NR_ia32_setxattr 226
237#define __NR_ia32_lsetxattr 227
238#define __NR_ia32_fsetxattr 228
239#define __NR_ia32_getxattr 229
240#define __NR_ia32_lgetxattr 230
241#define __NR_ia32_fgetxattr 231
242#define __NR_ia32_listxattr 232
243#define __NR_ia32_llistxattr 233
244#define __NR_ia32_flistxattr 234
245#define __NR_ia32_removexattr 235
246#define __NR_ia32_lremovexattr 236
247#define __NR_ia32_fremovexattr 237
248#define __NR_ia32_tkill 238
249#define __NR_ia32_sendfile64 239
250#define __NR_ia32_futex 240
251#define __NR_ia32_sched_setaffinity 241
252#define __NR_ia32_sched_getaffinity 242
253#define __NR_ia32_set_thread_area 243
254#define __NR_ia32_get_thread_area 244
255#define __NR_ia32_io_setup 245
256#define __NR_ia32_io_destroy 246
257#define __NR_ia32_io_getevents 247
258#define __NR_ia32_io_submit 248
259#define __NR_ia32_io_cancel 249
260#define __NR_ia32_exit_group 252
261#define __NR_ia32_lookup_dcookie 253
262#define __NR_ia32_sys_epoll_create 254
263#define __NR_ia32_sys_epoll_ctl 255
264#define __NR_ia32_sys_epoll_wait 256
265#define __NR_ia32_remap_file_pages 257
266#define __NR_ia32_set_tid_address 258
267#define __NR_ia32_timer_create 259
268#define __NR_ia32_timer_settime (__NR_ia32_timer_create+1)
269#define __NR_ia32_timer_gettime (__NR_ia32_timer_create+2)
270#define __NR_ia32_timer_getoverrun (__NR_ia32_timer_create+3)
271#define __NR_ia32_timer_delete (__NR_ia32_timer_create+4)
272#define __NR_ia32_clock_settime (__NR_ia32_timer_create+5)
273#define __NR_ia32_clock_gettime (__NR_ia32_timer_create+6)
274#define __NR_ia32_clock_getres (__NR_ia32_timer_create+7)
275#define __NR_ia32_clock_nanosleep (__NR_ia32_timer_create+8)
276#define __NR_ia32_statfs64 268
277#define __NR_ia32_fstatfs64 269
278#define __NR_ia32_tgkill 270
279#define __NR_ia32_utimes 271
280#define __NR_ia32_fadvise64_64 272
281#define __NR_ia32_vserver 273
282#define __NR_ia32_mbind 274
283#define __NR_ia32_get_mempolicy 275
284#define __NR_ia32_set_mempolicy 276
285#define __NR_ia32_mq_open 277
286#define __NR_ia32_mq_unlink (__NR_ia32_mq_open+1)
287#define __NR_ia32_mq_timedsend (__NR_ia32_mq_open+2)
288#define __NR_ia32_mq_timedreceive (__NR_ia32_mq_open+3)
289#define __NR_ia32_mq_notify (__NR_ia32_mq_open+4)
290#define __NR_ia32_mq_getsetattr (__NR_ia32_mq_open+5)
291#define __NR_ia32_kexec 283
292#define __NR_ia32_waitid 284
293/* #define __NR_sys_setaltroot 285 */
294#define __NR_ia32_add_key 286
295#define __NR_ia32_request_key 287
296#define __NR_ia32_keyctl 288
297#define __NR_ia32_ioprio_set 289
298#define __NR_ia32_ioprio_get 290
299#define __NR_ia32_inotify_init 291
300#define __NR_ia32_inotify_add_watch 292
301#define __NR_ia32_inotify_rm_watch 293
302#define __NR_ia32_migrate_pages 294
303#define __NR_ia32_openat 295
304#define __NR_ia32_mkdirat 296
305#define __NR_ia32_mknodat 297
306#define __NR_ia32_fchownat 298
307#define __NR_ia32_futimesat 299
308#define __NR_ia32_fstatat64 300
309#define __NR_ia32_unlinkat 301
310#define __NR_ia32_renameat 302
311#define __NR_ia32_linkat 303
312#define __NR_ia32_symlinkat 304
313#define __NR_ia32_readlinkat 305
314#define __NR_ia32_fchmodat 306
315#define __NR_ia32_faccessat 307
316#define __NR_ia32_pselect6 308
317#define __NR_ia32_ppoll 309
318#define __NR_ia32_unshare 310
319 17
320#endif /* _ASM_X86_64_IA32_UNISTD_H_ */ 18#endif /* _ASM_X86_64_IA32_UNISTD_H_ */
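The __NR_ia32_* values above mirror the i386 int $0x80 syscall numbers served by the x86-64 compat layer. A minimal userland sketch (compiled as 32-bit code, not part of this header, file name hypothetical) that invokes one of them directly, using __NR_ia32_gettid = 224 from the list above:

/* build with: gcc -m32 gettid32.c */
int main(void)
{
        long tid;

        /* eax holds the syscall number; the result comes back in eax */
        __asm__ volatile ("int $0x80" : "=a" (tid) : "0" (224 /* __NR_ia32_gettid */));
        return (int)tid;
}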
diff --git a/include/asm-x86_64/intel_arch_perfmon.h b/include/asm-x86_64/intel_arch_perfmon.h
new file mode 100644
index 000000000000..59c396431569
--- /dev/null
+++ b/include/asm-x86_64/intel_arch_perfmon.h
@@ -0,0 +1,19 @@
1#ifndef X86_64_INTEL_ARCH_PERFMON_H
2#define X86_64_INTEL_ARCH_PERFMON_H 1
3
4#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
5#define MSR_ARCH_PERFMON_PERFCTR1 0xc2
6
7#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
8#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
9
10#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22)
11#define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
12#define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
13#define ARCH_PERFMON_EVENTSEL_USR (1 << 16)
14
15#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c)
16#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
17#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0)
18
19#endif /* X86_64_INTEL_ARCH_PERFMON_H */
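A rough consumer sketch for these definitions (loosely modelled on a perfctr-based NMI watchdog; wrmsr() is assumed to come from <asm/msr.h>): program counter 0 to count unhalted core cycles in both user and kernel mode.

unsigned int evntsel;

evntsel = ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
        | ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
        | ARCH_PERFMON_EVENTSEL_USR
        | ARCH_PERFMON_EVENTSEL_OS;

wrmsr(MSR_ARCH_PERFMON_PERFCTR0, 0, 0);         /* clear the counter */
wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);  /* select the event  */
evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);  /* start counting    */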
diff --git a/include/asm-x86_64/k8.h b/include/asm-x86_64/k8.h
new file mode 100644
index 000000000000..699dd6961eda
--- /dev/null
+++ b/include/asm-x86_64/k8.h
@@ -0,0 +1,14 @@
1#ifndef _ASM_K8_H
2#define _ASM_K8_H 1
3
4#include <linux/pci.h>
5
6extern struct pci_device_id k8_nb_ids[];
7
8extern int early_is_k8_nb(u32 value);
9extern struct pci_dev **k8_northbridges;
10extern int num_k8_northbridges;
11extern int cache_k8_northbridges(void);
12extern void k8_flush_garts(void);
13
14#endif
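A small usage sketch (assuming the caller runs after PCI init; the loop body is purely illustrative): enumerate the cached K8 northbridges, then flush all GART TLBs.

int i;

if (cache_k8_northbridges() < 0)
        return;                         /* no K8 northbridges present */

for (i = 0; i < num_k8_northbridges; i++) {
        struct pci_dev *nb = k8_northbridges[i];

        printk(KERN_INFO "K8 NB %d at %02x:%02x.%d\n", i,
               nb->bus->number, PCI_SLOT(nb->devfn), PCI_FUNC(nb->devfn));
}

k8_flush_garts();                       /* flush the GART TLB on every NB */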
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
index cd17945bf218..e769e6200225 100644
--- a/include/asm-x86_64/local.h
+++ b/include/asm-x86_64/local.h
@@ -59,12 +59,26 @@ static inline void local_sub(long i, local_t *v)
59 * This could be done better if we moved the per cpu data directly 59 * This could be done better if we moved the per cpu data directly
60 * after GS. 60 * after GS.
61 */ 61 */
62#define cpu_local_read(v) local_read(&__get_cpu_var(v)) 62
63#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i)) 63/* Need to disable preemption for the cpu local counters; otherwise we could
64#define cpu_local_inc(v) local_inc(&__get_cpu_var(v)) 64 still access a variable of a previous CPU in a non-atomic way. */
65#define cpu_local_dec(v) local_dec(&__get_cpu_var(v)) 65#define cpu_local_wrap_v(v) \
66#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v)) 66 ({ local_t res__; \
67#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v)) 67 preempt_disable(); \
68 res__ = (v); \
69 preempt_enable(); \
70 res__; })
71#define cpu_local_wrap(v) \
72 ({ preempt_disable(); \
73 v; \
74 preempt_enable(); }) \
75
76#define cpu_local_read(v) cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
77#define cpu_local_set(v, i) cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
78#define cpu_local_inc(v) cpu_local_wrap(local_inc(&__get_cpu_var(v)))
79#define cpu_local_dec(v) cpu_local_wrap(local_dec(&__get_cpu_var(v)))
80#define cpu_local_add(i, v) cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
81#define cpu_local_sub(i, v) cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
68 82
69#define __cpu_local_inc(v) cpu_local_inc(v) 83#define __cpu_local_inc(v) cpu_local_inc(v)
70#define __cpu_local_dec(v) cpu_local_dec(v) 84#define __cpu_local_dec(v) cpu_local_dec(v)
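Usage sketch for the preempt-safe wrappers (the counter name is made up): a per-CPU local_t that can be bumped from any context without an explicit preempt_disable() at the call site.

static DEFINE_PER_CPU(local_t, irq_events);

static void note_irq_event(void)
{
        cpu_local_inc(irq_events);      /* preemption handled inside the wrapper */
}

static long this_cpu_irq_events(void)
{
        return cpu_local_read(irq_events);
}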
diff --git a/include/asm-x86_64/mce.h b/include/asm-x86_64/mce.h
index 7229785094e3..d13687dfd691 100644
--- a/include/asm-x86_64/mce.h
+++ b/include/asm-x86_64/mce.h
@@ -67,13 +67,22 @@ struct mce_log {
67/* Software defined banks */ 67/* Software defined banks */
68#define MCE_EXTENDED_BANK 128 68#define MCE_EXTENDED_BANK 128
69#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0 69#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0
70#define MCE_THRESHOLD_BASE MCE_EXTENDED_BANK + 1 /* MCE_AMD */ 70
71#define MCE_THRESHOLD_DRAM_ECC MCE_THRESHOLD_BASE + 4 71#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) /* MCE_AMD */
72#define K8_MCE_THRESHOLD_BANK_0 (K8_MCE_THRESHOLD_BASE + 0 * 9)
73#define K8_MCE_THRESHOLD_BANK_1 (K8_MCE_THRESHOLD_BASE + 1 * 9)
74#define K8_MCE_THRESHOLD_BANK_2 (K8_MCE_THRESHOLD_BASE + 2 * 9)
75#define K8_MCE_THRESHOLD_BANK_3 (K8_MCE_THRESHOLD_BASE + 3 * 9)
76#define K8_MCE_THRESHOLD_BANK_4 (K8_MCE_THRESHOLD_BASE + 4 * 9)
77#define K8_MCE_THRESHOLD_BANK_5 (K8_MCE_THRESHOLD_BASE + 5 * 9)
78#define K8_MCE_THRESHOLD_DRAM_ECC (K8_MCE_THRESHOLD_BANK_4 + 0)
72 79
73#ifdef __KERNEL__ 80#ifdef __KERNEL__
74#include <asm/atomic.h> 81#include <asm/atomic.h>
75 82
76void mce_log(struct mce *m); 83void mce_log(struct mce *m);
84DECLARE_PER_CPU(struct sys_device, device_mce);
85
77#ifdef CONFIG_X86_MCE_INTEL 86#ifdef CONFIG_X86_MCE_INTEL
78void mce_intel_feature_init(struct cpuinfo_x86 *c); 87void mce_intel_feature_init(struct cpuinfo_x86 *c);
79#else 88#else
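A sketch of how the software-defined threshold banks are meant to be reported (roughly what an AMD threshold interrupt handler would do; only the struct mce fields shown are assumed):

struct mce m;

memset(&m, 0, sizeof(m));
m.cpu  = smp_processor_id();
m.bank = K8_MCE_THRESHOLD_DRAM_ECC;     /* MC bank 4, block 0: DRAM ECC */
mce_log(&m);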
diff --git a/include/asm-x86_64/mutex.h b/include/asm-x86_64/mutex.h
index 11fbee2bd6c0..06fab6de2a88 100644
--- a/include/asm-x86_64/mutex.h
+++ b/include/asm-x86_64/mutex.h
@@ -24,7 +24,7 @@ do { \
24 typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ 24 typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
25 \ 25 \
26 __asm__ __volatile__( \ 26 __asm__ __volatile__( \
27 LOCK " decl (%%rdi) \n" \ 27 LOCK_PREFIX " decl (%%rdi) \n" \
28 " js 2f \n" \ 28 " js 2f \n" \
29 "1: \n" \ 29 "1: \n" \
30 \ 30 \
@@ -74,7 +74,7 @@ do { \
74 typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ 74 typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
75 \ 75 \
76 __asm__ __volatile__( \ 76 __asm__ __volatile__( \
77 LOCK " incl (%%rdi) \n" \ 77 LOCK_PREFIX " incl (%%rdi) \n" \
78 " jle 2f \n" \ 78 " jle 2f \n" \
79 "1: \n" \ 79 "1: \n" \
80 \ 80 \
diff --git a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h
index d3abfc6a8fd5..efb45c894d76 100644
--- a/include/asm-x86_64/nmi.h
+++ b/include/asm-x86_64/nmi.h
@@ -5,26 +5,27 @@
5#define ASM_NMI_H 5#define ASM_NMI_H
6 6
7#include <linux/pm.h> 7#include <linux/pm.h>
8#include <asm/io.h>
8 9
9struct pt_regs; 10struct pt_regs;
10 11
11typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu); 12typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
12 13
13/** 14/**
14 * set_nmi_callback 15 * set_nmi_callback
15 * 16 *
16 * Set a handler for an NMI. Only one handler may be 17 * Set a handler for an NMI. Only one handler may be
17 * set. Return 1 if the NMI was handled. 18 * set. Return 1 if the NMI was handled.
18 */ 19 */
19void set_nmi_callback(nmi_callback_t callback); 20void set_nmi_callback(nmi_callback_t callback);
20 21
21/** 22/**
22 * unset_nmi_callback 23 * unset_nmi_callback
23 * 24 *
24 * Remove the handler previously set. 25 * Remove the handler previously set.
25 */ 26 */
26void unset_nmi_callback(void); 27void unset_nmi_callback(void);
27 28
28#ifdef CONFIG_PM 29#ifdef CONFIG_PM
29 30
30/** Replace the PM callback routine for NMI. */ 31/** Replace the PM callback routine for NMI. */
@@ -56,4 +57,21 @@ extern int unknown_nmi_panic;
56 57
57extern int check_nmi_watchdog(void); 58extern int check_nmi_watchdog(void);
58 59
60extern void setup_apic_nmi_watchdog (void);
61extern int reserve_lapic_nmi(void);
62extern void release_lapic_nmi(void);
63extern void disable_timer_nmi_watchdog(void);
64extern void enable_timer_nmi_watchdog(void);
65extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
66
67extern void nmi_watchdog_default(void);
68extern int setup_nmi_watchdog(char *);
69
70extern unsigned int nmi_watchdog;
71#define NMI_DEFAULT -1
72#define NMI_NONE 0
73#define NMI_IO_APIC 1
74#define NMI_LOCAL_APIC 2
75#define NMI_INVALID 3
76
59#endif /* ASM_NMI_H */ 77#endif /* ASM_NMI_H */
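Boot-time setup sketch using the newly exported symbols (error handling omitted): resolve NMI_DEFAULT to a concrete mode, then start the matching watchdog.

nmi_watchdog_default();                 /* pick a real mode for NMI_DEFAULT */

switch (nmi_watchdog) {
case NMI_LOCAL_APIC:
        setup_apic_nmi_watchdog();      /* perfctr-driven watchdog */
        break;
case NMI_IO_APIC:
        /* timer-based NMIs routed through the I/O APIC */
        break;
case NMI_NONE:
default:
        break;
}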
diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h
index 2db0620d5449..49c5e9280598 100644
--- a/include/asm-x86_64/pci.h
+++ b/include/asm-x86_64/pci.h
@@ -39,8 +39,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
39#include <asm/scatterlist.h> 39#include <asm/scatterlist.h>
40#include <linux/string.h> 40#include <linux/string.h>
41#include <asm/page.h> 41#include <asm/page.h>
42#include <linux/dma-mapping.h> /* for have_iommu */
43 42
43extern void pci_iommu_alloc(void);
44extern int iommu_setup(char *opt); 44extern int iommu_setup(char *opt);
45 45
46/* The PCI address space does equal the physical memory 46/* The PCI address space does equal the physical memory
@@ -52,7 +52,7 @@ extern int iommu_setup(char *opt);
52 */ 52 */
53#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) 53#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
54 54
55#ifdef CONFIG_GART_IOMMU 55#if defined(CONFIG_IOMMU) || defined(CONFIG_CALGARY_IOMMU)
56 56
57/* 57/*
58 * x86-64 always supports DAC, but sometimes it is useful to force 58 * x86-64 always supports DAC, but sometimes it is useful to force
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 31e83c3bd022..a31ab4e68a9b 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -337,14 +337,8 @@ static inline int pmd_large(pmd_t pte) {
337/* to find an entry in a page-table-directory. */ 337/* to find an entry in a page-table-directory. */
338#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) 338#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
339#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address)) 339#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address))
340#define pud_offset_k(pgd, addr) pud_offset(pgd, addr)
341#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT) 340#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT)
342 341
343static inline pud_t *__pud_offset_k(pud_t *pud, unsigned long address)
344{
345 return pud + pud_index(address);
346}
347
348/* PMD - Level 2 access */ 342/* PMD - Level 2 access */
349#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK)) 343#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
350#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) 344#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 3061a38a3b1d..3b3c1217fe61 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -69,7 +69,11 @@ struct cpuinfo_x86 {
69 cpumask_t llc_shared_map; /* cpus sharing the last level cache */ 69 cpumask_t llc_shared_map; /* cpus sharing the last level cache */
70#endif 70#endif
71 __u8 apicid; 71 __u8 apicid;
72#ifdef CONFIG_SMP
72 __u8 booted_cores; /* number of cores as seen by OS */ 73 __u8 booted_cores; /* number of cores as seen by OS */
74 __u8 phys_proc_id; /* Physical Processor id. */
75 __u8 cpu_core_id; /* Core id. */
76#endif
73} ____cacheline_aligned; 77} ____cacheline_aligned;
74 78
75#define X86_VENDOR_INTEL 0 79#define X86_VENDOR_INTEL 0
@@ -96,6 +100,7 @@ extern char ignore_irq13;
96extern void identify_cpu(struct cpuinfo_x86 *); 100extern void identify_cpu(struct cpuinfo_x86 *);
97extern void print_cpu_info(struct cpuinfo_x86 *); 101extern void print_cpu_info(struct cpuinfo_x86 *);
98extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); 102extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
103extern unsigned short num_cache_leaves;
99 104
100/* 105/*
101 * EFLAGS bits 106 * EFLAGS bits
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index 8abf2a43c944..038fe1f47e6f 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -37,7 +37,6 @@ extern void ia32_sysenter_target(void);
37 37
38extern void config_acpi_tables(void); 38extern void config_acpi_tables(void);
39extern void ia32_syscall(void); 39extern void ia32_syscall(void);
40extern void iommu_hole_init(void);
41 40
42extern int pmtimer_mark_offset(void); 41extern int pmtimer_mark_offset(void);
43extern void pmtimer_resume(void); 42extern void pmtimer_resume(void);
@@ -75,7 +74,7 @@ extern void main_timer_handler(struct pt_regs *regs);
75 74
76extern unsigned long end_pfn_map; 75extern unsigned long end_pfn_map;
77 76
78extern void show_trace(unsigned long * rsp); 77extern void show_trace(struct task_struct *, struct pt_regs *, unsigned long * rsp);
79extern void show_registers(struct pt_regs *regs); 78extern void show_registers(struct pt_regs *regs);
80 79
81extern void exception_table_check(void); 80extern void exception_table_check(void);
@@ -101,13 +100,9 @@ extern int unsynchronized_tsc(void);
101 100
102extern void select_idle_routine(const struct cpuinfo_x86 *c); 101extern void select_idle_routine(const struct cpuinfo_x86 *c);
103 102
104extern void gart_parse_options(char *);
105extern void __init no_iommu_init(void);
106
107extern unsigned long table_start, table_end; 103extern unsigned long table_start, table_end;
108 104
109extern int exception_trace; 105extern int exception_trace;
110extern int force_iommu, no_iommu;
111extern int using_apic_timer; 106extern int using_apic_timer;
112extern int disable_apic; 107extern int disable_apic;
113extern unsigned cpu_khz; 108extern unsigned cpu_khz;
@@ -116,7 +111,13 @@ extern int skip_ioapic_setup;
116extern int acpi_ht; 111extern int acpi_ht;
117extern int acpi_disabled; 112extern int acpi_disabled;
118 113
119#ifdef CONFIG_GART_IOMMU 114extern void no_iommu_init(void);
115extern int force_iommu, no_iommu;
116extern int iommu_detected;
117#ifdef CONFIG_IOMMU
118extern void gart_iommu_init(void);
119extern void gart_parse_options(char *);
120extern void iommu_hole_init(void);
120extern int fallback_aper_order; 121extern int fallback_aper_order;
121extern int fallback_aper_force; 122extern int fallback_aper_force;
122extern int iommu_aperture; 123extern int iommu_aperture;
diff --git a/include/asm-x86_64/rwlock.h b/include/asm-x86_64/rwlock.h
index 9942cc393064..dea0e9459264 100644
--- a/include/asm-x86_64/rwlock.h
+++ b/include/asm-x86_64/rwlock.h
@@ -24,7 +24,7 @@
24#define RW_LOCK_BIAS_STR "0x01000000" 24#define RW_LOCK_BIAS_STR "0x01000000"
25 25
26#define __build_read_lock_ptr(rw, helper) \ 26#define __build_read_lock_ptr(rw, helper) \
27 asm volatile(LOCK "subl $1,(%0)\n\t" \ 27 asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t" \
28 "js 2f\n" \ 28 "js 2f\n" \
29 "1:\n" \ 29 "1:\n" \
30 LOCK_SECTION_START("") \ 30 LOCK_SECTION_START("") \
@@ -34,7 +34,7 @@
34 ::"a" (rw) : "memory") 34 ::"a" (rw) : "memory")
35 35
36#define __build_read_lock_const(rw, helper) \ 36#define __build_read_lock_const(rw, helper) \
37 asm volatile(LOCK "subl $1,%0\n\t" \ 37 asm volatile(LOCK_PREFIX "subl $1,%0\n\t" \
38 "js 2f\n" \ 38 "js 2f\n" \
39 "1:\n" \ 39 "1:\n" \
40 LOCK_SECTION_START("") \ 40 LOCK_SECTION_START("") \
@@ -54,7 +54,7 @@
54 } while (0) 54 } while (0)
55 55
56#define __build_write_lock_ptr(rw, helper) \ 56#define __build_write_lock_ptr(rw, helper) \
57 asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ 57 asm volatile(LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
58 "jnz 2f\n" \ 58 "jnz 2f\n" \
59 "1:\n" \ 59 "1:\n" \
60 LOCK_SECTION_START("") \ 60 LOCK_SECTION_START("") \
@@ -64,7 +64,7 @@
64 ::"a" (rw) : "memory") 64 ::"a" (rw) : "memory")
65 65
66#define __build_write_lock_const(rw, helper) \ 66#define __build_write_lock_const(rw, helper) \
67 asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ 67 asm volatile(LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
68 "jnz 2f\n" \ 68 "jnz 2f\n" \
69 "1:\n" \ 69 "1:\n" \
70 LOCK_SECTION_START("") \ 70 LOCK_SECTION_START("") \
diff --git a/include/asm-x86_64/semaphore.h b/include/asm-x86_64/semaphore.h
index a389aa6fe80f..064df08b9a0f 100644
--- a/include/asm-x86_64/semaphore.h
+++ b/include/asm-x86_64/semaphore.h
@@ -106,7 +106,7 @@ static inline void down(struct semaphore * sem)
106 106
107 __asm__ __volatile__( 107 __asm__ __volatile__(
108 "# atomic down operation\n\t" 108 "# atomic down operation\n\t"
109 LOCK "decl %0\n\t" /* --sem->count */ 109 LOCK_PREFIX "decl %0\n\t" /* --sem->count */
110 "js 2f\n" 110 "js 2f\n"
111 "1:\n" 111 "1:\n"
112 LOCK_SECTION_START("") 112 LOCK_SECTION_START("")
@@ -130,7 +130,7 @@ static inline int down_interruptible(struct semaphore * sem)
130 130
131 __asm__ __volatile__( 131 __asm__ __volatile__(
132 "# atomic interruptible down operation\n\t" 132 "# atomic interruptible down operation\n\t"
133 LOCK "decl %1\n\t" /* --sem->count */ 133 LOCK_PREFIX "decl %1\n\t" /* --sem->count */
134 "js 2f\n\t" 134 "js 2f\n\t"
135 "xorl %0,%0\n" 135 "xorl %0,%0\n"
136 "1:\n" 136 "1:\n"
@@ -154,7 +154,7 @@ static inline int down_trylock(struct semaphore * sem)
154 154
155 __asm__ __volatile__( 155 __asm__ __volatile__(
156 "# atomic interruptible down operation\n\t" 156 "# atomic interruptible down operation\n\t"
157 LOCK "decl %1\n\t" /* --sem->count */ 157 LOCK_PREFIX "decl %1\n\t" /* --sem->count */
158 "js 2f\n\t" 158 "js 2f\n\t"
159 "xorl %0,%0\n" 159 "xorl %0,%0\n"
160 "1:\n" 160 "1:\n"
@@ -178,7 +178,7 @@ static inline void up(struct semaphore * sem)
178{ 178{
179 __asm__ __volatile__( 179 __asm__ __volatile__(
180 "# atomic up operation\n\t" 180 "# atomic up operation\n\t"
181 LOCK "incl %0\n\t" /* ++sem->count */ 181 LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
182 "jle 2f\n" 182 "jle 2f\n"
183 "1:\n" 183 "1:\n"
184 LOCK_SECTION_START("") 184 LOCK_SECTION_START("")
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index 7686b9b25aef..6805e1feb300 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -53,8 +53,6 @@ extern int smp_call_function_single(int cpuid, void (*func) (void *info),
53 53
54extern cpumask_t cpu_sibling_map[NR_CPUS]; 54extern cpumask_t cpu_sibling_map[NR_CPUS];
55extern cpumask_t cpu_core_map[NR_CPUS]; 55extern cpumask_t cpu_core_map[NR_CPUS];
56extern u8 phys_proc_id[NR_CPUS];
57extern u8 cpu_core_id[NR_CPUS];
58extern u8 cpu_llc_id[NR_CPUS]; 56extern u8 cpu_llc_id[NR_CPUS];
59 57
60#define SMP_TRAMPOLINE_BASE 0x6000 58#define SMP_TRAMPOLINE_BASE 0x6000
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 5d8a5e3589ff..8d3421996f94 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -31,15 +31,19 @@
31 "jmp 1b\n" \ 31 "jmp 1b\n" \
32 LOCK_SECTION_END 32 LOCK_SECTION_END
33 33
34#define __raw_spin_lock_string_up \
35 "\n\tdecl %0"
36
34#define __raw_spin_unlock_string \ 37#define __raw_spin_unlock_string \
35 "movl $1,%0" \ 38 "movl $1,%0" \
36 :"=m" (lock->slock) : : "memory" 39 :"=m" (lock->slock) : : "memory"
37 40
38static inline void __raw_spin_lock(raw_spinlock_t *lock) 41static inline void __raw_spin_lock(raw_spinlock_t *lock)
39{ 42{
40 __asm__ __volatile__( 43 alternative_smp(
41 __raw_spin_lock_string 44 __raw_spin_lock_string,
42 :"=m" (lock->slock) : : "memory"); 45 __raw_spin_lock_string_up,
46 "=m" (lock->slock) : : "memory");
43} 47}
44 48
45#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 49#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
diff --git a/include/asm-x86_64/string.h b/include/asm-x86_64/string.h
index ee6bf275349e..9505d9f4bead 100644
--- a/include/asm-x86_64/string.h
+++ b/include/asm-x86_64/string.h
@@ -6,7 +6,8 @@
6/* Written 2002 by Andi Kleen */ 6/* Written 2002 by Andi Kleen */
7 7
8/* Only used for special circumstances. Stolen from i386/string.h */ 8/* Only used for special circumstances. Stolen from i386/string.h */
9static inline void * __inline_memcpy(void * to, const void * from, size_t n) 9static __always_inline void *
10__inline_memcpy(void * to, const void * from, size_t n)
10{ 11{
11unsigned long d0, d1, d2; 12unsigned long d0, d1, d2;
12__asm__ __volatile__( 13__asm__ __volatile__(
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index f48e0dad8b3d..68e559f3631c 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -3,15 +3,10 @@
3 3
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <asm/segment.h> 5#include <asm/segment.h>
6#include <asm/alternative.h>
6 7
7#ifdef __KERNEL__ 8#ifdef __KERNEL__
8 9
9#ifdef CONFIG_SMP
10#define LOCK_PREFIX "lock ; "
11#else
12#define LOCK_PREFIX ""
13#endif
14
15#define __STR(x) #x 10#define __STR(x) #x
16#define STR(x) __STR(x) 11#define STR(x) __STR(x)
17 12
@@ -34,7 +29,7 @@
34 "thread_return:\n\t" \ 29 "thread_return:\n\t" \
35 "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \ 30 "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
36 "movq %P[thread_info](%%rsi),%%r8\n\t" \ 31 "movq %P[thread_info](%%rsi),%%r8\n\t" \
37 LOCK "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \ 32 LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
38 "movq %%rax,%%rdi\n\t" \ 33 "movq %%rax,%%rdi\n\t" \
39 "jc ret_from_fork\n\t" \ 34 "jc ret_from_fork\n\t" \
40 RESTORE_CONTEXT \ 35 RESTORE_CONTEXT \
@@ -69,82 +64,6 @@ extern void load_gs_index(unsigned);
69 ".previous" \ 64 ".previous" \
70 : :"r" (value), "r" (0)) 65 : :"r" (value), "r" (0))
71 66
72#ifdef __KERNEL__
73struct alt_instr {
74 __u8 *instr; /* original instruction */
75 __u8 *replacement;
76 __u8 cpuid; /* cpuid bit set for replacement */
77 __u8 instrlen; /* length of original instruction */
78 __u8 replacementlen; /* length of new instruction, <= instrlen */
79 __u8 pad[5];
80};
81#endif
82
83/*
84 * Alternative instructions for different CPU types or capabilities.
85 *
86 * This allows to use optimized instructions even on generic binary
87 * kernels.
88 *
89 * length of oldinstr must be longer or equal the length of newinstr
90 * It can be padded with nops as needed.
91 *
92 * For non barrier like inlines please define new variants
93 * without volatile and memory clobber.
94 */
95#define alternative(oldinstr, newinstr, feature) \
96 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
97 ".section .altinstructions,\"a\"\n" \
98 " .align 8\n" \
99 " .quad 661b\n" /* label */ \
100 " .quad 663f\n" /* new instruction */ \
101 " .byte %c0\n" /* feature bit */ \
102 " .byte 662b-661b\n" /* sourcelen */ \
103 " .byte 664f-663f\n" /* replacementlen */ \
104 ".previous\n" \
105 ".section .altinstr_replacement,\"ax\"\n" \
106 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
107 ".previous" :: "i" (feature) : "memory")
108
109/*
110 * Alternative inline assembly with input.
111 *
112 * Peculiarities:
113 * No memory clobber here.
114 * Argument numbers start with 1.
115 * Best is to use constraints that are fixed size (like (%1) ... "r")
116 * If you use variable sized constraints like "m" or "g" in the
117 * replacement make sure to pad to the worst case length.
118 */
119#define alternative_input(oldinstr, newinstr, feature, input...) \
120 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
121 ".section .altinstructions,\"a\"\n" \
122 " .align 8\n" \
123 " .quad 661b\n" /* label */ \
124 " .quad 663f\n" /* new instruction */ \
125 " .byte %c0\n" /* feature bit */ \
126 " .byte 662b-661b\n" /* sourcelen */ \
127 " .byte 664f-663f\n" /* replacementlen */ \
128 ".previous\n" \
129 ".section .altinstr_replacement,\"ax\"\n" \
130 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
131 ".previous" :: "i" (feature), ##input)
132
133/* Like alternative_input, but with a single output argument */
134#define alternative_io(oldinstr, newinstr, feature, output, input...) \
135 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
136 ".section .altinstructions,\"a\"\n" \
137 " .align 8\n" \
138 " .quad 661b\n" /* label */ \
139 " .quad 663f\n" /* new instruction */ \
140 " .byte %c[feat]\n" /* feature bit */ \
141 " .byte 662b-661b\n" /* sourcelen */ \
142 " .byte 664f-663f\n" /* replacementlen */ \
143 ".previous\n" \
144 ".section .altinstr_replacement,\"ax\"\n" \
145 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
146 ".previous" : output : [feat] "i" (feature), ##input)
147
148/* 67/*
149 * Clear and set 'TS' bit respectively 68 * Clear and set 'TS' bit respectively
150 */ 69 */
@@ -366,5 +285,6 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
366void cpu_idle_wait(void); 285void cpu_idle_wait(void);
367 286
368extern unsigned long arch_align_stack(unsigned long sp); 287extern unsigned long arch_align_stack(unsigned long sp);
288extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
369 289
370#endif 290#endif
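The alternative*() macros removed above only move into <asm/alternative.h>; their use is unchanged. A hedged consumer sketch, mirroring the i386 prefetch helpers and assuming ASM_NOP4 and X86_FEATURE_XMM are available:

static inline void prefetch_nta(const void *x)
{
        alternative_input(ASM_NOP4,             /* oldinstr: plain nop        */
                          "prefetchnta (%1)",   /* newinstr when SSE present  */
                          X86_FEATURE_XMM,
                          "r" (x));             /* %1: address to prefetch    */
}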
diff --git a/include/asm-x86_64/tce.h b/include/asm-x86_64/tce.h
new file mode 100644
index 000000000000..ee51d31528d6
--- /dev/null
+++ b/include/asm-x86_64/tce.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright (C) 2006 Muli Ben-Yehuda <muli@il.ibm.com>, IBM Corporation
3 * Copyright (C) 2006 Jon Mason <jdmason@us.ibm.com>, IBM Corporation
4 *
5 * This file is derived from asm-powerpc/tce.h.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#ifndef _ASM_X86_64_TCE_H
23#define _ASM_X86_64_TCE_H
24
25extern void* tce_table_kva[];
26extern unsigned int specified_table_size;
27struct iommu_table;
28
29#define TCE_ENTRY_SIZE 8 /* in bytes */
30
31#define TCE_READ_SHIFT 0
32#define TCE_WRITE_SHIFT 1
33#define TCE_HUBID_SHIFT 2 /* unused */
34#define TCE_RSVD_SHIFT 8 /* unused */
35#define TCE_RPN_SHIFT 12
36#define TCE_UNUSED_SHIFT 48 /* unused */
37
38#define TCE_RPN_MASK 0x0000fffffffff000ULL
39
40extern void tce_build(struct iommu_table *tbl, unsigned long index,
41 unsigned int npages, unsigned long uaddr, int direction);
42extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages);
43extern void* alloc_tce_table(void);
44extern void free_tce_table(void *tbl);
45extern int build_tce_table(struct pci_dev *dev, void __iomem *bbar);
46
47#endif /* _ASM_X86_64_TCE_H */
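Encoding sketch based only on the shift/mask definitions above (the authoritative version lives in the Calgary arch code): build a read/write-enabled TCE for a physical address.

static u64 make_tce(unsigned long paddr)
{
        u64 tce;

        tce  = ((u64)(paddr >> PAGE_SHIFT) << TCE_RPN_SHIFT) & TCE_RPN_MASK;
        tce |= 1ULL << TCE_READ_SHIFT;          /* allow DMA reads  */
        tce |= 1ULL << TCE_WRITE_SHIFT;         /* allow DMA writes */
        return tce;
}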
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
index 4ac0e0a36934..2029b00351f3 100644
--- a/include/asm-x86_64/thread_info.h
+++ b/include/asm-x86_64/thread_info.h
@@ -73,8 +73,21 @@ static inline struct thread_info *stack_thread_info(void)
73} 73}
74 74
75/* thread information allocation */ 75/* thread information allocation */
76#ifdef CONFIG_DEBUG_STACK_USAGE
77#define alloc_thread_info(tsk) \
78 ({ \
79 struct thread_info *ret; \
80 \
81 ret = ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER)); \
82 if (ret) \
83 memset(ret, 0, THREAD_SIZE); \
84 ret; \
85 })
86#else
76#define alloc_thread_info(tsk) \ 87#define alloc_thread_info(tsk) \
77 ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER)) 88 ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER))
89#endif
90
78#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER) 91#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
79 92
80#else /* !__ASSEMBLY__ */ 93#else /* !__ASSEMBLY__ */
@@ -101,7 +114,7 @@ static inline struct thread_info *stack_thread_info(void)
101#define TIF_IRET 5 /* force IRET */ 114#define TIF_IRET 5 /* force IRET */
102#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ 115#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
103#define TIF_SECCOMP 8 /* secure computing */ 116#define TIF_SECCOMP 8 /* secure computing */
104#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 117/* 16 free */
105#define TIF_IA32 17 /* 32bit process */ 118#define TIF_IA32 17 /* 32bit process */
106#define TIF_FORK 18 /* ret_from_fork */ 119#define TIF_FORK 18 /* ret_from_fork */
107#define TIF_ABI_PENDING 19 120#define TIF_ABI_PENDING 19
@@ -115,7 +128,6 @@ static inline struct thread_info *stack_thread_info(void)
115#define _TIF_IRET (1<<TIF_IRET) 128#define _TIF_IRET (1<<TIF_IRET)
116#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 129#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
117#define _TIF_SECCOMP (1<<TIF_SECCOMP) 130#define _TIF_SECCOMP (1<<TIF_SECCOMP)
118#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
119#define _TIF_IA32 (1<<TIF_IA32) 131#define _TIF_IA32 (1<<TIF_IA32)
120#define _TIF_FORK (1<<TIF_FORK) 132#define _TIF_FORK (1<<TIF_FORK)
121#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) 133#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
@@ -137,6 +149,9 @@ static inline struct thread_info *stack_thread_info(void)
137 */ 149 */
138#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ 150#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
139#define TS_COMPAT 0x0002 /* 32bit syscall active */ 151#define TS_COMPAT 0x0002 /* 32bit syscall active */
152#define TS_POLLING 0x0004 /* true if in idle loop and not sleeping */
153
154#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
140 155
141#endif /* __KERNEL__ */ 156#endif /* __KERNEL__ */
142 157
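The point of TS_POLLING and tsk_is_polling() is to let the scheduler skip the reschedule IPI for a CPU that is polling in the idle loop. A sketch of that consumer (the function name is illustrative, not the scheduler's):

static void resched_remote_task(struct task_struct *p, int cpu)
{
        set_tsk_need_resched(p);
        smp_mb();                       /* flag must be visible before the check */
        if (!tsk_is_polling(p))
                smp_send_reschedule(cpu);       /* only IPI a non-polling CPU */
}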
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index 80c4e44d011c..c4e46e7fa7ba 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -7,8 +7,6 @@
7#include <asm/mpspec.h> 7#include <asm/mpspec.h>
8#include <asm/bitops.h> 8#include <asm/bitops.h>
9 9
10/* Map the K8 CPU local memory controllers to a simple 1:1 CPU:NODE topology */
11
12extern cpumask_t cpu_online_map; 10extern cpumask_t cpu_online_map;
13 11
14extern unsigned char cpu_to_node[]; 12extern unsigned char cpu_to_node[];
@@ -57,10 +55,8 @@ extern int __node_distance(int, int);
57#endif 55#endif
58 56
59#ifdef CONFIG_SMP 57#ifdef CONFIG_SMP
60#define topology_physical_package_id(cpu) \ 58#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id)
61 (phys_proc_id[cpu] == BAD_APICID ? -1 : phys_proc_id[cpu]) 59#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id)
62#define topology_core_id(cpu) \
63 (cpu_core_id[cpu] == BAD_APICID ? 0 : cpu_core_id[cpu])
64#define topology_core_siblings(cpu) (cpu_core_map[cpu]) 60#define topology_core_siblings(cpu) (cpu_core_map[cpu])
65#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) 61#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
66#endif 62#endif
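Usage sketch for the rewritten accessors, which now read straight out of cpu_data[]:

int cpu;

for_each_online_cpu(cpu)
        printk(KERN_DEBUG "cpu %d: package %d core %d\n", cpu,
               topology_physical_package_id(cpu),
               topology_core_id(cpu));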
diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h
new file mode 100644
index 000000000000..f3e7124effe3
--- /dev/null
+++ b/include/asm-x86_64/unwind.h
@@ -0,0 +1,106 @@
1#ifndef _ASM_X86_64_UNWIND_H
2#define _ASM_X86_64_UNWIND_H
3
4/*
5 * Copyright (C) 2002-2006 Novell, Inc.
6 * Jan Beulich <jbeulich@novell.com>
7 * This code is released under version 2 of the GNU GPL.
8 */
9
10#ifdef CONFIG_STACK_UNWIND
11
12#include <linux/sched.h>
13#include <asm/ptrace.h>
14#include <asm/uaccess.h>
15#include <asm/vsyscall.h>
16
17struct unwind_frame_info
18{
19 struct pt_regs regs;
20 struct task_struct *task;
21};
22
23#define UNW_PC(frame) (frame)->regs.rip
24#define UNW_SP(frame) (frame)->regs.rsp
25#ifdef CONFIG_FRAME_POINTER
26#define UNW_FP(frame) (frame)->regs.rbp
27#define FRAME_RETADDR_OFFSET 8
28#define FRAME_LINK_OFFSET 0
29#define STACK_BOTTOM(tsk) (((tsk)->thread.rsp0 - 1) & ~(THREAD_SIZE - 1))
30#define STACK_TOP(tsk) ((tsk)->thread.rsp0)
31#endif
32/* Might need to account for the special exception and interrupt handling
33 stacks here, since normally
34 EXCEPTION_STACK_ORDER < THREAD_ORDER < IRQSTACK_ORDER,
35 but the construct is needed only for getting across the stack switch to
36 the interrupt stack - thus considering the IRQ stack itself is unnecessary,
37 and the overhead of comparing against all exception handling stacks seems
38 undesirable. */
39#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1))
40
41#define UNW_REGISTER_INFO \
42 PTREGS_INFO(rax), \
43 PTREGS_INFO(rdx), \
44 PTREGS_INFO(rcx), \
45 PTREGS_INFO(rbx), \
46 PTREGS_INFO(rsi), \
47 PTREGS_INFO(rdi), \
48 PTREGS_INFO(rbp), \
49 PTREGS_INFO(rsp), \
50 PTREGS_INFO(r8), \
51 PTREGS_INFO(r9), \
52 PTREGS_INFO(r10), \
53 PTREGS_INFO(r11), \
54 PTREGS_INFO(r12), \
55 PTREGS_INFO(r13), \
56 PTREGS_INFO(r14), \
57 PTREGS_INFO(r15), \
58 PTREGS_INFO(rip)
59
60static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
61 /*const*/ struct pt_regs *regs)
62{
63 info->regs = *regs;
64}
65
66static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
67{
68 extern const char thread_return[];
69
70 memset(&info->regs, 0, sizeof(info->regs));
71 info->regs.rip = (unsigned long)thread_return;
72 info->regs.cs = __KERNEL_CS;
73 __get_user(info->regs.rbp, (unsigned long *)info->task->thread.rsp);
74 info->regs.rsp = info->task->thread.rsp;
75 info->regs.ss = __KERNEL_DS;
76}
77
78extern int arch_unwind_init_running(struct unwind_frame_info *,
79 int (*callback)(struct unwind_frame_info *,
80 void *arg),
81 void *arg);
82
83static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
84{
85#if 0 /* This can only work when selector register saves/restores
86 are properly annotated (and tracked in UNW_REGISTER_INFO). */
87 return user_mode(&info->regs);
88#else
89 return (long)info->regs.rip >= 0
90 || (info->regs.rip >= VSYSCALL_START && info->regs.rip < VSYSCALL_END)
91 || (long)info->regs.rsp >= 0;
92#endif
93}
94
95#else
96
97#define UNW_PC(frame) ((void)(frame), 0)
98
99static inline int arch_unw_user_mode(const void *info)
100{
101 return 0;
102}
103
104#endif
105
106#endif /* _ASM_X86_64_UNWIND_H */
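A dump sketch built only on the accessors defined here plus unwind() from <linux/unwind.h> (the printing format is illustrative):

static void show_frames(struct unwind_frame_info *info)
{
        do {
                printk(" [<%016lx>] rsp %016lx\n",
                       UNW_PC(info), UNW_SP(info));
        } while (!arch_unw_user_mode(info) && unwind(info) == 0);
}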
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index d9ed27969855..dcc5de7cc487 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -24,6 +24,9 @@
24 * The available bitmap operations and their rough meaning in the 24 * The available bitmap operations and their rough meaning in the
25 * case that the bitmap is a single unsigned long are thus: 25 * case that the bitmap is a single unsigned long are thus:
26 * 26 *
27 * Note that nbits should always be a compile-time evaluable constant.
28 * Otherwise many inlines will generate horrible code.
29 *
27 * bitmap_zero(dst, nbits) *dst = 0UL 30 * bitmap_zero(dst, nbits) *dst = 0UL
28 * bitmap_fill(dst, nbits) *dst = ~0UL 31 * bitmap_fill(dst, nbits) *dst = ~0UL
29 * bitmap_copy(dst, src, nbits) *dst = *src 32 * bitmap_copy(dst, src, nbits) *dst = *src
@@ -244,6 +247,8 @@ static inline int bitmap_full(const unsigned long *src, int nbits)
244 247
245static inline int bitmap_weight(const unsigned long *src, int nbits) 248static inline int bitmap_weight(const unsigned long *src, int nbits)
246{ 249{
250 if (nbits <= BITS_PER_LONG)
251 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
247 return __bitmap_weight(src, nbits); 252 return __bitmap_weight(src, nbits);
248} 253}
249 254
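With a compile-time nbits that fits in one word, the new fast path reduces the whole call to a single hweight_long(). A small sketch:

DECLARE_BITMAP(pending, 32);

bitmap_zero(pending, 32);
set_bit(3, pending);
set_bit(17, pending);
printk("%d bits set\n", bitmap_weight(pending, 32));    /* prints 2 */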
diff --git a/include/linux/compat.h b/include/linux/compat.h
index dda1697ec753..9760753e662b 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -226,5 +226,7 @@ static inline int compat_timespec_compare(struct compat_timespec *lhs,
226 226
227asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); 227asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
228 228
229extern int compat_printk(const char *fmt, ...);
230
229#endif /* CONFIG_COMPAT */ 231#endif /* CONFIG_COMPAT */
230#endif /* _LINUX_COMPAT_H */ 232#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3c5e4c2e517d..5c1ec1f84eab 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -32,6 +32,7 @@ extern const char linux_banner[];
32 32
33#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) 33#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
34#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) 34#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
35#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
35 36
36#define KERN_EMERG "<0>" /* system is unusable */ 37#define KERN_EMERG "<0>" /* system is unusable */
37#define KERN_ALERT "<1>" /* action must be taken immediately */ 38#define KERN_ALERT "<1>" /* action must be taken immediately */
@@ -336,6 +337,12 @@ struct sysinfo {
336/* Force a compilation error if condition is true */ 337/* Force a compilation error if condition is true */
337#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) 338#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
338 339
340/* Force a compilation error if condition is true, but also produce a
341 result (of value 0 and type size_t), so the expression can be used
342 e.g. in a structure initializer (or wherever else comma expressions
343 aren't permitted). */
344#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
345
339/* Trap pasters of __FUNCTION__ at compile-time */ 346/* Trap pasters of __FUNCTION__ at compile-time */
340#define __FUNCTION__ (__func__) 347#define __FUNCTION__ (__func__)
341 348
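BUILD_BUG_ON_ZERO() fits where a statement would not, e.g. inside an initializer. A sketch combining it with FIELD_SIZEOF (the struct and macro names below are made up):

struct wire_hdr { __u32 magic; __u16 len; };

#define WIRE_LEN_BYTES                                          \
        (FIELD_SIZEOF(struct wire_hdr, len) +                   \
         BUILD_BUG_ON_ZERO(FIELD_SIZEOF(struct wire_hdr, len) != 2))

static const size_t wire_len_bytes = WIRE_LEN_BYTES;    /* 2, or a build error */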
diff --git a/include/linux/module.h b/include/linux/module.h
index 2d366098eab5..9ebbb74b7b72 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -285,6 +285,9 @@ struct module
285 /* The size of the executable code in each section. */ 285 /* The size of the executable code in each section. */
286 unsigned long init_text_size, core_text_size; 286 unsigned long init_text_size, core_text_size;
287 287
288 /* The handle returned from unwind_add_table. */
289 void *unwind_info;
290
288 /* Arch-specific module values */ 291 /* Arch-specific module values */
289 struct mod_arch_specific arch; 292 struct mod_arch_specific arch;
290 293
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 6a60770984e9..349ef908a222 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -148,9 +148,11 @@ enum
148 KERN_SPIN_RETRY=70, /* int: number of spinlock retries */ 148 KERN_SPIN_RETRY=70, /* int: number of spinlock retries */
149 KERN_ACPI_VIDEO_FLAGS=71, /* int: flags for setting up video after ACPI sleep */ 149 KERN_ACPI_VIDEO_FLAGS=71, /* int: flags for setting up video after ACPI sleep */
150 KERN_IA64_UNALIGNED=72, /* int: ia64 unaligned userland trap enable */ 150 KERN_IA64_UNALIGNED=72, /* int: ia64 unaligned userland trap enable */
151 KERN_COMPAT_LOG=73, /* int: print compat layer messages */
151}; 152};
152 153
153 154
155
154/* CTL_VM names: */ 156/* CTL_VM names: */
155enum 157enum
156{ 158{
diff --git a/include/linux/time.h b/include/linux/time.h
index 65dd85b2105e..c05f8bb9a323 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -28,10 +28,13 @@ struct timezone {
28#ifdef __KERNEL__ 28#ifdef __KERNEL__
29 29
30/* Parameters used to convert the timespec values: */ 30/* Parameters used to convert the timespec values: */
31#define MSEC_PER_SEC 1000L 31#define MSEC_PER_SEC 1000L
32#define USEC_PER_SEC 1000000L 32#define USEC_PER_MSEC 1000L
33#define NSEC_PER_SEC 1000000000L 33#define NSEC_PER_USEC 1000L
34#define NSEC_PER_USEC 1000L 34#define NSEC_PER_MSEC 1000000L
35#define USEC_PER_SEC 1000000L
36#define NSEC_PER_SEC 1000000000L
37#define FSEC_PER_SEC 1000000000000000L
35 38
36static inline int timespec_equal(struct timespec *a, struct timespec *b) 39static inline int timespec_equal(struct timespec *a, struct timespec *b)
37{ 40{
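The added constants make unit conversions self-documenting; a small sketch:

static inline s64 timespec_to_ns_sketch(const struct timespec *ts)
{
        return (s64)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec;
}

/* e.g. a 5 ms timeout expressed in nanoseconds: */
/*      s64 timeout = 5 * NSEC_PER_MSEC;         */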
diff --git a/include/linux/unwind.h b/include/linux/unwind.h
new file mode 100644
index 000000000000..ce48e2cd37a2
--- /dev/null
+++ b/include/linux/unwind.h
@@ -0,0 +1,127 @@
1#ifndef _LINUX_UNWIND_H
2#define _LINUX_UNWIND_H
3
4/*
5 * Copyright (C) 2002-2006 Novell, Inc.
6 * Jan Beulich <jbeulich@novell.com>
7 * This code is released under version 2 of the GNU GPL.
8 *
9 * A simple API for unwinding kernel stacks. This is used for
10 * debugging and error reporting purposes. The kernel doesn't need
11 * full-blown stack unwinding with all the bells and whistles, so there
12 * is not much point in implementing the full Dwarf2 unwind API.
13 */
14
15#include <linux/config.h>
16
17struct module;
18
19#ifdef CONFIG_STACK_UNWIND
20
21#include <asm/unwind.h>
22
23#ifndef ARCH_UNWIND_SECTION_NAME
24#define ARCH_UNWIND_SECTION_NAME ".eh_frame"
25#endif
26
27/*
28 * Initialize unwind support.
29 */
30extern void unwind_init(void);
31
32#ifdef CONFIG_MODULES
33
34extern void *unwind_add_table(struct module *,
35 const void *table_start,
36 unsigned long table_size);
37
38extern void unwind_remove_table(void *handle, int init_only);
39
40#endif
41
42extern int unwind_init_frame_info(struct unwind_frame_info *,
43 struct task_struct *,
44 /*const*/ struct pt_regs *);
45
46/*
47 * Prepare to unwind a blocked task.
48 */
49extern int unwind_init_blocked(struct unwind_frame_info *,
50 struct task_struct *);
51
52/*
53 * Prepare to unwind the currently running thread.
54 */
55extern int unwind_init_running(struct unwind_frame_info *,
56 asmlinkage int (*callback)(struct unwind_frame_info *,
57 void *arg),
58 void *arg);
59
60/*
61 * Unwind to the previous frame. Returns 0 if successful, negative
62 * number in case of an error.
63 */
64extern int unwind(struct unwind_frame_info *);
65
66/*
67 * Unwind until the return pointer is in user-land (or until an error
68 * occurs). Returns 0 if successful, negative number in case of
69 * error.
70 */
71extern int unwind_to_user(struct unwind_frame_info *);
72
73#else
74
75struct unwind_frame_info {};
76
77static inline void unwind_init(void) {}
78
79#ifdef CONFIG_MODULES
80
81static inline void *unwind_add_table(struct module *mod,
82 const void *table_start,
83 unsigned long table_size)
84{
85 return NULL;
86}
87
88#endif
89
90static inline void unwind_remove_table(void *handle, int init_only)
91{
92}
93
94static inline int unwind_init_frame_info(struct unwind_frame_info *info,
95 struct task_struct *tsk,
96 const struct pt_regs *regs)
97{
98 return -ENOSYS;
99}
100
101static inline int unwind_init_blocked(struct unwind_frame_info *info,
102 struct task_struct *tsk)
103{
104 return -ENOSYS;
105}
106
107static inline int unwind_init_running(struct unwind_frame_info *info,
108 asmlinkage int (*cb)(struct unwind_frame_info *,
109 void *arg),
110 void *arg)
111{
112 return -ENOSYS;
113}
114
115static inline int unwind(struct unwind_frame_info *info)
116{
117 return -ENOSYS;
118}
119
120static inline int unwind_to_user(struct unwind_frame_info *info)
121{
122 return -ENOSYS;
123}
124
125#endif
126
127#endif /* _LINUX_UNWIND_H */
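End-to-end usage sketch of the API above (UNW_PC comes from <asm/unwind.h>; with CONFIG_STACK_UNWIND off, the stubs let the same code compile and simply bail out with -ENOSYS):

static void dump_blocked_task(struct task_struct *tsk)
{
        struct unwind_frame_info info;

        if (unwind_init_blocked(&info, tsk) != 0)
                return;                         /* no unwind info available */

        while (unwind(&info) == 0 && UNW_PC(&info))
                printk(" [<%016lx>]\n", UNW_PC(&info));
}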