author		Linus Torvalds <torvalds@g5.osdl.org>	2006-09-26 16:07:55 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-09-26 16:07:55 -0400
commit		b278240839e20fa9384ea430df463b367b90e04e (patch)
tree		f99f0c8cdd4cc7f177cd75440e6bd181cded7fb3 /include
parent		dd77a4ee0f3981693d4229aa1d57cea9e526ff47 (diff)
parent		3f75f42d7733e73aca5c78326489efd4189e0111 (diff)
Merge branch 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6
* 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6: (225 commits)
  [PATCH] Don't set calgary iommu as default y
  [PATCH] i386/x86-64: New Intel feature flags
  [PATCH] x86: Add a cumulative thermal throttle event counter.
  [PATCH] i386: Make the jiffies compares use the 64bit safe macros.
  [PATCH] x86: Refactor thermal throttle processing
  [PATCH] Add 64bit jiffies compares (for use with get_jiffies_64)
  [PATCH] Fix unwinder warning in traps.c
  [PATCH] x86: Allow disabling early pci scans with pci=noearly or disallowing conf1
  [PATCH] x86: Move direct PCI scanning functions out of line
  [PATCH] i386/x86-64: Make all early PCI scans dependent on CONFIG_PCI
  [PATCH] Don't leak NT bit into next task
  [PATCH] i386/x86-64: Work around gcc bug with noreturn functions in unwinder
  [PATCH] Fix some broken white space in ia32_signal.c
  [PATCH] Initialize argument registers for 32bit signal handlers.
  [PATCH] Remove all traces of signal number conversion
  [PATCH] Don't synchronize time reading on single core AMD systems
  [PATCH] Remove outdated comment in x86-64 mmconfig code
  [PATCH] Use string instructions for Core2 copy/clear
  [PATCH] x86: - restore i8259A eoi status on resume
  [PATCH] i386: Split multi-line printk in oops output.
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/asm-i386/acpi.h | 14
-rw-r--r--  include/asm-i386/alternative-asm.i | 14
-rw-r--r--  include/asm-i386/apic.h | 16
-rw-r--r--  include/asm-i386/desc.h | 121
-rw-r--r--  include/asm-i386/dwarf2.h | 11
-rw-r--r--  include/asm-i386/e820.h | 2
-rw-r--r--  include/asm-i386/frame.i | 24
-rw-r--r--  include/asm-i386/genapic.h | 69
-rw-r--r--  include/asm-i386/intel_arch_perfmon.h | 14
-rw-r--r--  include/asm-i386/io_apic.h | 11
-rw-r--r--  include/asm-i386/kexec.h | 27
-rw-r--r--  include/asm-i386/mach-es7000/mach_apic.h | 4
-rw-r--r--  include/asm-i386/mach-summit/mach_apic.h | 11
-rw-r--r--  include/asm-i386/mutex.h | 16
-rw-r--r--  include/asm-i386/nmi.h | 37
-rw-r--r--  include/asm-i386/pgtable.h | 2
-rw-r--r--  include/asm-i386/ptrace.h | 9
-rw-r--r--  include/asm-i386/rwlock.h | 48
-rw-r--r--  include/asm-i386/rwsem.h | 62
-rw-r--r--  include/asm-i386/segment.h | 17
-rw-r--r--  include/asm-i386/semaphore.h | 49
-rw-r--r--  include/asm-i386/smp.h | 20
-rw-r--r--  include/asm-i386/spinlock.h | 134
-rw-r--r--  include/asm-i386/stacktrace.h | 1
-rw-r--r--  include/asm-i386/therm_throt.h | 9
-rw-r--r--  include/asm-i386/tlbflush.h | 4
-rw-r--r--  include/asm-i386/tsc.h | 1
-rw-r--r--  include/asm-i386/unistd.h | 3
-rw-r--r--  include/asm-i386/unwind.h | 8
-rw-r--r--  include/asm-ia64/module.h | 3
-rw-r--r--  include/asm-um/alternative-asm.i | 6
-rw-r--r--  include/asm-um/frame.i | 6
-rw-r--r--  include/asm-x86_64/acpi.h | 2
-rw-r--r--  include/asm-x86_64/alternative-asm.i | 14
-rw-r--r--  include/asm-x86_64/apic.h | 9
-rw-r--r--  include/asm-x86_64/bitops.h | 2
-rw-r--r--  include/asm-x86_64/calgary.h | 7
-rw-r--r--  include/asm-x86_64/dwarf2.h | 8
-rw-r--r--  include/asm-x86_64/e820.h | 9
-rw-r--r--  include/asm-x86_64/fixmap.h | 4
-rw-r--r--  include/asm-x86_64/genapic.h | 1
-rw-r--r--  include/asm-x86_64/i387.h | 9
-rw-r--r--  include/asm-x86_64/intel_arch_perfmon.h | 14
-rw-r--r--  include/asm-x86_64/io_apic.h | 6
-rw-r--r--  include/asm-x86_64/irq.h | 2
-rw-r--r--  include/asm-x86_64/kexec.h | 29
-rw-r--r--  include/asm-x86_64/linkage.h | 2
-rw-r--r--  include/asm-x86_64/mach_apic.h | 1
-rw-r--r--  include/asm-x86_64/mce.h | 2
-rw-r--r--  include/asm-x86_64/mmx.h | 14
-rw-r--r--  include/asm-x86_64/mpspec.h | 11
-rw-r--r--  include/asm-x86_64/msr.h | 11
-rw-r--r--  include/asm-x86_64/mutex.h | 20
-rw-r--r--  include/asm-x86_64/nmi.h | 38
-rw-r--r--  include/asm-x86_64/pci-direct.h | 42
-rw-r--r--  include/asm-x86_64/pda.h | 109
-rw-r--r--  include/asm-x86_64/percpu.h | 10
-rw-r--r--  include/asm-x86_64/pgtable.h | 8
-rw-r--r--  include/asm-x86_64/proto.h | 15
-rw-r--r--  include/asm-x86_64/rwlock.h | 64
-rw-r--r--  include/asm-x86_64/segment.h | 5
-rw-r--r--  include/asm-x86_64/semaphore.h | 40
-rw-r--r--  include/asm-x86_64/signal.h | 4
-rw-r--r--  include/asm-x86_64/smp.h | 29
-rw-r--r--  include/asm-x86_64/spinlock.h | 79
-rw-r--r--  include/asm-x86_64/stacktrace.h | 18
-rw-r--r--  include/asm-x86_64/system.h | 5
-rw-r--r--  include/asm-x86_64/tce.h | 1
-rw-r--r--  include/asm-x86_64/therm_throt.h | 1
-rw-r--r--  include/asm-x86_64/thread_info.h | 9
-rw-r--r--  include/asm-x86_64/tlbflush.h | 70
-rw-r--r--  include/asm-x86_64/uaccess.h | 68
-rw-r--r--  include/asm-x86_64/unistd.h | 5
-rw-r--r--  include/asm-x86_64/unwind.h | 9
-rw-r--r--  include/asm-x86_64/vsyscall.h | 9
-rw-r--r--  include/linux/edd.h | 1
-rw-r--r--  include/linux/getcpu.h | 16
-rw-r--r--  include/linux/jiffies.h | 15
-rw-r--r--  include/linux/kernel.h | 1
-rw-r--r--  include/linux/linkage.h | 6
-rw-r--r--  include/linux/sched.h | 14
-rw-r--r--  include/linux/stacktrace.h | 7
-rw-r--r--  include/linux/syscalls.h | 2
-rw-r--r--  include/linux/sysctl.h | 2
-rw-r--r--  include/linux/vermagic.h | 4
85 files changed, 879 insertions(+), 777 deletions(-)
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h
index 20f523954218..6016632d032f 100644
--- a/include/asm-i386/acpi.h
+++ b/include/asm-i386/acpi.h
@@ -131,21 +131,7 @@ static inline void disable_acpi(void)
 extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
 
 #ifdef CONFIG_X86_IO_APIC
-extern int skip_ioapic_setup;
 extern int acpi_skip_timer_override;
-
-static inline void disable_ioapic_setup(void)
-{
-	skip_ioapic_setup = 1;
-}
-
-static inline int ioapic_setup_disabled(void)
-{
-	return skip_ioapic_setup;
-}
-
-#else
-static inline void disable_ioapic_setup(void) { }
 #endif
 
 static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
diff --git a/include/asm-i386/alternative-asm.i b/include/asm-i386/alternative-asm.i
new file mode 100644
index 000000000000..6c47e3b9484b
--- /dev/null
+++ b/include/asm-i386/alternative-asm.i
@@ -0,0 +1,14 @@
+#include <linux/config.h>
+
+#ifdef CONFIG_SMP
+	.macro LOCK_PREFIX
+1:	lock
+	.section .smp_locks,"a"
+	.align 4
+	.long 1b
+	.previous
+	.endm
+#else
+	.macro LOCK_PREFIX
+	.endm
+#endif
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index 2c1e371cebb6..3a42b7d6fc92 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -16,20 +16,8 @@
 #define APIC_VERBOSE 1
 #define APIC_DEBUG   2
 
-extern int enable_local_apic;
 extern int apic_verbosity;
 
-static inline void lapic_disable(void)
-{
-	enable_local_apic = -1;
-	clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
-}
-
-static inline void lapic_enable(void)
-{
-	enable_local_apic = 1;
-}
-
 /*
  * Define the default level of output to be very little
  * This can be turned up by using apic=verbose for more
@@ -42,6 +30,8 @@ static inline void lapic_enable(void)
 	} while (0)
 
 
+extern void generic_apic_probe(void);
+
 #ifdef CONFIG_X86_LOCAL_APIC
 
 /*
@@ -117,8 +107,6 @@ extern void enable_APIC_timer(void);
 
 extern void enable_NMI_through_LVT0 (void * dummy);
 
-extern int disable_timer_pin_1;
-
 void smp_send_timer_broadcast_ipi(struct pt_regs *regs);
 void switch_APIC_timer_to_ipi(void *cpumask);
 void switch_ipi_to_APIC_timer(void *cpumask);
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index 89b8b82c82b3..5874ef119ffd 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -33,50 +33,99 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
 	return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
 }
 
+/*
+ * This is the ldt that every process will get unless we need
+ * something other than this.
+ */
+extern struct desc_struct default_ldt[];
+extern struct desc_struct idt_table[];
+extern void set_intr_gate(unsigned int irq, void * addr);
+
+static inline void pack_descriptor(__u32 *a, __u32 *b,
+	unsigned long base, unsigned long limit, unsigned char type, unsigned char flags)
+{
+	*a = ((base & 0xffff) << 16) | (limit & 0xffff);
+	*b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
+		(limit & 0x000f0000) | ((type & 0xff) << 8) | ((flags & 0xf) << 20);
+}
+
+static inline void pack_gate(__u32 *a, __u32 *b,
+	unsigned long base, unsigned short seg, unsigned char type, unsigned char flags)
+{
+	*a = (seg << 16) | (base & 0xffff);
+	*b = (base & 0xffff0000) | ((type & 0xff) << 8) | (flags & 0xff);
+}
+
+#define DESCTYPE_LDT	0x82	/* present, system, DPL-0, LDT */
+#define DESCTYPE_TSS	0x89	/* present, system, DPL-0, 32-bit TSS */
+#define DESCTYPE_TASK	0x85	/* present, system, DPL-0, task gate */
+#define DESCTYPE_INT	0x8e	/* present, system, DPL-0, interrupt gate */
+#define DESCTYPE_TRAP	0x8f	/* present, system, DPL-0, trap gate */
+#define DESCTYPE_DPL3	0x60	/* DPL-3 */
+#define DESCTYPE_S	0x10	/* !system */
+
 #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
 #define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
 
 #define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
 #define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
-#define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr))
-#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt))
+#define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr))
+#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt))
 
 #define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
 #define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
-#define store_tr(tr) __asm__ ("str %0":"=mr" (tr))
-#define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt))
+#define store_tr(tr) __asm__ ("str %0":"=m" (tr))
+#define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt))
 
-/*
- * This is the ldt that every process will get unless we need
- * something other than this.
- */
-extern struct desc_struct default_ldt[];
-extern void set_intr_gate(unsigned int irq, void * addr);
+#if TLS_SIZE != 24
+# error update this code.
+#endif
 
-#define _set_tssldt_desc(n,addr,limit,type) \
-__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
-	"movw %w1,2(%2)\n\t" \
-	"rorl $16,%1\n\t" \
-	"movb %b1,4(%2)\n\t" \
-	"movb %4,5(%2)\n\t" \
-	"movb $0,6(%2)\n\t" \
-	"movb %h1,7(%2)\n\t" \
-	"rorl $16,%1" \
-	: "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type))
-
-static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
 {
-	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
-		offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
+#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
+	C(0); C(1); C(2);
+#undef C
 }
 
-#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
+static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b)
+{
+	__u32 *lp = (__u32 *)((char *)dt + entry*8);
+	*lp = entry_a;
+	*(lp+1) = entry_b;
+}
+
+#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+
+static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
+{
+	__u32 a, b;
+	pack_gate(&a, &b, (unsigned long)addr, seg, type, 0);
+	write_idt_entry(idt_table, gate, a, b);
+}
 
-static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
+static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const void *addr)
 {
-	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
+	__u32 a, b;
+	pack_descriptor(&a, &b, (unsigned long)addr,
+		offsetof(struct tss_struct, __cacheline_filler) - 1,
+		DESCTYPE_TSS, 0);
+	write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b);
 }
 
+static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entries)
+{
+	__u32 a, b;
+	pack_descriptor(&a, &b, (unsigned long)addr,
+		entries * sizeof(struct desc_struct) - 1,
+		DESCTYPE_LDT, 0);
+	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
+}
+
+#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
+
 #define LDT_entry_a(info) \
 	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
 
@@ -102,24 +151,6 @@ static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
 	(info)->seg_not_present == 1 && \
 	(info)->useable == 0 )
 
-static inline void write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
-{
-	__u32 *lp = (__u32 *)((char *)ldt + entry*8);
-	*lp = entry_a;
-	*(lp+1) = entry_b;
-}
-
-#if TLS_SIZE != 24
-# error update this code.
-#endif
-
-static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
-{
-#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
-	C(0); C(1); C(2);
-#undef C
-}
-
 static inline void clear_LDT(void)
 {
 	int cpu = get_cpu();
diff --git a/include/asm-i386/dwarf2.h b/include/asm-i386/dwarf2.h
index 2280f6272f80..6d66398a307d 100644
--- a/include/asm-i386/dwarf2.h
+++ b/include/asm-i386/dwarf2.h
@@ -1,8 +1,6 @@
 #ifndef _DWARF2_H
 #define _DWARF2_H
 
-#include <linux/config.h>
-
 #ifndef __ASSEMBLY__
 #warning "asm/dwarf2.h should be only included in pure assembly files"
 #endif
@@ -28,6 +26,13 @@
 #define CFI_RESTORE .cfi_restore
 #define CFI_REMEMBER_STATE .cfi_remember_state
 #define CFI_RESTORE_STATE .cfi_restore_state
+#define CFI_UNDEFINED .cfi_undefined
+
+#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
+#define CFI_SIGNAL_FRAME .cfi_signal_frame
+#else
+#define CFI_SIGNAL_FRAME
+#endif
 
 #else
 
@@ -48,6 +53,8 @@
 #define CFI_RESTORE ignore
 #define CFI_REMEMBER_STATE ignore
 #define CFI_RESTORE_STATE ignore
+#define CFI_UNDEFINED ignore
+#define CFI_SIGNAL_FRAME ignore
 
 #endif
 
diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h
index ca82acb8cb1f..f7514fb6e8e4 100644
--- a/include/asm-i386/e820.h
+++ b/include/asm-i386/e820.h
@@ -18,7 +18,7 @@
 
 #define E820_RAM	1
 #define E820_RESERVED	2
-#define E820_ACPI	3 /* usable as RAM once ACPI tables have been read */
+#define E820_ACPI	3
 #define E820_NVS	4
 
 #define HIGH_MEMORY	(1024*1024)
diff --git a/include/asm-i386/frame.i b/include/asm-i386/frame.i
new file mode 100644
index 000000000000..4d68ddce18b6
--- /dev/null
+++ b/include/asm-i386/frame.i
@@ -0,0 +1,24 @@
+#include <linux/config.h>
+#include <asm/dwarf2.h>
+
+/* The annotation hides the frame from the unwinder and makes it look
+   like a ordinary ebp save/restore. This avoids some special cases for
+   frame pointer later */
+#ifdef CONFIG_FRAME_POINTER
+	.macro FRAME
+	pushl %ebp
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET ebp,0
+	movl %esp,%ebp
+	.endm
+	.macro ENDFRAME
+	popl %ebp
+	CFI_ADJUST_CFA_OFFSET -4
+	CFI_RESTORE ebp
+	.endm
+#else
+	.macro FRAME
+	.endm
+	.macro ENDFRAME
+	.endm
+#endif
diff --git a/include/asm-i386/genapic.h b/include/asm-i386/genapic.h
index b3783a32abee..8ffbb0f07457 100644
--- a/include/asm-i386/genapic.h
+++ b/include/asm-i386/genapic.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_GENAPIC_H
 #define _ASM_GENAPIC_H 1
 
+#include <asm/mpspec.h>
+
 /*
  * Generic APIC driver interface.
  *
@@ -63,14 +65,25 @@ struct genapic {
 	unsigned (*get_apic_id)(unsigned long x);
 	unsigned long apic_id_mask;
 	unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
 
+#ifdef CONFIG_SMP
 	/* ipi */
 	void (*send_IPI_mask)(cpumask_t mask, int vector);
 	void (*send_IPI_allbutself)(int vector);
 	void (*send_IPI_all)(int vector);
+#endif
 };
 
-#define APICFUNC(x) .x = x
+#define APICFUNC(x) .x = x,
+
+/* More functions could be probably marked IPIFUNC and save some space
+   in UP GENERICARCH kernels, but I don't have the nerve right now
+   to untangle this mess. -AK */
+#ifdef CONFIG_SMP
+#define IPIFUNC(x) APICFUNC(x)
+#else
+#define IPIFUNC(x)
+#endif
 
 #define APIC_INIT(aname, aprobe) { \
 	.name = aname, \
@@ -80,33 +93,33 @@ struct genapic {
 	.no_balance_irq = NO_BALANCE_IRQ, \
 	.ESR_DISABLE = esr_disable, \
 	.apic_destination_logical = APIC_DEST_LOGICAL, \
-	APICFUNC(apic_id_registered), \
-	APICFUNC(target_cpus), \
-	APICFUNC(check_apicid_used), \
-	APICFUNC(check_apicid_present), \
-	APICFUNC(init_apic_ldr), \
-	APICFUNC(ioapic_phys_id_map), \
-	APICFUNC(clustered_apic_check), \
-	APICFUNC(multi_timer_check), \
-	APICFUNC(apicid_to_node), \
-	APICFUNC(cpu_to_logical_apicid), \
-	APICFUNC(cpu_present_to_apicid), \
-	APICFUNC(apicid_to_cpu_present), \
-	APICFUNC(mpc_apic_id), \
-	APICFUNC(setup_portio_remap), \
-	APICFUNC(check_phys_apicid_present), \
-	APICFUNC(mpc_oem_bus_info), \
-	APICFUNC(mpc_oem_pci_bus), \
-	APICFUNC(mps_oem_check), \
-	APICFUNC(get_apic_id), \
+	APICFUNC(apic_id_registered) \
+	APICFUNC(target_cpus) \
+	APICFUNC(check_apicid_used) \
+	APICFUNC(check_apicid_present) \
+	APICFUNC(init_apic_ldr) \
+	APICFUNC(ioapic_phys_id_map) \
+	APICFUNC(clustered_apic_check) \
+	APICFUNC(multi_timer_check) \
+	APICFUNC(apicid_to_node) \
+	APICFUNC(cpu_to_logical_apicid) \
+	APICFUNC(cpu_present_to_apicid) \
+	APICFUNC(apicid_to_cpu_present) \
+	APICFUNC(mpc_apic_id) \
+	APICFUNC(setup_portio_remap) \
+	APICFUNC(check_phys_apicid_present) \
+	APICFUNC(mpc_oem_bus_info) \
+	APICFUNC(mpc_oem_pci_bus) \
+	APICFUNC(mps_oem_check) \
+	APICFUNC(get_apic_id) \
 	.apic_id_mask = APIC_ID_MASK, \
-	APICFUNC(cpu_mask_to_apicid), \
-	APICFUNC(acpi_madt_oem_check), \
-	APICFUNC(send_IPI_mask), \
-	APICFUNC(send_IPI_allbutself), \
-	APICFUNC(send_IPI_all), \
-	APICFUNC(enable_apic_mode), \
-	APICFUNC(phys_pkg_id), \
+	APICFUNC(cpu_mask_to_apicid) \
+	APICFUNC(acpi_madt_oem_check) \
+	IPIFUNC(send_IPI_mask) \
+	IPIFUNC(send_IPI_allbutself) \
+	IPIFUNC(send_IPI_all) \
+	APICFUNC(enable_apic_mode) \
+	APICFUNC(phys_pkg_id) \
 	}
 
 extern struct genapic *genapic;
diff --git a/include/asm-i386/intel_arch_perfmon.h b/include/asm-i386/intel_arch_perfmon.h
index 134ea9cc5283..b52cd60a075b 100644
--- a/include/asm-i386/intel_arch_perfmon.h
+++ b/include/asm-i386/intel_arch_perfmon.h
@@ -14,6 +14,18 @@
 
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL	(0x3c)
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK	(0x00 << 8)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0)
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX (0)
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
+	(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
+
+union cpuid10_eax {
+	struct {
+		unsigned int version_id:8;
+		unsigned int num_counters:8;
+		unsigned int bit_width:8;
+		unsigned int mask_length:8;
+	} split;
+	unsigned int full;
+};
 
 #endif /* X86_INTEL_ARCH_PERFMON_H */
diff --git a/include/asm-i386/io_apic.h b/include/asm-i386/io_apic.h
index 5092e819b8a2..5d309275a1dc 100644
--- a/include/asm-i386/io_apic.h
+++ b/include/asm-i386/io_apic.h
@@ -188,6 +188,16 @@ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned
 /* 1 if "noapic" boot option passed */
 extern int skip_ioapic_setup;
 
+static inline void disable_ioapic_setup(void)
+{
+	skip_ioapic_setup = 1;
+}
+
+static inline int ioapic_setup_disabled(void)
+{
+	return skip_ioapic_setup;
+}
+
 /*
  * If we use the IO-APIC for IRQ routing, disable automatic
  * assignment of PCI IRQ's.
@@ -206,6 +216,7 @@ extern int (*ioapic_renumber_irq)(int ioapic, int irq);
 
 #else /* !CONFIG_X86_IO_APIC */
 #define io_apic_assign_pci_irqs 0
+static inline void disable_ioapic_setup(void) { }
 #endif
 
 extern int assign_irq_vector(int irq);
diff --git a/include/asm-i386/kexec.h b/include/asm-i386/kexec.h
index 53f0e06672dc..4dfc9f5ed031 100644
--- a/include/asm-i386/kexec.h
+++ b/include/asm-i386/kexec.h
@@ -1,6 +1,26 @@
 #ifndef _I386_KEXEC_H
 #define _I386_KEXEC_H
 
+#define PA_CONTROL_PAGE  0
+#define VA_CONTROL_PAGE  1
+#define PA_PGD           2
+#define VA_PGD           3
+#define PA_PTE_0         4
+#define VA_PTE_0         5
+#define PA_PTE_1         6
+#define VA_PTE_1         7
+#ifdef CONFIG_X86_PAE
+#define PA_PMD_0         8
+#define VA_PMD_0         9
+#define PA_PMD_1        10
+#define VA_PMD_1        11
+#define PAGES_NR        12
+#else
+#define PAGES_NR         8
+#endif
+
+#ifndef __ASSEMBLY__
+
 #include <asm/fixmap.h>
 #include <asm/ptrace.h>
 #include <asm/string.h>
@@ -72,5 +92,12 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 		newregs->eip = (unsigned long)current_text_addr();
 	}
 }
+asmlinkage NORET_TYPE void
+relocate_kernel(unsigned long indirection_page,
+		unsigned long control_page,
+		unsigned long start_address,
+		unsigned int has_pae) ATTRIB_NORET;
+
+#endif /* __ASSEMBLY__ */
 
 #endif /* _I386_KEXEC_H */
diff --git a/include/asm-i386/mach-es7000/mach_apic.h b/include/asm-i386/mach-es7000/mach_apic.h
index b5f3f0d0b2bc..26333685a7fb 100644
--- a/include/asm-i386/mach-es7000/mach_apic.h
+++ b/include/asm-i386/mach-es7000/mach_apic.h
@@ -123,9 +123,13 @@ extern u8 cpu_2_logical_apicid[];
 /* Mapping from cpu number to logical apicid */
 static inline int cpu_to_logical_apicid(int cpu)
 {
+#ifdef CONFIG_SMP
 	if (cpu >= NR_CPUS)
 		return BAD_APICID;
 	return (int)cpu_2_logical_apicid[cpu];
+#else
+	return logical_smp_processor_id();
+#endif
 }
 
 static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *unused)
diff --git a/include/asm-i386/mach-summit/mach_apic.h b/include/asm-i386/mach-summit/mach_apic.h
index 9fd073286289..a81b05961595 100644
--- a/include/asm-i386/mach-summit/mach_apic.h
+++ b/include/asm-i386/mach-summit/mach_apic.h
@@ -46,10 +46,12 @@ extern u8 cpu_2_logical_apicid[];
 static inline void init_apic_ldr(void)
 {
 	unsigned long val, id;
-	int i, count;
-	u8 lid;
+	int count = 0;
 	u8 my_id = (u8)hard_smp_processor_id();
 	u8 my_cluster = (u8)apicid_cluster(my_id);
+#ifdef CONFIG_SMP
+	u8 lid;
+	int i;
 
 	/* Create logical APIC IDs by counting CPUs already in cluster. */
 	for (count = 0, i = NR_CPUS; --i >= 0; ) {
@@ -57,6 +59,7 @@ static inline void init_apic_ldr(void)
 		if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
 			++count;
 	}
+#endif
 	/* We only have a 4 wide bitmap in cluster mode.  If a deranged
 	 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
 	BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
@@ -91,9 +94,13 @@ static inline int apicid_to_node(int logical_apicid)
 /* Mapping from cpu number to logical apicid */
 static inline int cpu_to_logical_apicid(int cpu)
 {
+#ifdef CONFIG_SMP
 	if (cpu >= NR_CPUS)
 		return BAD_APICID;
 	return (int)cpu_2_logical_apicid[cpu];
+#else
+	return logical_smp_processor_id();
+#endif
 }
 
 static inline int cpu_present_to_apicid(int mps_cpu)
diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h
index 05a538531229..7a17d9e58ad6 100644
--- a/include/asm-i386/mutex.h
+++ b/include/asm-i386/mutex.h
@@ -30,14 +30,10 @@ do { \
 								\
 	__asm__ __volatile__(					\
 		LOCK_PREFIX "   decl (%%eax)	\n"		\
-		"   js 2f	\n"				\
+		"   jns 1f	\n"				\
+		"   call "#fail_fn"	\n"			\
 		"1:		\n"				\
 								\
-		LOCK_SECTION_START("")				\
-		"2: call "#fail_fn"	\n"			\
-		"   jmp 1b	\n"				\
-		LOCK_SECTION_END				\
-								\
 		:"=a" (dummy)					\
 		: "a" (count)					\
 		: "memory", "ecx", "edx");			\
@@ -86,14 +82,10 @@ do { \
 								\
 	__asm__ __volatile__(					\
 		LOCK_PREFIX "   incl (%%eax)	\n"		\
-		"   jle 2f	\n"				\
+		"   jg 1f	\n"				\
+		"   call "#fail_fn"	\n"			\
 		"1:		\n"				\
 								\
-		LOCK_SECTION_START("")				\
-		"2: call "#fail_fn"	\n"			\
-		"   jmp 1b	\n"				\
-		LOCK_SECTION_END				\
-								\
 		:"=a" (dummy)					\
 		: "a" (count)					\
 		: "memory", "ecx", "edx");			\
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
index 67d994799999..303bcd4592bb 100644
--- a/include/asm-i386/nmi.h
+++ b/include/asm-i386/nmi.h
@@ -6,32 +6,29 @@
 
 #include <linux/pm.h>
 
-struct pt_regs;
-
-typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
-
 /**
- * set_nmi_callback
+ * do_nmi_callback
  *
- * Set a handler for an NMI. Only one handler may be
- * set. Return 1 if the NMI was handled.
+ * Check to see if a callback exists and execute it. Return 1
+ * if the handler exists and was handled successfully.
  */
-void set_nmi_callback(nmi_callback_t callback);
+int do_nmi_callback(struct pt_regs *regs, int cpu);
 
-/**
- * unset_nmi_callback
- *
- * Remove the handler previously set.
- */
-void unset_nmi_callback(void);
-
-extern void setup_apic_nmi_watchdog (void);
-extern int reserve_lapic_nmi(void);
-extern void release_lapic_nmi(void);
+extern int nmi_watchdog_enabled;
+extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
+extern int avail_to_resrv_perfctr_nmi(unsigned int);
+extern int reserve_perfctr_nmi(unsigned int);
+extern void release_perfctr_nmi(unsigned int);
+extern int reserve_evntsel_nmi(unsigned int);
+extern void release_evntsel_nmi(unsigned int);
+
+extern void setup_apic_nmi_watchdog (void *);
+extern void stop_apic_nmi_watchdog (void *);
 extern void disable_timer_nmi_watchdog(void);
 extern void enable_timer_nmi_watchdog(void);
-extern void nmi_watchdog_tick (struct pt_regs * regs);
+extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
 
+extern atomic_t nmi_active;
 extern unsigned int nmi_watchdog;
 #define NMI_DEFAULT	-1
 #define NMI_NONE	0
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 0dc051a8078b..541b3e234335 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -411,8 +411,6 @@ extern pte_t *lookup_address(unsigned long address);
 static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
 #endif
 
-extern void noexec_setup(const char *str);
-
 #if defined(CONFIG_HIGHPTE)
 #define pte_offset_map(dir, address) \
 	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h
index 1910880fcd40..a4a0e5207db5 100644
--- a/include/asm-i386/ptrace.h
+++ b/include/asm-i386/ptrace.h
@@ -27,6 +27,7 @@ struct pt_regs {
 #ifdef __KERNEL__
 
 #include <asm/vm86.h>
+#include <asm/segment.h>
 
 struct task_struct;
 extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
@@ -40,18 +41,14 @@ extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int erro
  */
 static inline int user_mode(struct pt_regs *regs)
 {
-	return (regs->xcs & 3) != 0;
+	return (regs->xcs & SEGMENT_RPL_MASK) == USER_RPL;
 }
 static inline int user_mode_vm(struct pt_regs *regs)
 {
-	return ((regs->xcs & 3) | (regs->eflags & VM_MASK)) != 0;
+	return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL;
 }
 #define instruction_pointer(regs) ((regs)->eip)
-#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
 extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
 #endif /* __KERNEL__ */
 
 #endif
diff --git a/include/asm-i386/rwlock.h b/include/asm-i386/rwlock.h
index 87c069ccba08..c3e5db32fa48 100644
--- a/include/asm-i386/rwlock.h
+++ b/include/asm-i386/rwlock.h
@@ -20,52 +20,6 @@
 #define RW_LOCK_BIAS		 0x01000000
 #define RW_LOCK_BIAS_STR	"0x01000000"
 
-#define __build_read_lock_ptr(rw, helper) \
-	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" \
-		"jns 1f\n" \
-		"call " helper "\n\t" \
-		"1:\n" \
-		::"a" (rw) : "memory")
-
-#define __build_read_lock_const(rw, helper) \
-	asm volatile(LOCK_PREFIX " subl $1,%0\n\t" \
-		"jns 1f\n" \
-		"pushl %%eax\n\t" \
-		"leal %0,%%eax\n\t" \
-		"call " helper "\n\t" \
-		"popl %%eax\n\t" \
-		"1:\n" \
-		:"+m" (*(volatile int *)rw) : : "memory")
-
-#define __build_read_lock(rw, helper) do { \
-		if (__builtin_constant_p(rw)) \
-			__build_read_lock_const(rw, helper); \
-		else \
-			__build_read_lock_ptr(rw, helper); \
-	} while (0)
-
-#define __build_write_lock_ptr(rw, helper) \
-	asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
-		"jz 1f\n" \
-		"call " helper "\n\t" \
-		"1:\n" \
-		::"a" (rw) : "memory")
-
-#define __build_write_lock_const(rw, helper) \
-	asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
-		"jz 1f\n" \
-		"pushl %%eax\n\t" \
-		"leal %0,%%eax\n\t" \
-		"call " helper "\n\t" \
-		"popl %%eax\n\t" \
-		"1:\n" \
-		:"+m" (*(volatile int *)rw) : : "memory")
-
-#define __build_write_lock(rw, helper) do { \
-		if (__builtin_constant_p(rw)) \
-			__build_write_lock_const(rw, helper); \
-		else \
-			__build_write_lock_ptr(rw, helper); \
-	} while (0)
+/* Code is in asm-i386/spinlock.h */
 
 #endif
diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h
index 43113f5608eb..bc598d6388e3 100644
--- a/include/asm-i386/rwsem.h
+++ b/include/asm-i386/rwsem.h
@@ -99,17 +99,9 @@ static inline void __down_read(struct rw_semaphore *sem)
 	__asm__ __volatile__(
 		"# beginning down_read\n\t"
 LOCK_PREFIX	"  incl      (%%eax)\n\t" /* adds 0x00000001, returns the old value */
-		"  js        2f\n\t" /* jump if we weren't granted the lock */
+		"  jns       1f\n"
+		"  call call_rwsem_down_read_failed\n"
 		"1:\n\t"
-		LOCK_SECTION_START("")
-		"2:\n\t"
-		"  pushl     %%ecx\n\t"
-		"  pushl     %%edx\n\t"
-		"  call      rwsem_down_read_failed\n\t"
-		"  popl      %%edx\n\t"
-		"  popl      %%ecx\n\t"
-		"  jmp       1b\n"
-		LOCK_SECTION_END
 		"# ending down_read\n\t"
 		: "+m" (sem->count)
 		: "a" (sem)
@@ -151,15 +143,9 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
151 "# beginning down_write\n\t" 143 "# beginning down_write\n\t"
152LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */ 144LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
153 " testl %%edx,%%edx\n\t" /* was the count 0 before? */ 145 " testl %%edx,%%edx\n\t" /* was the count 0 before? */
154 " jnz 2f\n\t" /* jump if we weren't granted the lock */ 146 " jz 1f\n"
155 "1:\n\t" 147 " call call_rwsem_down_write_failed\n"
156 LOCK_SECTION_START("") 148 "1:\n"
157 "2:\n\t"
158 " pushl %%ecx\n\t"
159 " call rwsem_down_write_failed\n\t"
160 " popl %%ecx\n\t"
161 " jmp 1b\n"
162 LOCK_SECTION_END
163 "# ending down_write" 149 "# ending down_write"
164 : "+m" (sem->count), "=d" (tmp) 150 : "+m" (sem->count), "=d" (tmp)
165 : "a" (sem), "1" (tmp) 151 : "a" (sem), "1" (tmp)
@@ -193,17 +179,9 @@ static inline void __up_read(struct rw_semaphore *sem)
 	__asm__ __volatile__(
 		"# beginning __up_read\n\t"
 LOCK_PREFIX	"  xadd      %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
-		"  js        2f\n\t" /* jump if the lock is being waited upon */
-		"1:\n\t"
-		LOCK_SECTION_START("")
-		"2:\n\t"
-		"  decw      %%dx\n\t" /* do nothing if still outstanding active readers */
-		"  jnz       1b\n\t"
-		"  pushl     %%ecx\n\t"
-		"  call      rwsem_wake\n\t"
-		"  popl      %%ecx\n\t"
-		"  jmp       1b\n"
-		LOCK_SECTION_END
+		"  jns        1f\n\t"
+		"  call call_rwsem_wake\n"
+		"1:\n"
 		"# ending __up_read\n"
 		: "+m" (sem->count), "=d" (tmp)
 		: "a" (sem), "1" (tmp)
@@ -219,17 +197,9 @@ static inline void __up_write(struct rw_semaphore *sem)
219 "# beginning __up_write\n\t" 197 "# beginning __up_write\n\t"
220 " movl %2,%%edx\n\t" 198 " movl %2,%%edx\n\t"
221LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */ 199LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
222 " jnz 2f\n\t" /* jump if the lock is being waited upon */ 200 " jz 1f\n"
201 " call call_rwsem_wake\n"
223 "1:\n\t" 202 "1:\n\t"
224 LOCK_SECTION_START("")
225 "2:\n\t"
226 " decw %%dx\n\t" /* did the active count reduce to 0? */
227 " jnz 1b\n\t" /* jump back if not */
228 " pushl %%ecx\n\t"
229 " call rwsem_wake\n\t"
230 " popl %%ecx\n\t"
231 " jmp 1b\n"
232 LOCK_SECTION_END
233 "# ending __up_write\n" 203 "# ending __up_write\n"
234 : "+m" (sem->count) 204 : "+m" (sem->count)
235 : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS) 205 : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
@@ -244,17 +214,9 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 	__asm__ __volatile__(
 		"# beginning __downgrade_write\n\t"
 LOCK_PREFIX	"  addl      %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
-		"  js        2f\n\t" /* jump if the lock is being waited upon */
+		"  jns       1f\n\t"
+		"  call call_rwsem_downgrade_wake\n"
 		"1:\n\t"
-		LOCK_SECTION_START("")
-		"2:\n\t"
-		"  pushl     %%ecx\n\t"
-		"  pushl     %%edx\n\t"
-		"  call      rwsem_downgrade_wake\n\t"
-		"  popl      %%edx\n\t"
-		"  popl      %%ecx\n\t"
-		"  jmp       1b\n"
-		LOCK_SECTION_END
 		"# ending __downgrade_write\n"
 		: "+m" (sem->count)
 		: "a" (sem), "i" (-RWSEM_WAITING_BIAS)
diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h
index faf995307b9e..b7ab59685ba7 100644
--- a/include/asm-i386/segment.h
+++ b/include/asm-i386/segment.h
@@ -83,6 +83,11 @@
 
 #define GDT_SIZE (GDT_ENTRIES * 8)
 
+/* Matches __KERNEL_CS and __USER_CS (they must be 2 entries apart) */
+#define SEGMENT_IS_FLAT_CODE(x)  (((x) & 0xec) == GDT_ENTRY_KERNEL_CS * 8)
+/* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
+#define SEGMENT_IS_PNP_CODE(x)   (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
+
 /* Simple and small GDT entries for booting only */
 
 #define GDT_ENTRY_BOOT_CS		2
@@ -112,4 +117,16 @@
  */
 #define IDT_ENTRIES 256
 
+/* Bottom two bits of selector give the ring privilege level */
+#define SEGMENT_RPL_MASK	0x3
+/* Bit 2 is table indicator (LDT/GDT) */
+#define SEGMENT_TI_MASK		0x4
+
+/* User mode is privilege level 3 */
+#define USER_RPL		0x3
+/* LDT segment has TI set, GDT has it cleared */
+#define SEGMENT_LDT		0x4
+#define SEGMENT_GDT		0x0
+
+#define get_kernel_rpl()	0
 #endif
diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h
index d51e800acf29..e63b6a68f04c 100644
--- a/include/asm-i386/semaphore.h
+++ b/include/asm-i386/semaphore.h
@@ -100,13 +100,10 @@ static inline void down(struct semaphore * sem)
 	__asm__ __volatile__(
 		"# atomic down operation\n\t"
 		LOCK_PREFIX "decl %0\n\t"     /* --sem->count */
-		"js 2f\n"
-		"1:\n"
-		LOCK_SECTION_START("")
-		"2:\tlea %0,%%eax\n\t"
-		"call __down_failed\n\t"
-		"jmp 1b\n"
-		LOCK_SECTION_END
+		"jns 2f\n"
+		"\tlea %0,%%eax\n\t"
+		"call __down_failed\n"
+		"2:"
 		:"+m" (sem->count)
 		:
 		:"memory","ax");
@@ -123,15 +120,12 @@ static inline int down_interruptible(struct semaphore * sem)
 	might_sleep();
 	__asm__ __volatile__(
 		"# atomic interruptible down operation\n\t"
+		"xorl %0,%0\n\t"
 		LOCK_PREFIX "decl %1\n\t"     /* --sem->count */
-		"js 2f\n\t"
-		"xorl %0,%0\n"
-		"1:\n"
-		LOCK_SECTION_START("")
-		"2:\tlea %1,%%eax\n\t"
-		"call __down_failed_interruptible\n\t"
-		"jmp 1b\n"
-		LOCK_SECTION_END
+		"jns 2f\n\t"
+		"lea %1,%%eax\n\t"
+		"call __down_failed_interruptible\n"
+		"2:"
 		:"=a" (result), "+m" (sem->count)
 		:
 		:"memory");
@@ -148,15 +142,12 @@ static inline int down_trylock(struct semaphore * sem)
 
 	__asm__ __volatile__(
 		"# atomic interruptible down operation\n\t"
+		"xorl %0,%0\n\t"
 		LOCK_PREFIX "decl %1\n\t"     /* --sem->count */
-		"js 2f\n\t"
-		"xorl %0,%0\n"
-		"1:\n"
-		LOCK_SECTION_START("")
-		"2:\tlea %1,%%eax\n\t"
+		"jns 2f\n\t"
+		"lea %1,%%eax\n\t"
 		"call __down_failed_trylock\n\t"
-		"jmp 1b\n"
-		LOCK_SECTION_END
+		"2:\n"
 		:"=a" (result), "+m" (sem->count)
 		:
 		:"memory");
@@ -166,22 +157,16 @@ static inline int down_trylock(struct semaphore * sem)
 /*
  * Note! This is subtle. We jump to wake people up only if
  * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
  */
 static inline void up(struct semaphore * sem)
 {
 	__asm__ __volatile__(
 		"# atomic up operation\n\t"
 		LOCK_PREFIX "incl %0\n\t"     /* ++sem->count */
-		"jle 2f\n"
-		"1:\n"
-		LOCK_SECTION_START("")
-		"2:\tlea %0,%%eax\n\t"
-		"call __up_wakeup\n\t"
-		"jmp 1b\n"
-		LOCK_SECTION_END
-		".subsection 0\n"
+		"jg 1f\n\t"
+		"lea %0,%%eax\n\t"
+		"call __up_wakeup\n"
+		"1:"
 		:"+m" (sem->count)
 		:
 		:"memory","ax");
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 142d10e34ade..32ac8c91d5c5 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -80,17 +80,12 @@ static inline int hard_smp_processor_id(void)
 	return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
 }
 #endif
-
-static __inline int logical_smp_processor_id(void)
-{
-	/* we don't want to mark this access volatile - bad code generation */
-	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
-}
-
 #endif
 
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
+extern unsigned int num_processors;
+
 #endif /* !__ASSEMBLY__ */
 
 #else /* CONFIG_SMP */
@@ -100,4 +95,15 @@ extern void __cpu_die(unsigned int cpu);
 #define NO_PROC_ID		0xFF		/* No processor magic marker */
 
 #endif
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_X86_LOCAL_APIC
+static __inline int logical_smp_processor_id(void)
+{
+	/* we don't want to mark this access volatile - bad code generation */
+	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
+}
+#endif
+#endif
+
 #endif
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index d1020363c41a..b0b3043f05e1 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -4,8 +4,12 @@
 #include <asm/atomic.h>
 #include <asm/rwlock.h>
 #include <asm/page.h>
+#include <asm/processor.h>
 #include <linux/compiler.h>
 
+#define CLI_STRING	"cli"
+#define STI_STRING	"sti"
+
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  *
@@ -17,67 +21,64 @@
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define __raw_spin_is_locked(x) \
-		(*(volatile signed char *)(&(x)->slock) <= 0)
-
-#define __raw_spin_lock_string \
-	"\n1:\t" \
-	LOCK_PREFIX " ; decb %0\n\t" \
-	"jns 3f\n" \
-	"2:\t" \
-	"rep;nop\n\t" \
-	"cmpb $0,%0\n\t" \
-	"jle 2b\n\t" \
-	"jmp 1b\n" \
-	"3:\n\t"
-
-/*
- * NOTE: there's an irqs-on section here, which normally would have to be
- * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use
- * __raw_spin_lock_string_flags().
- */
-#define __raw_spin_lock_string_flags \
-	"\n1:\t" \
-	LOCK_PREFIX " ; decb %0\n\t" \
-	"jns 5f\n" \
-	"2:\t" \
-	"testl $0x200, %1\n\t" \
-	"jz 4f\n\t" \
-	"sti\n" \
-	"3:\t" \
-	"rep;nop\n\t" \
-	"cmpb $0, %0\n\t" \
-	"jle 3b\n\t" \
-	"cli\n\t" \
-	"jmp 1b\n" \
-	"4:\t" \
-	"rep;nop\n\t" \
-	"cmpb $0, %0\n\t" \
-	"jg 1b\n\t" \
-	"jmp 4b\n" \
-	"5:\n\t"
+static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+{
+	return *(volatile signed char *)(&(x)->slock) <= 0;
+}
 
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-	asm(__raw_spin_lock_string : "+m" (lock->slock) : : "memory");
+	asm volatile("\n1:\t"
+		     LOCK_PREFIX " ; decb %0\n\t"
+		     "jns 3f\n"
+		     "2:\t"
+		     "rep;nop\n\t"
+		     "cmpb $0,%0\n\t"
+		     "jle 2b\n\t"
+		     "jmp 1b\n"
+		     "3:\n\t"
+		     : "+m" (lock->slock) : : "memory");
 }
 
 /*
  * It is easier for the lock validator if interrupts are not re-enabled
  * in the middle of a lock-acquire. This is a performance feature anyway
  * so we turn it off:
+ *
+ * NOTE: there's an irqs-on section here, which normally would have to be
+ * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
  */
 #ifndef CONFIG_PROVE_LOCKING
 static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
-	asm(__raw_spin_lock_string_flags : "+m" (lock->slock) : "r" (flags) : "memory");
+	asm volatile(
+		"\n1:\t"
+		LOCK_PREFIX " ; decb %0\n\t"
+		"jns 5f\n"
+		"2:\t"
+		"testl $0x200, %1\n\t"
+		"jz 4f\n\t"
+		STI_STRING "\n"
+		"3:\t"
+		"rep;nop\n\t"
+		"cmpb $0, %0\n\t"
+		"jle 3b\n\t"
+		CLI_STRING "\n\t"
+		"jmp 1b\n"
+		"4:\t"
+		"rep;nop\n\t"
+		"cmpb $0, %0\n\t"
+		"jg 1b\n\t"
+		"jmp 4b\n"
+		"5:\n\t"
+		: "+m" (lock->slock) : "r" (flags) : "memory");
 }
 #endif
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	char oldval;
-	__asm__ __volatile__(
+	asm volatile(
 		"xchgb %b0,%1"
 		:"=q" (oldval), "+m" (lock->slock)
 		:"0" (0) : "memory");
@@ -93,38 +94,29 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 
 #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
 
-#define __raw_spin_unlock_string \
-	"movb $1,%0" \
-		:"+m" (lock->slock) : : "memory"
-
-
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	__asm__ __volatile__(
-		__raw_spin_unlock_string
-	);
+	asm volatile("movb $1,%0" : "+m" (lock->slock) :: "memory");
 }
 
 #else
 
-#define __raw_spin_unlock_string \
-	"xchgb %b0, %1" \
-		:"=q" (oldval), "+m" (lock->slock) \
-		:"0" (oldval) : "memory"
-
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	char oldval = 1;
 
-	__asm__ __volatile__(
-		__raw_spin_unlock_string
-	);
+	asm volatile("xchgb %b0, %1"
+		     : "=q" (oldval), "+m" (lock->slock)
+		     : "0" (oldval) : "memory");
 }
 
 #endif
 
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+{
+	while (__raw_spin_is_locked(lock))
+		cpu_relax();
+}
 
 /*
  * Read-write spinlocks, allowing multiple readers
@@ -151,22 +143,36 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x)		((int)(x)->lock > 0)
+static inline int __raw_read_can_lock(raw_rwlock_t *x)
+{
+	return (int)(x)->lock > 0;
+}
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(x)		((x)->lock == RW_LOCK_BIAS)
+static inline int __raw_write_can_lock(raw_rwlock_t *x)
+{
+	return (x)->lock == RW_LOCK_BIAS;
+}
 
 static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-	__build_read_lock(rw, "__read_lock_failed");
+	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
+		     "jns 1f\n"
+		     "call __read_lock_failed\n\t"
+		     "1:\n"
+		     ::"a" (rw) : "memory");
 }
 
 static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-	__build_write_lock(rw, "__write_lock_failed");
+	asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t"
+		     "jz 1f\n"
+		     "call __write_lock_failed\n\t"
+		     "1:\n"
+		     ::"a" (rw) : "memory");
 }
 
 static inline int __raw_read_trylock(raw_rwlock_t *lock)
diff --git a/include/asm-i386/stacktrace.h b/include/asm-i386/stacktrace.h
new file mode 100644
index 000000000000..7d1f6a5cbfca
--- /dev/null
+++ b/include/asm-i386/stacktrace.h
@@ -0,0 +1 @@
+#include <asm-x86_64/stacktrace.h>
diff --git a/include/asm-i386/therm_throt.h b/include/asm-i386/therm_throt.h
new file mode 100644
index 000000000000..399bf6026b16
--- /dev/null
+++ b/include/asm-i386/therm_throt.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_I386_THERM_THROT_H__
+#define __ASM_I386_THERM_THROT_H__ 1
+
+#include <asm/atomic.h>
+
+extern atomic_t therm_throt_en;
+int therm_throt_process(int curr);
+
+#endif /* __ASM_I386_THERM_THROT_H__ */
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h
index d57ca5c540b6..360648b0f2b3 100644
--- a/include/asm-i386/tlbflush.h
+++ b/include/asm-i386/tlbflush.h
@@ -36,8 +36,6 @@
36 : "memory"); \ 36 : "memory"); \
37 } while (0) 37 } while (0)
38 38
39extern unsigned long pgkern_mask;
40
41# define __flush_tlb_all() \ 39# define __flush_tlb_all() \
42 do { \ 40 do { \
43 if (cpu_has_pge) \ 41 if (cpu_has_pge) \
@@ -49,7 +47,7 @@ extern unsigned long pgkern_mask;
 #define cpu_has_invlpg	(boot_cpu_data.x86 > 3)
 
 #define __flush_tlb_single(addr) \
-	__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
+	__asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory")
 
 #ifdef CONFIG_X86_INVLPG
 # define __flush_tlb_one(addr) __flush_tlb_single(addr)
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h
index 97b828ce31e0..c13933185c1c 100644
--- a/include/asm-i386/tsc.h
+++ b/include/asm-i386/tsc.h
@@ -6,7 +6,6 @@
 #ifndef _ASM_i386_TSC_H
 #define _ASM_i386_TSC_H
 
-#include <linux/config.h>
 #include <asm/processor.h>
 
 /*
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index fc1c8ddae149..565d0897b205 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -323,10 +323,11 @@
 #define __NR_tee		315
 #define __NR_vmsplice		316
 #define __NR_move_pages		317
+#define __NR_getcpu		318
 
 #ifdef __KERNEL__
 
-#define NR_syscalls 318
+#define NR_syscalls 319
 
 /*
  * user-visible error numbers are in the range -1 - -128: see
diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h
index 4c1a0b968569..5031d693b89d 100644
--- a/include/asm-i386/unwind.h
+++ b/include/asm-i386/unwind.h
@@ -18,6 +18,7 @@ struct unwind_frame_info
 {
 	struct pt_regs regs;
 	struct task_struct *task;
+	unsigned call_frame:1;
 };
 
 #define UNW_PC(frame) (frame)->regs.eip
@@ -28,6 +29,8 @@ struct unwind_frame_info
 #define FRAME_LINK_OFFSET 0
 #define STACK_BOTTOM(tsk) STACK_LIMIT((tsk)->thread.esp0)
 #define STACK_TOP(tsk) ((tsk)->thread.esp0)
+#else
+#define UNW_FP(frame) ((void)(frame), 0)
 #endif
 #define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1))
 
@@ -42,6 +45,10 @@ struct unwind_frame_info
 	PTREGS_INFO(edi), \
 	PTREGS_INFO(eip)
 
+#define UNW_DEFAULT_RA(raItem, dataAlign) \
+	((raItem).where == Memory && \
+	 !((raItem).value * (dataAlign) + 4))
+
 static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
                                             /*const*/ struct pt_regs *regs)
 {
@@ -88,6 +95,7 @@ static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
 
 #define UNW_PC(frame) ((void)(frame), 0)
 #define UNW_SP(frame) ((void)(frame), 0)
+#define UNW_FP(frame) ((void)(frame), 0)
 
 static inline int arch_unw_user_mode(const void *info)
 {
diff --git a/include/asm-ia64/module.h b/include/asm-ia64/module.h
index 85c82bd819f2..d2da61e4c49b 100644
--- a/include/asm-ia64/module.h
+++ b/include/asm-ia64/module.h
@@ -28,7 +28,8 @@ struct mod_arch_specific {
28#define Elf_Ehdr Elf64_Ehdr 28#define Elf_Ehdr Elf64_Ehdr
29 29
30#define MODULE_PROC_FAMILY "ia64" 30#define MODULE_PROC_FAMILY "ia64"
31#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY 31#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY \
32 "gcc-" __stringify(__GNUC__) "." __stringify(__GNUC_MINOR__)
32 33
33#define ARCH_SHF_SMALL SHF_IA_64_SHORT 34#define ARCH_SHF_SMALL SHF_IA_64_SHORT
34 35
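
The ia64 change folds the compiler's major.minor version into the module vermagic, so modules built with a different gcc are refused at load time. A self-contained rendering of the expansion, with the stringify helpers copied in for illustration:

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)
#define MODULE_PROC_FAMILY	"ia64"
#define MODULE_ARCH_VERMAGIC	MODULE_PROC_FAMILY \
	"gcc-" __stringify(__GNUC__) "." __stringify(__GNUC_MINOR__)

/* with gcc 4.1 this is the literal "ia64gcc-4.1"; module load compares
 * it against the running kernel's own vermagic string */
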
diff --git a/include/asm-um/alternative-asm.i b/include/asm-um/alternative-asm.i
new file mode 100644
index 000000000000..cae9faca132f
--- /dev/null
+++ b/include/asm-um/alternative-asm.i
@@ -0,0 +1,6 @@
1#ifndef __UM_ALTERNATIVE_ASM_I
2#define __UM_ALTERNATIVE_ASM_I
3
4#include "asm/arch/alternative-asm.i"
5
6#endif
diff --git a/include/asm-um/frame.i b/include/asm-um/frame.i
new file mode 100644
index 000000000000..09d5dca5d928
--- /dev/null
+++ b/include/asm-um/frame.i
@@ -0,0 +1,6 @@
1#ifndef __UM_FRAME_I
2#define __UM_FRAME_I
3
4#include "asm/arch/frame.i"
5
6#endif
diff --git a/include/asm-x86_64/acpi.h b/include/asm-x86_64/acpi.h
index 2c95a319c056..ed59aa4c6ff9 100644
--- a/include/asm-x86_64/acpi.h
+++ b/include/asm-x86_64/acpi.h
@@ -155,8 +155,6 @@ extern void acpi_reserve_bootmem(void);
155 155
156#endif /*CONFIG_ACPI_SLEEP*/ 156#endif /*CONFIG_ACPI_SLEEP*/
157 157
158#define boot_cpu_physical_apicid boot_cpu_id
159
160extern int acpi_disabled; 158extern int acpi_disabled;
161extern int acpi_pci_disabled; 159extern int acpi_pci_disabled;
162 160
diff --git a/include/asm-x86_64/alternative-asm.i b/include/asm-x86_64/alternative-asm.i
new file mode 100644
index 000000000000..e4041f4fa4dc
--- /dev/null
+++ b/include/asm-x86_64/alternative-asm.i
@@ -0,0 +1,14 @@
1#include <linux/config.h>
2
3#ifdef CONFIG_SMP
4 .macro LOCK_PREFIX
51: lock
6 .section .smp_locks,"a"
7 .align 8
8 .quad 1b
9 .previous
10 .endm
11#else
12 .macro LOCK_PREFIX
13 .endm
14#endif
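
Each SMP LOCK_PREFIX expansion records the address of its f0 (lock) byte in the .smp_locks section, which lets a kernel built for SMP but booted on one CPU patch every recorded prefix to a NOP. A rough sketch of that fixup, assuming linker-provided section bounds (the real walker, with bounds checking, lives in alternative.c):

extern unsigned char *__smp_locks[], *__smp_locks_end[];

static void patch_out_lock_prefixes(void)
{
	unsigned char **ptr;

	for (ptr = __smp_locks; ptr < __smp_locks_end; ptr++)
		**ptr = 0x90;	/* overwrite the 0xf0 lock prefix with nop */
}
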
diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
index 9c96a0a8d1bd..9e66d32330c9 100644
--- a/include/asm-x86_64/apic.h
+++ b/include/asm-x86_64/apic.h
@@ -17,6 +17,8 @@
17 17
18extern int apic_verbosity; 18extern int apic_verbosity;
19extern int apic_runs_main_timer; 19extern int apic_runs_main_timer;
20extern int ioapic_force;
21extern int apic_mapped;
20 22
21/* 23/*
22 * Define the default level of output to be very little 24 * Define the default level of output to be very little
@@ -29,8 +31,6 @@ extern int apic_runs_main_timer;
29 printk(s, ##a); \ 31 printk(s, ##a); \
30 } while (0) 32 } while (0)
31 33
32#ifdef CONFIG_X86_LOCAL_APIC
33
34struct pt_regs; 34struct pt_regs;
35 35
36/* 36/*
@@ -95,17 +95,12 @@ extern void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector,
95#define K8_APIC_EXT_INT_MSG_EXT 0x7 95#define K8_APIC_EXT_INT_MSG_EXT 0x7
96#define K8_APIC_EXT_LVT_ENTRY_THRESHOLD 0 96#define K8_APIC_EXT_LVT_ENTRY_THRESHOLD 0
97 97
98extern int disable_timer_pin_1;
99
100
101void smp_send_timer_broadcast_ipi(void); 98void smp_send_timer_broadcast_ipi(void);
102void switch_APIC_timer_to_ipi(void *cpumask); 99void switch_APIC_timer_to_ipi(void *cpumask);
103void switch_ipi_to_APIC_timer(void *cpumask); 100void switch_ipi_to_APIC_timer(void *cpumask);
104 101
105#define ARCH_APICTIMER_STOPS_ON_C3 1 102#define ARCH_APICTIMER_STOPS_ON_C3 1
106 103
107#endif /* CONFIG_X86_LOCAL_APIC */
108
109extern unsigned boot_cpu_id; 104extern unsigned boot_cpu_id;
110 105
111#endif /* __ASM_APIC_H */ 106#endif /* __ASM_APIC_H */
diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h
index f7ba57b1cc08..5b535eaf5309 100644
--- a/include/asm-x86_64/bitops.h
+++ b/include/asm-x86_64/bitops.h
@@ -399,6 +399,8 @@ static __inline__ int fls(int x)
399 return r+1; 399 return r+1;
400} 400}
401 401
402#define ARCH_HAS_FAST_MULTIPLIER 1
403
402#include <asm-generic/bitops/hweight.h> 404#include <asm-generic/bitops/hweight.h>
403 405
404#endif /* __KERNEL__ */ 406#endif /* __KERNEL__ */
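
ARCH_HAS_FAST_MULTIPLIER tells the generic hweight code that a full-width multiply is cheap, so population count can finish with one multiply instead of a final shift/add cascade. A self-contained sketch of the multiplier variant for 64 bits:

static inline unsigned int hweight64_mul(unsigned long long w)
{
	w -= (w >> 1) & 0x5555555555555555ull;		/* 2-bit sums */
	w  = (w & 0x3333333333333333ull) +
	     ((w >> 2) & 0x3333333333333333ull);	/* 4-bit sums */
	w  = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0full;	/* byte sums  */
	return (unsigned int)((w * 0x0101010101010101ull) >> 56);
}
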
diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h
index 4e3919524240..6b93f5a3a5c8 100644
--- a/include/asm-x86_64/calgary.h
+++ b/include/asm-x86_64/calgary.h
@@ -24,7 +24,6 @@
24#ifndef _ASM_X86_64_CALGARY_H 24#ifndef _ASM_X86_64_CALGARY_H
25#define _ASM_X86_64_CALGARY_H 25#define _ASM_X86_64_CALGARY_H
26 26
27#include <linux/config.h>
28#include <linux/spinlock.h> 27#include <linux/spinlock.h>
29#include <linux/device.h> 28#include <linux/device.h>
30#include <linux/dma-mapping.h> 29#include <linux/dma-mapping.h>
@@ -34,12 +33,12 @@ struct iommu_table {
34 unsigned long it_base; /* mapped address of tce table */ 33 unsigned long it_base; /* mapped address of tce table */
35 unsigned long it_hint; /* Hint for next alloc */ 34 unsigned long it_hint; /* Hint for next alloc */
36 unsigned long *it_map; /* A simple allocation bitmap for now */ 35 unsigned long *it_map; /* A simple allocation bitmap for now */
36 void __iomem *bbar; /* Bridge BAR */
37 u64 tar_val; /* Table Address Register */
38 struct timer_list watchdog_timer;
37 spinlock_t it_lock; /* Protects it_map */ 39 spinlock_t it_lock; /* Protects it_map */
38 unsigned int it_size; /* Size of iommu table in entries */ 40 unsigned int it_size; /* Size of iommu table in entries */
39 unsigned char it_busno; /* Bus number this table belongs to */ 41 unsigned char it_busno; /* Bus number this table belongs to */
40 void __iomem *bbar;
41 u64 tar_val;
42 struct timer_list watchdog_timer;
43}; 42};
44 43
45#define TCE_TABLE_SIZE_UNSPECIFIED ~0 44#define TCE_TABLE_SIZE_UNSPECIFIED ~0
diff --git a/include/asm-x86_64/dwarf2.h b/include/asm-x86_64/dwarf2.h
index 0744db777676..eedc08526b0b 100644
--- a/include/asm-x86_64/dwarf2.h
+++ b/include/asm-x86_64/dwarf2.h
@@ -13,7 +13,7 @@
13 away for older version. 13 away for older version.
14 */ 14 */
15 15
16#ifdef CONFIG_UNWIND_INFO 16#ifdef CONFIG_AS_CFI
17 17
18#define CFI_STARTPROC .cfi_startproc 18#define CFI_STARTPROC .cfi_startproc
19#define CFI_ENDPROC .cfi_endproc 19#define CFI_ENDPROC .cfi_endproc
@@ -28,6 +28,11 @@
28#define CFI_REMEMBER_STATE .cfi_remember_state 28#define CFI_REMEMBER_STATE .cfi_remember_state
29#define CFI_RESTORE_STATE .cfi_restore_state 29#define CFI_RESTORE_STATE .cfi_restore_state
30#define CFI_UNDEFINED .cfi_undefined 30#define CFI_UNDEFINED .cfi_undefined
31#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
32#define CFI_SIGNAL_FRAME .cfi_signal_frame
33#else
34#define CFI_SIGNAL_FRAME
35#endif
31 36
32#else 37#else
33 38
@@ -45,6 +50,7 @@
45#define CFI_REMEMBER_STATE # 50#define CFI_REMEMBER_STATE #
46#define CFI_RESTORE_STATE # 51#define CFI_RESTORE_STATE #
47#define CFI_UNDEFINED # 52#define CFI_UNDEFINED #
53#define CFI_SIGNAL_FRAME #
48 54
49#endif 55#endif
50 56
diff --git a/include/asm-x86_64/e820.h b/include/asm-x86_64/e820.h
index f65674832318..e15d3c8628f3 100644
--- a/include/asm-x86_64/e820.h
+++ b/include/asm-x86_64/e820.h
@@ -19,13 +19,9 @@
19 19
20#define E820_RAM 1 20#define E820_RAM 1
21#define E820_RESERVED 2 21#define E820_RESERVED 2
22#define E820_ACPI 3 /* usable as RAM once ACPI tables have been read */ 22#define E820_ACPI 3
23#define E820_NVS 4 23#define E820_NVS 4
24 24
25#define HIGH_MEMORY (1024*1024)
26
27#define LOWMEMSIZE() (0x9f000)
28
29#ifndef __ASSEMBLY__ 25#ifndef __ASSEMBLY__
30struct e820entry { 26struct e820entry {
31 u64 addr; /* start of memory segment */ 27 u64 addr; /* start of memory segment */
@@ -56,8 +52,7 @@ extern void e820_setup_gap(void);
56extern unsigned long e820_hole_size(unsigned long start_pfn, 52extern unsigned long e820_hole_size(unsigned long start_pfn,
57 unsigned long end_pfn); 53 unsigned long end_pfn);
58 54
59extern void __init parse_memopt(char *p, char **end); 55extern void finish_e820_parsing(void);
60extern void __init parse_memmapopt(char *p, char **end);
61 56
62extern struct e820map e820; 57extern struct e820map e820;
63 58
diff --git a/include/asm-x86_64/fixmap.h b/include/asm-x86_64/fixmap.h
index 0b4ffbd1a125..1b620db5b9e3 100644
--- a/include/asm-x86_64/fixmap.h
+++ b/include/asm-x86_64/fixmap.h
@@ -37,13 +37,9 @@ enum fixed_addresses {
37 VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1, 37 VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
38 VSYSCALL_HPET, 38 VSYSCALL_HPET,
39 FIX_HPET_BASE, 39 FIX_HPET_BASE,
40#ifdef CONFIG_X86_LOCAL_APIC
41 FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ 40 FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
42#endif
43#ifdef CONFIG_X86_IO_APIC
44 FIX_IO_APIC_BASE_0, 41 FIX_IO_APIC_BASE_0,
45 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1, 42 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
46#endif
47 __end_of_fixed_addresses 43 __end_of_fixed_addresses
48}; 44};
49 45
diff --git a/include/asm-x86_64/genapic.h b/include/asm-x86_64/genapic.h
index 50b38e7c58e4..81e714665344 100644
--- a/include/asm-x86_64/genapic.h
+++ b/include/asm-x86_64/genapic.h
@@ -16,7 +16,6 @@ struct genapic {
16 char *name; 16 char *name;
17 u32 int_delivery_mode; 17 u32 int_delivery_mode;
18 u32 int_dest_mode; 18 u32 int_dest_mode;
19 u32 int_delivery_dest; /* for quick IPIs */
20 int (*apic_id_registered)(void); 19 int (*apic_id_registered)(void);
21 cpumask_t (*target_cpus)(void); 20 cpumask_t (*target_cpus)(void);
22 void (*init_apic_ldr)(void); 21 void (*init_apic_ldr)(void);
diff --git a/include/asm-x86_64/i387.h b/include/asm-x86_64/i387.h
index cba8a3b0cded..0217b74cc9fc 100644
--- a/include/asm-x86_64/i387.h
+++ b/include/asm-x86_64/i387.h
@@ -24,6 +24,7 @@ extern unsigned int mxcsr_feature_mask;
24extern void mxcsr_feature_mask_init(void); 24extern void mxcsr_feature_mask_init(void);
25extern void init_fpu(struct task_struct *child); 25extern void init_fpu(struct task_struct *child);
26extern int save_i387(struct _fpstate __user *buf); 26extern int save_i387(struct _fpstate __user *buf);
27extern asmlinkage void math_state_restore(void);
27 28
28/* 29/*
29 * FPU lazy state save handling... 30 * FPU lazy state save handling...
@@ -31,7 +32,9 @@ extern int save_i387(struct _fpstate __user *buf);
31 32
32#define unlazy_fpu(tsk) do { \ 33#define unlazy_fpu(tsk) do { \
33 if (task_thread_info(tsk)->status & TS_USEDFPU) \ 34 if (task_thread_info(tsk)->status & TS_USEDFPU) \
34 save_init_fpu(tsk); \ 35 save_init_fpu(tsk); \
36 else \
37 tsk->fpu_counter = 0; \
35} while (0) 38} while (0)
36 39
37/* Ignore delayed exceptions from user space */ 40/* Ignore delayed exceptions from user space */
@@ -134,8 +137,8 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
134#else 137#else
135 : [fx] "cdaSDb" (fx), "0" (0)); 138 : [fx] "cdaSDb" (fx), "0" (0));
136#endif 139#endif
137 if (unlikely(err)) 140 if (unlikely(err) && __clear_user(fx, sizeof(struct i387_fxsave_struct)))
138 __clear_user(fx, sizeof(struct i387_fxsave_struct)); 141 err = -EFAULT;
139 /* No need to clear here because the caller clears USED_MATH */ 142 /* No need to clear here because the caller clears USED_MATH */
140 return err; 143 return err;
141} 144}
diff --git a/include/asm-x86_64/intel_arch_perfmon.h b/include/asm-x86_64/intel_arch_perfmon.h
index 59c396431569..8633331420ec 100644
--- a/include/asm-x86_64/intel_arch_perfmon.h
+++ b/include/asm-x86_64/intel_arch_perfmon.h
@@ -14,6 +14,18 @@
14 14
15#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c) 15#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c)
16#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) 16#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
17#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0) 17#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX (0)
18#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
19 (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
20
21union cpuid10_eax {
22 struct {
23 unsigned int version_id:8;
24 unsigned int num_counters:8;
25 unsigned int bit_width:8;
26 unsigned int mask_length:8;
27 } split;
28 unsigned int full;
29};
18 30
19#endif /* X86_64_INTEL_ARCH_PERFMON_H */ 31#endif /* X86_64_INTEL_ARCH_PERFMON_H */
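
The new union decodes EAX of CPUID leaf 0xA (architectural performance monitoring). A usage sketch, assuming the kernel's cpuid() helper; note the EBX bit is an "event unavailable" flag, so the event exists when the bit is clear and the mask length covers its index:

static int core_cycles_event_present(void)
{
	union cpuid10_eax eax;
	unsigned int ebx, unused;

	cpuid(0xA, &eax.full, &ebx, &unused, &unused);
	return eax.split.mask_length > ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX &&
	       !(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT);
}
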
diff --git a/include/asm-x86_64/io_apic.h b/include/asm-x86_64/io_apic.h
index fb7a0909a174..5d1b5c68e36e 100644
--- a/include/asm-x86_64/io_apic.h
+++ b/include/asm-x86_64/io_apic.h
@@ -10,8 +10,6 @@
10 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar 10 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar
11 */ 11 */
12 12
13#ifdef CONFIG_X86_IO_APIC
14
15#ifdef CONFIG_PCI_MSI 13#ifdef CONFIG_PCI_MSI
16static inline int use_pci_vector(void) {return 1;} 14static inline int use_pci_vector(void) {return 1;}
17static inline void disable_edge_ioapic_vector(unsigned int vector) { } 15static inline void disable_edge_ioapic_vector(unsigned int vector) { }
@@ -209,10 +207,6 @@ extern int timer_uses_ioapic_pin_0;
209 207
210extern int sis_apic_bug; /* dummy */ 208extern int sis_apic_bug; /* dummy */
211 209
212#else /* !CONFIG_X86_IO_APIC */
213#define io_apic_assign_pci_irqs 0
214#endif
215
216extern int assign_irq_vector(int irq); 210extern int assign_irq_vector(int irq);
217 211
218void enable_NMI_through_LVT0 (void * dummy); 212void enable_NMI_through_LVT0 (void * dummy);
diff --git a/include/asm-x86_64/irq.h b/include/asm-x86_64/irq.h
index 9db5a1b4f7b1..43469d8ab71a 100644
--- a/include/asm-x86_64/irq.h
+++ b/include/asm-x86_64/irq.h
@@ -44,9 +44,7 @@ static __inline__ int irq_canonicalize(int irq)
44 return ((irq == 2) ? 9 : irq); 44 return ((irq == 2) ? 9 : irq);
45} 45}
46 46
47#ifdef CONFIG_X86_LOCAL_APIC
48#define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */ 47#define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
49#endif
50 48
51#ifdef CONFIG_HOTPLUG_CPU 49#ifdef CONFIG_HOTPLUG_CPU
52#include <linux/cpumask.h> 50#include <linux/cpumask.h>
diff --git a/include/asm-x86_64/kexec.h b/include/asm-x86_64/kexec.h
index c564bae03433..5fab957e1091 100644
--- a/include/asm-x86_64/kexec.h
+++ b/include/asm-x86_64/kexec.h
@@ -1,6 +1,27 @@
1#ifndef _X86_64_KEXEC_H 1#ifndef _X86_64_KEXEC_H
2#define _X86_64_KEXEC_H 2#define _X86_64_KEXEC_H
3 3
4#define PA_CONTROL_PAGE 0
5#define VA_CONTROL_PAGE 1
6#define PA_PGD 2
7#define VA_PGD 3
8#define PA_PUD_0 4
9#define VA_PUD_0 5
10#define PA_PMD_0 6
11#define VA_PMD_0 7
12#define PA_PTE_0 8
13#define VA_PTE_0 9
14#define PA_PUD_1 10
15#define VA_PUD_1 11
16#define PA_PMD_1 12
17#define VA_PMD_1 13
18#define PA_PTE_1 14
19#define VA_PTE_1 15
20#define PA_TABLE_PAGE 16
21#define PAGES_NR 17
22
23#ifndef __ASSEMBLY__
24
4#include <linux/string.h> 25#include <linux/string.h>
5 26
6#include <asm/page.h> 27#include <asm/page.h>
@@ -64,4 +85,12 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
64 newregs->rip = (unsigned long)current_text_addr(); 85 newregs->rip = (unsigned long)current_text_addr();
65 } 86 }
66} 87}
88
89NORET_TYPE void
90relocate_kernel(unsigned long indirection_page,
91 unsigned long page_list,
92 unsigned long start_address) ATTRIB_NORET;
93
94#endif /* __ASSEMBLY__ */
95
67#endif /* _X86_64_KEXEC_H */ 96#endif /* _X86_64_KEXEC_H */
diff --git a/include/asm-x86_64/linkage.h b/include/asm-x86_64/linkage.h
index 291c2d01c44f..b5f39d0189ce 100644
--- a/include/asm-x86_64/linkage.h
+++ b/include/asm-x86_64/linkage.h
@@ -1,6 +1,6 @@
1#ifndef __ASM_LINKAGE_H 1#ifndef __ASM_LINKAGE_H
2#define __ASM_LINKAGE_H 2#define __ASM_LINKAGE_H
3 3
4/* Nothing to see here... */ 4#define __ALIGN .p2align 4,,15
5 5
6#endif 6#endif
diff --git a/include/asm-x86_64/mach_apic.h b/include/asm-x86_64/mach_apic.h
index 0acea44c9377..d33422450c00 100644
--- a/include/asm-x86_64/mach_apic.h
+++ b/include/asm-x86_64/mach_apic.h
@@ -16,7 +16,6 @@
16 16
17#define INT_DELIVERY_MODE (genapic->int_delivery_mode) 17#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
18#define INT_DEST_MODE (genapic->int_dest_mode) 18#define INT_DEST_MODE (genapic->int_dest_mode)
19#define INT_DELIVERY_DEST (genapic->int_delivery_dest)
20#define TARGET_CPUS (genapic->target_cpus()) 19#define TARGET_CPUS (genapic->target_cpus())
21#define apic_id_registered (genapic->apic_id_registered) 20#define apic_id_registered (genapic->apic_id_registered)
22#define init_apic_ldr (genapic->init_apic_ldr) 21#define init_apic_ldr (genapic->init_apic_ldr)
diff --git a/include/asm-x86_64/mce.h b/include/asm-x86_64/mce.h
index d13687dfd691..5a11146d6d9c 100644
--- a/include/asm-x86_64/mce.h
+++ b/include/asm-x86_64/mce.h
@@ -99,6 +99,8 @@ static inline void mce_amd_feature_init(struct cpuinfo_x86 *c)
99} 99}
100#endif 100#endif
101 101
102void mce_log_therm_throt_event(unsigned int cpu, __u64 status);
103
102extern atomic_t mce_entry; 104extern atomic_t mce_entry;
103 105
104#endif 106#endif
diff --git a/include/asm-x86_64/mmx.h b/include/asm-x86_64/mmx.h
deleted file mode 100644
index 46b71da99869..000000000000
--- a/include/asm-x86_64/mmx.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef _ASM_MMX_H
2#define _ASM_MMX_H
3
4/*
5 * MMX 3Dnow! helper operations
6 */
7
8#include <linux/types.h>
9
10extern void *_mmx_memcpy(void *to, const void *from, size_t size);
11extern void mmx_clear_page(void *page);
12extern void mmx_copy_page(void *to, void *from);
13
14#endif
diff --git a/include/asm-x86_64/mpspec.h b/include/asm-x86_64/mpspec.h
index 14fc3ddd9031..017fddb61dc5 100644
--- a/include/asm-x86_64/mpspec.h
+++ b/include/asm-x86_64/mpspec.h
@@ -159,13 +159,7 @@ struct mpc_config_lintsrc
159#define MAX_MP_BUSSES 256 159#define MAX_MP_BUSSES 256
160/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */ 160/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */
161#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4) 161#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
162enum mp_bustype { 162extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
163 MP_BUS_ISA = 1,
164 MP_BUS_EISA,
165 MP_BUS_PCI,
166 MP_BUS_MCA
167};
168extern unsigned char mp_bus_id_to_type [MAX_MP_BUSSES];
169extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES]; 163extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
170 164
171extern unsigned int boot_cpu_physical_apicid; 165extern unsigned int boot_cpu_physical_apicid;
@@ -178,18 +172,15 @@ extern int mp_irq_entries;
178extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES]; 172extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES];
179extern int mpc_default_type; 173extern int mpc_default_type;
180extern unsigned long mp_lapic_addr; 174extern unsigned long mp_lapic_addr;
181extern int pic_mode;
182 175
183#ifdef CONFIG_ACPI 176#ifdef CONFIG_ACPI
184extern void mp_register_lapic (u8 id, u8 enabled); 177extern void mp_register_lapic (u8 id, u8 enabled);
185extern void mp_register_lapic_address (u64 address); 178extern void mp_register_lapic_address (u64 address);
186 179
187#ifdef CONFIG_X86_IO_APIC
188extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base); 180extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base);
189extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); 181extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi);
190extern void mp_config_acpi_legacy_irqs (void); 182extern void mp_config_acpi_legacy_irqs (void);
191extern int mp_register_gsi (u32 gsi, int triggering, int polarity); 183extern int mp_register_gsi (u32 gsi, int triggering, int polarity);
192#endif /*CONFIG_X86_IO_APIC*/
193#endif 184#endif
194 185
195extern int using_apic_timer; 186extern int using_apic_timer;
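
The four-way bus-type enum collapses into a single "not PCI" bitmap, which is the only distinction the x86-64 interrupt-routing code actually makes. A minimal caller sketch, assuming test_bit() from <linux/bitops.h>:

static inline int bus_is_pci(int busnum)
{
	/* ISA/EISA/MCA buses set their bit; PCI buses leave it clear */
	return !test_bit(busnum, mp_bus_not_pci);
}
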
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index 10f8b51cec8b..37e194169fac 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -66,14 +66,25 @@
66#define rdtscl(low) \ 66#define rdtscl(low) \
67 __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx") 67 __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
68 68
69#define rdtscp(low,high,aux) \
70 asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
71
69#define rdtscll(val) do { \ 72#define rdtscll(val) do { \
70 unsigned int __a,__d; \ 73 unsigned int __a,__d; \
71 asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \ 74 asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
72 (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \ 75 (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
73} while(0) 76} while(0)
74 77
78#define rdtscpll(val, aux) do { \
79 unsigned long __a, __d; \
80 asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
81 (val) = (__d << 32) | __a; \
82} while (0)
83
75#define write_tsc(val1,val2) wrmsr(0x10, val1, val2) 84#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
76 85
86#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
87
77#define rdpmc(counter,low,high) \ 88#define rdpmc(counter,low,high) \
78 __asm__ __volatile__("rdpmc" \ 89 __asm__ __volatile__("rdpmc" \
79 : "=a" (low), "=d" (high) \ 90 : "=a" (low), "=d" (high) \
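
The new macros wrap RDTSCP, emitted as the raw bytes 0f 01 f9 for assemblers that predate the mnemonic: one instruction returns the TSC together with IA32_TSC_AUX (MSR 0xc0000103), which write_rdtscp_aux() lets the kernel seed, typically with the CPU number. A usage sketch, assuming a CPU that advertises RDTSCP:

static inline unsigned long tsc_and_cpu(unsigned int *cpu)
{
	unsigned long tsc;

	/* timestamp and originating CPU in one shot, with no race
	 * against migration between two separate reads */
	rdtscpll(tsc, *cpu);
	return tsc;
}
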
diff --git a/include/asm-x86_64/mutex.h b/include/asm-x86_64/mutex.h
index 06fab6de2a88..16396b1de3e4 100644
--- a/include/asm-x86_64/mutex.h
+++ b/include/asm-x86_64/mutex.h
@@ -25,13 +25,9 @@ do { \
25 \ 25 \
26 __asm__ __volatile__( \ 26 __asm__ __volatile__( \
27 LOCK_PREFIX " decl (%%rdi) \n" \ 27 LOCK_PREFIX " decl (%%rdi) \n" \
28 " js 2f \n" \ 28 " jns 1f \n" \
29 "1: \n" \ 29 " call "#fail_fn" \n" \
30 \ 30 "1:" \
31 LOCK_SECTION_START("") \
32 "2: call "#fail_fn" \n" \
33 " jmp 1b \n" \
34 LOCK_SECTION_END \
35 \ 31 \
36 :"=D" (dummy) \ 32 :"=D" (dummy) \
37 : "D" (v) \ 33 : "D" (v) \
@@ -75,13 +71,9 @@ do { \
75 \ 71 \
76 __asm__ __volatile__( \ 72 __asm__ __volatile__( \
77 LOCK_PREFIX " incl (%%rdi) \n" \ 73 LOCK_PREFIX " incl (%%rdi) \n" \
78 " jle 2f \n" \ 74 " jg 1f \n" \
79 "1: \n" \ 75 " call "#fail_fn" \n" \
80 \ 76 "1: " \
81 LOCK_SECTION_START("") \
82 "2: call "#fail_fn" \n" \
83 " jmp 1b \n" \
84 LOCK_SECTION_END \
85 \ 77 \
86 :"=D" (dummy) \ 78 :"=D" (dummy) \
87 : "D" (v) \ 79 : "D" (v) \
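
The fastpaths lose the out-of-line LOCK_SECTION trampoline ("js 2f ... jmp 1b") in favour of a straight-line forward branch over the slowpath call, which unwinds and predicts better. A C model of the two paths (illustrative only; the real code keeps the counter address in %rdi):

static inline void mutex_lock_model(atomic_t *v, void (*fail_fn)(atomic_t *))
{
	if (atomic_dec_return(v) < 0)	/* the "decl; jns 1f" not-taken case */
		fail_fn(v);
}

static inline void mutex_unlock_model(atomic_t *v, void (*fail_fn)(atomic_t *))
{
	if (atomic_inc_return(v) <= 0)	/* the "incl; jg 1f" not-taken case */
		fail_fn(v);
}
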
diff --git a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h
index efb45c894d76..cbf2669bca71 100644
--- a/include/asm-x86_64/nmi.h
+++ b/include/asm-x86_64/nmi.h
@@ -7,24 +7,13 @@
7#include <linux/pm.h> 7#include <linux/pm.h>
8#include <asm/io.h> 8#include <asm/io.h>
9 9
10struct pt_regs;
11
12typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
13
14/**
15 * set_nmi_callback
16 *
17 * Set a handler for an NMI. Only one handler may be
18 * set. Return 1 if the NMI was handled.
19 */
20void set_nmi_callback(nmi_callback_t callback);
21
22/** 10/**
23 * unset_nmi_callback 11 * do_nmi_callback
24 * 12 *
25 * Remove the handler previously set. 13 * Check to see if a callback exists and execute it. Return 1
14 * if the handler exists and was handled successfully.
26 */ 15 */
27void unset_nmi_callback(void); 16int do_nmi_callback(struct pt_regs *regs, int cpu);
28 17
29#ifdef CONFIG_PM 18#ifdef CONFIG_PM
30 19
@@ -48,25 +37,32 @@ static inline void unset_nmi_pm_callback(struct pm_dev * dev)
48#endif /* CONFIG_PM */ 37#endif /* CONFIG_PM */
49 38
50extern void default_do_nmi(struct pt_regs *); 39extern void default_do_nmi(struct pt_regs *);
51extern void die_nmi(char *str, struct pt_regs *regs); 40extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
52 41
53#define get_nmi_reason() inb(0x61) 42#define get_nmi_reason() inb(0x61)
54 43
55extern int panic_on_timeout; 44extern int panic_on_timeout;
56extern int unknown_nmi_panic; 45extern int unknown_nmi_panic;
46extern int nmi_watchdog_enabled;
57 47
58extern int check_nmi_watchdog(void); 48extern int check_nmi_watchdog(void);
59 49extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
60extern void setup_apic_nmi_watchdog (void); 50extern int avail_to_resrv_perfctr_nmi(unsigned int);
61extern int reserve_lapic_nmi(void); 51extern int reserve_perfctr_nmi(unsigned int);
62extern void release_lapic_nmi(void); 52extern void release_perfctr_nmi(unsigned int);
53extern int reserve_evntsel_nmi(unsigned int);
54extern void release_evntsel_nmi(unsigned int);
55
56extern void setup_apic_nmi_watchdog (void *);
57extern void stop_apic_nmi_watchdog (void *);
63extern void disable_timer_nmi_watchdog(void); 58extern void disable_timer_nmi_watchdog(void);
64extern void enable_timer_nmi_watchdog(void); 59extern void enable_timer_nmi_watchdog(void);
65extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); 60extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
66 61
67extern void nmi_watchdog_default(void); 62extern void nmi_watchdog_default(void);
68extern int setup_nmi_watchdog(char *); 63extern int setup_nmi_watchdog(char *);
69 64
65extern atomic_t nmi_active;
70extern unsigned int nmi_watchdog; 66extern unsigned int nmi_watchdog;
71#define NMI_DEFAULT -1 67#define NMI_DEFAULT -1
72#define NMI_NONE 0 68#define NMI_NONE 0
diff --git a/include/asm-x86_64/pci-direct.h b/include/asm-x86_64/pci-direct.h
index 036b6ca5b53b..eba9cb471df3 100644
--- a/include/asm-x86_64/pci-direct.h
+++ b/include/asm-x86_64/pci-direct.h
@@ -2,47 +2,15 @@
2#define ASM_PCI_DIRECT_H 1 2#define ASM_PCI_DIRECT_H 1
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <asm/io.h>
6 5
7/* Direct PCI access. This is used for PCI accesses in early boot before 6/* Direct PCI access. This is used for PCI accesses in early boot before
8 the PCI subsystem works. */ 7 the PCI subsystem works. */
9 8
10#define PDprintk(x...) 9extern u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset);
10extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset);
11extern u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset);
12extern void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val);
11 13
12static inline u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset) 14extern int early_pci_allowed(void);
13{
14 u32 v;
15 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
16 v = inl(0xcfc);
17 if (v != 0xffffffff)
18 PDprintk("%x reading 4 from %x: %x\n", slot, offset, v);
19 return v;
20}
21
22static inline u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset)
23{
24 u8 v;
25 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
26 v = inb(0xcfc + (offset&3));
27 PDprintk("%x reading 1 from %x: %x\n", slot, offset, v);
28 return v;
29}
30
31static inline u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset)
32{
33 u16 v;
34 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
35 v = inw(0xcfc + (offset&2));
36 PDprintk("%x reading 2 from %x: %x\n", slot, offset, v);
37 return v;
38}
39
40static inline void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset,
41 u32 val)
42{
43 PDprintk("%x writing to %x: %x\n", slot, offset, val);
44 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
45 outl(val, 0xcfc);
46}
47 15
48#endif 16#endif
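
The accessors become ordinary extern functions gated by early_pci_allowed() instead of header inlines. The deleted bodies all formed the same configuration mechanism #1 address; for reference, a sketch of that formation:

/* type-1 config cycle: write this dword to port 0xcf8, then transfer
 * the data through 0xcfc (the offset's low bits select the byte/word
 * lane, as the deleted byte and word readers did) */
#define PCI_CONF1_ADDRESS(bus, slot, func, off) \
	(0x80000000 | ((bus) << 16) | ((slot) << 11) | ((func) << 8) | (off))
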
diff --git a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h
index b47c3df9ed1d..14996d962bac 100644
--- a/include/asm-x86_64/pda.h
+++ b/include/asm-x86_64/pda.h
@@ -9,20 +9,24 @@
9 9
10/* Per processor datastructure. %gs points to it while the kernel runs */ 10/* Per processor datastructure. %gs points to it while the kernel runs */
11struct x8664_pda { 11struct x8664_pda {
12 struct task_struct *pcurrent; /* Current process */ 12 struct task_struct *pcurrent; /* 0 Current process */
13 unsigned long data_offset; /* Per cpu data offset from linker address */ 13 unsigned long data_offset; /* 8 Per cpu data offset from linker
14 unsigned long kernelstack; /* top of kernel stack for current */ 14 address */
15 unsigned long oldrsp; /* user rsp for system call */ 15 unsigned long kernelstack; /* 16 top of kernel stack for current */
16#if DEBUG_STKSZ > EXCEPTION_STKSZ 16 unsigned long oldrsp; /* 24 user rsp for system call */
17 unsigned long debugstack; /* #DB/#BP stack. */ 17 int irqcount; /* 32 Irq nesting counter. Starts with -1 */
18 int cpunumber; /* 36 Logical CPU number */
19#ifdef CONFIG_CC_STACKPROTECTOR
20 unsigned long stack_canary; /* 40 stack canary value */
21 /* gcc-ABI: this canary MUST be at
22 offset 40!!! */
18#endif 23#endif
19 int irqcount; /* Irq nesting counter. Starts with -1 */ 24 char *irqstackptr;
20 int cpunumber; /* Logical CPU number */
21 char *irqstackptr; /* top of irqstack */
22 int nodenumber; /* number of current node */ 25 int nodenumber; /* number of current node */
23 unsigned int __softirq_pending; 26 unsigned int __softirq_pending;
24 unsigned int __nmi_count; /* number of NMI on this CPUs */ 27 unsigned int __nmi_count; /* number of NMI on this CPUs */
25 int mmu_state; 28 short mmu_state;
29 short isidle;
26 struct mm_struct *active_mm; 30 struct mm_struct *active_mm;
27 unsigned apic_timer_irqs; 31 unsigned apic_timer_irqs;
28} ____cacheline_aligned_in_smp; 32} ____cacheline_aligned_in_smp;
@@ -36,44 +40,69 @@ extern struct x8664_pda boot_cpu_pda[];
36 * There is no fast way to get the base address of the PDA, all the accesses 40 * There is no fast way to get the base address of the PDA, all the accesses
37 * have to mention %fs/%gs. So it needs to be done this Torvaldian way. 41 * have to mention %fs/%gs. So it needs to be done this Torvaldian way.
38 */ 42 */
39#define sizeof_field(type,field) (sizeof(((type *)0)->field)) 43extern void __bad_pda_field(void) __attribute__((noreturn));
40#define typeof_field(type,field) typeof(((type *)0)->field)
41 44
42extern void __bad_pda_field(void); 45/*
46 * proxy_pda doesn't actually exist, but tell gcc it is accessed for
47 * all PDA accesses so it gets read/write dependencies right.
48 */
49extern struct x8664_pda _proxy_pda;
43 50
44#define pda_offset(field) offsetof(struct x8664_pda, field) 51#define pda_offset(field) offsetof(struct x8664_pda, field)
45 52
46#define pda_to_op(op,field,val) do { \ 53#define pda_to_op(op,field,val) do { \
47 typedef typeof_field(struct x8664_pda, field) T__; \ 54 typedef typeof(_proxy_pda.field) T__; \
48 switch (sizeof_field(struct x8664_pda, field)) { \ 55 if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \
49case 2: \ 56 switch (sizeof(_proxy_pda.field)) { \
50asm volatile(op "w %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \ 57 case 2: \
51case 4: \ 58 asm(op "w %1,%%gs:%c2" : \
52asm volatile(op "l %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \ 59 "+m" (_proxy_pda.field) : \
53case 8: \ 60 "ri" ((T__)val), \
54asm volatile(op "q %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \ 61 "i"(pda_offset(field))); \
55 default: __bad_pda_field(); \ 62 break; \
56 } \ 63 case 4: \
64 asm(op "l %1,%%gs:%c2" : \
65 "+m" (_proxy_pda.field) : \
66 "ri" ((T__)val), \
67 "i" (pda_offset(field))); \
68 break; \
69 case 8: \
70 asm(op "q %1,%%gs:%c2": \
71 "+m" (_proxy_pda.field) : \
72 "ri" ((T__)val), \
73 "i"(pda_offset(field))); \
74 break; \
75 default: \
76 __bad_pda_field(); \
77 } \
57 } while (0) 78 } while (0)
58 79
59/* 80#define pda_from_op(op,field) ({ \
60 * AK: PDA read accesses should be neither volatile nor have an memory clobber. 81 typeof(_proxy_pda.field) ret__; \
61 * Unfortunately removing them causes all hell to break lose currently. 82 switch (sizeof(_proxy_pda.field)) { \
62 */ 83 case 2: \
63#define pda_from_op(op,field) ({ \ 84 asm(op "w %%gs:%c1,%0" : \
64 typeof_field(struct x8664_pda, field) ret__; \ 85 "=r" (ret__) : \
65 switch (sizeof_field(struct x8664_pda, field)) { \ 86 "i" (pda_offset(field)), \
66case 2: \ 87 "m" (_proxy_pda.field)); \
67asm volatile(op "w %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\ 88 break; \
68case 4: \ 89 case 4: \
69asm volatile(op "l %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\ 90 asm(op "l %%gs:%c1,%0": \
70case 8: \ 91 "=r" (ret__): \
71asm volatile(op "q %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\ 92 "i" (pda_offset(field)), \
72 default: __bad_pda_field(); \ 93 "m" (_proxy_pda.field)); \
73 } \ 94 break; \
95 case 8: \
96 asm(op "q %%gs:%c1,%0": \
97 "=r" (ret__) : \
98 "i" (pda_offset(field)), \
99 "m" (_proxy_pda.field)); \
100 break; \
101 default: \
102 __bad_pda_field(); \
103 } \
74 ret__; }) 104 ret__; })
75 105
76
77#define read_pda(field) pda_from_op("mov",field) 106#define read_pda(field) pda_from_op("mov",field)
78#define write_pda(field,val) pda_to_op("mov",field,val) 107#define write_pda(field,val) pda_to_op("mov",field,val)
79#define add_pda(field,val) pda_to_op("add",field,val) 108#define add_pda(field,val) pda_to_op("add",field,val)
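
Rather than marking every PDA access volatile with a blanket "memory" clobber, the rewritten macros tell gcc that each access touches the fictitious _proxy_pda object, so ordinary dependency tracking orders PDA accesses correctly while unrelated loads and stores stay schedulable. Call sites are unchanged; a sketch:

static inline void pda_usage_example(void)
{
	int cpu = read_pda(cpunumber);	/* movl %gs:<offset>,<reg> */

	add_pda(apic_timer_irqs, 1);	/* addl $1,%gs:<offset>; the "+m"
					 * on _proxy_pda carries the
					 * read/write dependency */
	(void)cpu;
}
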
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index bffb2f886a51..285756010c51 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -11,6 +11,16 @@
11 11
12#include <asm/pda.h> 12#include <asm/pda.h>
13 13
14#ifdef CONFIG_MODULES
15# define PERCPU_MODULE_RESERVE 8192
16#else
17# define PERCPU_MODULE_RESERVE 0
18#endif
19
20#define PERCPU_ENOUGH_ROOM \
21 (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
22 PERCPU_MODULE_RESERVE)
23
14#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset) 24#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
15#define __my_cpu_offset() read_pda(data_offset) 25#define __my_cpu_offset() read_pda(data_offset)
16 26
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 51eba2395171..6899e770b173 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -21,12 +21,9 @@ extern unsigned long __supported_pte_mask;
21 21
22#define swapper_pg_dir init_level4_pgt 22#define swapper_pg_dir init_level4_pgt
23 23
24extern int nonx_setup(char *str);
25extern void paging_init(void); 24extern void paging_init(void);
26extern void clear_kernel_mapping(unsigned long addr, unsigned long size); 25extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
27 26
28extern unsigned long pgkern_mask;
29
30/* 27/*
31 * ZERO_PAGE is a global shared page that is always zero: used 28 * ZERO_PAGE is a global shared page that is always zero: used
32 * for zero-mapped memory areas etc.. 29 * for zero-mapped memory areas etc..
@@ -265,7 +262,7 @@ static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
265#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT) 262#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
266static inline int pte_user(pte_t pte) { return pte_val(pte) & _PAGE_USER; } 263static inline int pte_user(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
267static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; } 264static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
268static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; } 265static inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_NX); }
269static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } 266static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
270static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } 267static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
271static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } 268static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
@@ -278,11 +275,12 @@ static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) &
278static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; } 275static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
279static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; } 276static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
280static inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; } 277static inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
281static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; } 278static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_NX)); return pte; }
282static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; } 279static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
283static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } 280static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
284static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; } 281static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
285static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_PSE)); return pte; } 282static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_PSE)); return pte; }
283static inline pte_t pte_clrhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_PSE)); return pte; }
286 284
287struct vm_area_struct; 285struct vm_area_struct;
288 286
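
pte_exec() and pte_mkexec() previously keyed off _PAGE_USER, which predates NX; they now test and clear the hardware no-execute bit, and the pgkern_mask/nonx_setup declarations disappear from the header. The observable contract after the change, as a sketch:

static inline pte_t make_executable(pte_t pte)
{
	pte = pte_mkexec(pte);	/* clears _PAGE_NX; _PAGE_USER untouched */
	BUG_ON(!pte_exec(pte));	/* "executable" now means "NX bit clear" */
	return pte;
}
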
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index 038fe1f47e6f..b73d0c76613c 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -51,10 +51,8 @@ extern unsigned long long monotonic_base;
51extern int sysctl_vsyscall; 51extern int sysctl_vsyscall;
52extern int nohpet; 52extern int nohpet;
53extern unsigned long vxtime_hz; 53extern unsigned long vxtime_hz;
54extern void time_init_gtod(void);
54 55
55extern int numa_setup(char *opt);
56
57extern int setup_early_printk(char *);
58extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2))); 56extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2)));
59 57
60extern void early_identify_cpu(struct cpuinfo_x86 *c); 58extern void early_identify_cpu(struct cpuinfo_x86 *c);
@@ -91,7 +89,7 @@ extern void syscall32_cpu_init(void);
91 89
92extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end); 90extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end);
93 91
94extern void check_ioapic(void); 92extern void early_quirks(void);
95extern void check_efer(void); 93extern void check_efer(void);
96 94
97extern int unhandled_signal(struct task_struct *tsk, int sig); 95extern int unhandled_signal(struct task_struct *tsk, int sig);
@@ -103,13 +101,7 @@ extern void select_idle_routine(const struct cpuinfo_x86 *c);
103extern unsigned long table_start, table_end; 101extern unsigned long table_start, table_end;
104 102
105extern int exception_trace; 103extern int exception_trace;
106extern int using_apic_timer;
107extern int disable_apic;
108extern unsigned cpu_khz; 104extern unsigned cpu_khz;
109extern int ioapic_force;
110extern int skip_ioapic_setup;
111extern int acpi_ht;
112extern int acpi_disabled;
113 105
114extern void no_iommu_init(void); 106extern void no_iommu_init(void);
115extern int force_iommu, no_iommu; 107extern int force_iommu, no_iommu;
@@ -131,7 +123,8 @@ extern int fix_aperture;
131 123
132extern int reboot_force; 124extern int reboot_force;
133extern int notsc_setup(char *); 125extern int notsc_setup(char *);
134extern int setup_additional_cpus(char *); 126
127extern int gsi_irq_sharing(int gsi);
135 128
136extern void smp_local_timer_interrupt(struct pt_regs * regs); 129extern void smp_local_timer_interrupt(struct pt_regs * regs);
137 130
diff --git a/include/asm-x86_64/rwlock.h b/include/asm-x86_64/rwlock.h
index dea0e9459264..72aeebed920b 100644
--- a/include/asm-x86_64/rwlock.h
+++ b/include/asm-x86_64/rwlock.h
@@ -18,69 +18,9 @@
18#ifndef _ASM_X86_64_RWLOCK_H 18#ifndef _ASM_X86_64_RWLOCK_H
19#define _ASM_X86_64_RWLOCK_H 19#define _ASM_X86_64_RWLOCK_H
20 20
21#include <linux/stringify.h>
22
23#define RW_LOCK_BIAS 0x01000000 21#define RW_LOCK_BIAS 0x01000000
24#define RW_LOCK_BIAS_STR "0x01000000" 22#define RW_LOCK_BIAS_STR "0x01000000"
25
26#define __build_read_lock_ptr(rw, helper) \
27 asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t" \
28 "js 2f\n" \
29 "1:\n" \
30 LOCK_SECTION_START("") \
31 "2:\tcall " helper "\n\t" \
32 "jmp 1b\n" \
33 LOCK_SECTION_END \
34 ::"a" (rw) : "memory")
35
36#define __build_read_lock_const(rw, helper) \
37 asm volatile(LOCK_PREFIX "subl $1,%0\n\t" \
38 "js 2f\n" \
39 "1:\n" \
40 LOCK_SECTION_START("") \
41 "2:\tpushq %%rax\n\t" \
42 "leaq %0,%%rax\n\t" \
43 "call " helper "\n\t" \
44 "popq %%rax\n\t" \
45 "jmp 1b\n" \
46 LOCK_SECTION_END \
47 :"=m" (*((volatile int *)rw))::"memory")
48
49#define __build_read_lock(rw, helper) do { \
50 if (__builtin_constant_p(rw)) \
51 __build_read_lock_const(rw, helper); \
52 else \
53 __build_read_lock_ptr(rw, helper); \
54 } while (0)
55
56#define __build_write_lock_ptr(rw, helper) \
57 asm volatile(LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
58 "jnz 2f\n" \
59 "1:\n" \
60 LOCK_SECTION_START("") \
61 "2:\tcall " helper "\n\t" \
62 "jmp 1b\n" \
63 LOCK_SECTION_END \
64 ::"a" (rw) : "memory")
65
66#define __build_write_lock_const(rw, helper) \
67 asm volatile(LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
68 "jnz 2f\n" \
69 "1:\n" \
70 LOCK_SECTION_START("") \
71 "2:\tpushq %%rax\n\t" \
72 "leaq %0,%%rax\n\t" \
73 "call " helper "\n\t" \
74 "popq %%rax\n\t" \
75 "jmp 1b\n" \
76 LOCK_SECTION_END \
77 :"=m" (*((volatile long *)rw))::"memory")
78 23
79#define __build_write_lock(rw, helper) do { \ 24/* Actual code is in asm/spinlock.h or in arch/x86_64/lib/rwlock.S */
80 if (__builtin_constant_p(rw)) \
81 __build_write_lock_const(rw, helper); \
82 else \
83 __build_write_lock_ptr(rw, helper); \
84 } while (0)
85 25
86#endif 26#endif
diff --git a/include/asm-x86_64/segment.h b/include/asm-x86_64/segment.h
index d4bed33fb32c..334ddcdd8f92 100644
--- a/include/asm-x86_64/segment.h
+++ b/include/asm-x86_64/segment.h
@@ -20,15 +20,16 @@
20#define __USER_CS 0x33 /* 6*8+3 */ 20#define __USER_CS 0x33 /* 6*8+3 */
21#define __USER32_DS __USER_DS 21#define __USER32_DS __USER_DS
22 22
23#define GDT_ENTRY_TLS 1
24#define GDT_ENTRY_TSS 8 /* needs two entries */ 23#define GDT_ENTRY_TSS 8 /* needs two entries */
25#define GDT_ENTRY_LDT 10 /* needs two entries */ 24#define GDT_ENTRY_LDT 10 /* needs two entries */
26#define GDT_ENTRY_TLS_MIN 12 25#define GDT_ENTRY_TLS_MIN 12
27#define GDT_ENTRY_TLS_MAX 14 26#define GDT_ENTRY_TLS_MAX 14
28/* 15 free */
29 27
30#define GDT_ENTRY_TLS_ENTRIES 3 28#define GDT_ENTRY_TLS_ENTRIES 3
31 29
30#define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
31#define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
32
32/* TLS indexes for 64bit - hardcoded in arch_prctl */ 33/* TLS indexes for 64bit - hardcoded in arch_prctl */
33#define FS_TLS 0 34#define FS_TLS 0
34#define GS_TLS 1 35#define GS_TLS 1
diff --git a/include/asm-x86_64/semaphore.h b/include/asm-x86_64/semaphore.h
index 064df08b9a0f..107bd90429e8 100644
--- a/include/asm-x86_64/semaphore.h
+++ b/include/asm-x86_64/semaphore.h
@@ -107,12 +107,9 @@ static inline void down(struct semaphore * sem)
107 __asm__ __volatile__( 107 __asm__ __volatile__(
108 "# atomic down operation\n\t" 108 "# atomic down operation\n\t"
109 LOCK_PREFIX "decl %0\n\t" /* --sem->count */ 109 LOCK_PREFIX "decl %0\n\t" /* --sem->count */
110 "js 2f\n" 110 "jns 1f\n\t"
111 "1:\n" 111 "call __down_failed\n"
112 LOCK_SECTION_START("") 112 "1:"
113 "2:\tcall __down_failed\n\t"
114 "jmp 1b\n"
115 LOCK_SECTION_END
116 :"=m" (sem->count) 113 :"=m" (sem->count)
117 :"D" (sem) 114 :"D" (sem)
118 :"memory"); 115 :"memory");
@@ -130,14 +127,11 @@ static inline int down_interruptible(struct semaphore * sem)
130 127
131 __asm__ __volatile__( 128 __asm__ __volatile__(
132 "# atomic interruptible down operation\n\t" 129 "# atomic interruptible down operation\n\t"
130 "xorl %0,%0\n\t"
133 LOCK_PREFIX "decl %1\n\t" /* --sem->count */ 131 LOCK_PREFIX "decl %1\n\t" /* --sem->count */
134 "js 2f\n\t" 132 "jns 2f\n\t"
135 "xorl %0,%0\n" 133 "call __down_failed_interruptible\n"
136 "1:\n" 134 "2:\n"
137 LOCK_SECTION_START("")
138 "2:\tcall __down_failed_interruptible\n\t"
139 "jmp 1b\n"
140 LOCK_SECTION_END
141 :"=a" (result), "=m" (sem->count) 135 :"=a" (result), "=m" (sem->count)
142 :"D" (sem) 136 :"D" (sem)
143 :"memory"); 137 :"memory");
@@ -154,14 +148,11 @@ static inline int down_trylock(struct semaphore * sem)
154 148
155 __asm__ __volatile__( 149 __asm__ __volatile__(
156 "# atomic interruptible down operation\n\t" 150 "# atomic interruptible down operation\n\t"
151 "xorl %0,%0\n\t"
157 LOCK_PREFIX "decl %1\n\t" /* --sem->count */ 152 LOCK_PREFIX "decl %1\n\t" /* --sem->count */
158 "js 2f\n\t" 153 "jns 2f\n\t"
159 "xorl %0,%0\n" 154 "call __down_failed_trylock\n\t"
160 "1:\n" 155 "2:\n"
161 LOCK_SECTION_START("")
162 "2:\tcall __down_failed_trylock\n\t"
163 "jmp 1b\n"
164 LOCK_SECTION_END
165 :"=a" (result), "=m" (sem->count) 156 :"=a" (result), "=m" (sem->count)
166 :"D" (sem) 157 :"D" (sem)
167 :"memory","cc"); 158 :"memory","cc");
@@ -179,12 +170,9 @@ static inline void up(struct semaphore * sem)
179 __asm__ __volatile__( 170 __asm__ __volatile__(
180 "# atomic up operation\n\t" 171 "# atomic up operation\n\t"
181 LOCK_PREFIX "incl %0\n\t" /* ++sem->count */ 172 LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
182 "jle 2f\n" 173 "jg 1f\n\t"
183 "1:\n" 174 "call __up_wakeup\n"
184 LOCK_SECTION_START("") 175 "1:"
185 "2:\tcall __up_wakeup\n\t"
186 "jmp 1b\n"
187 LOCK_SECTION_END
188 :"=m" (sem->count) 176 :"=m" (sem->count)
189 :"D" (sem) 177 :"D" (sem)
190 :"memory"); 178 :"memory");
diff --git a/include/asm-x86_64/signal.h b/include/asm-x86_64/signal.h
index 3ede2a61973a..4581f978b299 100644
--- a/include/asm-x86_64/signal.h
+++ b/include/asm-x86_64/signal.h
@@ -24,10 +24,6 @@ typedef struct {
24} sigset_t; 24} sigset_t;
25 25
26 26
27struct pt_regs;
28asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
29
30
31#else 27#else
32/* Here we must cater to libcs that poke about in kernel headers. */ 28/* Here we must cater to libcs that poke about in kernel headers. */
33 29
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index ce97f65e1d10..d6b7c057edba 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -4,27 +4,18 @@
4/* 4/*
5 * We need the APIC definitions automatically as part of 'smp.h' 5 * We need the APIC definitions automatically as part of 'smp.h'
6 */ 6 */
7#ifndef __ASSEMBLY__
8#include <linux/threads.h> 7#include <linux/threads.h>
9#include <linux/cpumask.h> 8#include <linux/cpumask.h>
10#include <linux/bitops.h> 9#include <linux/bitops.h>
11extern int disable_apic; 10extern int disable_apic;
12#endif
13 11
14#ifdef CONFIG_X86_LOCAL_APIC
15#ifndef __ASSEMBLY__
16#include <asm/fixmap.h> 12#include <asm/fixmap.h>
17#include <asm/mpspec.h> 13#include <asm/mpspec.h>
18#ifdef CONFIG_X86_IO_APIC
19#include <asm/io_apic.h> 14#include <asm/io_apic.h>
20#endif
21#include <asm/apic.h> 15#include <asm/apic.h>
22#include <asm/thread_info.h> 16#include <asm/thread_info.h>
23#endif
24#endif
25 17
26#ifdef CONFIG_SMP 18#ifdef CONFIG_SMP
27#ifndef ASSEMBLY
28 19
29#include <asm/pda.h> 20#include <asm/pda.h>
30 21
@@ -42,7 +33,6 @@ extern cpumask_t cpu_initialized;
42 33
43extern void smp_alloc_memory(void); 34extern void smp_alloc_memory(void);
44extern volatile unsigned long smp_invalidate_needed; 35extern volatile unsigned long smp_invalidate_needed;
45extern int pic_mode;
46extern void lock_ipi_call_lock(void); 36extern void lock_ipi_call_lock(void);
47extern void unlock_ipi_call_lock(void); 37extern void unlock_ipi_call_lock(void);
48extern int smp_num_siblings; 38extern int smp_num_siblings;
@@ -74,20 +64,16 @@ static inline int hard_smp_processor_id(void)
74 return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID)); 64 return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
75} 65}
76 66
77extern int safe_smp_processor_id(void);
78extern int __cpu_disable(void); 67extern int __cpu_disable(void);
79extern void __cpu_die(unsigned int cpu); 68extern void __cpu_die(unsigned int cpu);
80extern void prefill_possible_map(void); 69extern void prefill_possible_map(void);
81extern unsigned num_processors; 70extern unsigned num_processors;
82extern unsigned disabled_cpus; 71extern unsigned disabled_cpus;
83 72
84#endif /* !ASSEMBLY */
85
86#define NO_PROC_ID 0xFF /* No processor magic marker */ 73#define NO_PROC_ID 0xFF /* No processor magic marker */
87 74
88#endif 75#endif
89 76
90#ifndef ASSEMBLY
91/* 77/*
92 * Some lowlevel functions might want to know about 78 * Some lowlevel functions might want to know about
93 * the real APIC ID <-> CPU # mapping. 79 * the real APIC ID <-> CPU # mapping.
@@ -109,11 +95,8 @@ static inline int cpu_present_to_apicid(int mps_cpu)
109 return BAD_APICID; 95 return BAD_APICID;
110} 96}
111 97
112#endif /* !ASSEMBLY */
113
114#ifndef CONFIG_SMP 98#ifndef CONFIG_SMP
115#define stack_smp_processor_id() 0 99#define stack_smp_processor_id() 0
116#define safe_smp_processor_id() 0
117#define cpu_logical_map(x) (x) 100#define cpu_logical_map(x) (x)
118#else 101#else
119#include <asm/thread_info.h> 102#include <asm/thread_info.h>
@@ -125,19 +108,23 @@ static inline int cpu_present_to_apicid(int mps_cpu)
125}) 108})
126#endif 109#endif
127 110
128#ifndef __ASSEMBLY__
129static __inline int logical_smp_processor_id(void) 111static __inline int logical_smp_processor_id(void)
130{ 112{
131 /* we don't want to mark this access volatile - bad code generation */ 113 /* we don't want to mark this access volatile - bad code generation */
132 return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR)); 114 return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
133} 115}
134#endif
135 116
136#ifdef CONFIG_SMP 117#ifdef CONFIG_SMP
137#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu] 118#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
138#else 119#else
139#define cpu_physical_id(cpu) boot_cpu_id 120#define cpu_physical_id(cpu) boot_cpu_id
140#endif 121static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
141 122 void *info, int retry, int wait)
123{
124 /* Disable interrupts here? */
125 func(info);
126 return 0;
127}
128#endif /* !CONFIG_SMP */
142#endif 129#endif
143 130
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 248a79f0eaff..be7a9e629fb2 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -16,31 +16,23 @@
16 * (the type definitions are in asm/spinlock_types.h) 16 * (the type definitions are in asm/spinlock_types.h)
17 */ 17 */
18 18
19#define __raw_spin_is_locked(x) \ 19static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
20 (*(volatile signed int *)(&(x)->slock) <= 0) 20{
21 21 return *(volatile signed int *)(&(lock)->slock) <= 0;
22#define __raw_spin_lock_string \ 22}
23 "\n1:\t" \
24 LOCK_PREFIX " ; decl %0\n\t" \
25 "js 2f\n" \
26 LOCK_SECTION_START("") \
27 "2:\t" \
28 "rep;nop\n\t" \
29 "cmpl $0,%0\n\t" \
30 "jle 2b\n\t" \
31 "jmp 1b\n" \
32 LOCK_SECTION_END
33
34#define __raw_spin_lock_string_up \
35 "\n\tdecl %0"
36
37#define __raw_spin_unlock_string \
38 "movl $1,%0" \
39 :"=m" (lock->slock) : : "memory"
40 23
41static inline void __raw_spin_lock(raw_spinlock_t *lock) 24static inline void __raw_spin_lock(raw_spinlock_t *lock)
42{ 25{
43 asm volatile(__raw_spin_lock_string : "=m" (lock->slock) : : "memory"); 26 asm volatile(
27 "\n1:\t"
28 LOCK_PREFIX " ; decl %0\n\t"
29 "jns 2f\n"
30 "3:\n"
31 "rep;nop\n\t"
32 "cmpl $0,%0\n\t"
33 "jle 3b\n\t"
34 "jmp 1b\n"
35 "2:\t" : "=m" (lock->slock) : : "memory");
44} 36}
45 37
46#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 38#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
@@ -49,7 +41,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
49{ 41{
50 int oldval; 42 int oldval;
51 43
52 __asm__ __volatile__( 44 asm volatile(
53 "xchgl %0,%1" 45 "xchgl %0,%1"
54 :"=q" (oldval), "=m" (lock->slock) 46 :"=q" (oldval), "=m" (lock->slock)
55 :"0" (0) : "memory"); 47 :"0" (0) : "memory");
@@ -59,13 +51,14 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
59 51
60static inline void __raw_spin_unlock(raw_spinlock_t *lock) 52static inline void __raw_spin_unlock(raw_spinlock_t *lock)
61{ 53{
62 __asm__ __volatile__( 54 asm volatile("movl $1,%0" :"=m" (lock->slock) :: "memory");
63 __raw_spin_unlock_string
64 );
65} 55}
66 56
67#define __raw_spin_unlock_wait(lock) \ 57static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
68 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) 58{
59 while (__raw_spin_is_locked(lock))
60 cpu_relax();
61}
69 62
70/* 63/*
71 * Read-write spinlocks, allowing multiple readers 64 * Read-write spinlocks, allowing multiple readers
@@ -79,26 +72,34 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
79 * 72 *
80 * On x86, we implement read-write locks as a 32-bit counter 73 * On x86, we implement read-write locks as a 32-bit counter
81 * with the high bit (sign) being the "contended" bit. 74 * with the high bit (sign) being the "contended" bit.
82 *
83 * The inline assembly is non-obvious. Think about it.
84 *
85 * Changed to use the same technique as rw semaphores. See
86 * semaphore.h for details. -ben
87 *
88 * the helpers are in arch/i386/kernel/semaphore.c
89 */ 75 */
90 76
91#define __raw_read_can_lock(x) ((int)(x)->lock > 0) 77static inline int __raw_read_can_lock(raw_rwlock_t *lock)
92#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) 78{
79 return (int)(lock)->lock > 0;
80}
81
82static inline int __raw_write_can_lock(raw_rwlock_t *lock)
83{
84 return (lock)->lock == RW_LOCK_BIAS;
85}
93 86
94static inline void __raw_read_lock(raw_rwlock_t *rw) 87static inline void __raw_read_lock(raw_rwlock_t *rw)
95{ 88{
96 __build_read_lock(rw, "__read_lock_failed"); 89 asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t"
90 "jns 1f\n"
91 "call __read_lock_failed\n"
92 "1:\n"
93 ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
97} 94}
98 95
99static inline void __raw_write_lock(raw_rwlock_t *rw) 96static inline void __raw_write_lock(raw_rwlock_t *rw)
100{ 97{
101 __build_write_lock(rw, "__write_lock_failed"); 98 asm volatile(LOCK_PREFIX "subl %1,(%0)\n\t"
99 "jz 1f\n"
100 "\tcall __write_lock_failed\n\t"
101 "1:\n"
102 ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
102} 103}
103 104
104static inline int __raw_read_trylock(raw_rwlock_t *lock) 105static inline int __raw_read_trylock(raw_rwlock_t *lock)
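
The string-pasting lock macros become real inline functions, with the spin loop branched to inline code rather than a LOCK_SECTION stub and the rwlock fastpaths calling __read_lock_failed/__write_lock_failed directly. The word encodings are unchanged; a C model of them (illustrative, not the implementation):

/* spinlock: slock == 1 unlocked, <= 0 locked.
 * rwlock:   lock starts at RW_LOCK_BIAS; each reader subtracts 1, a
 *           writer subtracts the whole bias, so > 0 admits readers
 *           and == 0 means write-locked. */
static inline int spin_locked_model(int slock)   { return slock <= 0; }
static inline int read_can_lock_model(int lock)  { return lock > 0; }
static inline int write_can_lock_model(int lock) { return lock == 0x01000000; }
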
diff --git a/include/asm-x86_64/stacktrace.h b/include/asm-x86_64/stacktrace.h
new file mode 100644
index 000000000000..5eb9799bef76
--- /dev/null
+++ b/include/asm-x86_64/stacktrace.h
@@ -0,0 +1,18 @@
1#ifndef _ASM_STACKTRACE_H
2#define _ASM_STACKTRACE_H 1
3
4/* Generic stack tracer with callbacks */
5
6struct stacktrace_ops {
7 void (*warning)(void *data, char *msg);
8 /* msg must contain %s for the symbol */
9 void (*warning_symbol)(void *data, char *msg, unsigned long symbol);
10 void (*address)(void *data, unsigned long address);
11 /* On negative return stop dumping */
12 int (*stack)(void *data, char *name);
13};
14
15void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack,
16 struct stacktrace_ops *ops, void *data);
17
18#endif
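
dump_trace() walks the task's stacks and reports through the callback table, so oops printing and other consumers can share one walker. A minimal consumer sketch; every name below is illustrative rather than from the tree:

struct trace_collect {
	unsigned long *buf;
	int pos, max;
};

static void collect_warning(void *data, char *msg) { }
static void collect_warning_symbol(void *data, char *msg, unsigned long sym) { }

static int collect_stack(void *data, char *name)
{
	return 0;	/* non-negative: keep dumping the next stack */
}

static void collect_address(void *data, unsigned long addr)
{
	struct trace_collect *tc = data;

	if (tc->pos < tc->max)
		tc->buf[tc->pos++] = addr;
}

static struct stacktrace_ops collect_ops = {
	.warning	= collect_warning,
	.warning_symbol	= collect_warning_symbol,
	.address	= collect_address,
	.stack		= collect_stack,
};

/* then: dump_trace(current, NULL, NULL, &collect_ops, &tc); */
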
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 6bf170bceae1..bd376bc8c4ab 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -14,12 +14,13 @@
14#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t" 14#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
15 15
16/* frame pointer must be last for get_wchan */ 16/* frame pointer must be last for get_wchan */
17#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t" 17#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
18#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\n\t" 18#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
19 19
20#define __EXTRA_CLOBBER \ 20#define __EXTRA_CLOBBER \
21 ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15" 21 ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
22 22
23/* Save restore flags to clear handle leaking NT */
23#define switch_to(prev,next,last) \ 24#define switch_to(prev,next,last) \
24 asm volatile(SAVE_CONTEXT \ 25 asm volatile(SAVE_CONTEXT \
25 "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ 26 "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
diff --git a/include/asm-x86_64/tce.h b/include/asm-x86_64/tce.h
index 53e9a68b3336..dbb047febc5e 100644
--- a/include/asm-x86_64/tce.h
+++ b/include/asm-x86_64/tce.h
@@ -24,7 +24,6 @@
 #ifndef _ASM_X86_64_TCE_H
 #define _ASM_X86_64_TCE_H
 
-extern void* tce_table_kva[];
 extern unsigned int specified_table_size;
 struct iommu_table;
 
diff --git a/include/asm-x86_64/therm_throt.h b/include/asm-x86_64/therm_throt.h
new file mode 100644
index 000000000000..5aac059007ba
--- /dev/null
+++ b/include/asm-x86_64/therm_throt.h
@@ -0,0 +1 @@
+#include <asm-i386/therm_throt.h>
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
index 2029b00351f3..787a08114b48 100644
--- a/include/asm-x86_64/thread_info.h
+++ b/include/asm-x86_64/thread_info.h
@@ -114,11 +114,14 @@ static inline struct thread_info *stack_thread_info(void)
 #define TIF_IRET		5	/* force IRET */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SECCOMP		8	/* secure computing */
+#define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal */
 /* 16 free */
 #define TIF_IA32		17	/* 32bit process */
 #define TIF_FORK		18	/* ret_from_fork */
 #define TIF_ABI_PENDING	19
 #define TIF_MEMDIE		20
+#define TIF_DEBUG		21	/* uses debug registers */
+#define TIF_IO_BITMAP		22	/* uses I/O bitmap */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
@@ -128,9 +131,12 @@ static inline struct thread_info *stack_thread_info(void)
 #define _TIF_IRET		(1<<TIF_IRET)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
+#define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_IA32		(1<<TIF_IA32)
 #define _TIF_FORK		(1<<TIF_FORK)
 #define _TIF_ABI_PENDING	(1<<TIF_ABI_PENDING)
+#define _TIF_DEBUG		(1<<TIF_DEBUG)
+#define _TIF_IO_BITMAP		(1<<TIF_IO_BITMAP)
 
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK \
@@ -138,6 +144,9 @@ static inline struct thread_info *stack_thread_info(void)
 /* work to do on any return to user space */
 #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
 
+/* flags to check in __switch_to() */
+#define _TIF_WORK_CTXSW (_TIF_DEBUG|_TIF_IO_BITMAP)
+
 #define PREEMPT_ACTIVE 0x10000000
 
 /*
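Editor's note: _TIF_WORK_CTXSW gives __switch_to() a single mask to test, so the common case pays nothing for debug-register or I/O-bitmap handling. A simplified, hypothetical sketch of such a check; handle_ctxsw_work() is an invented name:

	/* Hypothetical: only take the slow path when either task uses
	 * debug registers or an I/O bitmap. */
	if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW) ||
		     (task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW)))
		handle_ctxsw_work(prev_p, next_p);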
diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h
index d16d5b60f419..983bd296c81a 100644
--- a/include/asm-x86_64/tlbflush.h
+++ b/include/asm-x86_64/tlbflush.h
@@ -4,44 +4,44 @@
 #include <linux/mm.h>
 #include <asm/processor.h>
 
-#define __flush_tlb() \
-	do { \
-		unsigned long tmpreg; \
-		\
-		__asm__ __volatile__( \
-			"movq %%cr3, %0;  # flush TLB \n" \
-			"movq %0, %%cr3;  \n" \
-			: "=r" (tmpreg) \
-			:: "memory"); \
-	} while (0)
-
-/*
- * Global pages have to be flushed a bit differently. Not a real
- * performance problem because this does not happen often.
- */
-#define __flush_tlb_global() \
-	do { \
-		unsigned long tmpreg, cr4, cr4_orig; \
-		\
-		__asm__ __volatile__( \
-			"movq %%cr4, %2;  # turn off PGE \n" \
-			"movq %2, %1; \n" \
-			"andq %3, %1; \n" \
-			"movq %1, %%cr4;  \n" \
-			"movq %%cr3, %0;  # flush TLB \n" \
-			"movq %0, %%cr3; \n" \
-			"movq %2, %%cr4;  # turn PGE back on \n" \
-			: "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \
-			: "i" (~X86_CR4_PGE) \
-			: "memory"); \
-	} while (0)
-
-extern unsigned long pgkern_mask;
-
-#define __flush_tlb_all() __flush_tlb_global()
+static inline unsigned long get_cr3(void)
+{
+	unsigned long cr3;
+	asm volatile("mov %%cr3,%0" : "=r" (cr3));
+	return cr3;
+}
+
+static inline void set_cr3(unsigned long cr3)
+{
+	asm volatile("mov %0,%%cr3" :: "r" (cr3) : "memory");
+}
+
+static inline void __flush_tlb(void)
+{
+	set_cr3(get_cr3());
+}
+
+static inline unsigned long get_cr4(void)
+{
+	unsigned long cr4;
+	asm volatile("mov %%cr4,%0" : "=r" (cr4));
+	return cr4;
+}
+
+static inline void set_cr4(unsigned long cr4)
+{
+	asm volatile("mov %0,%%cr4" :: "r" (cr4) : "memory");
+}
+
+static inline void __flush_tlb_all(void)
+{
+	unsigned long cr4 = get_cr4();
+	set_cr4(cr4 & ~X86_CR4_PGE);	/* clear PGE */
+	set_cr4(cr4);			/* write old PGE again and flush TLBs */
+}
 
 #define __flush_tlb_one(addr) \
-	__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
+	__asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory")
 
 
 /*
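Editor's note: the rewrite turns the old multi-output asm macros into small composable accessors. A plain CR3 rewrite drops non-global TLB entries; toggling CR4.PGE, as __flush_tlb_all() now does, evicts global entries too. A hypothetical caller choosing between the per-page and full-flush primitives; flush_range() and its 4-page threshold are invented for illustration:

static void flush_range(unsigned long start, unsigned long end)
{
	if (end - start <= 4 * PAGE_SIZE) {
		unsigned long addr;
		for (addr = start; addr < end; addr += PAGE_SIZE)
			__flush_tlb_one(addr);	/* invlpg: cheap, per page */
	} else {
		__flush_tlb();	/* CR3 reload: drops all non-global entries */
	}
}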
diff --git a/include/asm-x86_64/uaccess.h b/include/asm-x86_64/uaccess.h
index 1e1fa003daa3..e856570c0689 100644
--- a/include/asm-x86_64/uaccess.h
+++ b/include/asm-x86_64/uaccess.h
@@ -84,7 +84,7 @@ struct exception_table_entry
  */
 
 #define __get_user_x(size,ret,x,ptr) \
-	__asm__ __volatile__("call __get_user_" #size \
+	asm volatile("call __get_user_" #size \
 		:"=a" (ret),"=d" (x) \
 		:"c" (ptr) \
 		:"r8")
@@ -101,7 +101,7 @@ struct exception_table_entry
 	case 8: __get_user_x(8,__ret_gu,__val_gu,ptr); break; \
 	default: __get_user_bad(); break; \
 	} \
-	(x) = (__typeof__(*(ptr)))__val_gu; \
+	(x) = (typeof(*(ptr)))__val_gu; \
 	__ret_gu; \
 })
 
@@ -112,7 +112,7 @@ extern void __put_user_8(void);
 extern void __put_user_bad(void);
 
 #define __put_user_x(size,ret,x,ptr) \
-	__asm__ __volatile__("call __put_user_" #size \
+	asm volatile("call __put_user_" #size \
 		:"=a" (ret) \
 		:"c" (ptr),"d" (x) \
 		:"r8")
@@ -139,7 +139,7 @@ extern void __put_user_bad(void);
 #define __put_user_check(x,ptr,size) \
 ({ \
 	int __pu_err; \
-	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+	typeof(*(ptr)) __user *__pu_addr = (ptr); \
 	switch (size) { \
 	case 1: __put_user_x(1,__pu_err,x,__pu_addr); break; \
 	case 2: __put_user_x(2,__pu_err,x,__pu_addr); break; \
@@ -173,7 +173,7 @@ struct __large_struct { unsigned long buf[100]; };
  * aliasing issues.
  */
 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errno) \
-	__asm__ __volatile__( \
+	asm volatile( \
 	"1:	mov"itype" %"rtype"1,%2\n" \
 	"2:\n" \
 	".section .fixup,\"ax\"\n" \
@@ -193,7 +193,7 @@ struct __large_struct { unsigned long buf[100]; };
 	int __gu_err; \
 	unsigned long __gu_val; \
 	__get_user_size(__gu_val,(ptr),(size),__gu_err); \
-	(x) = (__typeof__(*(ptr)))__gu_val; \
+	(x) = (typeof(*(ptr)))__gu_val; \
 	__gu_err; \
 })
 
@@ -217,7 +217,7 @@ do { \
 } while (0)
 
 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errno) \
-	__asm__ __volatile__( \
+	asm volatile( \
 	"1:	mov"itype" %2,%"rtype"1\n" \
 	"2:\n" \
 	".section .fixup,\"ax\"\n" \
@@ -237,15 +237,20 @@ do { \
  */
 
 /* Handles exceptions in both to and from, but doesn't do access_ok */
-extern unsigned long copy_user_generic(void *to, const void *from, unsigned len);
-
-extern unsigned long copy_to_user(void __user *to, const void *from, unsigned len);
-extern unsigned long copy_from_user(void *to, const void __user *from, unsigned len);
-extern unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len);
-
-static __always_inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
+__must_check unsigned long
+copy_user_generic(void *to, const void *from, unsigned len);
+
+__must_check unsigned long
+copy_to_user(void __user *to, const void *from, unsigned len);
+__must_check unsigned long
+copy_from_user(void *to, const void __user *from, unsigned len);
+__must_check unsigned long
+copy_in_user(void __user *to, const void __user *from, unsigned len);
+
+static __always_inline __must_check
+int __copy_from_user(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst,(__force void *)src,size);
 	switch (size) {
@@ -272,9 +277,10 @@ static __always_inline int __copy_from_user(void *dst, const void __user *src, u
 	}
 }
 
-static __always_inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
+static __always_inline __must_check
+int __copy_to_user(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst,src,size);
 	switch (size) {
@@ -303,10 +309,10 @@ static __always_inline int __copy_to_user(void __user *dst, const void *src, uns
 	}
 }
 
-
-static __always_inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+static __always_inline __must_check
+int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst,(__force void *)src,size);
 	switch (size) {
@@ -344,15 +350,17 @@ static __always_inline int __copy_in_user(void __user *dst, const void __user *s
 	}
 }
 
-long strncpy_from_user(char *dst, const char __user *src, long count);
-long __strncpy_from_user(char *dst, const char __user *src, long count);
-long strnlen_user(const char __user *str, long n);
-long __strnlen_user(const char __user *str, long n);
-long strlen_user(const char __user *str);
-unsigned long clear_user(void __user *mem, unsigned long len);
-unsigned long __clear_user(void __user *mem, unsigned long len);
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+__must_check long
+strncpy_from_user(char *dst, const char __user *src, long count);
+__must_check long
+__strncpy_from_user(char *dst, const char __user *src, long count);
+__must_check long strnlen_user(const char __user *str, long n);
+__must_check long __strnlen_user(const char __user *str, long n);
+__must_check long strlen_user(const char __user *str);
+__must_check unsigned long clear_user(void __user *mem, unsigned long len);
+__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
+
+__must_check long __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size);
+#define __copy_to_user_inatomic copy_user_generic
 
 #endif /* __X86_64_UACCESS_H */
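Editor's note: copy_*_user return the number of bytes left uncopied, and __must_check (gcc's warn_unused_result) now makes the compiler warn whenever that result is dropped. The usual calling pattern, sketched with an invented my_ioctl_arg struct and user pointer uarg:

	struct my_ioctl_arg karg;

	if (copy_from_user(&karg, uarg, sizeof(karg)))
		return -EFAULT;	/* non-zero = bytes that could not be copied */
	/* ... operate on karg ... */
	if (copy_to_user(uarg, &karg, sizeof(karg)))
		return -EFAULT;
	return 0;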
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 80fd48e84bbb..eeb98c168e98 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -600,9 +600,9 @@ __SYSCALL(__NR_fchmodat, sys_fchmodat)
 #define __NR_faccessat		269
 __SYSCALL(__NR_faccessat, sys_faccessat)
 #define __NR_pselect6		270
-__SYSCALL(__NR_pselect6, sys_ni_syscall)	/* for now */
+__SYSCALL(__NR_pselect6, sys_pselect6)
 #define __NR_ppoll		271
-__SYSCALL(__NR_ppoll, sys_ni_syscall)	/* for now */
+__SYSCALL(__NR_ppoll, sys_ppoll)
 #define __NR_unshare		272
 __SYSCALL(__NR_unshare, sys_unshare)
 #define __NR_set_robust_list	273
@@ -658,6 +658,7 @@ do { \
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_SIGPROCMASK
 #define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
 #define __ARCH_WANT_SYS_TIME
 #define __ARCH_WANT_COMPAT_SYS_TIME
 
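Editor's note: pselect6/ppoll exist to swap the signal mask atomically around the wait, which is why __ARCH_WANT_SYS_RT_SIGSUSPEND and TIF_RESTORE_SIGMASK appear elsewhere in this merge. A user-space sketch of the race they close, assuming glibc exposes ppoll() under _GNU_SOURCE:

#define _GNU_SOURCE
#include <poll.h>
#include <signal.h>

/* Wait for input on fd with signals unblocked only during the wait.
 * Doing sigprocmask(unblock); poll(); sigprocmask(reblock) by hand leaves
 * a window where a signal arrives after the check but before the sleep;
 * ppoll() installs and restores the mask atomically in the kernel. */
int wait_readable(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	sigset_t empty;

	sigemptyset(&empty);
	return ppoll(&pfd, 1, NULL, &empty);	/* NULL timeout = wait forever */
}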
diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h
index 1f6e9bfb569e..2e7ff10fd775 100644
--- a/include/asm-x86_64/unwind.h
+++ b/include/asm-x86_64/unwind.h
@@ -18,6 +18,7 @@ struct unwind_frame_info
 {
 	struct pt_regs regs;
 	struct task_struct *task;
+	unsigned call_frame:1;
 };
 
 #define UNW_PC(frame) (frame)->regs.rip
@@ -57,6 +58,10 @@ struct unwind_frame_info
 	PTREGS_INFO(r15), \
 	PTREGS_INFO(rip)
 
+#define UNW_DEFAULT_RA(raItem, dataAlign) \
+	((raItem).where == Memory && \
+	 !((raItem).value * (dataAlign) + 8))
+
 static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
 					    /*const*/ struct pt_regs *regs)
 {
@@ -94,8 +99,8 @@ static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
 
 #else
 
-#define UNW_PC(frame) ((void)(frame), 0)
-#define UNW_SP(frame) ((void)(frame), 0)
+#define UNW_PC(frame) ((void)(frame), 0UL)
+#define UNW_SP(frame) ((void)(frame), 0UL)
 
 static inline int arch_unw_user_mode(const void *info)
 {
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h
index 146b24402a5f..2281e9399b96 100644
--- a/include/asm-x86_64/vsyscall.h
+++ b/include/asm-x86_64/vsyscall.h
@@ -4,6 +4,7 @@
 enum vsyscall_num {
 	__NR_vgettimeofday,
 	__NR_vtime,
+	__NR_vgetcpu,
 };
 
 #define VSYSCALL_START (-10UL << 20)
@@ -15,6 +16,7 @@ enum vsyscall_num {
 #include <linux/seqlock.h>
 
 #define __section_vxtime __attribute__ ((unused, __section__ (".vxtime"), aligned(16)))
+#define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
 #define __section_wall_jiffies __attribute__ ((unused, __section__ (".wall_jiffies"), aligned(16)))
 #define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
 #define __section_sys_tz __attribute__ ((unused, __section__ (".sys_tz"), aligned(16)))
@@ -26,6 +28,9 @@ enum vsyscall_num {
 #define VXTIME_HPET	2
 #define VXTIME_PMTMR	3
 
+#define VGETCPU_RDTSCP	1
+#define VGETCPU_LSL	2
+
 struct vxtime_data {
 	long hpet_address;	/* HPET base address */
 	int last;
@@ -40,6 +45,7 @@ struct vxtime_data {
 
 /* vsyscall space (readonly) */
 extern struct vxtime_data __vxtime;
+extern int __vgetcpu_mode;
 extern struct timespec __xtime;
 extern volatile unsigned long __jiffies;
 extern unsigned long __wall_jiffies;
@@ -48,6 +54,7 @@ extern seqlock_t __xtime_lock;
 
 /* kernel space (writeable) */
 extern struct vxtime_data vxtime;
+extern int vgetcpu_mode;
 extern unsigned long wall_jiffies;
 extern struct timezone sys_tz;
 extern int sysctl_vsyscall;
@@ -55,6 +62,8 @@ extern seqlock_t xtime_lock;
 
 extern int sysctl_vsyscall;
 
+extern void vsyscall_set_cpu(int cpu);
+
 #define ARCH_HAVE_XTIME_LOCK 1
 
 #endif /* __KERNEL__ */
diff --git a/include/linux/edd.h b/include/linux/edd.h
index 162512b886f7..b2b3e68aa512 100644
--- a/include/linux/edd.h
+++ b/include/linux/edd.h
@@ -52,6 +52,7 @@
 #define EDD_CL_EQUALS	0x3d646465	/* "edd=" */
 #define EDD_CL_OFF	0x666f		/* "of" for off */
 #define EDD_CL_SKIP	0x6b73		/* "sk" for skipmbr */
+#define EDD_CL_ON	0x6e6f		/* "on" for on */
 
 #ifndef __ASSEMBLY__
 
diff --git a/include/linux/getcpu.h b/include/linux/getcpu.h
new file mode 100644
index 000000000000..031ed3780e45
--- /dev/null
+++ b/include/linux/getcpu.h
@@ -0,0 +1,16 @@
+#ifndef _LINUX_GETCPU_H
+#define _LINUX_GETCPU_H 1
+
+/* Cache for getcpu() to speed it up. Results might be upto a jiffie
+   out of date, but will be faster.
+   User programs should not refer to the contents of this structure.
+   It is only a cache for vgetcpu(). It might change in future kernels.
+   The user program must store this information per thread (__thread)
+   If you want 100% accurate information pass NULL instead. */
+struct getcpu_cache {
+	unsigned long t0;
+	unsigned long t1;
+	unsigned long res[4];
+};
+
+#endif
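Editor's note: on x86-64 this cache backs the new vgetcpu vsyscall, which decides per boot between RDTSCP and an LSL-based lookup (the VGETCPU_* constants above). A user-space sketch calling that fixed-address entry directly; 0xffffffffff600800 follows from VSYSCALL_START plus slot __NR_vgetcpu (2) times 1024, but treat it as illustrative, since portable code should go through the real syscall or a library wrapper:

#include <stdio.h>

struct getcpu_cache;	/* passed as NULL below, so the layout is not needed */
typedef long (*vgetcpu_t)(unsigned *cpu, unsigned *node,
			  struct getcpu_cache *cache);

int main(void)
{
	vgetcpu_t vgetcpu = (vgetcpu_t)0xffffffffff600800UL;	/* assumed address */
	unsigned cpu = 0, node = 0;

	vgetcpu(&cpu, &node, NULL);	/* NULL cache = always accurate */
	printf("running on cpu %u, node %u\n", cpu, node);
	return 0;
}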
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 329ebcffa106..c8d5f207c3d4 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -115,6 +115,21 @@ static inline u64 get_jiffies_64(void)
 	 ((long)(a) - (long)(b) >= 0))
 #define time_before_eq(a,b)	time_after_eq(b,a)
 
+/* Same as above, but does so with platform independent 64bit types.
+ * These must be used when utilizing jiffies_64 (i.e. return value of
+ * get_jiffies_64() */
+#define time_after64(a,b)	\
+	(typecheck(__u64, a) &&	\
+	 typecheck(__u64, b) && \
+	 ((__s64)(b) - (__s64)(a) < 0))
+#define time_before64(a,b)	time_after64(b,a)
+
+#define time_after_eq64(a,b)	\
+	(typecheck(__u64, a) && \
+	 typecheck(__u64, b) && \
+	 ((__s64)(a) - (__s64)(b) >= 0))
+#define time_before_eq64(a,b)	time_after_eq64(b,a)
+
 /*
  * Have the 32 bit jiffies value wrap 5 minutes after boot
  * so jiffies wrap bugs show up earlier.
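Editor's note: the typecheck() calls force genuine __u64 operands, so these helpers only accept values like the result of get_jiffies_64(), and the signed subtraction keeps comparisons correct across wraparound. A sketch of the rate-limit pattern the thermal-throttle code in this merge uses them for; should_log_event() and CHECK_INTERVAL are illustrative names:

#define CHECK_INTERVAL (300 * HZ)	/* illustrative: one message per 5 min */

static u64 next_check;

static int should_log_event(void)
{
	u64 now = get_jiffies_64();

	if (time_before64(now, next_check))
		return 0;	/* still inside the quiet interval */
	next_check = now + CHECK_INTERVAL;
	return 1;
}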
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index e44a37e2c71c..4fa373bb18ac 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -187,6 +187,7 @@ extern void bust_spinlocks(int yes);
 extern int oops_in_progress;	/* If set, an oops, panic(), BUG() or die() is in progress */
 extern int panic_timeout;
 extern int panic_on_oops;
+extern int panic_on_unrecovered_nmi;
 extern int tainted;
 extern const char *print_tainted(void);
 extern void add_taint(unsigned);
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 932021f872d5..6c9873f88287 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -35,9 +35,13 @@
 #endif
 
 #define KPROBE_ENTRY(name) \
-  .section .kprobes.text, "ax"; \
+  .pushsection .kprobes.text, "ax"; \
   ENTRY(name)
 
+#define KPROBE_END(name) \
+  END(name);		 \
+  .popsection
+
 #ifndef END
 #define END(name) \
   .size name, .-name
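Editor's note: with .pushsection/.popsection the assembler returns to whatever section was active before KPROBE_ENTRY, so handlers can be bracketed cleanly instead of leaving the section state dangling. A sketch of the intended pairing in entry assembly; the handler name is illustrative:

KPROBE_ENTRY(my_handler)	/* emitted into .kprobes.text, never probed */
	/* ... handler body ... */
KPROBE_END(my_handler)		/* END(my_handler), then back to the prior section */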
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 34ed0d99b1bd..9d4aa7f95bc8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -819,6 +819,11 @@ struct task_struct {
 	unsigned did_exec:1;
 	pid_t pid;
 	pid_t tgid;
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+	/* Canary value for the -fstack-protector gcc feature */
+	unsigned long stack_canary;
+#endif
 	/*
 	 * pointers to (original) parent process, youngest child, younger sibling,
 	 * older sibling, respectively.  (p->father can be replaced with
@@ -865,6 +870,15 @@ struct task_struct {
 	struct key *thread_keyring;	/* keyring private to this thread */
 	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
 #endif
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * that the FPU is used. If this is over a threshold, the lazy fpu
+	 * saving becomes unlazy to save the trap. This is an unsigned char
+	 * so that after 256 times the counter wraps and the behavior turns
+	 * lazy again; this to deal with bursty apps that only use FPU for
+	 * a short time
+	 */
+	unsigned char fpu_counter;
 	int oomkilladj; /* OOM kill score adjustment (bit shift). */
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
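Editor's note: a context-switch path can consult fpu_counter to restore FPU state eagerly for FPU-heavy tasks instead of waiting for the device-not-available trap. A simplified, hypothetical sketch; the threshold and the fpu_preload()/fpu_lazy() helper names are invented:

	if (next_p->fpu_counter > 5)
		fpu_preload(next_p);	/* eager: restore FPU state now, skip the trap */
	else
		fpu_lazy(next_p);	/* lazy: first FPU use traps and restores */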
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 9cc81e572224..50e2b01e517c 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -5,15 +5,16 @@
 struct stack_trace {
 	unsigned int nr_entries, max_entries;
 	unsigned long *entries;
+	int skip;	/* input argument: How many entries to skip */
+	int all_contexts; /* input argument: if true do than one stack */
 };
 
 extern void save_stack_trace(struct stack_trace *trace,
-			     struct task_struct *task, int all_contexts,
-			     unsigned int skip);
+			     struct task_struct *task);
 
 extern void print_stack_trace(struct stack_trace *trace, int spaces);
 #else
-# define save_stack_trace(trace, task, all, skip) do { } while (0)
+# define save_stack_trace(trace, task) do { } while (0)
 # define print_stack_trace(trace) do { } while (0)
 #endif
 
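Editor's note: skip and all_contexts become inputs carried in the struct, so save_stack_trace() now takes only the trace and the task. A sketch of the new calling convention; MAX_TRACE and the surrounding function are invented:

#include <linux/stacktrace.h>
#include <linux/sched.h>

#define MAX_TRACE 64

static unsigned long trace_entries[MAX_TRACE];

static void show_current_trace(void)
{
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= MAX_TRACE,
		.entries	= trace_entries,
		.skip		= 2,	/* input: drop the innermost frames */
		.all_contexts	= 0,	/* input: current stack only */
	};

	save_stack_trace(&trace, current);
	print_stack_trace(&trace, 0);
}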
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 008f04c56737..3f0f716225ec 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -53,6 +53,7 @@ struct mq_attr;
 struct compat_stat;
 struct compat_timeval;
 struct robust_list_head;
+struct getcpu_cache;
 
 #include <linux/types.h>
 #include <linux/aio_abi.h>
@@ -596,5 +597,6 @@ asmlinkage long sys_get_robust_list(int pid,
 				    size_t __user *len_ptr);
 asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
 				    size_t len);
+asmlinkage long sys_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *cache);
 
 #endif
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index eca555781d05..1b24bd45e080 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -150,6 +150,8 @@ enum
 	KERN_IA64_UNALIGNED=72, /* int: ia64 unaligned userland trap enable */
 	KERN_COMPAT_LOG=73,	/* int: print compat layer messages */
 	KERN_MAX_LOCK_DEPTH=74,
+	KERN_NMI_WATCHDOG=75, /* int: enable/disable nmi watchdog */
+	KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
 };
 
 
diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
index 46919f9f5eb3..4d0909e53595 100644
--- a/include/linux/vermagic.h
+++ b/include/linux/vermagic.h
@@ -24,5 +24,5 @@
 #define VERMAGIC_STRING \
 	UTS_RELEASE " " \
 	MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
-	MODULE_VERMAGIC_MODULE_UNLOAD MODULE_ARCH_VERMAGIC \
-	"gcc-" __stringify(__GNUC__) "." __stringify(__GNUC_MINOR__)
+	MODULE_VERMAGIC_MODULE_UNLOAD MODULE_ARCH_VERMAGIC
+