Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--  arch/x86/include/asm/a.out-core.h | 2
-rw-r--r--  arch/x86/include/asm/acpi.h | 3
-rw-r--r--  arch/x86/include/asm/apic.h | 74
-rw-r--r--  arch/x86/include/asm/apm.h (renamed from arch/x86/include/asm/mach-default/apm.h) | 0
-rw-r--r--  arch/x86/include/asm/bigsmp/apic.h | 155
-rw-r--r--  arch/x86/include/asm/bigsmp/apicdef.h | 13
-rw-r--r--  arch/x86/include/asm/bigsmp/ipi.h | 22
-rw-r--r--  arch/x86/include/asm/calling.h | 56
-rw-r--r--  arch/x86/include/asm/cpu.h | 6
-rw-r--r--  arch/x86/include/asm/cpumask.h | 4
-rw-r--r--  arch/x86/include/asm/do_timer.h (renamed from arch/x86/include/asm/mach-default/do_timer.h) | 0
-rw-r--r--  arch/x86/include/asm/elf.h | 15
-rw-r--r--  arch/x86/include/asm/entry_arch.h (renamed from arch/x86/include/asm/mach-default/entry_arch.h) | 2
-rw-r--r--  arch/x86/include/asm/es7000/apic.h | 242
-rw-r--r--  arch/x86/include/asm/es7000/apicdef.h | 13
-rw-r--r--  arch/x86/include/asm/es7000/ipi.h | 22
-rw-r--r--  arch/x86/include/asm/es7000/mpparse.h | 29
-rw-r--r--  arch/x86/include/asm/es7000/wakecpu.h | 37
-rw-r--r--  arch/x86/include/asm/fixmap_32.h | 4
-rw-r--r--  arch/x86/include/asm/fixmap_64.h | 4
-rw-r--r--  arch/x86/include/asm/genapic.h | 262
-rw-r--r--  arch/x86/include/asm/genapic_32.h | 141
-rw-r--r--  arch/x86/include/asm/genapic_64.h | 60
-rw-r--r--  arch/x86/include/asm/hw_irq.h | 24
-rw-r--r--  arch/x86/include/asm/io.h | 93
-rw-r--r--  arch/x86/include/asm/io_32.h | 88
-rw-r--r--  arch/x86/include/asm/io_64.h | 61
-rw-r--r--  arch/x86/include/asm/io_apic.h | 15
-rw-r--r--  arch/x86/include/asm/ipi.h | 77
-rw-r--r--  arch/x86/include/asm/irq.h | 4
-rw-r--r--  arch/x86/include/asm/irq_vectors.h | 218
-rw-r--r--  arch/x86/include/asm/kexec.h | 27
-rw-r--r--  arch/x86/include/asm/mach-default/mach_apic.h | 168
-rw-r--r--  arch/x86/include/asm/mach-default/mach_apicdef.h | 24
-rw-r--r--  arch/x86/include/asm/mach-default/mach_ipi.h | 64
-rw-r--r--  arch/x86/include/asm/mach-default/mach_mpparse.h | 17
-rw-r--r--  arch/x86/include/asm/mach-default/mach_mpspec.h | 12
-rw-r--r--  arch/x86/include/asm/mach-default/mach_wakecpu.h | 41
-rw-r--r--  arch/x86/include/asm/mach-generic/gpio.h | 15
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_apic.h | 35
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_apicdef.h | 11
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_ipi.h | 10
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_mpparse.h | 9
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_mpspec.h | 12
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_wakecpu.h | 12
-rw-r--r--  arch/x86/include/asm/mach-rdc321x/gpio.h | 60
-rw-r--r--  arch/x86/include/asm/mach_timer.h (renamed from arch/x86/include/asm/mach-default/mach_timer.h) | 0
-rw-r--r--  arch/x86/include/asm/mach_traps.h (renamed from arch/x86/include/asm/mach-default/mach_traps.h) | 0
-rw-r--r--  arch/x86/include/asm/mmu_context.h | 2
-rw-r--r--  arch/x86/include/asm/mpspec.h | 35
-rw-r--r--  arch/x86/include/asm/numaq.h | 2
-rw-r--r--  arch/x86/include/asm/numaq/apic.h | 142
-rw-r--r--  arch/x86/include/asm/numaq/apicdef.h | 14
-rw-r--r--  arch/x86/include/asm/numaq/ipi.h | 22
-rw-r--r--  arch/x86/include/asm/numaq/mpparse.h | 6
-rw-r--r--  arch/x86/include/asm/numaq/wakecpu.h | 45
-rw-r--r--  arch/x86/include/asm/page.h | 20
-rw-r--r--  arch/x86/include/asm/paravirt.h | 472
-rw-r--r--  arch/x86/include/asm/pat.h | 4
-rw-r--r--  arch/x86/include/asm/pci-functions.h (renamed from arch/x86/include/asm/mach-default/pci-functions.h) | 0
-rw-r--r--  arch/x86/include/asm/percpu.h | 22
-rw-r--r--  arch/x86/include/asm/pgtable-2level.h | 2
-rw-r--r--  arch/x86/include/asm/pgtable-3level.h | 35
-rw-r--r--  arch/x86/include/asm/pgtable.h | 226
-rw-r--r--  arch/x86/include/asm/pgtable_32.h | 46
-rw-r--r--  arch/x86/include/asm/pgtable_64.h | 64
-rw-r--r--  arch/x86/include/asm/prctl.h | 4
-rw-r--r--  arch/x86/include/asm/processor.h | 20
-rw-r--r--  arch/x86/include/asm/proto.h | 4
-rw-r--r--  arch/x86/include/asm/ptrace.h | 4
-rw-r--r--  arch/x86/include/asm/rdc321x_defs.h (renamed from arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h) | 0
-rw-r--r--  arch/x86/include/asm/segment.h | 9
-rw-r--r--  arch/x86/include/asm/setup.h | 8
-rw-r--r--  arch/x86/include/asm/setup_arch.h (renamed from arch/x86/include/asm/mach-default/setup_arch.h) | 0
-rw-r--r--  arch/x86/include/asm/smp.h | 19
-rw-r--r--  arch/x86/include/asm/smpboot_hooks.h (renamed from arch/x86/include/asm/mach-default/smpboot_hooks.h) | 6
-rw-r--r--  arch/x86/include/asm/spinlock.h | 69
-rw-r--r--  arch/x86/include/asm/stackprotector.h | 96
-rw-r--r--  arch/x86/include/asm/summit/apic.h | 202
-rw-r--r--  arch/x86/include/asm/summit/apicdef.h | 13
-rw-r--r--  arch/x86/include/asm/summit/ipi.h | 26
-rw-r--r--  arch/x86/include/asm/summit/mpparse.h | 109
-rw-r--r--  arch/x86/include/asm/syscalls.h | 20
-rw-r--r--  arch/x86/include/asm/system.h | 44
-rw-r--r--  arch/x86/include/asm/thread_info.h | 1
-rw-r--r--  arch/x86/include/asm/topology.h | 6
-rw-r--r--  arch/x86/include/asm/traps.h | 2
-rw-r--r--  arch/x86/include/asm/uaccess.h | 138
-rw-r--r--  arch/x86/include/asm/uv/uv.h | 3
-rw-r--r--  arch/x86/include/asm/voyager.h | 42
-rw-r--r--  arch/x86/include/asm/xen/events.h | 6
91 files changed, 1601 insertions(+), 2672 deletions(-)
diff --git a/arch/x86/include/asm/a.out-core.h b/arch/x86/include/asm/a.out-core.h
index 3c601f8224be..bb70e397aa84 100644
--- a/arch/x86/include/asm/a.out-core.h
+++ b/arch/x86/include/asm/a.out-core.h
@@ -55,7 +55,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
         dump->regs.ds = (u16)regs->ds;
         dump->regs.es = (u16)regs->es;
         dump->regs.fs = (u16)regs->fs;
-        savesegment(gs, dump->regs.gs);
+        dump->regs.gs = get_user_gs(regs);
         dump->regs.orig_ax = regs->orig_ax;
         dump->regs.ip = regs->ip;
         dump->regs.cs = (u16)regs->cs;
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 9830681446ad..4518dc500903 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -102,9 +102,6 @@ static inline void disable_acpi(void)
         acpi_noirq = 1;
 }
 
-/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
-#define FIX_ACPI_PAGES 4
-
 extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
 
 static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index ab1d51a8855e..fba49f66228f 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -33,7 +33,13 @@
 } while (0)
 
 
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
 extern void generic_apic_probe(void);
+#else
+static inline void generic_apic_probe(void)
+{
+}
+#endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
 
@@ -41,6 +47,21 @@ extern unsigned int apic_verbosity;
 extern int local_apic_timer_c2_ok;
 
 extern int disable_apic;
+
+#ifdef CONFIG_SMP
+extern void __inquire_remote_apic(int apicid);
+#else /* CONFIG_SMP */
+static inline void __inquire_remote_apic(int apicid)
+{
+}
+#endif /* CONFIG_SMP */
+
+static inline void default_inquire_remote_apic(int apicid)
+{
+        if (apic_verbosity >= APIC_DEBUG)
+                __inquire_remote_apic(apicid);
+}
+
 /*
  * Basic functions accessing APICs.
  */
@@ -124,12 +145,35 @@ struct apic_ops {
 
 extern struct apic_ops *apic_ops;
 
-#define apic_read (apic_ops->read)
-#define apic_write (apic_ops->write)
-#define apic_icr_read (apic_ops->icr_read)
-#define apic_icr_write (apic_ops->icr_write)
-#define apic_wait_icr_idle (apic_ops->wait_icr_idle)
-#define safe_apic_wait_icr_idle (apic_ops->safe_wait_icr_idle)
+static inline u32 apic_read(u32 reg)
+{
+        return apic_ops->read(reg);
+}
+
+static inline void apic_write(u32 reg, u32 val)
+{
+        apic_ops->write(reg, val);
+}
+
+static inline u64 apic_icr_read(void)
+{
+        return apic_ops->icr_read();
+}
+
+static inline void apic_icr_write(u32 low, u32 high)
+{
+        apic_ops->icr_write(low, high);
+}
+
+static inline void apic_wait_icr_idle(void)
+{
+        apic_ops->wait_icr_idle();
+}
+
+static inline u32 safe_apic_wait_icr_idle(void)
+{
+        return apic_ops->safe_wait_icr_idle();
+}
 
 extern int get_physical_broadcast(void);
 
@@ -196,4 +240,22 @@ static inline void disable_local_APIC(void) { }
 
 #endif /* !CONFIG_X86_LOCAL_APIC */
 
+#ifdef CONFIG_X86_64
+#define SET_APIC_ID(x)          (apic->set_apic_id(x))
+#else
+
+#ifdef CONFIG_X86_LOCAL_APIC
+static inline unsigned default_get_apic_id(unsigned long x)
+{
+        unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
+
+        if (APIC_XAPIC(ver))
+                return (x >> 24) & 0xFF;
+        else
+                return (x >> 24) & 0x0F;
+}
+#endif
+
+#endif
+
 #endif /* _ASM_X86_APIC_H */
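
With the macro-to-inline conversion in the apic.h hunk above, existing call sites compile unchanged but the compiler now type-checks the register offset and value arguments. A minimal sketch of such call sites (illustrative only; APIC_EOI, APIC_LVR and GET_APIC_VERSION are the usual apicdef.h names, not something added by this patch):

        /* Sketch: call sites look the same as with the old #define
         * wrappers, but apic_read()/apic_write() now check types. */
        static void example_ack_apic_irq(void)
        {
                apic_write(APIC_EOI, 0);        /* signal end-of-interrupt */
        }

        static unsigned int example_apic_version(void)
        {
                return GET_APIC_VERSION(apic_read(APIC_LVR));
        }
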
diff --git a/arch/x86/include/asm/mach-default/apm.h b/arch/x86/include/asm/apm.h
index 20370c6db74b..20370c6db74b 100644
--- a/arch/x86/include/asm/mach-default/apm.h
+++ b/arch/x86/include/asm/apm.h
diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h
deleted file mode 100644
index d8dd9f537911..000000000000
--- a/arch/x86/include/asm/bigsmp/apic.h
+++ /dev/null
@@ -1,155 +0,0 @@
-#ifndef __ASM_MACH_APIC_H
-#define __ASM_MACH_APIC_H
-
-#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
-#define esr_disable (1)
-
-static inline int apic_id_registered(void)
-{
-        return (1);
-}
-
-static inline const cpumask_t *target_cpus(void)
-{
-#ifdef CONFIG_SMP
-        return &cpu_online_map;
-#else
-        return &cpumask_of_cpu(0);
-#endif
-}
-
-#undef APIC_DEST_LOGICAL
-#define APIC_DEST_LOGICAL 0
-#define APIC_DFR_VALUE (APIC_DFR_FLAT)
-#define INT_DELIVERY_MODE (dest_Fixed)
-#define INT_DEST_MODE (0)    /* phys delivery to target proc */
-#define NO_BALANCE_IRQ (0)
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
-        return (0);
-}
-
-static inline unsigned long check_apicid_present(int bit)
-{
-        return (1);
-}
-
-static inline unsigned long calculate_ldr(int cpu)
-{
-        unsigned long val, id;
-        val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
-        id = xapic_phys_to_log_apicid(cpu);
-        val |= SET_APIC_LOGICAL_ID(id);
-        return val;
-}
-
-/*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LDR and TPR before enabling
- * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116).  So here it goes...
- */
-static inline void init_apic_ldr(void)
-{
-        unsigned long val;
-        int cpu = smp_processor_id();
-
-        apic_write(APIC_DFR, APIC_DFR_VALUE);
-        val = calculate_ldr(cpu);
-        apic_write(APIC_LDR, val);
-}
-
-static inline void setup_apic_routing(void)
-{
-        printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
-                "Physflat", nr_ioapics);
-}
-
-static inline int multi_timer_check(int apic, int irq)
-{
-        return (0);
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
-        return apicid_2_node[hard_smp_processor_id()];
-}
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
-        if (mps_cpu < nr_cpu_ids)
-                return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
-
-        return BAD_APICID;
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
-{
-        return physid_mask_of_physid(phys_apicid);
-}
-
-extern u8 cpu_2_logical_apicid[];
-/* Mapping from cpu number to logical apicid */
-static inline int cpu_to_logical_apicid(int cpu)
-{
-        if (cpu >= nr_cpu_ids)
-                return BAD_APICID;
-        return cpu_physical_id(cpu);
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
-{
-        /* For clustered we don't have a good way to do this yet - hack */
-        return physids_promote(0xFFL);
-}
-
-static inline void setup_portio_remap(void)
-{
-}
-
-static inline void enable_apic_mode(void)
-{
-}
-
-static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
-{
-        return (1);
-}
-
-/* As we are using single CPU as destination, pick only one CPU here */
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
-{
-        int cpu;
-        int apicid;
-
-        cpu = first_cpu(*cpumask);
-        apicid = cpu_to_logical_apicid(cpu);
-        return apicid;
-}
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-                                                  const struct cpumask *andmask)
-{
-        int cpu;
-
-        /*
-         * We're using fixed IRQ delivery, can only return one phys APIC ID.
-         * May as well be the first.
-         */
-        for_each_cpu_and(cpu, cpumask, andmask)
-                if (cpumask_test_cpu(cpu, cpu_online_mask))
-                        break;
-        if (cpu < nr_cpu_ids)
-                return cpu_to_logical_apicid(cpu);
-
-        return BAD_APICID;
-}
-
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
-        return cpuid_apic >> index_msb;
-}
-
-#endif /* __ASM_MACH_APIC_H */
diff --git a/arch/x86/include/asm/bigsmp/apicdef.h b/arch/x86/include/asm/bigsmp/apicdef.h
deleted file mode 100644
index 392c3f5ef2fe..000000000000
--- a/arch/x86/include/asm/bigsmp/apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_MACH_APICDEF_H
-#define __ASM_MACH_APICDEF_H
-
-#define APIC_ID_MASK (0xFF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
-        return (((x)>>24)&0xFF);
-}
-
-#define GET_APIC_ID(x) get_apic_id(x)
-
-#endif
diff --git a/arch/x86/include/asm/bigsmp/ipi.h b/arch/x86/include/asm/bigsmp/ipi.h
deleted file mode 100644
index 27fcd01b3ae6..000000000000
--- a/arch/x86/include/asm/bigsmp/ipi.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef __ASM_MACH_IPI_H
-#define __ASM_MACH_IPI_H
-
-void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
-static inline void send_IPI_mask(const struct cpumask *mask, int vector)
-{
-        send_IPI_mask_sequence(mask, vector);
-}
-
-static inline void send_IPI_allbutself(int vector)
-{
-        send_IPI_mask_allbutself(cpu_online_mask, vector);
-}
-
-static inline void send_IPI_all(int vector)
-{
-        send_IPI_mask(cpu_online_mask, vector);
-}
-
-#endif /* __ASM_MACH_IPI_H */
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index 2bc162e0ec6e..0e63c9a2a8d0 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -1,5 +1,55 @@
 /*
- * Some macros to handle stack frames in assembly.
+
+ x86 function call convention, 64-bit:
+ -------------------------------------
+  arguments           |  callee-saved      | extra caller-saved | return
+ [callee-clobbered]   |                    | [callee-clobbered] |
+ ---------------------------------------------------------------------------
+ rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]
+
+ ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
+   functions when it sees tail-call optimization possibilities) rflags is
+   clobbered. Leftover arguments are passed over the stack frame.)
+
+ [*]  In the frame-pointers case rbp is fixed to the stack frame.
+
+ [**] for struct return values wider than 64 bits the return convention is a
+      bit more complex: up to 128 bits width we return small structures
+      straight in rax, rdx. For structures larger than that (3 words or
+      larger) the caller puts a pointer to an on-stack return struct
+      [allocated in the caller's stack frame] into the first argument - i.e.
+      into rdi. All other arguments shift up by one in this case.
+      Fortunately this case is rare in the kernel.
+
+For 32-bit we have the following conventions - kernel is built with
+-mregparm=3 and -freg-struct-return:
+
+ x86 function calling convention, 32-bit:
+ ----------------------------------------
+  arguments          | callee-saved        | extra caller-saved | return
+ [callee-clobbered]  |                     | [callee-clobbered] |
+ -------------------------------------------------------------------------
+ eax edx ecx         | ebx edi esi ebp [*] | <none>             | eax, edx [**]
+
+ ( here too esp is obviously invariant across normal function calls. eflags
+   is clobbered. Leftover arguments are passed over the stack frame. )
+
+ [*]  In the frame-pointers case ebp is fixed to the stack frame.
+
+ [**] We build with -freg-struct-return, which on 32-bit means similar
+      semantics as on 64-bit: edx can be used for a second return value
+      (i.e. covering integer and structure sizes up to 64 bits) - after that
+      it gets more complex and more expensive: 3-word or larger struct returns
+      get done in the caller's frame and the pointer to the return struct goes
+      into regparm0, i.e. eax - the other arguments shift up and the
+      function's register parameters degenerate to regparm=2 in essence.
+
+*/
+
+
+/*
+ * 64-bit system call stack frame layout defines and helpers,
+ * for assembly code:
  */
 
 #define R15 0
@@ -9,7 +59,7 @@
 #define RBP 32
 #define RBX 40
 
-/* arguments: interrupts/non tracing syscalls only save upto here*/
+/* arguments: interrupts/non tracing syscalls only save up to here: */
 #define R11 48
 #define R10 56
 #define R9 64
@@ -22,7 +72,7 @@
 #define ORIG_RAX 120       /* + error_code */
 /* end of arguments */
 
-/* cpu exception frame or undefined in case of fast syscall. */
+/* cpu exception frame or undefined in case of fast syscall: */
 #define RIP 128
 #define CS 136
 #define EFLAGS 144
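
The calling-convention comment added to calling.h above can be made concrete with a short sketch. The C fragment below (illustrative only, not part of the patch; the type and function names are made up) shows the three 64-bit return classes the comment describes:

        struct pair   { unsigned long a, b; };    /* 128 bits: returned in rax:rdx */
        struct triple { unsigned long a, b, c; }; /* 3 words: returned via memory  */

        unsigned long ret_scalar(void) { return 1; }              /* rax */
        struct pair   ret_pair(void)   { struct pair p = {1, 2}; return p; }
        struct triple ret_triple(void)
        {
                /* The caller allocates the result in its own stack frame and
                 * passes its address in rdi; explicit arguments shift up by
                 * one register, exactly as the [**] note above says. */
                struct triple t = {1, 2, 3};
                return t;
        }
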
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index f03b23e32864..b185091bf19c 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -32,10 +32,6 @@ extern void arch_unregister_cpu(int);
 
 DECLARE_PER_CPU(int, cpu_state);
 
-#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
-extern unsigned char boot_cpu_id;
-#else
-#define boot_cpu_id 0
-#endif
+extern unsigned int boot_cpu_id;
 
 #endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h
index 26c6dad90479..a7f3c75f8ad7 100644
--- a/arch/x86/include/asm/cpumask.h
+++ b/arch/x86/include/asm/cpumask.h
@@ -10,6 +10,8 @@ extern cpumask_var_t cpu_callout_mask;
 extern cpumask_var_t cpu_initialized_mask;
 extern cpumask_var_t cpu_sibling_setup_mask;
 
+extern void setup_cpu_local_masks(void);
+
 #else /* CONFIG_X86_32 */
 
 extern cpumask_t cpu_callin_map;
@@ -22,6 +24,8 @@ extern cpumask_t cpu_sibling_setup_map;
 #define cpu_initialized_mask ((struct cpumask *)&cpu_initialized)
 #define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
 
+static inline void setup_cpu_local_masks(void) { }
+
 #endif /* CONFIG_X86_32 */
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/mach-default/do_timer.h b/arch/x86/include/asm/do_timer.h
index 23ecda0b28a0..23ecda0b28a0 100644
--- a/arch/x86/include/asm/mach-default/do_timer.h
+++ b/arch/x86/include/asm/do_timer.h
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index f51a3ddde01a..83c1bc8d2e8a 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -112,7 +112,7 @@ extern unsigned int vdso_enabled;
  * now struct_user_regs, they are different)
  */
 
-#define ELF_CORE_COPY_REGS(pr_reg, regs)        \
+#define ELF_CORE_COPY_REGS_COMMON(pr_reg, regs) \
 do {                                            \
         pr_reg[0] = regs->bx;                   \
         pr_reg[1] = regs->cx;                   \
@@ -124,7 +124,6 @@ do { \
         pr_reg[7] = regs->ds & 0xffff;          \
         pr_reg[8] = regs->es & 0xffff;          \
         pr_reg[9] = regs->fs & 0xffff;          \
-        savesegment(gs, pr_reg[10]);            \
         pr_reg[11] = regs->orig_ax;             \
         pr_reg[12] = regs->ip;                  \
         pr_reg[13] = regs->cs & 0xffff;         \
@@ -133,6 +132,18 @@ do { \
         pr_reg[16] = regs->ss & 0xffff;         \
 } while (0);
 
+#define ELF_CORE_COPY_REGS(pr_reg, regs)        \
+do {                                            \
+        ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
+        pr_reg[10] = get_user_gs(regs);         \
+} while (0);
+
+#define ELF_CORE_COPY_KERNEL_REGS(pr_reg, regs) \
+do {                                            \
+        ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
+        savesegment(gs, pr_reg[10]);            \
+} while (0);
+
 #define ELF_PLATFORM (utsname()->machine)
 #define set_personality_64bit() do { } while (0)
 
diff --git a/arch/x86/include/asm/mach-default/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index b87b077cc231..854d538ae857 100644
--- a/arch/x86/include/asm/mach-default/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -9,7 +9,7 @@
  * is no hardware IRQ pin equivalent for them, they are triggered
  * through the ICC by us (IPIs)
  */
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
 BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
 BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
 BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h
deleted file mode 100644
index c58b9cc74465..000000000000
--- a/arch/x86/include/asm/es7000/apic.h
+++ /dev/null
@@ -1,242 +0,0 @@
-#ifndef __ASM_ES7000_APIC_H
-#define __ASM_ES7000_APIC_H
-
-#include <linux/gfp.h>
-
-#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
-#define esr_disable (1)
-
-static inline int apic_id_registered(void)
-{
-        return (1);
-}
-
-static inline const cpumask_t *target_cpus_cluster(void)
-{
-        return &CPU_MASK_ALL;
-}
-
-static inline const cpumask_t *target_cpus(void)
-{
-        return &cpumask_of_cpu(smp_processor_id());
-}
-
-#define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER)
-#define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio)
-#define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */
-#define NO_BALANCE_IRQ_CLUSTER (1)
-
-#define APIC_DFR_VALUE (APIC_DFR_FLAT)
-#define INT_DELIVERY_MODE (dest_Fixed)
-#define INT_DEST_MODE (0)    /* phys delivery to target procs */
-#define NO_BALANCE_IRQ (0)
-#undef APIC_DEST_LOGICAL
-#define APIC_DEST_LOGICAL 0x0
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
-        return 0;
-}
-static inline unsigned long check_apicid_present(int bit)
-{
-        return physid_isset(bit, phys_cpu_present_map);
-}
-
-#define apicid_cluster(apicid) (apicid & 0xF0)
-
-static inline unsigned long calculate_ldr(int cpu)
-{
-        unsigned long id;
-        id = xapic_phys_to_log_apicid(cpu);
-        return (SET_APIC_LOGICAL_ID(id));
-}
-
-/*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LdR and TPR before enabling
- * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116).  So here it goes...
- */
-static inline void init_apic_ldr_cluster(void)
-{
-        unsigned long val;
-        int cpu = smp_processor_id();
-
-        apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
-        val = calculate_ldr(cpu);
-        apic_write(APIC_LDR, val);
-}
-
-static inline void init_apic_ldr(void)
-{
-        unsigned long val;
-        int cpu = smp_processor_id();
-
-        apic_write(APIC_DFR, APIC_DFR_VALUE);
-        val = calculate_ldr(cpu);
-        apic_write(APIC_LDR, val);
-}
-
-extern int apic_version [MAX_APICS];
-static inline void setup_apic_routing(void)
-{
-        int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
-        printk("Enabling APIC mode:  %s.  Using %d I/O APICs, target cpus %lx\n",
-                (apic_version[apic] == 0x14) ?
-                "Physical Cluster" : "Logical Cluster",
-                nr_ioapics, cpus_addr(*target_cpus())[0]);
-}
-
-static inline int multi_timer_check(int apic, int irq)
-{
-        return 0;
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
-        return 0;
-}
-
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
-        if (!mps_cpu)
-                return boot_cpu_physical_apicid;
-        else if (mps_cpu < nr_cpu_ids)
-                return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
-        else
-                return BAD_APICID;
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
-{
-        static int id = 0;
-        physid_mask_t mask;
-        mask = physid_mask_of_physid(id);
-        ++id;
-        return mask;
-}
-
-extern u8 cpu_2_logical_apicid[];
-/* Mapping from cpu number to logical apicid */
-static inline int cpu_to_logical_apicid(int cpu)
-{
-#ifdef CONFIG_SMP
-        if (cpu >= nr_cpu_ids)
-                return BAD_APICID;
-        return (int)cpu_2_logical_apicid[cpu];
-#else
-        return logical_smp_processor_id();
-#endif
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
-{
-        /* For clustered we don't have a good way to do this yet - hack */
-        return physids_promote(0xff);
-}
-
-
-static inline void setup_portio_remap(void)
-{
-}
-
-extern unsigned int boot_cpu_physical_apicid;
-static inline int check_phys_apicid_present(int cpu_physical_apicid)
-{
-        boot_cpu_physical_apicid = read_apic_id();
-        return (1);
-}
-
-static inline unsigned int
-cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
-{
-        int num_bits_set;
-        int cpus_found = 0;
-        int cpu;
-        int apicid;
-
-        num_bits_set = cpumask_weight(cpumask);
-        /* Return id to all */
-        if (num_bits_set == nr_cpu_ids)
-                return 0xFF;
-        /*
-         * The cpus in the mask must all be on the apic cluster.  If are not
-         * on the same apicid cluster return default value of TARGET_CPUS.
-         */
-        cpu = cpumask_first(cpumask);
-        apicid = cpu_to_logical_apicid(cpu);
-        while (cpus_found < num_bits_set) {
-                if (cpumask_test_cpu(cpu, cpumask)) {
-                        int new_apicid = cpu_to_logical_apicid(cpu);
-                        if (apicid_cluster(apicid) !=
-                                        apicid_cluster(new_apicid)){
-                                printk ("%s: Not a valid mask!\n", __func__);
-                                return 0xFF;
-                        }
-                        apicid = new_apicid;
-                        cpus_found++;
-                }
-                cpu++;
-        }
-        return apicid;
-}
-
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
-{
-        int num_bits_set;
-        int cpus_found = 0;
-        int cpu;
-        int apicid;
-
-        num_bits_set = cpus_weight(*cpumask);
-        /* Return id to all */
-        if (num_bits_set == nr_cpu_ids)
-                return cpu_to_logical_apicid(0);
-        /*
-         * The cpus in the mask must all be on the apic cluster.  If are not
-         * on the same apicid cluster return default value of TARGET_CPUS.
-         */
-        cpu = first_cpu(*cpumask);
-        apicid = cpu_to_logical_apicid(cpu);
-        while (cpus_found < num_bits_set) {
-                if (cpu_isset(cpu, *cpumask)) {
-                        int new_apicid = cpu_to_logical_apicid(cpu);
-                        if (apicid_cluster(apicid) !=
-                                        apicid_cluster(new_apicid)){
-                                printk ("%s: Not a valid mask!\n", __func__);
-                                return cpu_to_logical_apicid(0);
-                        }
-                        apicid = new_apicid;
-                        cpus_found++;
-                }
-                cpu++;
-        }
-        return apicid;
-}
-
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
-                                                  const struct cpumask *andmask)
-{
-        int apicid = cpu_to_logical_apicid(0);
-        cpumask_var_t cpumask;
-
-        if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
-                return apicid;
-
-        cpumask_and(cpumask, inmask, andmask);
-        cpumask_and(cpumask, cpumask, cpu_online_mask);
-        apicid = cpu_mask_to_apicid(cpumask);
-
-        free_cpumask_var(cpumask);
-        return apicid;
-}
-
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
-        return cpuid_apic >> index_msb;
-}
-
-#endif /* __ASM_ES7000_APIC_H */
diff --git a/arch/x86/include/asm/es7000/apicdef.h b/arch/x86/include/asm/es7000/apicdef.h
deleted file mode 100644
index 8b234a3cb851..000000000000
--- a/arch/x86/include/asm/es7000/apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_ES7000_APICDEF_H
-#define __ASM_ES7000_APICDEF_H
-
-#define APIC_ID_MASK (0xFF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
-        return (((x)>>24)&0xFF);
-}
-
-#define GET_APIC_ID(x) get_apic_id(x)
-
-#endif
diff --git a/arch/x86/include/asm/es7000/ipi.h b/arch/x86/include/asm/es7000/ipi.h
deleted file mode 100644
index 7e8ed24d4b8a..000000000000
--- a/arch/x86/include/asm/es7000/ipi.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef __ASM_ES7000_IPI_H
-#define __ASM_ES7000_IPI_H
-
-void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
-static inline void send_IPI_mask(const struct cpumask *mask, int vector)
-{
-        send_IPI_mask_sequence(mask, vector);
-}
-
-static inline void send_IPI_allbutself(int vector)
-{
-        send_IPI_mask_allbutself(cpu_online_mask, vector);
-}
-
-static inline void send_IPI_all(int vector)
-{
-        send_IPI_mask(cpu_online_mask, vector);
-}
-
-#endif /* __ASM_ES7000_IPI_H */
diff --git a/arch/x86/include/asm/es7000/mpparse.h b/arch/x86/include/asm/es7000/mpparse.h
deleted file mode 100644
index c1629b090ec2..000000000000
--- a/arch/x86/include/asm/es7000/mpparse.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __ASM_ES7000_MPPARSE_H
-#define __ASM_ES7000_MPPARSE_H
-
-#include <linux/acpi.h>
-
-extern int parse_unisys_oem (char *oemptr);
-extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
-extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr);
-extern void setup_unisys(void);
-
-#ifndef CONFIG_X86_GENERICARCH
-extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
-extern int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid);
-#endif
-
-#ifdef CONFIG_ACPI
-
-static inline int es7000_check_dsdt(void)
-{
-        struct acpi_table_header header;
-
-        if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
-            !strncmp(header.oem_id, "UNISYS", 6))
-                return 1;
-        return 0;
-}
-#endif
-
-#endif /* __ASM_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/es7000/wakecpu.h b/arch/x86/include/asm/es7000/wakecpu.h
deleted file mode 100644
index 78f0daaee436..000000000000
--- a/arch/x86/include/asm/es7000/wakecpu.h
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef __ASM_ES7000_WAKECPU_H
-#define __ASM_ES7000_WAKECPU_H
-
-#define TRAMPOLINE_PHYS_LOW 0x467
-#define TRAMPOLINE_PHYS_HIGH 0x469
-
-static inline void wait_for_init_deassert(atomic_t *deassert)
-{
-#ifndef CONFIG_ES7000_CLUSTERED_APIC
-        while (!atomic_read(deassert))
-                cpu_relax();
-#endif
-        return;
-}
-
-/* Nothing to do for most platforms, since cleared by the INIT cycle */
-static inline void smp_callin_clear_local_apic(void)
-{
-}
-
-static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
-{
-}
-
-static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
-{
-}
-
-extern void __inquire_remote_apic(int apicid);
-
-static inline void inquire_remote_apic(int apicid)
-{
-        if (apic_verbosity >= APIC_DEBUG)
-                __inquire_remote_apic(apicid);
-}
-
-#endif /* __ASM_MACH_WAKECPU_H */
diff --git a/arch/x86/include/asm/fixmap_32.h b/arch/x86/include/asm/fixmap_32.h
index c7115c1d7217..047d9bab2b31 100644
--- a/arch/x86/include/asm/fixmap_32.h
+++ b/arch/x86/include/asm/fixmap_32.h
@@ -95,10 +95,6 @@ enum fixed_addresses {
                         (__end_of_permanent_fixed_addresses & 255),
         FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
         FIX_WP_TEST,
-#ifdef CONFIG_ACPI
-        FIX_ACPI_BEGIN,
-        FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
-#endif
 #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
         FIX_OHCI1394_BASE,
 #endif
diff --git a/arch/x86/include/asm/fixmap_64.h b/arch/x86/include/asm/fixmap_64.h
index 00a30ab9b1a5..298d9ba3faeb 100644
--- a/arch/x86/include/asm/fixmap_64.h
+++ b/arch/x86/include/asm/fixmap_64.h
@@ -50,10 +50,6 @@ enum fixed_addresses {
         FIX_PARAVIRT_BOOTMAP,
 #endif
         __end_of_permanent_fixed_addresses,
-#ifdef CONFIG_ACPI
-        FIX_ACPI_BEGIN,
-        FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
-#endif
 #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
         FIX_OHCI1394_BASE,
 #endif
diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h
index d48bee663a6f..273b99452ae0 100644
--- a/arch/x86/include/asm/genapic.h
+++ b/arch/x86/include/asm/genapic.h
@@ -1,5 +1,263 @@
+#ifndef _ASM_X86_GENAPIC_H
+#define _ASM_X86_GENAPIC_H
+
+#include <linux/cpumask.h>
+
+#include <asm/mpspec.h>
+#include <asm/atomic.h>
+
+/*
+ * Copyright 2004 James Cleverdon, IBM.
+ * Subject to the GNU Public License, v.2
+ *
+ * Generic APIC sub-arch data struct.
+ *
+ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
+ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
+ * James Cleverdon.
+ */
+struct genapic {
+        char *name;
+
+        int (*probe)(void);
+        int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
+        int (*apic_id_registered)(void);
+
+        u32 irq_delivery_mode;
+        u32 irq_dest_mode;
+
+        const struct cpumask *(*target_cpus)(void);
+
+        int disable_esr;
+
+        int dest_logical;
+        unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
+        unsigned long (*check_apicid_present)(int apicid);
+
+        void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
+        void (*init_apic_ldr)(void);
+
+        physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
+
+        void (*setup_apic_routing)(void);
+        int (*multi_timer_check)(int apic, int irq);
+        int (*apicid_to_node)(int logical_apicid);
+        int (*cpu_to_logical_apicid)(int cpu);
+        int (*cpu_present_to_apicid)(int mps_cpu);
+        physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
+        void (*setup_portio_remap)(void);
+        int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
+        void (*enable_apic_mode)(void);
+        int (*phys_pkg_id)(int cpuid_apic, int index_msb);
+
+        /*
+         * When one of the next two hooks returns 1 the genapic
+         * is switched to this. Essentially they are additional
+         * probe functions:
+         */
+        int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid);
+
+        unsigned int (*get_apic_id)(unsigned long x);
+        unsigned long (*set_apic_id)(unsigned int id);
+        unsigned long apic_id_mask;
+
+        unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
+        unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+                                               const struct cpumask *andmask);
+
+        /* ipi */
+        void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+        void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+                                         int vector);
+        void (*send_IPI_allbutself)(int vector);
+        void (*send_IPI_all)(int vector);
+        void (*send_IPI_self)(int vector);
+
+        /* wakeup_secondary_cpu */
+        int (*wakeup_cpu)(int apicid, unsigned long start_eip);
+
+        int trampoline_phys_low;
+        int trampoline_phys_high;
+
+        void (*wait_for_init_deassert)(atomic_t *deassert);
+        void (*smp_callin_clear_local_apic)(void);
+        void (*store_NMI_vector)(unsigned short *high, unsigned short *low);
+        void (*inquire_remote_apic)(int apicid);
+};
+
+extern struct genapic *apic;
+
+/*
+ * Warm reset vector default position:
+ */
+#define DEFAULT_TRAMPOLINE_PHYS_LOW  0x467
+#define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469
+
 #ifdef CONFIG_X86_32
-# include "genapic_32.h"
+extern void es7000_update_genapic_to_cluster(void);
 #else
-# include "genapic_64.h"
+extern struct genapic apic_flat;
+extern struct genapic apic_physflat;
+extern struct genapic apic_x2apic_cluster;
+extern struct genapic apic_x2apic_phys;
+extern int default_acpi_madt_oem_check(char *, char *);
+
+extern void apic_send_IPI_self(int vector);
+
+extern struct genapic apic_x2apic_uv_x;
+DECLARE_PER_CPU(int, x2apic_extra_bits);
+
+extern void default_setup_apic_routing(void);
+
+extern int default_cpu_present_to_apicid(int mps_cpu);
+extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
 #endif
+
+static inline void default_wait_for_init_deassert(atomic_t *deassert)
+{
+        while (!atomic_read(deassert))
+                cpu_relax();
+        return;
+}
+
+extern void generic_bigsmp_probe(void);
+
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+#include <asm/smp.h>
+
+#define APIC_DFR_VALUE (APIC_DFR_FLAT)
+
+static inline const struct cpumask *default_target_cpus(void)
+{
+#ifdef CONFIG_SMP
+        return cpu_online_mask;
+#else
+        return cpumask_of(0);
+#endif
+}
+
+DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
+
+
+static inline unsigned int read_apic_id(void)
+{
+        unsigned int reg;
+
+        reg = apic_read(APIC_ID);
+
+        return apic->get_apic_id(reg);
+}
+
+#ifdef CONFIG_X86_64
+extern void default_setup_apic_routing(void);
+#else
+
+/*
+ * Set up the logical destination ID.
+ *
+ * Intel recommends to set DFR, LDR and TPR before enabling
+ * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
+ * document number 292116).  So here it goes...
+ */
+extern void default_init_apic_ldr(void);
+
+static inline int default_apic_id_registered(void)
+{
+        return physid_isset(read_apic_id(), phys_cpu_present_map);
+}
+
+static inline unsigned int
+default_cpu_mask_to_apicid(const struct cpumask *cpumask)
+{
+        return cpumask_bits(cpumask)[0];
+}
+
+static inline unsigned int
+default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                               const struct cpumask *andmask)
+{
+        unsigned long mask1 = cpumask_bits(cpumask)[0];
+        unsigned long mask2 = cpumask_bits(andmask)[0];
+        unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
+
+        return (unsigned int)(mask1 & mask2 & mask3);
+}
+
+static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+        return cpuid_apic >> index_msb;
+}
+
+static inline void default_setup_apic_routing(void)
+{
+#ifdef CONFIG_X86_IO_APIC
+        printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
+                                        "Flat", nr_ioapics);
+#endif
+}
+
+extern int default_apicid_to_node(int logical_apicid);
+
+#endif
+
+static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+        return physid_isset(apicid, bitmap);
+}
+
+static inline unsigned long default_check_apicid_present(int bit)
+{
+        return physid_isset(bit, phys_cpu_present_map);
+}
+
+static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map)
+{
+        return phys_map;
+}
+
+/* Mapping from cpu number to logical apicid */
+static inline int default_cpu_to_logical_apicid(int cpu)
+{
+        return 1 << cpu;
+}
+
+static inline int __default_cpu_present_to_apicid(int mps_cpu)
+{
+        if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
+                return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
+        else
+                return BAD_APICID;
+}
+
+static inline int
+__default_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+        return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
+}
+
+#ifdef CONFIG_X86_32
+static inline int default_cpu_present_to_apicid(int mps_cpu)
+{
+        return __default_cpu_present_to_apicid(mps_cpu);
+}
+
+static inline int
+default_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+        return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
+}
+#else
+extern int default_cpu_present_to_apicid(int mps_cpu);
+extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
+#endif
+
+static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid)
+{
+        return physid_mask_of_physid(phys_apicid);
+}
+
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+#endif /* _ASM_X86_GENAPIC_64_H */
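
The struct genapic consolidated into genapic.h above is effectively a vtable of per-subarchitecture callbacks, with default_* helpers supplied for the common case. A hypothetical instance (for illustration only; the field and helper names come from this header, but no such driver is added by this patch) might be wired up as:

        static struct genapic apic_example_flat = {
                .name                   = "example flat",
                .target_cpus            = default_target_cpus,
                .check_apicid_used      = default_check_apicid_used,
                .check_apicid_present   = default_check_apicid_present,
                .ioapic_phys_id_map     = default_ioapic_phys_id_map,
                .cpu_to_logical_apicid  = default_cpu_to_logical_apicid,
                .cpu_present_to_apicid  = default_cpu_present_to_apicid,
                .apicid_to_cpu_present  = default_apicid_to_cpu_present,
                .trampoline_phys_low    = DEFAULT_TRAMPOLINE_PHYS_LOW,
                .trampoline_phys_high   = DEFAULT_TRAMPOLINE_PHYS_HIGH,
                .wait_for_init_deassert = default_wait_for_init_deassert,
                .inquire_remote_apic    = default_inquire_remote_apic,
        };
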
diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h
deleted file mode 100644
index 4334502d3664..000000000000
--- a/arch/x86/include/asm/genapic_32.h
+++ /dev/null
@@ -1,141 +0,0 @@
-#ifndef _ASM_X86_GENAPIC_32_H
-#define _ASM_X86_GENAPIC_32_H
-
-#include <asm/mpspec.h>
-#include <asm/atomic.h>
-
-/*
- * Generic APIC driver interface.
- *
- * An straight forward mapping of the APIC related parts of the
- * x86 subarchitecture interface to a dynamic object.
- *
- * This is used by the "generic" x86 subarchitecture.
- *
- * Copyright 2003 Andi Kleen, SuSE Labs.
- */
-
-struct mpc_bus;
-struct mpc_table;
-struct mpc_cpu;
-
-struct genapic {
-        char *name;
-        int (*probe)(void);
-
-        int (*apic_id_registered)(void);
-        const struct cpumask *(*target_cpus)(void);
-        int int_delivery_mode;
-        int int_dest_mode;
-        int ESR_DISABLE;
-        int apic_destination_logical;
-        unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
-        unsigned long (*check_apicid_present)(int apicid);
-        int no_balance_irq;
-        int no_ioapic_check;
-        void (*init_apic_ldr)(void);
-        physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
-
-        void (*setup_apic_routing)(void);
-        int (*multi_timer_check)(int apic, int irq);
-        int (*apicid_to_node)(int logical_apicid);
-        int (*cpu_to_logical_apicid)(int cpu);
-        int (*cpu_present_to_apicid)(int mps_cpu);
-        physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
-        void (*setup_portio_remap)(void);
-        int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
-        void (*enable_apic_mode)(void);
-        u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb);
-
-        /* mpparse */
-        /* When one of the next two hooks returns 1 the genapic
-           is switched to this. Essentially they are additional probe
-           functions. */
-        int (*mps_oem_check)(struct mpc_table *mpc, char *oem,
-                             char *productid);
-        int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
-
-        unsigned (*get_apic_id)(unsigned long x);
-        unsigned long apic_id_mask;
-        unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
-        unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
-                                               const struct cpumask *andmask);
-        void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
-
-#ifdef CONFIG_SMP
-        /* ipi */
-        void (*send_IPI_mask)(const struct cpumask *mask, int vector);
-        void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
-                                         int vector);
-        void (*send_IPI_allbutself)(int vector);
-        void (*send_IPI_all)(int vector);
-#endif
-        int (*wakeup_cpu)(int apicid, unsigned long start_eip);
-        int trampoline_phys_low;
-        int trampoline_phys_high;
-        void (*wait_for_init_deassert)(atomic_t *deassert);
-        void (*smp_callin_clear_local_apic)(void);
-        void (*store_NMI_vector)(unsigned short *high, unsigned short *low);
-        void (*restore_NMI_vector)(unsigned short *high, unsigned short *low);
-        void (*inquire_remote_apic)(int apicid);
-};
-
-#define APICFUNC(x) .x = x,
-
-/* More functions could be probably marked IPIFUNC and save some space
-   in UP GENERICARCH kernels, but I don't have the nerve right now
-   to untangle this mess. -AK  */
-#ifdef CONFIG_SMP
-#define IPIFUNC(x) APICFUNC(x)
-#else
-#define IPIFUNC(x)
-#endif
-
-#define APIC_INIT(aname, aprobe)                        \
-{                                                       \
-        .name = aname,                                  \
-        .probe = aprobe,                                \
-        .int_delivery_mode = INT_DELIVERY_MODE,         \
-        .int_dest_mode = INT_DEST_MODE,                 \
-        .no_balance_irq = NO_BALANCE_IRQ,               \
-        .ESR_DISABLE = esr_disable,                     \
-        .apic_destination_logical = APIC_DEST_LOGICAL,  \
-        APICFUNC(apic_id_registered)                    \
-        APICFUNC(target_cpus)                           \
-        APICFUNC(check_apicid_used)                     \
-        APICFUNC(check_apicid_present)                  \
-        APICFUNC(init_apic_ldr)                         \
-        APICFUNC(ioapic_phys_id_map)                    \
-        APICFUNC(setup_apic_routing)                    \
-        APICFUNC(multi_timer_check)                     \
-        APICFUNC(apicid_to_node)                        \
-        APICFUNC(cpu_to_logical_apicid)                 \
-        APICFUNC(cpu_present_to_apicid)                 \
-        APICFUNC(apicid_to_cpu_present)                 \
-        APICFUNC(setup_portio_remap)                    \
-        APICFUNC(check_phys_apicid_present)             \
-        APICFUNC(mps_oem_check)                         \
-        APICFUNC(get_apic_id)                           \
-        .apic_id_mask = APIC_ID_MASK,                   \
-        APICFUNC(cpu_mask_to_apicid)                    \
-        APICFUNC(cpu_mask_to_apicid_and)                \
-        APICFUNC(vector_allocation_domain)              \
-        APICFUNC(acpi_madt_oem_check)                   \
-        IPIFUNC(send_IPI_mask)                          \
-        IPIFUNC(send_IPI_allbutself)                    \
-        IPIFUNC(send_IPI_all)                           \
-        APICFUNC(enable_apic_mode)                      \
-        APICFUNC(phys_pkg_id)                           \
-        .trampoline_phys_low = TRAMPOLINE_PHYS_LOW,     \
-        .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH,   \
-        APICFUNC(wait_for_init_deassert)                \
-        APICFUNC(smp_callin_clear_local_apic)           \
-        APICFUNC(store_NMI_vector)                      \
-        APICFUNC(restore_NMI_vector)                    \
-        APICFUNC(inquire_remote_apic)                   \
-}
-
-extern struct genapic *genapic;
-extern void es7000_update_genapic_to_cluster(void);
-
-#endif /* _ASM_X86_GENAPIC_32_H */
diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h
deleted file mode 100644
index 7bb092c59055..000000000000
--- a/arch/x86/include/asm/genapic_64.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef _ASM_X86_GENAPIC_64_H
-#define _ASM_X86_GENAPIC_64_H
-
-#include <linux/cpumask.h>
-
-/*
- * Copyright 2004 James Cleverdon, IBM.
- * Subject to the GNU Public License, v.2
- *
- * Generic APIC sub-arch data struct.
- *
- * Hacked for x86-64 by James Cleverdon from i386 architecture code by
- * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
- * James Cleverdon.
- */
-
-struct genapic {
-        char *name;
-        int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
-        u32 int_delivery_mode;
-        u32 int_dest_mode;
-        int (*apic_id_registered)(void);
-        const struct cpumask *(*target_cpus)(void);
-        void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
-        void (*init_apic_ldr)(void);
-        /* ipi */
-        void (*send_IPI_mask)(const struct cpumask *mask, int vector);
-        void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
-                                         int vector);
-        void (*send_IPI_allbutself)(int vector);
-        void (*send_IPI_all)(int vector);
-        void (*send_IPI_self)(int vector);
-        /* */
-        unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
-        unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
-                                               const struct cpumask *andmask);
-        unsigned int (*phys_pkg_id)(int index_msb);
-        unsigned int (*get_apic_id)(unsigned long x);
-        unsigned long (*set_apic_id)(unsigned int id);
-        unsigned long apic_id_mask;
-        /* wakeup_secondary_cpu */
-        int (*wakeup_cpu)(int apicid, unsigned long start_eip);
-};
-
-extern struct genapic *genapic;
-
-extern struct genapic apic_flat;
-extern struct genapic apic_physflat;
-extern struct genapic apic_x2apic_cluster;
-extern struct genapic apic_x2apic_phys;
-extern int acpi_madt_oem_check(char *, char *);
-
-extern void apic_send_IPI_self(int vector);
-
-extern struct genapic apic_x2apic_uv_x;
-DECLARE_PER_CPU(int, x2apic_extra_bits);
-
-extern void setup_apic_routing(void);
-
-#endif /* _ASM_X86_GENAPIC_64_H */
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index aa93e53b85ee..f39881b6b68b 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -25,8 +25,6 @@
 #include <asm/irq.h>
 #include <asm/sections.h>
 
-#define platform_legacy_irq(irq) ((irq) < 16)
-
 /* Interrupt handlers registered during init_IRQ */
 extern void apic_timer_interrupt(void);
 extern void error_interrupt(void);
@@ -60,7 +58,7 @@ extern void make_8259A_irq(unsigned int irq);
 extern void init_8259A(int aeoi);
 
 /* IOAPIC */
-#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
+#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
 extern unsigned long io_apic_irqs;
 
 extern void init_VISWS_APIC_irqs(void);
@@ -69,15 +67,7 @@ extern void disable_IO_APIC(void);
 extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
 extern void setup_ioapic_dest(void);
 
-#ifdef CONFIG_X86_64
 extern void enable_IO_APIC(void);
-#endif
-
-/* IPI functions */
-#ifdef CONFIG_X86_32
-extern void send_IPI_self(int vector);
-#endif
-extern void send_IPI(int dest, int vector);
 
 /* Statistics */
 extern atomic_t irq_err_count;
@@ -86,21 +76,11 @@ extern atomic_t irq_mis_count;
 /* EISA */
 extern void eisa_set_level_irq(unsigned int irq);
 
-/* Voyager functions */
-extern asmlinkage void vic_cpi_interrupt(void);
-extern asmlinkage void vic_sys_interrupt(void);
-extern asmlinkage void vic_cmn_interrupt(void);
-extern asmlinkage void qic_timer_interrupt(void);
-extern asmlinkage void qic_invalidate_interrupt(void);
-extern asmlinkage void qic_reschedule_interrupt(void);
-extern asmlinkage void qic_enable_irq_interrupt(void);
-extern asmlinkage void qic_call_function_interrupt(void);
-
 /* SMP */
 extern void smp_apic_timer_interrupt(struct pt_regs *);
 extern void smp_spurious_interrupt(struct pt_regs *);
 extern void smp_error_interrupt(struct pt_regs *);
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
 extern void smp_reschedule_interrupt(struct pt_regs *);
 extern void smp_call_function_interrupt(struct pt_regs *);
 extern void smp_call_function_single_interrupt(struct pt_regs *);
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 1dbbdf4be9b4..e5a2ab44cd5c 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -5,6 +5,7 @@
 
 #include <linux/compiler.h>
 #include <asm-generic/int-ll64.h>
+#include <asm/page.h>
 
 #define build_mmio_read(name, size, type, reg, barrier) \
 static inline type name(const volatile void __iomem *addr) \
@@ -80,6 +81,95 @@ static inline void writeq(__u64 val, volatile void __iomem *addr)
 #define readq readq
 #define writeq writeq
 
+/**
+ * virt_to_phys - map virtual addresses to physical
+ * @address: address to remap
+ *
+ * The returned physical address is the physical (CPU) mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses directly mapped or allocated via kmalloc.
+ *
+ * This function does not give bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+
+static inline phys_addr_t virt_to_phys(volatile void *address)
+{
+        return __pa(address);
+}
+
+/**
+ * phys_to_virt - map physical address to virtual
+ * @address: address to remap
+ *
+ * The returned virtual address is a current CPU mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses that have a kernel mapping
+ *
+ * This function does not handle bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+
+static inline void *phys_to_virt(phys_addr_t address)
+{
+        return __va(address);
+}
+
+/*
+ * Change "struct page" to physical address.
+ */
+#define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+
+/*
+ * ISA I/O bus memory addresses are 1:1 with the physical address.
+ */
+#define isa_virt_to_bus virt_to_phys
+#define isa_page_to_bus page_to_phys
+#define isa_bus_to_virt phys_to_virt
+
+/*
+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
+ * are forbidden in portable PCI drivers.
+ *
+ * Allow them on x86 for legacy drivers, though.
+ */
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
+/**
+ * ioremap - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * If the area you are trying to map is a PCI BAR you should have a
+ * look at pci_iomap().
+ */
+extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
+                                unsigned long prot_val);
+
+/*
+ * The default ioremap() behavior is non-cached:
+ */
+static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
+{
+        return ioremap_nocache(offset, size);
+}
+
+extern void iounmap(volatile void __iomem *addr);
+
+extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
+
+
 #ifdef CONFIG_X86_32
 # include "io_32.h"
 #else
@@ -91,7 +181,7 @@ extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
 
 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                                 unsigned long prot_val);
-extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
+extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
 
 /*
  * early_ioremap() and early_iounmap() are for temporary early boot-time
@@ -105,5 +195,6 @@ extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
 extern void early_iounmap(void __iomem *addr, unsigned long size);
 extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
 
+#define IO_SPACE_LIMIT 0xffff
 
 #endif /* _ASM_X86_IO_H */
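
The kernel-doc comments moved into io.h above describe the usual map/access/unmap pattern; a minimal sketch of a driver using it (illustrative only; DEMO_BAR_PHYS and DEMO_REG_STATUS are made-up placeholders, not from this patch):

        #define DEMO_BAR_PHYS   0xfed00000UL    /* hypothetical device base     */
        #define DEMO_REG_STATUS 0x04            /* hypothetical register offset */

        static int demo_read_status(u32 *status)
        {
                void __iomem *base;

                base = ioremap(DEMO_BAR_PHYS, PAGE_SIZE); /* uncached by default */
                if (!base)
                        return -ENOMEM;

                *status = readl(base + DEMO_REG_STATUS);  /* mmio helper from io.h */
                iounmap(base);
                return 0;
        }
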
diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
index d8e242e1b396..a299900f5920 100644
--- a/arch/x86/include/asm/io_32.h
+++ b/arch/x86/include/asm/io_32.h
@@ -37,8 +37,6 @@
  * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
  */
 
-#define IO_SPACE_LIMIT 0xffff
-
 #define XQUAD_PORTIO_BASE 0xfe400000
 #define XQUAD_PORTIO_QUAD 0x40000  /* 256k per quad. */
 
@@ -53,92 +51,6 @@
53 */ 51 */
54#define xlate_dev_kmem_ptr(p) p 52#define xlate_dev_kmem_ptr(p) p
55 53
56/**
57 * virt_to_phys - map virtual addresses to physical
58 * @address: address to remap
59 *
60 * The returned physical address is the physical (CPU) mapping for
61 * the memory address given. It is only valid to use this function on
62 * addresses directly mapped or allocated via kmalloc.
63 *
64 * This function does not give bus mappings for DMA transfers. In
65 * almost all conceivable cases a device driver should not be using
66 * this function.
67 */
68
69static inline unsigned long virt_to_phys(volatile void *address)
70{
71 return __pa(address);
72}
73
74/**
75 * phys_to_virt - map physical address to virtual
76 * @address: address to remap
77 *
78 * The returned virtual address is a current CPU mapping for
79 * the memory address given. It is only valid to use this function on
80 * addresses that have a kernel mapping
81 *
82 * This function does not handle bus mappings for DMA transfers. In
83 * almost all conceivable cases a device driver should not be using
84 * this function.
85 */
86
87static inline void *phys_to_virt(unsigned long address)
88{
89 return __va(address);
90}
91
92/*
93 * Change "struct page" to physical address.
94 */
95#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
96
97/**
98 * ioremap - map bus memory into CPU space
99 * @offset: bus address of the memory
100 * @size: size of the resource to map
101 *
102 * ioremap performs a platform specific sequence of operations to
103 * make bus memory CPU accessible via the readb/readw/readl/writeb/
104 * writew/writel functions and the other mmio helpers. The returned
105 * address is not guaranteed to be usable directly as a virtual
106 * address.
107 *
108 * If the area you are trying to map is a PCI BAR you should have a
109 * look at pci_iomap().
110 */
111extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
112extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
113extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
114 unsigned long prot_val);
115
116/*
117 * The default ioremap() behavior is non-cached:
118 */
119static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
120{
121 return ioremap_nocache(offset, size);
122}
123
124extern void iounmap(volatile void __iomem *addr);
125
126/*
127 * ISA I/O bus memory addresses are 1:1 with the physical address.
128 */
129#define isa_virt_to_bus virt_to_phys
130#define isa_page_to_bus page_to_phys
131#define isa_bus_to_virt phys_to_virt
132
133/*
134 * However PCI ones are not necessarily 1:1 and therefore these interfaces
135 * are forbidden in portable PCI drivers.
136 *
137 * Allow them on x86 for legacy drivers, though.
138 */
139#define virt_to_bus virt_to_phys
140#define bus_to_virt phys_to_virt
141
142static inline void 54static inline void
143memset_io(volatile void __iomem *addr, unsigned char val, int count) 55memset_io(volatile void __iomem *addr, unsigned char val, int count)
144{ 56{
diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
index 563c16270ba6..244067893af4 100644
--- a/arch/x86/include/asm/io_64.h
+++ b/arch/x86/include/asm/io_64.h
@@ -136,73 +136,12 @@ __OUTS(b)
136__OUTS(w) 136__OUTS(w)
137__OUTS(l) 137__OUTS(l)
138 138
139#define IO_SPACE_LIMIT 0xffff
140
141#if defined(__KERNEL__) && defined(__x86_64__) 139#if defined(__KERNEL__) && defined(__x86_64__)
142 140
143#include <linux/vmalloc.h> 141#include <linux/vmalloc.h>
144 142
145#ifndef __i386__
146/*
147 * Change virtual addresses to physical addresses and vv.
148 * These are pretty trivial
149 */
150static inline unsigned long virt_to_phys(volatile void *address)
151{
152 return __pa(address);
153}
154
155static inline void *phys_to_virt(unsigned long address)
156{
157 return __va(address);
158}
159#endif
160
161/*
162 * Change "struct page" to physical address.
163 */
164#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
165
166#include <asm-generic/iomap.h> 143#include <asm-generic/iomap.h>
167 144
168/*
169 * This one maps high address device memory and turns off caching for that area.
170 * It's useful if some control registers are in such an area and write combining
171 * or read caching is not desirable:
172 */
173extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
174extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
175extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
176 unsigned long prot_val);
177
178/*
179 * The default ioremap() behavior is non-cached:
180 */
181static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
182{
183 return ioremap_nocache(offset, size);
184}
185
186extern void iounmap(volatile void __iomem *addr);
187
188extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
189
190/*
191 * ISA I/O bus memory addresses are 1:1 with the physical address.
192 */
193#define isa_virt_to_bus virt_to_phys
194#define isa_page_to_bus page_to_phys
195#define isa_bus_to_virt phys_to_virt
196
197/*
198 * However PCI ones are not necessarily 1:1 and therefore these interfaces
199 * are forbidden in portable PCI drivers.
200 *
201 * Allow them on x86 for legacy drivers, though.
202 */
203#define virt_to_bus virt_to_phys
204#define bus_to_virt phys_to_virt
205
206void __memcpy_fromio(void *, unsigned long, unsigned); 145void __memcpy_fromio(void *, unsigned long, unsigned);
207void __memcpy_toio(unsigned long, const void *, unsigned); 146void __memcpy_toio(unsigned long, const void *, unsigned);
208 147
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 08ec793aa043..59cb4a1317b7 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -143,15 +143,6 @@ extern int noioapicreroute;
143/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */ 143/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
144extern int timer_through_8259; 144extern int timer_through_8259;
145 145
146static inline void disable_ioapic_setup(void)
147{
148#ifdef CONFIG_PCI
149 noioapicquirk = 1;
150 noioapicreroute = -1;
151#endif
152 skip_ioapic_setup = 1;
153}
154
155/* 146/*
156 * If we use the IO-APIC for IRQ routing, disable automatic 147 * If we use the IO-APIC for IRQ routing, disable automatic
157 * assignment of PCI IRQ's. 148 * assignment of PCI IRQ's.
@@ -178,6 +169,12 @@ extern void reinit_intr_remapped_IO_APIC(int);
178 169
179extern void probe_nr_irqs_gsi(void); 170extern void probe_nr_irqs_gsi(void);
180 171
172extern int setup_ioapic_entry(int apic, int irq,
173 struct IO_APIC_route_entry *entry,
174 unsigned int destination, int trigger,
175 int polarity, int vector);
176extern void ioapic_write_entry(int apic, int pin,
177 struct IO_APIC_route_entry e);
181#else /* !CONFIG_X86_IO_APIC */ 178#else /* !CONFIG_X86_IO_APIC */
182#define io_apic_assign_pci_irqs 0 179#define io_apic_assign_pci_irqs 0
183static const int timer_through_8259 = 0; 180static const int timer_through_8259 = 0;
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index c745a306f7d3..5f2efc5d9927 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -1,6 +1,8 @@
1#ifndef _ASM_X86_IPI_H 1#ifndef _ASM_X86_IPI_H
2#define _ASM_X86_IPI_H 2#define _ASM_X86_IPI_H
3 3
4#ifdef CONFIG_X86_LOCAL_APIC
5
4/* 6/*
5 * Copyright 2004 James Cleverdon, IBM. 7 * Copyright 2004 James Cleverdon, IBM.
6 * Subject to the GNU Public License, v.2 8 * Subject to the GNU Public License, v.2
@@ -55,8 +57,8 @@ static inline void __xapic_wait_icr_idle(void)
55 cpu_relax(); 57 cpu_relax();
56} 58}
57 59
58static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, 60static inline void
59 unsigned int dest) 61__default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
60{ 62{
61 /* 63 /*
62 * Subtle. In the case of the 'never do double writes' workaround 64 * Subtle. In the case of the 'never do double writes' workaround
@@ -87,8 +89,8 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
87 * This is used to send an IPI with no shorthand notation (the destination is 89 * This is used to send an IPI with no shorthand notation (the destination is
88 * specified in bits 56 to 63 of the ICR). 90 * specified in bits 56 to 63 of the ICR).
89 */ 91 */
90static inline void __send_IPI_dest_field(unsigned int mask, int vector, 92static inline void
91 unsigned int dest) 93 __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
92{ 94{
93 unsigned long cfg; 95 unsigned long cfg;
94 96
@@ -117,41 +119,46 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
117 native_apic_mem_write(APIC_ICR, cfg); 119 native_apic_mem_write(APIC_ICR, cfg);
118} 120}
119 121
120static inline void send_IPI_mask_sequence(const struct cpumask *mask, 122extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
121 int vector) 123 int vector);
122{ 124extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
123 unsigned long flags; 125 int vector);
124 unsigned long query_cpu; 126#include <asm/genapic.h>
125 127
126 /* 128extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
127 * Hack. The clustered APIC addressing mode doesn't allow us to send 129 int vector);
128 * to an arbitrary mask, so I do a unicast to each CPU instead. 130extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
129 * - mbligh 131 int vector);
130 */ 132
131 local_irq_save(flags); 133/* Avoid include hell */
132 for_each_cpu(query_cpu, mask) { 134#define NMI_VECTOR 0x02
133 __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu), 135
134 vector, APIC_DEST_PHYSICAL); 136extern int no_broadcast;
135 } 137
136 local_irq_restore(flags); 138static inline void __default_local_send_IPI_allbutself(int vector)
139{
140 if (no_broadcast || vector == NMI_VECTOR)
141 apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
142 else
143 __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical);
137} 144}
138 145
139static inline void send_IPI_mask_allbutself(const struct cpumask *mask, 146static inline void __default_local_send_IPI_all(int vector)
140 int vector)
141{ 147{
142 unsigned long flags; 148 if (no_broadcast || vector == NMI_VECTOR)
143 unsigned int query_cpu; 149 apic->send_IPI_mask(cpu_online_mask, vector);
144 unsigned int this_cpu = smp_processor_id(); 150 else
145 151 __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical);
146 /* See Hack comment above */
147
148 local_irq_save(flags);
149 for_each_cpu(query_cpu, mask)
150 if (query_cpu != this_cpu)
151 __send_IPI_dest_field(
152 per_cpu(x86_cpu_to_apicid, query_cpu),
153 vector, APIC_DEST_PHYSICAL);
154 local_irq_restore(flags);
155} 152}
156 153
154#ifdef CONFIG_X86_32
155extern void default_send_IPI_mask_logical(const struct cpumask *mask,
156 int vector);
157extern void default_send_IPI_allbutself(int vector);
158extern void default_send_IPI_all(int vector);
159extern void default_send_IPI_self(int vector);
160#endif
161
162#endif
163
157#endif /* _ASM_X86_IPI_H */ 164#endif /* _ASM_X86_IPI_H */
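
For context (not part of the patch): the ALLBUT/ALLINC shortcuts are hardware broadcasts that reach every processor, so the helpers above fall back to per-CPU mask sends whenever broadcasts are disabled or the vector is an NMI. An illustrative restatement of that rule:

	/* Illustrative only -- the same condition the helpers above test */
	static inline int ipi_must_unicast(int vector)
	{
		return no_broadcast || vector == NMI_VECTOR;
	}
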
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 592688ed04d3..107eb2196691 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -36,9 +36,11 @@ static inline int irq_canonicalize(int irq)
36extern void fixup_irqs(void); 36extern void fixup_irqs(void);
37#endif 37#endif
38 38
39extern unsigned int do_IRQ(struct pt_regs *regs);
40extern void init_IRQ(void); 39extern void init_IRQ(void);
41extern void native_init_IRQ(void); 40extern void native_init_IRQ(void);
41extern bool handle_irq(unsigned irq, struct pt_regs *regs);
42
43extern unsigned int do_IRQ(struct pt_regs *regs);
42 44
43/* Interrupt vector management */ 45/* Interrupt vector management */
44extern DECLARE_BITMAP(used_vectors, NR_VECTORS); 46extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 0e2220bb3142..b66b518ff000 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -1,47 +1,69 @@
1#ifndef _ASM_X86_IRQ_VECTORS_H 1#ifndef _ASM_X86_IRQ_VECTORS_H
2#define _ASM_X86_IRQ_VECTORS_H 2#define _ASM_X86_IRQ_VECTORS_H
3 3
4#include <linux/threads.h> 4/*
5 * Linux IRQ vector layout.
6 *
7 * There are 256 IDT entries (per CPU - each entry is 8 bytes) which can
8 * be defined by Linux. They are used as a jump table by the CPU when a
9 * given vector is triggered - by a CPU-external, CPU-internal or
10 * software-triggered event.
11 *
12 * Linux sets the kernel code address each entry jumps to early during
13 * bootup, and never changes them. This is the general layout of the
14 * IDT entries:
15 *
16 * Vectors 0 ... 31 : system traps and exceptions - hardcoded events
17 * Vectors 32 ... 127 : device interrupts
18 * Vector 128 : legacy int80 syscall interface
19 * Vectors 129 ... 237 : device interrupts
20 * Vectors 238 ... 255 : special interrupts
21 *
22 * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
23 *
24 * This file enumerates the exact layout of them:
25 */
5 26
6#define NMI_VECTOR 0x02 27#define NMI_VECTOR 0x02
7 28
8/* 29/*
9 * IDT vectors usable for external interrupt sources start 30 * IDT vectors usable for external interrupt sources start
10 * at 0x20: 31 * at 0x20:
11 */ 32 */
12#define FIRST_EXTERNAL_VECTOR 0x20 33#define FIRST_EXTERNAL_VECTOR 0x20
13 34
14#ifdef CONFIG_X86_32 35#ifdef CONFIG_X86_32
15# define SYSCALL_VECTOR 0x80 36# define SYSCALL_VECTOR 0x80
16#else 37#else
17# define IA32_SYSCALL_VECTOR 0x80 38# define IA32_SYSCALL_VECTOR 0x80
18#endif 39#endif
19 40
20/* 41/*
21 * Reserve the lowest usable priority level 0x20 - 0x2f for triggering 42 * Reserve the lowest usable priority level 0x20 - 0x2f for triggering
22 * cleanup after irq migration. 43 * cleanup after irq migration.
23 */ 44 */
24#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR 45#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
25 46
26/* 47/*
27 * Vectors 0x30-0x3f are used for ISA interrupts. 48 * Vectors 0x30-0x3f are used for ISA interrupts.
28 */ 49 */
29#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10) 50#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10)
30#define IRQ1_VECTOR (IRQ0_VECTOR + 1) 51
31#define IRQ2_VECTOR (IRQ0_VECTOR + 2) 52#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
32#define IRQ3_VECTOR (IRQ0_VECTOR + 3) 53#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
33#define IRQ4_VECTOR (IRQ0_VECTOR + 4) 54#define IRQ3_VECTOR (IRQ0_VECTOR + 3)
34#define IRQ5_VECTOR (IRQ0_VECTOR + 5) 55#define IRQ4_VECTOR (IRQ0_VECTOR + 4)
35#define IRQ6_VECTOR (IRQ0_VECTOR + 6) 56#define IRQ5_VECTOR (IRQ0_VECTOR + 5)
36#define IRQ7_VECTOR (IRQ0_VECTOR + 7) 57#define IRQ6_VECTOR (IRQ0_VECTOR + 6)
37#define IRQ8_VECTOR (IRQ0_VECTOR + 8) 58#define IRQ7_VECTOR (IRQ0_VECTOR + 7)
38#define IRQ9_VECTOR (IRQ0_VECTOR + 9) 59#define IRQ8_VECTOR (IRQ0_VECTOR + 8)
39#define IRQ10_VECTOR (IRQ0_VECTOR + 10) 60#define IRQ9_VECTOR (IRQ0_VECTOR + 9)
40#define IRQ11_VECTOR (IRQ0_VECTOR + 11) 61#define IRQ10_VECTOR (IRQ0_VECTOR + 10)
41#define IRQ12_VECTOR (IRQ0_VECTOR + 12) 62#define IRQ11_VECTOR (IRQ0_VECTOR + 11)
42#define IRQ13_VECTOR (IRQ0_VECTOR + 13) 63#define IRQ12_VECTOR (IRQ0_VECTOR + 12)
43#define IRQ14_VECTOR (IRQ0_VECTOR + 14) 64#define IRQ13_VECTOR (IRQ0_VECTOR + 13)
44#define IRQ15_VECTOR (IRQ0_VECTOR + 15) 65#define IRQ14_VECTOR (IRQ0_VECTOR + 14)
66#define IRQ15_VECTOR (IRQ0_VECTOR + 15)
45 67
46/* 68/*
47 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff 69 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
@@ -50,43 +72,44 @@
50 * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. 72 * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
51 * TLB, reschedule and local APIC vectors are performance-critical. 73 * TLB, reschedule and local APIC vectors are performance-critical.
52 */ 74 */
53#ifdef CONFIG_X86_32
54 75
55# define SPURIOUS_APIC_VECTOR 0xff 76#define SPURIOUS_APIC_VECTOR 0xff
56# define ERROR_APIC_VECTOR 0xfe 77/*
57# define RESCHEDULE_VECTOR 0xfd 78 * Sanity check
58# define CALL_FUNCTION_VECTOR 0xfc 79 */
59# define CALL_FUNCTION_SINGLE_VECTOR 0xfb 80#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
60# define THERMAL_APIC_VECTOR 0xfa 81# error SPURIOUS_APIC_VECTOR definition error
61/* 0xf8 - 0xf9 : free */ 82#endif
62# define INVALIDATE_TLB_VECTOR_END 0xf7
63# define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
64 83
65# define NUM_INVALIDATE_TLB_VECTORS 8 84#define ERROR_APIC_VECTOR 0xfe
85#define RESCHEDULE_VECTOR 0xfd
86#define CALL_FUNCTION_VECTOR 0xfc
87#define CALL_FUNCTION_SINGLE_VECTOR 0xfb
88#define THERMAL_APIC_VECTOR 0xfa
66 89
90#ifdef CONFIG_X86_32
91/* 0xf8 - 0xf9 : free */
67#else 92#else
68
69# define SPURIOUS_APIC_VECTOR 0xff
70# define ERROR_APIC_VECTOR 0xfe
71# define RESCHEDULE_VECTOR 0xfd
72# define CALL_FUNCTION_VECTOR 0xfc
73# define CALL_FUNCTION_SINGLE_VECTOR 0xfb
74# define THERMAL_APIC_VECTOR 0xfa
75# define THRESHOLD_APIC_VECTOR 0xf9 93# define THRESHOLD_APIC_VECTOR 0xf9
76# define UV_BAU_MESSAGE 0xf8 94# define UV_BAU_MESSAGE 0xf8
77# define INVALIDATE_TLB_VECTOR_END 0xf7
78# define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
79
80#define NUM_INVALIDATE_TLB_VECTORS 8
81
82#endif 95#endif
83 96
97/* f0-f7 used for spreading out TLB flushes: */
98#define INVALIDATE_TLB_VECTOR_END 0xf7
99#define INVALIDATE_TLB_VECTOR_START 0xf0
100#define NUM_INVALIDATE_TLB_VECTORS 8
101
84/* 102/*
85 * Local APIC timer IRQ vector is on a different priority level, 103 * Local APIC timer IRQ vector is on a different priority level,
86 * to work around the 'lost local interrupt if more than 2 IRQ 104 * to work around the 'lost local interrupt if more than 2 IRQ
87 * sources per level' errata. 105 * sources per level' errata.
88 */ 106 */
89#define LOCAL_TIMER_VECTOR 0xef 107#define LOCAL_TIMER_VECTOR 0xef
108
109/*
110 * Performance monitoring interrupt vector:
111 */
112#define LOCAL_PERF_VECTOR 0xee
90 113
91/* 114/*
92 * Performance monitoring interrupt vector: 115 * Performance monitoring interrupt vector:
@@ -98,80 +121,53 @@
98 * start at 0x31(0x41) to spread out vectors evenly between priority 121 * start at 0x31(0x41) to spread out vectors evenly between priority
99 * levels. (0x80 is the syscall vector) 122 * levels. (0x80 is the syscall vector)
100 */ 123 */
101#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2) 124#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
102
103#define NR_VECTORS 256
104
105#define FPU_IRQ 13
106
107#define FIRST_VM86_IRQ 3
108#define LAST_VM86_IRQ 15
109#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
110 125
111#define NR_IRQS_LEGACY 16 126#define NR_VECTORS 256
112 127
113#if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER) 128#define FPU_IRQ 13
114
115#include <asm/apicnum.h> /* need MAX_IO_APICS */
116
117#ifndef CONFIG_SPARSE_IRQ
118# if NR_CPUS < MAX_IO_APICS
119# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
120# else
121# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
122# endif
123#else
124 129
125# define NR_IRQS \ 130#define FIRST_VM86_IRQ 3
126 ((8 * NR_CPUS) > (32 * MAX_IO_APICS) ? \ 131#define LAST_VM86_IRQ 15
127 (NR_VECTORS + (8 * NR_CPUS)) : \
128 (NR_VECTORS + (32 * MAX_IO_APICS))) \
129 132
133#ifndef __ASSEMBLY__
134static inline int invalid_vm86_irq(int irq)
135{
136 return irq < 3 || irq > 15;
137}
130#endif 138#endif
131 139
132#elif defined(CONFIG_X86_VOYAGER) 140/*
133 141 * Size the maximum number of interrupts.
134# define NR_IRQS 224 142 *
143 * If the irq_desc[] array has a sparse layout, we can size things
144 * generously - it scales up linearly with the maximum number of CPUs
145 * or the maximum number of IO-APICs, whichever is higher.
146 *
147 * In other cases we size more conservatively, to not create too large
148 * static arrays.
149 */
135 150
136#else /* IO_APIC || VOYAGER */ 151#define NR_IRQS_LEGACY 16
137 152
138# define NR_IRQS 16 153#define CPU_VECTOR_LIMIT ( 8 * NR_CPUS )
154#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS )
139 155
156#ifdef CONFIG_X86_IO_APIC
157# ifdef CONFIG_SPARSE_IRQ
158# define NR_IRQS \
159 (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \
160 (NR_VECTORS + CPU_VECTOR_LIMIT) : \
161 (NR_VECTORS + IO_APIC_VECTOR_LIMIT))
162# else
163# if NR_CPUS < MAX_IO_APICS
164# define NR_IRQS (NR_VECTORS + 4*CPU_VECTOR_LIMIT)
165# else
166# define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT)
167# endif
168# endif
169#else /* !CONFIG_X86_IO_APIC: */
170# define NR_IRQS NR_IRQS_LEGACY
140#endif 171#endif
141 172
142/* Voyager specific defines */
143/* These define the CPIs we use in linux */
144#define VIC_CPI_LEVEL0 0
145#define VIC_CPI_LEVEL1 1
146/* now the fake CPIs */
147#define VIC_TIMER_CPI 2
148#define VIC_INVALIDATE_CPI 3
149#define VIC_RESCHEDULE_CPI 4
150#define VIC_ENABLE_IRQ_CPI 5
151#define VIC_CALL_FUNCTION_CPI 6
152#define VIC_CALL_FUNCTION_SINGLE_CPI 7
153
154/* Now the QIC CPIs: Since we don't need the two initial levels,
155 * these are 2 less than the VIC CPIs */
156#define QIC_CPI_OFFSET 1
157#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET)
158#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
159#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
160#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
161#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
162#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
163
164#define VIC_START_FAKE_CPI VIC_TIMER_CPI
165#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI
166
167/* this is the SYS_INT CPI. */
168#define VIC_SYS_INT 8
169#define VIC_CMN_INT 15
170
171/* This is the boot CPI for alternate processors. It gets overwritten
172 * by the above once the system has activated all available processors */
173#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0
174#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8)
175
176
177#endif /* _ASM_X86_IRQ_VECTORS_H */ 173#endif /* _ASM_X86_IRQ_VECTORS_H */
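
A worked example of the new NR_IRQS sizing (illustrative; NR_CPUS=64 and MAX_IO_APICS=128 are assumed config values, not taken from the patch):

	/*
	 *   CPU_VECTOR_LIMIT     =  8 * NR_CPUS      =  8 * 64  =  512
	 *   IO_APIC_VECTOR_LIMIT = 32 * MAX_IO_APICS = 32 * 128 = 4096
	 *
	 * With CONFIG_SPARSE_IRQ the larger limit wins:
	 *   NR_IRQS = NR_VECTORS + IO_APIC_VECTOR_LIMIT = 256 + 4096 = 4352
	 *
	 * Without CONFIG_SPARSE_IRQ (and NR_CPUS < MAX_IO_APICS):
	 *   NR_IRQS = NR_VECTORS + 4 * CPU_VECTOR_LIMIT = 256 + 2048 = 2304
	 */
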
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index c61d8b2ab8b9..0ceb6d19ed30 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -9,23 +9,8 @@
9# define PAGES_NR 4 9# define PAGES_NR 4
10#else 10#else
11# define PA_CONTROL_PAGE 0 11# define PA_CONTROL_PAGE 0
12# define VA_CONTROL_PAGE 1 12# define PA_TABLE_PAGE 1
13# define PA_PGD 2 13# define PAGES_NR 2
14# define VA_PGD 3
15# define PA_PUD_0 4
16# define VA_PUD_0 5
17# define PA_PMD_0 6
18# define VA_PMD_0 7
19# define PA_PTE_0 8
20# define VA_PTE_0 9
21# define PA_PUD_1 10
22# define VA_PUD_1 11
23# define PA_PMD_1 12
24# define VA_PMD_1 13
25# define PA_PTE_1 14
26# define VA_PTE_1 15
27# define PA_TABLE_PAGE 16
28# define PAGES_NR 17
29#endif 14#endif
30 15
31#ifdef CONFIG_X86_32 16#ifdef CONFIG_X86_32
@@ -157,9 +142,9 @@ relocate_kernel(unsigned long indirection_page,
157 unsigned long start_address) ATTRIB_NORET; 142 unsigned long start_address) ATTRIB_NORET;
158#endif 143#endif
159 144
160#ifdef CONFIG_X86_32
161#define ARCH_HAS_KIMAGE_ARCH 145#define ARCH_HAS_KIMAGE_ARCH
162 146
147#ifdef CONFIG_X86_32
163struct kimage_arch { 148struct kimage_arch {
164 pgd_t *pgd; 149 pgd_t *pgd;
165#ifdef CONFIG_X86_PAE 150#ifdef CONFIG_X86_PAE
@@ -169,6 +154,12 @@ struct kimage_arch {
169 pte_t *pte0; 154 pte_t *pte0;
170 pte_t *pte1; 155 pte_t *pte1;
171}; 156};
157#else
158struct kimage_arch {
159 pud_t *pud;
160 pmd_t *pmd;
161 pte_t *pte;
162};
172#endif 163#endif
173 164
174#endif /* __ASSEMBLY__ */ 165#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h
deleted file mode 100644
index cc09cbbee27e..000000000000
--- a/arch/x86/include/asm/mach-default/mach_apic.h
+++ /dev/null
@@ -1,168 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_APIC_H
2#define _ASM_X86_MACH_DEFAULT_MACH_APIC_H
3
4#ifdef CONFIG_X86_LOCAL_APIC
5
6#include <mach_apicdef.h>
7#include <asm/smp.h>
8
9#define APIC_DFR_VALUE (APIC_DFR_FLAT)
10
11static inline const struct cpumask *target_cpus(void)
12{
13#ifdef CONFIG_SMP
14 return cpu_online_mask;
15#else
16 return cpumask_of(0);
17#endif
18}
19
20#define NO_BALANCE_IRQ (0)
21#define esr_disable (0)
22
23#ifdef CONFIG_X86_64
24#include <asm/genapic.h>
25#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
26#define INT_DEST_MODE (genapic->int_dest_mode)
27#define TARGET_CPUS (genapic->target_cpus())
28#define apic_id_registered (genapic->apic_id_registered)
29#define init_apic_ldr (genapic->init_apic_ldr)
30#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
31#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
32#define phys_pkg_id (genapic->phys_pkg_id)
33#define vector_allocation_domain (genapic->vector_allocation_domain)
34#define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID)))
35#define send_IPI_self (genapic->send_IPI_self)
36#define wakeup_secondary_cpu (genapic->wakeup_cpu)
37extern void setup_apic_routing(void);
38#else
39#define INT_DELIVERY_MODE dest_LowestPrio
40#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
41#define TARGET_CPUS (target_cpus())
42#define wakeup_secondary_cpu wakeup_secondary_cpu_via_init
43/*
44 * Set up the logical destination ID.
45 *
46 * Intel recommends setting DFR, LDR and TPR before enabling
47 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
48 * document number 292116). So here it goes...
49 */
50static inline void init_apic_ldr(void)
51{
52 unsigned long val;
53
54 apic_write(APIC_DFR, APIC_DFR_VALUE);
55 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
56 val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
57 apic_write(APIC_LDR, val);
58}
59
60static inline int apic_id_registered(void)
61{
62 return physid_isset(read_apic_id(), phys_cpu_present_map);
63}
64
65static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
66{
67 return cpumask_bits(cpumask)[0];
68}
69
70static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
71 const struct cpumask *andmask)
72{
73 unsigned long mask1 = cpumask_bits(cpumask)[0];
74 unsigned long mask2 = cpumask_bits(andmask)[0];
75 unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
76
77 return (unsigned int)(mask1 & mask2 & mask3);
78}
79
80static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
81{
82 return cpuid_apic >> index_msb;
83}
84
85static inline void setup_apic_routing(void)
86{
87#ifdef CONFIG_X86_IO_APIC
88 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
89 "Flat", nr_ioapics);
90#endif
91}
92
93static inline int apicid_to_node(int logical_apicid)
94{
95#ifdef CONFIG_SMP
96 return apicid_2_node[hard_smp_processor_id()];
97#else
98 return 0;
99#endif
100}
101
102static inline void vector_allocation_domain(int cpu, struct cpumask *retmask)
103{
104 /* Careful. Some cpus do not strictly honor the set of cpus
105 * specified in the interrupt destination when using lowest
106 * priority interrupt delivery mode.
107 *
108 * In particular there was a hyperthreading cpu observed to
109 * deliver interrupts to the wrong hyperthread when only one
110 * hyperthread was specified in the interrupt destination.
111 */
112 *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
113}
114#endif
115
116static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
117{
118 return physid_isset(apicid, bitmap);
119}
120
121static inline unsigned long check_apicid_present(int bit)
122{
123 return physid_isset(bit, phys_cpu_present_map);
124}
125
126static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
127{
128 return phys_map;
129}
130
131static inline int multi_timer_check(int apic, int irq)
132{
133 return 0;
134}
135
136/* Mapping from cpu number to logical apicid */
137static inline int cpu_to_logical_apicid(int cpu)
138{
139 return 1 << cpu;
140}
141
142static inline int cpu_present_to_apicid(int mps_cpu)
143{
144 if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
145 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
146 else
147 return BAD_APICID;
148}
149
150static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
151{
152 return physid_mask_of_physid(phys_apicid);
153}
154
155static inline void setup_portio_remap(void)
156{
157}
158
159static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
160{
161 return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
162}
163
164static inline void enable_apic_mode(void)
165{
166}
167#endif /* CONFIG_X86_LOCAL_APIC */
168#endif /* _ASM_X86_MACH_DEFAULT_MACH_APIC_H */
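
A worked example (illustrative, not part of the patch) of the flat-mode mask-to-APIC-ID mapping in the file just deleted: in flat logical mode CPU n owns bit n of the logical destination, so the result is simply the low word of the cpumask:

	/*
	 * CPUs {0, 2} targeted, CPUs {0, 1, 2} online:
	 *
	 *   cpumask bits = 0b0101
	 *   online bits  = 0b0111
	 *   cpu_mask_to_apicid_and() -> 0b0101 & 0b0111 = 0x5
	 *
	 * i.e. the IPI is addressed to logical destination 0x5.
	 */
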
diff --git a/arch/x86/include/asm/mach-default/mach_apicdef.h b/arch/x86/include/asm/mach-default/mach_apicdef.h
deleted file mode 100644
index 53179936d6c6..000000000000
--- a/arch/x86/include/asm/mach-default/mach_apicdef.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H
2#define _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H
3
4#include <asm/apic.h>
5
6#ifdef CONFIG_X86_64
7#define APIC_ID_MASK (genapic->apic_id_mask)
8#define GET_APIC_ID(x) (genapic->get_apic_id(x))
9#define SET_APIC_ID(x) (genapic->set_apic_id(x))
10#else
11#define APIC_ID_MASK (0xF<<24)
12static inline unsigned get_apic_id(unsigned long x)
13{
14 unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
15 if (APIC_XAPIC(ver))
16 return (((x)>>24)&0xFF);
17 else
18 return (((x)>>24)&0xF);
19}
20
21#define GET_APIC_ID(x) get_apic_id(x)
22#endif
23
24#endif /* _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H */
diff --git a/arch/x86/include/asm/mach-default/mach_ipi.h b/arch/x86/include/asm/mach-default/mach_ipi.h
deleted file mode 100644
index 191312d155da..000000000000
--- a/arch/x86/include/asm/mach-default/mach_ipi.h
+++ /dev/null
@@ -1,64 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_IPI_H
2#define _ASM_X86_MACH_DEFAULT_MACH_IPI_H
3
4/* Avoid include hell */
5#define NMI_VECTOR 0x02
6
7void send_IPI_mask_bitmask(const struct cpumask *mask, int vector);
8void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
9void __send_IPI_shortcut(unsigned int shortcut, int vector);
10
11extern int no_broadcast;
12
13#ifdef CONFIG_X86_64
14#include <asm/genapic.h>
15#define send_IPI_mask (genapic->send_IPI_mask)
16#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
17#else
18static inline void send_IPI_mask(const struct cpumask *mask, int vector)
19{
20 send_IPI_mask_bitmask(mask, vector);
21}
22void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
23#endif
24
25static inline void __local_send_IPI_allbutself(int vector)
26{
27 if (no_broadcast || vector == NMI_VECTOR)
28 send_IPI_mask_allbutself(cpu_online_mask, vector);
29 else
30 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
31}
32
33static inline void __local_send_IPI_all(int vector)
34{
35 if (no_broadcast || vector == NMI_VECTOR)
36 send_IPI_mask(cpu_online_mask, vector);
37 else
38 __send_IPI_shortcut(APIC_DEST_ALLINC, vector);
39}
40
41#ifdef CONFIG_X86_64
42#define send_IPI_allbutself (genapic->send_IPI_allbutself)
43#define send_IPI_all (genapic->send_IPI_all)
44#else
45static inline void send_IPI_allbutself(int vector)
46{
47 /*
48 * if there are no other CPUs in the system then we get an APIC send
49 * error if we try to broadcast, thus avoid sending IPIs in this case.
50 */
51 if (!(num_online_cpus() > 1))
52 return;
53
54 __local_send_IPI_allbutself(vector);
55 return;
56}
57
58static inline void send_IPI_all(int vector)
59{
60 __local_send_IPI_all(vector);
61}
62#endif
63
64#endif /* _ASM_X86_MACH_DEFAULT_MACH_IPI_H */
diff --git a/arch/x86/include/asm/mach-default/mach_mpparse.h b/arch/x86/include/asm/mach-default/mach_mpparse.h
deleted file mode 100644
index c70a263d68cd..000000000000
--- a/arch/x86/include/asm/mach-default/mach_mpparse.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
2#define _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
3
4static inline int
5mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
6{
7 return 0;
8}
9
10/* Hook from generic ACPI tables.c */
11static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
12{
13 return 0;
14}
15
16
17#endif /* _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/mach-default/mach_mpspec.h b/arch/x86/include/asm/mach-default/mach_mpspec.h
deleted file mode 100644
index e85ede686be8..000000000000
--- a/arch/x86/include/asm/mach-default/mach_mpspec.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H
2#define _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 256
5
6#if CONFIG_BASE_SMALL == 0
7#define MAX_MP_BUSSES 256
8#else
9#define MAX_MP_BUSSES 32
10#endif
11
12#endif /* _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H */
diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h
deleted file mode 100644
index 89897a6a65b9..000000000000
--- a/arch/x86/include/asm/mach-default/mach_wakecpu.h
+++ /dev/null
@@ -1,41 +0,0 @@
1#ifndef _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
2#define _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
3
4#define TRAMPOLINE_PHYS_LOW (0x467)
5#define TRAMPOLINE_PHYS_HIGH (0x469)
6
7static inline void wait_for_init_deassert(atomic_t *deassert)
8{
9 while (!atomic_read(deassert))
10 cpu_relax();
11 return;
12}
13
14/* Nothing to do for most platforms, since cleared by the INIT cycle */
15static inline void smp_callin_clear_local_apic(void)
16{
17}
18
19static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
20{
21}
22
23static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
24{
25}
26
27#ifdef CONFIG_SMP
28extern void __inquire_remote_apic(int apicid);
29#else /* CONFIG_SMP */
30static inline void __inquire_remote_apic(int apicid)
31{
32}
33#endif /* CONFIG_SMP */
34
35static inline void inquire_remote_apic(int apicid)
36{
37 if (apic_verbosity >= APIC_DEBUG)
38 __inquire_remote_apic(apicid);
39}
40
41#endif /* _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H */
diff --git a/arch/x86/include/asm/mach-generic/gpio.h b/arch/x86/include/asm/mach-generic/gpio.h
deleted file mode 100644
index 995c45efdb33..000000000000
--- a/arch/x86/include/asm/mach-generic/gpio.h
+++ /dev/null
@@ -1,15 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_GPIO_H
2#define _ASM_X86_MACH_GENERIC_GPIO_H
3
4int gpio_request(unsigned gpio, const char *label);
5void gpio_free(unsigned gpio);
6int gpio_direction_input(unsigned gpio);
7int gpio_direction_output(unsigned gpio, int value);
8int gpio_get_value(unsigned gpio);
9void gpio_set_value(unsigned gpio, int value);
10int gpio_to_irq(unsigned gpio);
11int irq_to_gpio(unsigned irq);
12
13#include <asm-generic/gpio.h> /* cansleep wrappers */
14
15#endif /* _ASM_X86_MACH_GENERIC_GPIO_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h
deleted file mode 100644
index 48553e958ad5..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_apic.h
+++ /dev/null
@@ -1,35 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_APIC_H
2#define _ASM_X86_MACH_GENERIC_MACH_APIC_H
3
4#include <asm/genapic.h>
5
6#define esr_disable (genapic->ESR_DISABLE)
7#define NO_BALANCE_IRQ (genapic->no_balance_irq)
8#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
9#define INT_DEST_MODE (genapic->int_dest_mode)
10#undef APIC_DEST_LOGICAL
11#define APIC_DEST_LOGICAL (genapic->apic_destination_logical)
12#define TARGET_CPUS (genapic->target_cpus())
13#define apic_id_registered (genapic->apic_id_registered)
14#define init_apic_ldr (genapic->init_apic_ldr)
15#define ioapic_phys_id_map (genapic->ioapic_phys_id_map)
16#define setup_apic_routing (genapic->setup_apic_routing)
17#define multi_timer_check (genapic->multi_timer_check)
18#define apicid_to_node (genapic->apicid_to_node)
19#define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid)
20#define cpu_present_to_apicid (genapic->cpu_present_to_apicid)
21#define apicid_to_cpu_present (genapic->apicid_to_cpu_present)
22#define setup_portio_remap (genapic->setup_portio_remap)
23#define check_apicid_present (genapic->check_apicid_present)
24#define check_phys_apicid_present (genapic->check_phys_apicid_present)
25#define check_apicid_used (genapic->check_apicid_used)
26#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
27#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
28#define vector_allocation_domain (genapic->vector_allocation_domain)
29#define enable_apic_mode (genapic->enable_apic_mode)
30#define phys_pkg_id (genapic->phys_pkg_id)
31#define wakeup_secondary_cpu (genapic->wakeup_cpu)
32
33extern void generic_bigsmp_probe(void);
34
35#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_apicdef.h b/arch/x86/include/asm/mach-generic/mach_apicdef.h
deleted file mode 100644
index 68041f3802f4..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_apicdef.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_APICDEF_H
2#define _ASM_X86_MACH_GENERIC_MACH_APICDEF_H
3
4#ifndef APIC_DEFINITION
5#include <asm/genapic.h>
6
7#define GET_APIC_ID (genapic->get_apic_id)
8#define APIC_ID_MASK (genapic->apic_id_mask)
9#endif
10
11#endif /* _ASM_X86_MACH_GENERIC_MACH_APICDEF_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_ipi.h b/arch/x86/include/asm/mach-generic/mach_ipi.h
deleted file mode 100644
index ffd637e3c3d9..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_ipi.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_IPI_H
2#define _ASM_X86_MACH_GENERIC_MACH_IPI_H
3
4#include <asm/genapic.h>
5
6#define send_IPI_mask (genapic->send_IPI_mask)
7#define send_IPI_allbutself (genapic->send_IPI_allbutself)
8#define send_IPI_all (genapic->send_IPI_all)
9
10#endif /* _ASM_X86_MACH_GENERIC_MACH_IPI_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_mpparse.h b/arch/x86/include/asm/mach-generic/mach_mpparse.h
deleted file mode 100644
index 9444ab8dca94..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_mpparse.h
+++ /dev/null
@@ -1,9 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H
2#define _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H
3
4
5extern int mps_oem_check(struct mpc_table *, char *, char *);
6
7extern int acpi_madt_oem_check(char *, char *);
8
9#endif /* _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_mpspec.h b/arch/x86/include/asm/mach-generic/mach_mpspec.h
deleted file mode 100644
index 3bc407226578..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_mpspec.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H
2#define _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 256
5
6/* Summit or generic (i.e. installer) kernels need lots of bus entries. */
7/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */
8#define MAX_MP_BUSSES 260
9
10extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
11
12#endif /* _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_wakecpu.h b/arch/x86/include/asm/mach-generic/mach_wakecpu.h
deleted file mode 100644
index 1ab16b168c8a..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_wakecpu.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
2#define _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
3
4#define TRAMPOLINE_PHYS_LOW (genapic->trampoline_phys_low)
5#define TRAMPOLINE_PHYS_HIGH (genapic->trampoline_phys_high)
6#define wait_for_init_deassert (genapic->wait_for_init_deassert)
7#define smp_callin_clear_local_apic (genapic->smp_callin_clear_local_apic)
8#define store_NMI_vector (genapic->store_NMI_vector)
9#define restore_NMI_vector (genapic->restore_NMI_vector)
10#define inquire_remote_apic (genapic->inquire_remote_apic)
11
12#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */
diff --git a/arch/x86/include/asm/mach-rdc321x/gpio.h b/arch/x86/include/asm/mach-rdc321x/gpio.h
deleted file mode 100644
index c210ab5788b0..000000000000
--- a/arch/x86/include/asm/mach-rdc321x/gpio.h
+++ /dev/null
@@ -1,60 +0,0 @@
1#ifndef _ASM_X86_MACH_RDC321X_GPIO_H
2#define _ASM_X86_MACH_RDC321X_GPIO_H
3
4#include <linux/kernel.h>
5
6extern int rdc_gpio_get_value(unsigned gpio);
7extern void rdc_gpio_set_value(unsigned gpio, int value);
8extern int rdc_gpio_direction_input(unsigned gpio);
9extern int rdc_gpio_direction_output(unsigned gpio, int value);
10extern int rdc_gpio_request(unsigned gpio, const char *label);
11extern void rdc_gpio_free(unsigned gpio);
12extern void __init rdc321x_gpio_setup(void);
13
14/* Wrappers for the arch-neutral GPIO API */
15
16static inline int gpio_request(unsigned gpio, const char *label)
17{
18 return rdc_gpio_request(gpio, label);
19}
20
21static inline void gpio_free(unsigned gpio)
22{
23 might_sleep();
24 rdc_gpio_free(gpio);
25}
26
27static inline int gpio_direction_input(unsigned gpio)
28{
29 return rdc_gpio_direction_input(gpio);
30}
31
32static inline int gpio_direction_output(unsigned gpio, int value)
33{
34 return rdc_gpio_direction_output(gpio, value);
35}
36
37static inline int gpio_get_value(unsigned gpio)
38{
39 return rdc_gpio_get_value(gpio);
40}
41
42static inline void gpio_set_value(unsigned gpio, int value)
43{
44 rdc_gpio_set_value(gpio, value);
45}
46
47static inline int gpio_to_irq(unsigned gpio)
48{
49 return gpio;
50}
51
52static inline int irq_to_gpio(unsigned irq)
53{
54 return irq;
55}
56
57/* For cansleep */
58#include <asm-generic/gpio.h>
59
60#endif /* _ASM_X86_MACH_RDC321X_GPIO_H */
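
For orientation (not part of the patch), a minimal sketch of the arch-neutral GPIO calls these RDC321x wrappers implement; GPIO number 7 and the label are made-up values:

	#include <asm/gpio.h>

	static int example_led_on(void)
	{
		int err = gpio_request(7, "example-led");
		if (err)
			return err;

		err = gpio_direction_output(7, 0);	/* claim as output, start low */
		if (!err)
			gpio_set_value(7, 1);		/* drive the line high */

		gpio_free(7);
		return err;
	}
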
diff --git a/arch/x86/include/asm/mach-default/mach_timer.h b/arch/x86/include/asm/mach_timer.h
index 853728519ae9..853728519ae9 100644
--- a/arch/x86/include/asm/mach-default/mach_timer.h
+++ b/arch/x86/include/asm/mach_timer.h
diff --git a/arch/x86/include/asm/mach-default/mach_traps.h b/arch/x86/include/asm/mach_traps.h
index f7920601e472..f7920601e472 100644
--- a/arch/x86/include/asm/mach-default/mach_traps.h
+++ b/arch/x86/include/asm/mach_traps.h
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 52948df9cd1d..f923203dc39a 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -79,7 +79,7 @@ do { \
79#ifdef CONFIG_X86_32 79#ifdef CONFIG_X86_32
80#define deactivate_mm(tsk, mm) \ 80#define deactivate_mm(tsk, mm) \
81do { \ 81do { \
82 loadsegment(gs, 0); \ 82 lazy_load_gs(0); \
83} while (0) 83} while (0)
84#else 84#else
85#define deactivate_mm(tsk, mm) \ 85#define deactivate_mm(tsk, mm) \
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index bd22f2a3713f..5916c8df09d9 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -9,7 +9,18 @@ extern int apic_version[MAX_APICS];
9extern int pic_mode; 9extern int pic_mode;
10 10
11#ifdef CONFIG_X86_32 11#ifdef CONFIG_X86_32
12#include <mach_mpspec.h> 12
13/*
14 * Summit or generic (i.e. installer) kernels need lots of bus entries.
15 * Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets.
16 */
17#if CONFIG_BASE_SMALL == 0
18# define MAX_MP_BUSSES 260
19#else
20# define MAX_MP_BUSSES 32
21#endif
22
23#define MAX_IRQ_SOURCES 256
13 24
14extern unsigned int def_to_bigsmp; 25extern unsigned int def_to_bigsmp;
15extern u8 apicid_2_node[]; 26extern u8 apicid_2_node[];
@@ -20,15 +31,15 @@ extern int mp_bus_id_to_local[MAX_MP_BUSSES];
20extern int quad_local_to_mp_bus_id [NR_CPUS/4][4]; 31extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
21#endif 32#endif
22 33
23#define MAX_APICID 256 34#define MAX_APICID 256
24 35
25#else 36#else /* CONFIG_X86_64: */
26 37
27#define MAX_MP_BUSSES 256 38#define MAX_MP_BUSSES 256
28/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */ 39/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */
29#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4) 40#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
30 41
31#endif 42#endif /* CONFIG_X86_64 */
32 43
33extern void early_find_smp_config(void); 44extern void early_find_smp_config(void);
34extern void early_get_smp_config(void); 45extern void early_get_smp_config(void);
@@ -45,11 +56,13 @@ extern int smp_found_config;
45extern int mpc_default_type; 56extern int mpc_default_type;
46extern unsigned long mp_lapic_addr; 57extern unsigned long mp_lapic_addr;
47 58
48extern void find_smp_config(void);
49extern void get_smp_config(void); 59extern void get_smp_config(void);
60
50#ifdef CONFIG_X86_MPPARSE 61#ifdef CONFIG_X86_MPPARSE
62extern void find_smp_config(void);
51extern void early_reserve_e820_mpc_new(void); 63extern void early_reserve_e820_mpc_new(void);
52#else 64#else
65static inline void find_smp_config(void) { }
53static inline void early_reserve_e820_mpc_new(void) { } 66static inline void early_reserve_e820_mpc_new(void) { }
54#endif 67#endif
55 68
@@ -64,6 +77,8 @@ extern int acpi_probe_gsi(void);
64#ifdef CONFIG_X86_IO_APIC 77#ifdef CONFIG_X86_IO_APIC
65extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin, 78extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
66 u32 gsi, int triggering, int polarity); 79 u32 gsi, int triggering, int polarity);
80extern int mp_find_ioapic(int gsi);
81extern int mp_find_ioapic_pin(int ioapic, int gsi);
67#else 82#else
68static inline int 83static inline int
69mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin, 84mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
@@ -148,4 +163,10 @@ static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map)
148 163
149extern physid_mask_t phys_cpu_present_map; 164extern physid_mask_t phys_cpu_present_map;
150 165
166extern int generic_mps_oem_check(struct mpc_table *, char *, char *);
167
168extern int default_acpi_madt_oem_check(char *, char *);
169
170extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
171
151#endif /* _ASM_X86_MPSPEC_H */ 172#endif /* _ASM_X86_MPSPEC_H */
diff --git a/arch/x86/include/asm/numaq.h b/arch/x86/include/asm/numaq.h
index 1e8bd30b4c16..9f0a5f5d29ec 100644
--- a/arch/x86/include/asm/numaq.h
+++ b/arch/x86/include/asm/numaq.h
@@ -31,6 +31,8 @@
31extern int found_numaq; 31extern int found_numaq;
32extern int get_memcfg_numaq(void); 32extern int get_memcfg_numaq(void);
33 33
34extern void *xquad_portio;
35
34/* 36/*
35 * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the 37 * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the
36 */ 38 */
diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h
deleted file mode 100644
index bf37bc49bd8e..000000000000
--- a/arch/x86/include/asm/numaq/apic.h
+++ /dev/null
@@ -1,142 +0,0 @@
1#ifndef __ASM_NUMAQ_APIC_H
2#define __ASM_NUMAQ_APIC_H
3
4#include <asm/io.h>
5#include <linux/mmzone.h>
6#include <linux/nodemask.h>
7
8#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
9
10static inline const cpumask_t *target_cpus(void)
11{
12 return &CPU_MASK_ALL;
13}
14
15#define NO_BALANCE_IRQ (1)
16#define esr_disable (1)
17
18#define INT_DELIVERY_MODE dest_LowestPrio
19#define INT_DEST_MODE 0 /* physical delivery on LOCAL quad */
20
21static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
22{
23 return physid_isset(apicid, bitmap);
24}
25static inline unsigned long check_apicid_present(int bit)
26{
27 return physid_isset(bit, phys_cpu_present_map);
28}
29#define apicid_cluster(apicid) (apicid & 0xF0)
30
31static inline int apic_id_registered(void)
32{
33 return 1;
34}
35
36static inline void init_apic_ldr(void)
37{
38 /* Already done in NUMA-Q firmware */
39}
40
41static inline void setup_apic_routing(void)
42{
43 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
44 "NUMA-Q", nr_ioapics);
45}
46
47/*
48 * Skip adding the timer int on secondary nodes, which causes
49 * a small but painful rift in the time-space continuum.
50 */
51static inline int multi_timer_check(int apic, int irq)
52{
53 return apic != 0 && irq == 0;
54}
55
56static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
57{
58 /* We don't have a good way to do this yet - hack */
59 return physids_promote(0xFUL);
60}
61
62/* Mapping from cpu number to logical apicid */
63extern u8 cpu_2_logical_apicid[];
64static inline int cpu_to_logical_apicid(int cpu)
65{
66 if (cpu >= nr_cpu_ids)
67 return BAD_APICID;
68 return (int)cpu_2_logical_apicid[cpu];
69}
70
71/*
72 * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
73 * cpu to APIC ID relation to properly interact with the intelligent
74 * mode of the cluster controller.
75 */
76static inline int cpu_present_to_apicid(int mps_cpu)
77{
78 if (mps_cpu < 60)
79 return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
80 else
81 return BAD_APICID;
82}
83
84static inline int apicid_to_node(int logical_apicid)
85{
86 return logical_apicid >> 4;
87}
88
89static inline physid_mask_t apicid_to_cpu_present(int logical_apicid)
90{
91 int node = apicid_to_node(logical_apicid);
92 int cpu = __ffs(logical_apicid & 0xf);
93
94 return physid_mask_of_physid(cpu + 4*node);
95}
96
97extern void *xquad_portio;
98
99static inline void setup_portio_remap(void)
100{
101 int num_quads = num_online_nodes();
102
103 if (num_quads <= 1)
104 return;
105
106 printk("Remapping cross-quad port I/O for %d quads\n", num_quads);
107 xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD);
108 printk("xquad_portio vaddr 0x%08lx, len %08lx\n",
109 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
110}
111
112static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
113{
114 return (1);
115}
116
117static inline void enable_apic_mode(void)
118{
119}
120
121/*
122 * We use physical apicids here, not logical, so just return the default
123 * physical broadcast to stop people from breaking us
124 */
125static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
126{
127 return (int) 0xF;
128}
129
130static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
131 const struct cpumask *andmask)
132{
133 return (int) 0xF;
134}
135
136/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
137static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
138{
139 return cpuid_apic >> index_msb;
140}
141
142#endif /* __ASM_NUMAQ_APIC_H */
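
A worked example (illustrative) of the NUMA-Q APIC ID packing in the file just deleted: cpu_present_to_apicid() puts the quad number in the high nibble and a one-hot CPU bit in the low nibble:

	/*
	 * mps_cpu = 5:
	 *   quad    = 5 >> 2            = 1
	 *   cpu bit = 1 << (5 & 0x3)    = 0b0010
	 *   apicid  = (1 << 4) | 0b0010 = 0x12
	 *
	 * apicid_to_node(0x12) recovers the quad: 0x12 >> 4 = 1.
	 */
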
diff --git a/arch/x86/include/asm/numaq/apicdef.h b/arch/x86/include/asm/numaq/apicdef.h
deleted file mode 100644
index e012a46cc22a..000000000000
--- a/arch/x86/include/asm/numaq/apicdef.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef __ASM_NUMAQ_APICDEF_H
2#define __ASM_NUMAQ_APICDEF_H
3
4
5#define APIC_ID_MASK (0xF<<24)
6
7static inline unsigned get_apic_id(unsigned long x)
8{
9 return (((x)>>24)&0x0F);
10}
11
12#define GET_APIC_ID(x) get_apic_id(x)
13
14#endif
diff --git a/arch/x86/include/asm/numaq/ipi.h b/arch/x86/include/asm/numaq/ipi.h
deleted file mode 100644
index a8374c652778..000000000000
--- a/arch/x86/include/asm/numaq/ipi.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef __ASM_NUMAQ_IPI_H
2#define __ASM_NUMAQ_IPI_H
3
4void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
5void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
6
7static inline void send_IPI_mask(const struct cpumask *mask, int vector)
8{
9 send_IPI_mask_sequence(mask, vector);
10}
11
12static inline void send_IPI_allbutself(int vector)
13{
14 send_IPI_mask_allbutself(cpu_online_mask, vector);
15}
16
17static inline void send_IPI_all(int vector)
18{
19 send_IPI_mask(cpu_online_mask, vector);
20}
21
22#endif /* __ASM_NUMAQ_IPI_H */
diff --git a/arch/x86/include/asm/numaq/mpparse.h b/arch/x86/include/asm/numaq/mpparse.h
deleted file mode 100644
index a2eeefcd1cc7..000000000000
--- a/arch/x86/include/asm/numaq/mpparse.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_NUMAQ_MPPARSE_H
2#define __ASM_NUMAQ_MPPARSE_H
3
4extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
5
6#endif /* __ASM_NUMAQ_MPPARSE_H */
diff --git a/arch/x86/include/asm/numaq/wakecpu.h b/arch/x86/include/asm/numaq/wakecpu.h
deleted file mode 100644
index 6f499df8eddb..000000000000
--- a/arch/x86/include/asm/numaq/wakecpu.h
+++ /dev/null
@@ -1,45 +0,0 @@
1#ifndef __ASM_NUMAQ_WAKECPU_H
2#define __ASM_NUMAQ_WAKECPU_H
3
4/* This file copes with machines that wake up secondary CPUs by NMIs */
5
6#define TRAMPOLINE_PHYS_LOW (0x8)
7#define TRAMPOLINE_PHYS_HIGH (0xa)
8
9/* We don't do anything here because we use NMIs to boot instead */
10static inline void wait_for_init_deassert(atomic_t *deassert)
11{
12}
13
14/*
15 * Because we use NMIs rather than the INIT-STARTUP sequence to
16 * bootstrap the CPUs, the APIC may be in a weird state. Kick it.
17 */
18static inline void smp_callin_clear_local_apic(void)
19{
20 clear_local_APIC();
21}
22
23static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
24{
25 printk("Storing NMI vector\n");
26 *high =
27 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH));
28 *low =
29 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW));
30}
31
32static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
33{
34 printk("Restoring NMI vector\n");
35 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
36 *high;
37 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
38 *low;
39}
40
41static inline void inquire_remote_apic(int apicid)
42{
43}
44
45#endif /* __ASM_NUMAQ_WAKECPU_H */
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index e9873a2e8695..05f2da7f387a 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -57,7 +57,6 @@ typedef struct { pgdval_t pgd; } pgd_t;
57typedef struct { pgprotval_t pgprot; } pgprot_t; 57typedef struct { pgprotval_t pgprot; } pgprot_t;
58 58
59extern int page_is_ram(unsigned long pagenr); 59extern int page_is_ram(unsigned long pagenr);
60extern int pagerange_is_ram(unsigned long start, unsigned long end);
61extern int devmem_is_allowed(unsigned long pagenr); 60extern int devmem_is_allowed(unsigned long pagenr);
62extern void map_devmem(unsigned long pfn, unsigned long size, 61extern void map_devmem(unsigned long pfn, unsigned long size,
63 pgprot_t vma_prot); 62 pgprot_t vma_prot);
@@ -95,6 +94,11 @@ static inline pgdval_t native_pgd_val(pgd_t pgd)
95 return pgd.pgd; 94 return pgd.pgd;
96} 95}
97 96
97static inline pgdval_t pgd_flags(pgd_t pgd)
98{
99 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
100}
101
98#if PAGETABLE_LEVELS >= 3 102#if PAGETABLE_LEVELS >= 3
99#if PAGETABLE_LEVELS == 4 103#if PAGETABLE_LEVELS == 4
100typedef struct { pudval_t pud; } pud_t; 104typedef struct { pudval_t pud; } pud_t;
@@ -117,6 +121,11 @@ static inline pudval_t native_pud_val(pud_t pud)
117} 121}
118#endif /* PAGETABLE_LEVELS == 4 */ 122#endif /* PAGETABLE_LEVELS == 4 */
119 123
124static inline pudval_t pud_flags(pud_t pud)
125{
126 return native_pud_val(pud) & PTE_FLAGS_MASK;
127}
128
120typedef struct { pmdval_t pmd; } pmd_t; 129typedef struct { pmdval_t pmd; } pmd_t;
121 130
122static inline pmd_t native_make_pmd(pmdval_t val) 131static inline pmd_t native_make_pmd(pmdval_t val)
@@ -128,6 +137,7 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
128{ 137{
129 return pmd.pmd; 138 return pmd.pmd;
130} 139}
140
131#else /* PAGETABLE_LEVELS == 2 */ 141#else /* PAGETABLE_LEVELS == 2 */
132#include <asm-generic/pgtable-nopmd.h> 142#include <asm-generic/pgtable-nopmd.h>
133 143
@@ -137,6 +147,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
137} 147}
138#endif /* PAGETABLE_LEVELS >= 3 */ 148#endif /* PAGETABLE_LEVELS >= 3 */
139 149
150static inline pmdval_t pmd_flags(pmd_t pmd)
151{
152 return native_pmd_val(pmd) & PTE_FLAGS_MASK;
153}
154
140static inline pte_t native_make_pte(pteval_t val) 155static inline pte_t native_make_pte(pteval_t val)
141{ 156{
142 return (pte_t) { .pte = val }; 157 return (pte_t) { .pte = val };
@@ -147,7 +162,7 @@ static inline pteval_t native_pte_val(pte_t pte)
147 return pte.pte; 162 return pte.pte;
148} 163}
149 164
150static inline pteval_t native_pte_flags(pte_t pte) 165static inline pteval_t pte_flags(pte_t pte)
151{ 166{
152 return native_pte_val(pte) & PTE_FLAGS_MASK; 167 return native_pte_val(pte) & PTE_FLAGS_MASK;
153} 168}
@@ -173,7 +188,6 @@ static inline pteval_t native_pte_flags(pte_t pte)
173#endif 188#endif
174 189
175#define pte_val(x) native_pte_val(x) 190#define pte_val(x) native_pte_val(x)
176#define pte_flags(x) native_pte_flags(x)
177#define __pte(x) native_make_pte(x) 191#define __pte(x) native_make_pte(x)
178 192
179#endif /* CONFIG_PARAVIRT */ 193#endif /* CONFIG_PARAVIRT */
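
The page.h hunk above settles on one flag accessor per page-table level -- pgd_flags(), pud_flags(), pmd_flags(), and the renamed pte_flags() -- and all four are the same one-liner: mask the raw entry with PTE_FLAGS_MASK so callers never touch the PFN bits. A minimal standalone sketch of that pattern follows; the mask layout below is an assumption for the demo (roughly x86-64 with 4K pages), not the kernel's definition.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t pteval_t;

#define MOCK_PAGE_SHIFT		12
/* bits 12..51 hold the PFN; everything else is a flag bit (assumed) */
#define MOCK_PTE_PFN_MASK	((((pteval_t)1 << 52) - 1) & ~(((pteval_t)1 << MOCK_PAGE_SHIFT) - 1))
#define MOCK_PTE_FLAGS_MASK	(~MOCK_PTE_PFN_MASK)

static pteval_t mock_pte_flags(pteval_t pte)
{
	return pte & MOCK_PTE_FLAGS_MASK;	/* same shape as pte_flags() above */
}

int main(void)
{
	pteval_t pte = ((pteval_t)0x1234 << MOCK_PAGE_SHIFT) | 0x67;

	printf("pfn bits: %#llx\n", (unsigned long long)(pte & MOCK_PTE_PFN_MASK));
	printf("flags:    %#llx\n", (unsigned long long)mock_pte_flags(pte));
	return 0;
}
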
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index ccd59f00fd5c..b788dfd20483 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -12,21 +12,38 @@
12#define CLBR_EAX (1 << 0) 12#define CLBR_EAX (1 << 0)
13#define CLBR_ECX (1 << 1) 13#define CLBR_ECX (1 << 1)
14#define CLBR_EDX (1 << 2) 14#define CLBR_EDX (1 << 2)
15#define CLBR_EDI (1 << 3)
15 16
16#ifdef CONFIG_X86_64 17#ifdef CONFIG_X86_32
17#define CLBR_RSI (1 << 3) 18/* CLBR_ANY should match all the regs the platform has. For i386, that's all of them */
18#define CLBR_RDI (1 << 4) 19#define CLBR_ANY ((1 << 4) - 1)
20
21#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
22#define CLBR_RET_REG (CLBR_EAX | CLBR_EDX)
23#define CLBR_SCRATCH (0)
24#else
25#define CLBR_RAX CLBR_EAX
26#define CLBR_RCX CLBR_ECX
27#define CLBR_RDX CLBR_EDX
28#define CLBR_RDI CLBR_EDI
29#define CLBR_RSI (1 << 4)
19#define CLBR_R8 (1 << 5) 30#define CLBR_R8 (1 << 5)
20#define CLBR_R9 (1 << 6) 31#define CLBR_R9 (1 << 6)
21#define CLBR_R10 (1 << 7) 32#define CLBR_R10 (1 << 7)
22#define CLBR_R11 (1 << 8) 33#define CLBR_R11 (1 << 8)
34
23#define CLBR_ANY ((1 << 9) - 1) 35#define CLBR_ANY ((1 << 9) - 1)
36
37#define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
38 CLBR_RCX | CLBR_R8 | CLBR_R9)
39#define CLBR_RET_REG (CLBR_RAX)
40#define CLBR_SCRATCH (CLBR_R10 | CLBR_R11)
41
24#include <asm/desc_defs.h> 42#include <asm/desc_defs.h>
25#else
26/* CLBR_ANY should match all regs platform has. For i386, that's just it */
27#define CLBR_ANY ((1 << 3) - 1)
28#endif /* X86_64 */ 43#endif /* X86_64 */
29 44
45#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
46
30#ifndef __ASSEMBLY__ 47#ifndef __ASSEMBLY__
31#include <linux/types.h> 48#include <linux/types.h>
32#include <linux/cpumask.h> 49#include <linux/cpumask.h>
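
As a sanity check on the clobber-mask arithmetic above: on 64-bit, CLBR_CALLEE_SAVE works out to 0x1fe -- the six argument registers plus the r10/r11 scratch pair, with the return register excluded by construction. A standalone sketch using the definitions from this hunk:

#include <stdio.h>

#define CLBR_EAX (1 << 0)
#define CLBR_ECX (1 << 1)
#define CLBR_EDX (1 << 2)
#define CLBR_EDI (1 << 3)
#define CLBR_RAX CLBR_EAX
#define CLBR_RCX CLBR_ECX
#define CLBR_RDX CLBR_EDX
#define CLBR_RDI CLBR_EDI
#define CLBR_RSI (1 << 4)
#define CLBR_R8  (1 << 5)
#define CLBR_R9  (1 << 6)
#define CLBR_R10 (1 << 7)
#define CLBR_R11 (1 << 8)

#define CLBR_ARG_REGS	(CLBR_RDI | CLBR_RSI | CLBR_RDX | CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG	(CLBR_RAX)
#define CLBR_SCRATCH	(CLBR_R10 | CLBR_R11)
#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)

int main(void)
{
	/* prints 0x1fe: rdi/rsi/rdx/rcx/r8/r9 plus r10/r11, rax excluded */
	printf("CLBR_CALLEE_SAVE = %#x\n", CLBR_CALLEE_SAVE);
	return 0;
}
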
@@ -40,6 +57,14 @@ struct tss_struct;
40struct mm_struct; 57struct mm_struct;
41struct desc_struct; 58struct desc_struct;
42 59
60/*
61 * Wrapper type for pointers to code which uses the non-standard
62 * calling convention. See PV_CALLEE_SAVE_REGS_THUNK below.
63 */
64struct paravirt_callee_save {
65 void *func;
66};
67
43/* general info */ 68/* general info */
44struct pv_info { 69struct pv_info {
45 unsigned int kernel_rpl; 70 unsigned int kernel_rpl;
@@ -189,11 +214,15 @@ struct pv_irq_ops {
189 * expected to use X86_EFLAGS_IF; all other bits 214 * expected to use X86_EFLAGS_IF; all other bits
190 * returned from save_fl are undefined, and may be ignored by 215 * returned from save_fl are undefined, and may be ignored by
191 * restore_fl. 216 * restore_fl.
217 *
218 * NOTE: callers of these functions expect the callee to preserve
219 * more registers than the standard C calling convention requires.
192 */ 220 */
193 unsigned long (*save_fl)(void); 221 struct paravirt_callee_save save_fl;
194 void (*restore_fl)(unsigned long); 222 struct paravirt_callee_save restore_fl;
195 void (*irq_disable)(void); 223 struct paravirt_callee_save irq_disable;
196 void (*irq_enable)(void); 224 struct paravirt_callee_save irq_enable;
225
197 void (*safe_halt)(void); 226 void (*safe_halt)(void);
198 void (*halt)(void); 227 void (*halt)(void);
199 228
@@ -279,12 +308,11 @@ struct pv_mmu_ops {
279 void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr, 308 void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
280 pte_t *ptep, pte_t pte); 309 pte_t *ptep, pte_t pte);
281 310
282 pteval_t (*pte_val)(pte_t); 311 struct paravirt_callee_save pte_val;
283 pteval_t (*pte_flags)(pte_t); 312 struct paravirt_callee_save make_pte;
284 pte_t (*make_pte)(pteval_t pte);
285 313
286 pgdval_t (*pgd_val)(pgd_t); 314 struct paravirt_callee_save pgd_val;
287 pgd_t (*make_pgd)(pgdval_t pgd); 315 struct paravirt_callee_save make_pgd;
288 316
289#if PAGETABLE_LEVELS >= 3 317#if PAGETABLE_LEVELS >= 3
290#ifdef CONFIG_X86_PAE 318#ifdef CONFIG_X86_PAE
@@ -299,12 +327,12 @@ struct pv_mmu_ops {
299 327
300 void (*set_pud)(pud_t *pudp, pud_t pudval); 328 void (*set_pud)(pud_t *pudp, pud_t pudval);
301 329
302 pmdval_t (*pmd_val)(pmd_t); 330 struct paravirt_callee_save pmd_val;
303 pmd_t (*make_pmd)(pmdval_t pmd); 331 struct paravirt_callee_save make_pmd;
304 332
305#if PAGETABLE_LEVELS == 4 333#if PAGETABLE_LEVELS == 4
306 pudval_t (*pud_val)(pud_t); 334 struct paravirt_callee_save pud_val;
307 pud_t (*make_pud)(pudval_t pud); 335 struct paravirt_callee_save make_pud;
308 336
309 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval); 337 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
310#endif /* PAGETABLE_LEVELS == 4 */ 338#endif /* PAGETABLE_LEVELS == 4 */
@@ -389,6 +417,8 @@ extern struct pv_lock_ops pv_lock_ops;
389 asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":") 417 asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
390 418
391unsigned paravirt_patch_nop(void); 419unsigned paravirt_patch_nop(void);
420unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
421unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
392unsigned paravirt_patch_ignore(unsigned len); 422unsigned paravirt_patch_ignore(unsigned len);
393unsigned paravirt_patch_call(void *insnbuf, 423unsigned paravirt_patch_call(void *insnbuf,
394 const void *target, u16 tgt_clobbers, 424 const void *target, u16 tgt_clobbers,
@@ -480,25 +510,45 @@ int paravirt_disable_iospace(void);
480 * makes sure the incoming and outgoing types are always correct. 510 * makes sure the incoming and outgoing types are always correct.
481 */ 511 */
482#ifdef CONFIG_X86_32 512#ifdef CONFIG_X86_32
483#define PVOP_VCALL_ARGS unsigned long __eax, __edx, __ecx 513#define PVOP_VCALL_ARGS \
514 unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
484#define PVOP_CALL_ARGS PVOP_VCALL_ARGS 515#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
516
517#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
518#define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x))
519#define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x))
520
485#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \ 521#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \
486 "=c" (__ecx) 522 "=c" (__ecx)
487#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS 523#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS
524
525#define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx)
526#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
527
488#define EXTRA_CLOBBERS 528#define EXTRA_CLOBBERS
489#define VEXTRA_CLOBBERS 529#define VEXTRA_CLOBBERS
490#else 530#else /* CONFIG_X86_64 */
491#define PVOP_VCALL_ARGS unsigned long __edi, __esi, __edx, __ecx 531#define PVOP_VCALL_ARGS \
532 unsigned long __edi = __edi, __esi = __esi, \
533 __edx = __edx, __ecx = __ecx
492#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax 534#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
535
536#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
537#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
538#define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x))
539#define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x))
540
493#define PVOP_VCALL_CLOBBERS "=D" (__edi), \ 541#define PVOP_VCALL_CLOBBERS "=D" (__edi), \
494 "=S" (__esi), "=d" (__edx), \ 542 "=S" (__esi), "=d" (__edx), \
495 "=c" (__ecx) 543 "=c" (__ecx)
496
497#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax) 544#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
498 545
546#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
547#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
548
499#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11" 549#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
500#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11" 550#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
501#endif 551#endif /* CONFIG_X86_32 */
502 552
503#ifdef CONFIG_PARAVIRT_DEBUG 553#ifdef CONFIG_PARAVIRT_DEBUG
504#define PVOP_TEST_NULL(op) BUG_ON(op == NULL) 554#define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
@@ -506,10 +556,11 @@ int paravirt_disable_iospace(void);
506#define PVOP_TEST_NULL(op) ((void)op) 556#define PVOP_TEST_NULL(op) ((void)op)
507#endif 557#endif
508 558
509#define __PVOP_CALL(rettype, op, pre, post, ...) \ 559#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
560 pre, post, ...) \
510 ({ \ 561 ({ \
511 rettype __ret; \ 562 rettype __ret; \
512 PVOP_CALL_ARGS; \ 563 PVOP_CALL_ARGS; \
513 PVOP_TEST_NULL(op); \ 564 PVOP_TEST_NULL(op); \
514 /* This is 32-bit specific, but is okay in 64-bit */ \ 565 /* This is 32-bit specific, but is okay in 64-bit */ \
515 /* since this condition will never hold */ \ 566 /* since this condition will never hold */ \
@@ -517,70 +568,113 @@ int paravirt_disable_iospace(void);
517 asm volatile(pre \ 568 asm volatile(pre \
518 paravirt_alt(PARAVIRT_CALL) \ 569 paravirt_alt(PARAVIRT_CALL) \
519 post \ 570 post \
520 : PVOP_CALL_CLOBBERS \ 571 : call_clbr \
521 : paravirt_type(op), \ 572 : paravirt_type(op), \
522 paravirt_clobber(CLBR_ANY), \ 573 paravirt_clobber(clbr), \
523 ##__VA_ARGS__ \ 574 ##__VA_ARGS__ \
524 : "memory", "cc" EXTRA_CLOBBERS); \ 575 : "memory", "cc" extra_clbr); \
525 __ret = (rettype)((((u64)__edx) << 32) | __eax); \ 576 __ret = (rettype)((((u64)__edx) << 32) | __eax); \
526 } else { \ 577 } else { \
527 asm volatile(pre \ 578 asm volatile(pre \
528 paravirt_alt(PARAVIRT_CALL) \ 579 paravirt_alt(PARAVIRT_CALL) \
529 post \ 580 post \
530 : PVOP_CALL_CLOBBERS \ 581 : call_clbr \
531 : paravirt_type(op), \ 582 : paravirt_type(op), \
532 paravirt_clobber(CLBR_ANY), \ 583 paravirt_clobber(clbr), \
533 ##__VA_ARGS__ \ 584 ##__VA_ARGS__ \
534 : "memory", "cc" EXTRA_CLOBBERS); \ 585 : "memory", "cc" extra_clbr); \
535 __ret = (rettype)__eax; \ 586 __ret = (rettype)__eax; \
536 } \ 587 } \
537 __ret; \ 588 __ret; \
538 }) 589 })
539#define __PVOP_VCALL(op, pre, post, ...) \ 590
591#define __PVOP_CALL(rettype, op, pre, post, ...) \
592 ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \
593 EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
594
595#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \
596 ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
597 PVOP_CALLEE_CLOBBERS, , \
598 pre, post, ##__VA_ARGS__)
599
600
601#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
540 ({ \ 602 ({ \
541 PVOP_VCALL_ARGS; \ 603 PVOP_VCALL_ARGS; \
542 PVOP_TEST_NULL(op); \ 604 PVOP_TEST_NULL(op); \
543 asm volatile(pre \ 605 asm volatile(pre \
544 paravirt_alt(PARAVIRT_CALL) \ 606 paravirt_alt(PARAVIRT_CALL) \
545 post \ 607 post \
546 : PVOP_VCALL_CLOBBERS \ 608 : call_clbr \
547 : paravirt_type(op), \ 609 : paravirt_type(op), \
548 paravirt_clobber(CLBR_ANY), \ 610 paravirt_clobber(clbr), \
549 ##__VA_ARGS__ \ 611 ##__VA_ARGS__ \
550 : "memory", "cc" VEXTRA_CLOBBERS); \ 612 : "memory", "cc" extra_clbr); \
551 }) 613 })
552 614
615#define __PVOP_VCALL(op, pre, post, ...) \
616 ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
617 VEXTRA_CLOBBERS, \
618 pre, post, ##__VA_ARGS__)
619
620#define __PVOP_VCALLEESAVE(op, pre, post, ...)	\
621	____PVOP_VCALL(op.func, CLBR_RET_REG,	\
622		      PVOP_VCALLEE_CLOBBERS, , \
623		      pre, post, ##__VA_ARGS__)
624
625
626
553#define PVOP_CALL0(rettype, op) \ 627#define PVOP_CALL0(rettype, op) \
554 __PVOP_CALL(rettype, op, "", "") 628 __PVOP_CALL(rettype, op, "", "")
555#define PVOP_VCALL0(op) \ 629#define PVOP_VCALL0(op) \
556 __PVOP_VCALL(op, "", "") 630 __PVOP_VCALL(op, "", "")
557 631
632#define PVOP_CALLEE0(rettype, op) \
633 __PVOP_CALLEESAVE(rettype, op, "", "")
634#define PVOP_VCALLEE0(op) \
635 __PVOP_VCALLEESAVE(op, "", "")
636
637
558#define PVOP_CALL1(rettype, op, arg1) \ 638#define PVOP_CALL1(rettype, op, arg1) \
559 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1))) 639 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
560#define PVOP_VCALL1(op, arg1) \ 640#define PVOP_VCALL1(op, arg1) \
561 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1))) 641 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
642
643#define PVOP_CALLEE1(rettype, op, arg1) \
644 __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
645#define PVOP_VCALLEE1(op, arg1) \
646 __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
647
562 648
563#define PVOP_CALL2(rettype, op, arg1, arg2) \ 649#define PVOP_CALL2(rettype, op, arg1, arg2) \
564 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ 650 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
565 "1" ((unsigned long)(arg2))) 651 PVOP_CALL_ARG2(arg2))
566#define PVOP_VCALL2(op, arg1, arg2) \ 652#define PVOP_VCALL2(op, arg1, arg2) \
567 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ 653 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
568 "1" ((unsigned long)(arg2))) 654 PVOP_CALL_ARG2(arg2))
655
656#define PVOP_CALLEE2(rettype, op, arg1, arg2) \
657 __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
658 PVOP_CALL_ARG2(arg2))
659#define PVOP_VCALLEE2(op, arg1, arg2) \
660 __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \
661 PVOP_CALL_ARG2(arg2))
662
569 663
570#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \ 664#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
571 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ 665 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
572 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3))) 666 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
573#define PVOP_VCALL3(op, arg1, arg2, arg3) \ 667#define PVOP_VCALL3(op, arg1, arg2, arg3) \
574 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ 668 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
575 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3))) 669 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
576 670
577/* The 4-argument case is the only place 32-bit and 64-bit differ; x86_64 can be much simpler */ 671
578#ifdef CONFIG_X86_32 672#ifdef CONFIG_X86_32
579#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ 673#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
580 __PVOP_CALL(rettype, op, \ 674 __PVOP_CALL(rettype, op, \
581 "push %[_arg4];", "lea 4(%%esp),%%esp;", \ 675 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
582 "0" ((u32)(arg1)), "1" ((u32)(arg2)), \ 676 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
583 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4))) 677 PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
584#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ 678#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
585 __PVOP_VCALL(op, \ 679 __PVOP_VCALL(op, \
586 "push %[_arg4];", "lea 4(%%esp),%%esp;", \ 680 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
@@ -588,13 +682,13 @@ int paravirt_disable_iospace(void);
588 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4))) 682 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
589#else 683#else
590#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ 684#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
591 __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ 685 __PVOP_CALL(rettype, op, "", "", \
592 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \ 686 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
593 "3"((unsigned long)(arg4))) 687 PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
594#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ 688#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
595 __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ 689 __PVOP_VCALL(op, "", "", \
596 "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \ 690 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
597 "3"((unsigned long)(arg4))) 691 PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
598#endif 692#endif
599 693
600static inline int paravirt_enabled(void) 694static inline int paravirt_enabled(void)
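
The PVOP_CALLEE*/PVOP_VCALLEE* variants added above differ from the plain ones in two ways: they dispatch through op.func, the raw pointer inside struct paravirt_callee_save, and they declare only CLBR_RET_REG clobbered, so the compiler may keep values live in argument and scratch registers across the call. The register discipline lives entirely in the asm thunks; the pointer indirection itself can be sketched in plain C (all names and values below are invented for the demo):

#include <stdio.h>

struct paravirt_callee_save {
	void *func;
};

static unsigned long native_save_flags_mock(void)
{
	return 0x200;	/* pretend IF is set; the value is made up */
}

/* What PV_CALLEE_SAVE() produces: a slot holding the raw pointer. */
static struct paravirt_callee_save save_fl = {
	.func = (void *)native_save_flags_mock,
};

int main(void)
{
	unsigned long (*fn)(void) = (unsigned long (*)(void))save_fl.func;

	printf("flags = %#lx\n", fn());
	return 0;
}
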
@@ -1061,13 +1155,13 @@ static inline pte_t __pte(pteval_t val)
1061 pteval_t ret; 1155 pteval_t ret;
1062 1156
1063 if (sizeof(pteval_t) > sizeof(long)) 1157 if (sizeof(pteval_t) > sizeof(long))
1064 ret = PVOP_CALL2(pteval_t, 1158 ret = PVOP_CALLEE2(pteval_t,
1065 pv_mmu_ops.make_pte, 1159 pv_mmu_ops.make_pte,
1066 val, (u64)val >> 32); 1160 val, (u64)val >> 32);
1067 else 1161 else
1068 ret = PVOP_CALL1(pteval_t, 1162 ret = PVOP_CALLEE1(pteval_t,
1069 pv_mmu_ops.make_pte, 1163 pv_mmu_ops.make_pte,
1070 val); 1164 val);
1071 1165
1072 return (pte_t) { .pte = ret }; 1166 return (pte_t) { .pte = ret };
1073} 1167}
@@ -1077,29 +1171,12 @@ static inline pteval_t pte_val(pte_t pte)
1077 pteval_t ret; 1171 pteval_t ret;
1078 1172
1079 if (sizeof(pteval_t) > sizeof(long)) 1173 if (sizeof(pteval_t) > sizeof(long))
1080 ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val, 1174 ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
1081 pte.pte, (u64)pte.pte >> 32); 1175 pte.pte, (u64)pte.pte >> 32);
1082 else
1083 ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
1084 pte.pte);
1085
1086 return ret;
1087}
1088
1089static inline pteval_t pte_flags(pte_t pte)
1090{
1091 pteval_t ret;
1092
1093 if (sizeof(pteval_t) > sizeof(long))
1094 ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
1095 pte.pte, (u64)pte.pte >> 32);
1096 else 1176 else
1097 ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags, 1177 ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
1098 pte.pte); 1178 pte.pte);
1099 1179
1100#ifdef CONFIG_PARAVIRT_DEBUG
1101 BUG_ON(ret & PTE_PFN_MASK);
1102#endif
1103 return ret; 1180 return ret;
1104} 1181}
1105 1182
@@ -1108,11 +1185,11 @@ static inline pgd_t __pgd(pgdval_t val)
1108 pgdval_t ret; 1185 pgdval_t ret;
1109 1186
1110 if (sizeof(pgdval_t) > sizeof(long)) 1187 if (sizeof(pgdval_t) > sizeof(long))
1111 ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd, 1188 ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
1112 val, (u64)val >> 32); 1189 val, (u64)val >> 32);
1113 else 1190 else
1114 ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd, 1191 ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
1115 val); 1192 val);
1116 1193
1117 return (pgd_t) { ret }; 1194 return (pgd_t) { ret };
1118} 1195}
@@ -1122,11 +1199,11 @@ static inline pgdval_t pgd_val(pgd_t pgd)
1122 pgdval_t ret; 1199 pgdval_t ret;
1123 1200
1124 if (sizeof(pgdval_t) > sizeof(long)) 1201 if (sizeof(pgdval_t) > sizeof(long))
1125 ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val, 1202 ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
1126 pgd.pgd, (u64)pgd.pgd >> 32); 1203 pgd.pgd, (u64)pgd.pgd >> 32);
1127 else 1204 else
1128 ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val, 1205 ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
1129 pgd.pgd); 1206 pgd.pgd);
1130 1207
1131 return ret; 1208 return ret;
1132} 1209}
@@ -1190,11 +1267,11 @@ static inline pmd_t __pmd(pmdval_t val)
1190 pmdval_t ret; 1267 pmdval_t ret;
1191 1268
1192 if (sizeof(pmdval_t) > sizeof(long)) 1269 if (sizeof(pmdval_t) > sizeof(long))
1193 ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd, 1270 ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
1194 val, (u64)val >> 32); 1271 val, (u64)val >> 32);
1195 else 1272 else
1196 ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd, 1273 ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
1197 val); 1274 val);
1198 1275
1199 return (pmd_t) { ret }; 1276 return (pmd_t) { ret };
1200} 1277}
@@ -1204,11 +1281,11 @@ static inline pmdval_t pmd_val(pmd_t pmd)
1204 pmdval_t ret; 1281 pmdval_t ret;
1205 1282
1206 if (sizeof(pmdval_t) > sizeof(long)) 1283 if (sizeof(pmdval_t) > sizeof(long))
1207 ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val, 1284 ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
1208 pmd.pmd, (u64)pmd.pmd >> 32); 1285 pmd.pmd, (u64)pmd.pmd >> 32);
1209 else 1286 else
1210 ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val, 1287 ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
1211 pmd.pmd); 1288 pmd.pmd);
1212 1289
1213 return ret; 1290 return ret;
1214} 1291}
@@ -1230,11 +1307,11 @@ static inline pud_t __pud(pudval_t val)
1230 pudval_t ret; 1307 pudval_t ret;
1231 1308
1232 if (sizeof(pudval_t) > sizeof(long)) 1309 if (sizeof(pudval_t) > sizeof(long))
1233 ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud, 1310 ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
1234 val, (u64)val >> 32); 1311 val, (u64)val >> 32);
1235 else 1312 else
1236 ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud, 1313 ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
1237 val); 1314 val);
1238 1315
1239 return (pud_t) { ret }; 1316 return (pud_t) { ret };
1240} 1317}
@@ -1244,11 +1321,11 @@ static inline pudval_t pud_val(pud_t pud)
1244 pudval_t ret; 1321 pudval_t ret;
1245 1322
1246 if (sizeof(pudval_t) > sizeof(long)) 1323 if (sizeof(pudval_t) > sizeof(long))
1247 ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val, 1324 ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
1248 pud.pud, (u64)pud.pud >> 32); 1325 pud.pud, (u64)pud.pud >> 32);
1249 else 1326 else
1250 ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val, 1327 ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
1251 pud.pud); 1328 pud.pud);
1252 1329
1253 return ret; 1330 return ret;
1254} 1331}
@@ -1354,14 +1431,7 @@ static inline void arch_leave_lazy_cpu_mode(void)
1354 PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave); 1431 PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
1355} 1432}
1356 1433
1357static inline void arch_flush_lazy_cpu_mode(void) 1434void arch_flush_lazy_cpu_mode(void);
1358{
1359 if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
1360 arch_leave_lazy_cpu_mode();
1361 arch_enter_lazy_cpu_mode();
1362 }
1363}
1364
1365 1435
1366#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE 1436#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
1367static inline void arch_enter_lazy_mmu_mode(void) 1437static inline void arch_enter_lazy_mmu_mode(void)
@@ -1374,13 +1444,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
1374 PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave); 1444 PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
1375} 1445}
1376 1446
1377static inline void arch_flush_lazy_mmu_mode(void) 1447void arch_flush_lazy_mmu_mode(void);
1378{
1379 if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
1380 arch_leave_lazy_mmu_mode();
1381 arch_enter_lazy_mmu_mode();
1382 }
1383}
1384 1448
1385static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, 1449static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
1386 unsigned long phys, pgprot_t flags) 1450 unsigned long phys, pgprot_t flags)
@@ -1389,9 +1453,10 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
1389} 1453}
1390 1454
1391void _paravirt_nop(void); 1455void _paravirt_nop(void);
1392#define paravirt_nop ((void *)_paravirt_nop) 1456u32 _paravirt_ident_32(u32);
1457u64 _paravirt_ident_64(u64);
1393 1458
1394void paravirt_use_bytelocks(void); 1459#define paravirt_nop ((void *)_paravirt_nop)
1395 1460
1396#ifdef CONFIG_SMP 1461#ifdef CONFIG_SMP
1397 1462
@@ -1441,12 +1506,37 @@ extern struct paravirt_patch_site __parainstructions[],
1441 __parainstructions_end[]; 1506 __parainstructions_end[];
1442 1507
1443#ifdef CONFIG_X86_32 1508#ifdef CONFIG_X86_32
1444#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;" 1509#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
1445#define PV_RESTORE_REGS "popl %%edx; popl %%ecx" 1510#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
1511
1512/* save and restore all caller-save registers, except return value */
1513#define PV_SAVE_ALL_CALLER_REGS "pushl %ecx;"
1514#define PV_RESTORE_ALL_CALLER_REGS "popl %ecx;"
1515
1446#define PV_FLAGS_ARG "0" 1516#define PV_FLAGS_ARG "0"
1447#define PV_EXTRA_CLOBBERS 1517#define PV_EXTRA_CLOBBERS
1448#define PV_VEXTRA_CLOBBERS 1518#define PV_VEXTRA_CLOBBERS
1449#else 1519#else
1520/* save and restore all caller-save registers, except return value */
1521#define PV_SAVE_ALL_CALLER_REGS \
1522 "push %rcx;" \
1523 "push %rdx;" \
1524 "push %rsi;" \
1525 "push %rdi;" \
1526 "push %r8;" \
1527 "push %r9;" \
1528 "push %r10;" \
1529 "push %r11;"
1530#define PV_RESTORE_ALL_CALLER_REGS \
1531 "pop %r11;" \
1532 "pop %r10;" \
1533 "pop %r9;" \
1534 "pop %r8;" \
1535 "pop %rdi;" \
1536 "pop %rsi;" \
1537 "pop %rdx;" \
1538 "pop %rcx;"
1539
1450/* Saving all of the registers would be too much; we clobber all 1540
1451 * caller-saved registers except the argument register */ 1541
1452#define PV_SAVE_REGS "pushq %%rdi;" 1542#define PV_SAVE_REGS "pushq %%rdi;"
@@ -1456,52 +1546,76 @@ extern struct paravirt_patch_site __parainstructions[],
1456#define PV_FLAGS_ARG "D" 1546#define PV_FLAGS_ARG "D"
1457#endif 1547#endif
1458 1548
1549/*
1550 * Generate a thunk around a function which saves all caller-save
1551 * registers except for the return value. This allows C functions to
1552 * be called from assembler code where fewer than normal registers are
1553 * available. It may also help code generation around calls from C
1554 * code if the common case doesn't use many registers.
1555 *
1556 * When a callee is wrapped in a thunk, the caller can assume that all
1557 * arg regs and all scratch registers are preserved across the
1558 * call. The return value in rax/eax will not be saved, even for void
1559 * functions.
1560 */
1561#define PV_CALLEE_SAVE_REGS_THUNK(func) \
1562 extern typeof(func) __raw_callee_save_##func; \
1563 static void *__##func##__ __used = func; \
1564 \
1565 asm(".pushsection .text;" \
1566 "__raw_callee_save_" #func ": " \
1567 PV_SAVE_ALL_CALLER_REGS \
1568 "call " #func ";" \
1569 PV_RESTORE_ALL_CALLER_REGS \
1570 "ret;" \
1571 ".popsection")
1572
1573/* Get a reference to a callee-save function */
1574#define PV_CALLEE_SAVE(func) \
1575 ((struct paravirt_callee_save) { __raw_callee_save_##func })
1576
1577/* Promise that "func" already uses the right calling convention */
1578#define __PV_IS_CALLEE_SAVE(func) \
1579 ((struct paravirt_callee_save) { func })
1580
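
Putting the pieces together, the intended usage pattern looks like the sketch below: a plain C implementation wrapped by the thunk generator, then published through PV_CALLEE_SAVE(). This mirrors the way the kernel wires up the irq-flags ops, but the function name here is hypothetical and the snippet presumes kernel context, so treat it as a pattern rather than a drop-in file.

/* A plain C implementation, compiled with the normal C convention: */
static unsigned long native_save_fl_example(void)
{
	unsigned long flags;

	asm volatile("pushf ; pop %0" : "=rm" (flags) : : "memory");
	return flags;
}

/*
 * Emit __raw_callee_save_native_save_fl_example, which pushes and pops
 * every caller-save register (except the return value) around the call:
 */
PV_CALLEE_SAVE_REGS_THUNK(native_save_fl_example);

/* In the ops-table initializer, publish the thunk, not the function: */
/*	.save_fl = PV_CALLEE_SAVE(native_save_fl_example),	*/
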
1459static inline unsigned long __raw_local_save_flags(void) 1581static inline unsigned long __raw_local_save_flags(void)
1460{ 1582{
1461 unsigned long f; 1583 unsigned long f;
1462 1584
1463 asm volatile(paravirt_alt(PV_SAVE_REGS 1585 asm volatile(paravirt_alt(PARAVIRT_CALL)
1464 PARAVIRT_CALL
1465 PV_RESTORE_REGS)
1466 : "=a"(f) 1586 : "=a"(f)
1467 : paravirt_type(pv_irq_ops.save_fl), 1587 : paravirt_type(pv_irq_ops.save_fl),
1468 paravirt_clobber(CLBR_EAX) 1588 paravirt_clobber(CLBR_EAX)
1469 : "memory", "cc" PV_VEXTRA_CLOBBERS); 1589 : "memory", "cc");
1470 return f; 1590 return f;
1471} 1591}
1472 1592
1473static inline void raw_local_irq_restore(unsigned long f) 1593static inline void raw_local_irq_restore(unsigned long f)
1474{ 1594{
1475 asm volatile(paravirt_alt(PV_SAVE_REGS 1595 asm volatile(paravirt_alt(PARAVIRT_CALL)
1476 PARAVIRT_CALL
1477 PV_RESTORE_REGS)
1478 : "=a"(f) 1596 : "=a"(f)
1479 : PV_FLAGS_ARG(f), 1597 : PV_FLAGS_ARG(f),
1480 paravirt_type(pv_irq_ops.restore_fl), 1598 paravirt_type(pv_irq_ops.restore_fl),
1481 paravirt_clobber(CLBR_EAX) 1599 paravirt_clobber(CLBR_EAX)
1482 : "memory", "cc" PV_EXTRA_CLOBBERS); 1600 : "memory", "cc");
1483} 1601}
1484 1602
1485static inline void raw_local_irq_disable(void) 1603static inline void raw_local_irq_disable(void)
1486{ 1604{
1487 asm volatile(paravirt_alt(PV_SAVE_REGS 1605 asm volatile(paravirt_alt(PARAVIRT_CALL)
1488 PARAVIRT_CALL
1489 PV_RESTORE_REGS)
1490 : 1606 :
1491 : paravirt_type(pv_irq_ops.irq_disable), 1607 : paravirt_type(pv_irq_ops.irq_disable),
1492 paravirt_clobber(CLBR_EAX) 1608 paravirt_clobber(CLBR_EAX)
1493 : "memory", "eax", "cc" PV_EXTRA_CLOBBERS); 1609 : "memory", "eax", "cc");
1494} 1610}
1495 1611
1496static inline void raw_local_irq_enable(void) 1612static inline void raw_local_irq_enable(void)
1497{ 1613{
1498 asm volatile(paravirt_alt(PV_SAVE_REGS 1614 asm volatile(paravirt_alt(PARAVIRT_CALL)
1499 PARAVIRT_CALL
1500 PV_RESTORE_REGS)
1501 : 1615 :
1502 : paravirt_type(pv_irq_ops.irq_enable), 1616 : paravirt_type(pv_irq_ops.irq_enable),
1503 paravirt_clobber(CLBR_EAX) 1617 paravirt_clobber(CLBR_EAX)
1504 : "memory", "eax", "cc" PV_EXTRA_CLOBBERS); 1618 : "memory", "eax", "cc");
1505} 1619}
1506 1620
1507static inline unsigned long __raw_local_irq_save(void) 1621static inline unsigned long __raw_local_irq_save(void)
@@ -1544,33 +1658,49 @@ static inline unsigned long __raw_local_irq_save(void)
1544 .popsection 1658 .popsection
1545 1659
1546 1660
1661#define COND_PUSH(set, mask, reg) \
1662 .if ((~(set)) & mask); push %reg; .endif
1663#define COND_POP(set, mask, reg) \
1664 .if ((~(set)) & mask); pop %reg; .endif
1665
1547#ifdef CONFIG_X86_64 1666#ifdef CONFIG_X86_64
1548#define PV_SAVE_REGS \ 1667
1549 push %rax; \ 1668#define PV_SAVE_REGS(set) \
1550 push %rcx; \ 1669 COND_PUSH(set, CLBR_RAX, rax); \
1551 push %rdx; \ 1670 COND_PUSH(set, CLBR_RCX, rcx); \
1552 push %rsi; \ 1671 COND_PUSH(set, CLBR_RDX, rdx); \
1553 push %rdi; \ 1672 COND_PUSH(set, CLBR_RSI, rsi); \
1554 push %r8; \ 1673 COND_PUSH(set, CLBR_RDI, rdi); \
1555 push %r9; \ 1674 COND_PUSH(set, CLBR_R8, r8); \
1556 push %r10; \ 1675 COND_PUSH(set, CLBR_R9, r9); \
1557 push %r11 1676 COND_PUSH(set, CLBR_R10, r10); \
1558#define PV_RESTORE_REGS \ 1677 COND_PUSH(set, CLBR_R11, r11)
1559 pop %r11; \ 1678#define PV_RESTORE_REGS(set) \
1560 pop %r10; \ 1679 COND_POP(set, CLBR_R11, r11); \
1561 pop %r9; \ 1680 COND_POP(set, CLBR_R10, r10); \
1562 pop %r8; \ 1681 COND_POP(set, CLBR_R9, r9); \
1563 pop %rdi; \ 1682 COND_POP(set, CLBR_R8, r8); \
1564 pop %rsi; \ 1683 COND_POP(set, CLBR_RDI, rdi); \
1565 pop %rdx; \ 1684 COND_POP(set, CLBR_RSI, rsi); \
1566 pop %rcx; \ 1685 COND_POP(set, CLBR_RDX, rdx); \
1567 pop %rax 1686 COND_POP(set, CLBR_RCX, rcx); \
1687 COND_POP(set, CLBR_RAX, rax)
1688
1568#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8) 1689#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
1569#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8) 1690#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
1570#define PARA_INDIRECT(addr) *addr(%rip) 1691#define PARA_INDIRECT(addr) *addr(%rip)
1571#else 1692#else
1572#define PV_SAVE_REGS pushl %eax; pushl %edi; pushl %ecx; pushl %edx 1693#define PV_SAVE_REGS(set) \
1573#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax 1694 COND_PUSH(set, CLBR_EAX, eax); \
1695 COND_PUSH(set, CLBR_EDI, edi); \
1696 COND_PUSH(set, CLBR_ECX, ecx); \
1697 COND_PUSH(set, CLBR_EDX, edx)
1698#define PV_RESTORE_REGS(set) \
1699 COND_POP(set, CLBR_EDX, edx); \
1700 COND_POP(set, CLBR_ECX, ecx); \
1701 COND_POP(set, CLBR_EDI, edi); \
1702 COND_POP(set, CLBR_EAX, eax)
1703
1574#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4) 1704#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
1575#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4) 1705#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
1576#define PARA_INDIRECT(addr) *%cs:addr 1706#define PARA_INDIRECT(addr) *%cs:addr
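
The sense of the COND_PUSH test is easy to misread: `set` names the registers that are already covered -- either the call site declared them clobberable or the callee preserves them -- so a push is emitted only for registers absent from `set`. With DISABLE_INTERRUPTS below passing clobbers | CLBR_CALLEE_SAVE, that typically leaves just rax. A standalone C illustration of the same test:

#include <stdio.h>

#define CLBR_RAX (1 << 0)
#define CLBR_RCX (1 << 1)
#define CLBR_RDX (1 << 2)
#define CLBR_RDI (1 << 3)
#define CLBR_RSI (1 << 4)
#define CLBR_R8  (1 << 5)
#define CLBR_R9  (1 << 6)
#define CLBR_R10 (1 << 7)
#define CLBR_R11 (1 << 8)

/* Bit order matches the CLBR_* definitions above. */
static const char *regs[] = { "rax", "rcx", "rdx", "rdi",
			      "rsi", "r8", "r9", "r10", "r11" };

static void show_saves(unsigned int set)
{
	unsigned int i;

	for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
		if (~set & (1u << i))	/* the same test COND_PUSH makes */
			printf("push %%%s\n", regs[i]);
}

int main(void)
{
	/* a site where everything but rax is covered: only rax is pushed */
	show_saves(CLBR_RCX | CLBR_RDX | CLBR_RDI | CLBR_RSI |
		   CLBR_R8 | CLBR_R9 | CLBR_R10 | CLBR_R11);
	return 0;
}
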
@@ -1582,15 +1712,15 @@ static inline unsigned long __raw_local_irq_save(void)
1582 1712
1583#define DISABLE_INTERRUPTS(clobbers) \ 1713#define DISABLE_INTERRUPTS(clobbers) \
1584 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \ 1714 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
1585 PV_SAVE_REGS; \ 1715 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
1586 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \ 1716 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
1587 PV_RESTORE_REGS;) \ 1717 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
1588 1718
1589#define ENABLE_INTERRUPTS(clobbers) \ 1719#define ENABLE_INTERRUPTS(clobbers) \
1590 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \ 1720 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
1591 PV_SAVE_REGS; \ 1721 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
1592 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \ 1722 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
1593 PV_RESTORE_REGS;) 1723 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
1594 1724
1595#define USERGS_SYSRET32 \ 1725#define USERGS_SYSRET32 \
1596 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \ 1726 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \
@@ -1620,11 +1750,15 @@ static inline unsigned long __raw_local_irq_save(void)
1620 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ 1750 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
1621 swapgs) 1751 swapgs)
1622 1752
1753/*
1754 * Note: swapgs is very special, and in practice is either going to be
1755 * implemented with a single "swapgs" instruction or something equally
1756 * special. Either way, we don't need to save any registers for
1757 * it.
1758 */
1623#define SWAPGS \ 1759#define SWAPGS \
1624 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ 1760 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
1625 PV_SAVE_REGS; \ 1761 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
1626 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \
1627 PV_RESTORE_REGS \
1628 ) 1762 )
1629 1763
1630#define GET_CR2_INTO_RCX \ 1764#define GET_CR2_INTO_RCX \
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index b8493b3b9890..9709fdff6615 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -5,10 +5,8 @@
5 5
6#ifdef CONFIG_X86_PAT 6#ifdef CONFIG_X86_PAT
7extern int pat_enabled; 7extern int pat_enabled;
8extern void validate_pat_support(struct cpuinfo_x86 *c);
9#else 8#else
10static const int pat_enabled; 9static const int pat_enabled;
11static inline void validate_pat_support(struct cpuinfo_x86 *c) { }
12#endif 10#endif
13 11
14extern void pat_init(void); 12extern void pat_init(void);
@@ -17,6 +15,4 @@ extern int reserve_memtype(u64 start, u64 end,
17 unsigned long req_type, unsigned long *ret_type); 15 unsigned long req_type, unsigned long *ret_type);
18extern int free_memtype(u64 start, u64 end); 16extern int free_memtype(u64 start, u64 end);
19 17
20extern void pat_disable(char *reason);
21
22#endif /* _ASM_X86_PAT_H */ 18#endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/include/asm/mach-default/pci-functions.h b/arch/x86/include/asm/pci-functions.h
index ed0bab427354..ed0bab427354 100644
--- a/arch/x86/include/asm/mach-default/pci-functions.h
+++ b/arch/x86/include/asm/pci-functions.h
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 0b64af4f13ac..aee103b26d01 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -34,6 +34,12 @@
34#define PER_CPU_VAR(var) per_cpu__##var 34#define PER_CPU_VAR(var) per_cpu__##var
35#endif /* SMP */ 35#endif /* SMP */
36 36
37#ifdef CONFIG_X86_64_SMP
38#define INIT_PER_CPU_VAR(var) init_per_cpu__##var
39#else
40#define INIT_PER_CPU_VAR(var) per_cpu__##var
41#endif
42
37#else /* ...!ASSEMBLY */ 43#else /* ...!ASSEMBLY */
38 44
39#include <linux/stringify.h> 45#include <linux/stringify.h>
@@ -45,6 +51,22 @@
45#define __percpu_arg(x) "%" #x 51#define __percpu_arg(x) "%" #x
46#endif 52#endif
47 53
54/*
55 * Initialized pointers to per-cpu variables needed by the boot
56 * processor must use these macros to get the proper address
57 * offset from __per_cpu_load on SMP.
58 *
59 * There must also be an entry in vmlinux_64.lds.S
60 */
61#define DECLARE_INIT_PER_CPU(var) \
62 extern typeof(per_cpu_var(var)) init_per_cpu_var(var)
63
64#ifdef CONFIG_X86_64_SMP
65#define init_per_cpu_var(var) init_per_cpu__##var
66#else
67#define init_per_cpu_var(var) per_cpu_var(var)
68#endif
69
48/* For arch-specific code, we can use direct single-insn ops (they 70/* For arch-specific code, we can use direct single-insn ops (they
49 * don't give an lvalue though). */ 71 * don't give an lvalue though). */
50extern void __bad_percpu_size(void); 72extern void __bad_percpu_size(void);
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index e0d199fe1d83..c1774ac9da7a 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -53,8 +53,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
53#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp) 53#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
54#endif 54#endif
55 55
56#define pte_none(x) (!(x).pte_low)
57
58/* 56/*
59 * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken, 57 * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
60 * split up the 29 bits of offset into this range: 58 * split up the 29 bits of offset into this range:
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 447da43cddb3..3f13cdf61156 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -18,21 +18,6 @@
18 printk("%s:%d: bad pgd %p(%016Lx).\n", \ 18 printk("%s:%d: bad pgd %p(%016Lx).\n", \
19 __FILE__, __LINE__, &(e), pgd_val(e)) 19 __FILE__, __LINE__, &(e), pgd_val(e))
20 20
21static inline int pud_none(pud_t pud)
22{
23 return pud_val(pud) == 0;
24}
25
26static inline int pud_bad(pud_t pud)
27{
28 return (pud_val(pud) & ~(PTE_PFN_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
29}
30
31static inline int pud_present(pud_t pud)
32{
33 return pud_val(pud) & _PAGE_PRESENT;
34}
35
36/* Rules for using set_pte: the pte being assigned *must* be 21/* Rules for using set_pte: the pte being assigned *must* be
37 * either not present or in a state where the hardware will 22 * either not present or in a state where the hardware will
38 * not attempt to update the pte. In places where this is 23 * not attempt to update the pte. In places where this is
@@ -120,15 +105,6 @@ static inline void pud_clear(pud_t *pudp)
120 write_cr3(pgd); 105 write_cr3(pgd);
121} 106}
122 107
123#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
124
125#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))
126
127
128/* Find an entry in the second-level page table.. */
129#define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) + \
130 pmd_index(address))
131
132#ifdef CONFIG_SMP 108#ifdef CONFIG_SMP
133static inline pte_t native_ptep_get_and_clear(pte_t *ptep) 109static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
134{ 110{
@@ -145,17 +121,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
145#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp) 121#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
146#endif 122#endif
147 123
148#define __HAVE_ARCH_PTE_SAME
149static inline int pte_same(pte_t a, pte_t b)
150{
151 return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
152}
153
154static inline int pte_none(pte_t pte)
155{
156 return !pte.pte_low && !pte.pte_high;
157}
158
159/* 124/*
160 * Bits 0, 6 and 7 are taken in the low part of the pte, 125 * Bits 0, 6 and 7 are taken in the low part of the pte,
161 * put the 32 bits of offset into the high part. 126 * put the 32 bits of offset into the high part.
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 4f5af8447d54..8fef0f6bfbb6 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1,6 +1,8 @@
1#ifndef _ASM_X86_PGTABLE_H 1#ifndef _ASM_X86_PGTABLE_H
2#define _ASM_X86_PGTABLE_H 2#define _ASM_X86_PGTABLE_H
3 3
4#include <asm/page.h>
5
4#define FIRST_USER_ADDRESS 0 6#define FIRST_USER_ADDRESS 0
5 7
6#define _PAGE_BIT_PRESENT 0 /* is present */ 8#define _PAGE_BIT_PRESENT 0 /* is present */
@@ -236,68 +238,82 @@ static inline unsigned long pte_pfn(pte_t pte)
236 238
237static inline int pmd_large(pmd_t pte) 239static inline int pmd_large(pmd_t pte)
238{ 240{
239 return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) == 241 return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
240 (_PAGE_PSE | _PAGE_PRESENT); 242 (_PAGE_PSE | _PAGE_PRESENT);
241} 243}
242 244
245static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
246{
247 pteval_t v = native_pte_val(pte);
248
249 return native_make_pte(v | set);
250}
251
252static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
253{
254 pteval_t v = native_pte_val(pte);
255
256 return native_make_pte(v & ~clear);
257}
258
243static inline pte_t pte_mkclean(pte_t pte) 259static inline pte_t pte_mkclean(pte_t pte)
244{ 260{
245 return __pte(pte_val(pte) & ~_PAGE_DIRTY); 261 return pte_clear_flags(pte, _PAGE_DIRTY);
246} 262}
247 263
248static inline pte_t pte_mkold(pte_t pte) 264static inline pte_t pte_mkold(pte_t pte)
249{ 265{
250 return __pte(pte_val(pte) & ~_PAGE_ACCESSED); 266 return pte_clear_flags(pte, _PAGE_ACCESSED);
251} 267}
252 268
253static inline pte_t pte_wrprotect(pte_t pte) 269static inline pte_t pte_wrprotect(pte_t pte)
254{ 270{
255 return __pte(pte_val(pte) & ~_PAGE_RW); 271 return pte_clear_flags(pte, _PAGE_RW);
256} 272}
257 273
258static inline pte_t pte_mkexec(pte_t pte) 274static inline pte_t pte_mkexec(pte_t pte)
259{ 275{
260 return __pte(pte_val(pte) & ~_PAGE_NX); 276 return pte_clear_flags(pte, _PAGE_NX);
261} 277}
262 278
263static inline pte_t pte_mkdirty(pte_t pte) 279static inline pte_t pte_mkdirty(pte_t pte)
264{ 280{
265 return __pte(pte_val(pte) | _PAGE_DIRTY); 281 return pte_set_flags(pte, _PAGE_DIRTY);
266} 282}
267 283
268static inline pte_t pte_mkyoung(pte_t pte) 284static inline pte_t pte_mkyoung(pte_t pte)
269{ 285{
270 return __pte(pte_val(pte) | _PAGE_ACCESSED); 286 return pte_set_flags(pte, _PAGE_ACCESSED);
271} 287}
272 288
273static inline pte_t pte_mkwrite(pte_t pte) 289static inline pte_t pte_mkwrite(pte_t pte)
274{ 290{
275 return __pte(pte_val(pte) | _PAGE_RW); 291 return pte_set_flags(pte, _PAGE_RW);
276} 292}
277 293
278static inline pte_t pte_mkhuge(pte_t pte) 294static inline pte_t pte_mkhuge(pte_t pte)
279{ 295{
280 return __pte(pte_val(pte) | _PAGE_PSE); 296 return pte_set_flags(pte, _PAGE_PSE);
281} 297}
282 298
283static inline pte_t pte_clrhuge(pte_t pte) 299static inline pte_t pte_clrhuge(pte_t pte)
284{ 300{
285 return __pte(pte_val(pte) & ~_PAGE_PSE); 301 return pte_clear_flags(pte, _PAGE_PSE);
286} 302}
287 303
288static inline pte_t pte_mkglobal(pte_t pte) 304static inline pte_t pte_mkglobal(pte_t pte)
289{ 305{
290 return __pte(pte_val(pte) | _PAGE_GLOBAL); 306 return pte_set_flags(pte, _PAGE_GLOBAL);
291} 307}
292 308
293static inline pte_t pte_clrglobal(pte_t pte) 309static inline pte_t pte_clrglobal(pte_t pte)
294{ 310{
295 return __pte(pte_val(pte) & ~_PAGE_GLOBAL); 311 return pte_clear_flags(pte, _PAGE_GLOBAL);
296} 312}
297 313
298static inline pte_t pte_mkspecial(pte_t pte) 314static inline pte_t pte_mkspecial(pte_t pte)
299{ 315{
300 return __pte(pte_val(pte) | _PAGE_SPECIAL); 316 return pte_set_flags(pte, _PAGE_SPECIAL);
301} 317}
302 318
303extern pteval_t __supported_pte_mask; 319extern pteval_t __supported_pte_mask;
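
The point of pte_set_flags()/pte_clear_flags() is that pure flag manipulation never needs to round-trip through the paravirt pte_val()/__pte() hooks: the helpers work on native values directly, so each pte_mk*() above collapses to a single mask operation. A standalone mock of the two helpers (the bit positions match the usual x86 ones, but treat them as assumptions here):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t pteval_t;
typedef struct { pteval_t pte; } pte_t;

#define MOCK_PAGE_RW	((pteval_t)1 << 1)
#define MOCK_PAGE_DIRTY	((pteval_t)1 << 6)

static pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	return (pte_t){ pte.pte | set };
}

static pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	return (pte_t){ pte.pte & ~clear };
}

int main(void)
{
	pte_t pte = { 0x1000 | MOCK_PAGE_RW };

	pte = pte_set_flags(pte, MOCK_PAGE_DIRTY);	/* the pte_mkdirty() shape */
	pte = pte_clear_flags(pte, MOCK_PAGE_RW);	/* the pte_wrprotect() shape */
	printf("pte = %#llx\n", (unsigned long long)pte.pte);
	return 0;
}
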
@@ -451,6 +467,190 @@ static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
451# include "pgtable_64.h" 467# include "pgtable_64.h"
452#endif 468#endif
453 469
470#ifndef __ASSEMBLY__
471#include <linux/mm_types.h>
472
473static inline int pte_none(pte_t pte)
474{
475 return !pte.pte;
476}
477
478#define __HAVE_ARCH_PTE_SAME
479static inline int pte_same(pte_t a, pte_t b)
480{
481 return a.pte == b.pte;
482}
483
484static inline int pte_present(pte_t a)
485{
486 return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
487}
488
489static inline int pmd_present(pmd_t pmd)
490{
491 return pmd_flags(pmd) & _PAGE_PRESENT;
492}
493
494static inline int pmd_none(pmd_t pmd)
495{
496 /* Only check low word on 32-bit platforms, since it might be
497 out of sync with upper half. */
498 return (unsigned long)native_pmd_val(pmd) == 0;
499}
500
501static inline unsigned long pmd_page_vaddr(pmd_t pmd)
502{
503 return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
504}
505
506/*
507 * Currently stuck as a macro due to indirect forward reference to
508 * linux/mmzone.h's __section_mem_map_addr() definition:
509 */
510#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
511
512/*
513 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
514 *
515 * this function returns the index of the entry in the pmd page which would
516 * control the given virtual address
517 */
518static inline unsigned pmd_index(unsigned long address)
519{
520 return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
521}
522
523/*
524 * Conversion functions: convert a page and protection to a page entry,
525 * and a page entry and page directory to the page they refer to.
526 *
527 * (Currently stuck as a macro because of indirect forward reference
528 * to linux/mm.h:page_to_nid())
529 */
530#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
531
532/*
533 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
534 *
535 * this function returns the index of the entry in the pte page which would
536 * control the given virtual address
537 */
538static inline unsigned pte_index(unsigned long address)
539{
540 return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
541}
542
543static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
544{
545 return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
546}
547
548static inline int pmd_bad(pmd_t pmd)
549{
550 return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
551}
552
553static inline unsigned long pages_to_mb(unsigned long npg)
554{
555 return npg >> (20 - PAGE_SHIFT);
556}
557
558#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
559 remap_pfn_range(vma, vaddr, pfn, size, prot)
560
561#if PAGETABLE_LEVELS == 2
562static inline int pud_large(pud_t pud)
563{
564 return 0;
565}
566#endif
567
568#if PAGETABLE_LEVELS > 2
569static inline int pud_none(pud_t pud)
570{
571 return native_pud_val(pud) == 0;
572}
573
574static inline int pud_present(pud_t pud)
575{
576 return pud_flags(pud) & _PAGE_PRESENT;
577}
578
579static inline unsigned long pud_page_vaddr(pud_t pud)
580{
581 return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
582}
583
584/*
585 * Currently stuck as a macro due to indirect forward reference to
586 * linux/mmzone.h's __section_mem_map_addr() definition:
587 */
588#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
589
590/* Find an entry in the second-level page table.. */
591static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
592{
593 return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
594}
595
596static inline unsigned long pmd_pfn(pmd_t pmd)
597{
598 return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
599}
600
601static inline int pud_large(pud_t pud)
602{
603 return (pud_flags(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
604 (_PAGE_PSE | _PAGE_PRESENT);
605}
606
607static inline int pud_bad(pud_t pud)
608{
609 return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
610}
611#endif /* PAGETABLE_LEVELS > 2 */
612
613#if PAGETABLE_LEVELS > 3
614static inline int pgd_present(pgd_t pgd)
615{
616 return pgd_flags(pgd) & _PAGE_PRESENT;
617}
618
619static inline unsigned long pgd_page_vaddr(pgd_t pgd)
620{
621 return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
622}
623
624/*
625 * Currently stuck as a macro due to indirect forward reference to
626 * linux/mmzone.h's __section_mem_map_addr() definition:
627 */
628#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
629
630/* to find an entry in a page-table-directory. */
631static inline unsigned pud_index(unsigned long address)
632{
633 return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
634}
635
636static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
637{
638 return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
639}
640
641static inline int pgd_bad(pgd_t pgd)
642{
643 return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
644}
645
646static inline int pgd_none(pgd_t pgd)
647{
648 return !native_pgd_val(pgd);
649}
650#endif /* PAGETABLE_LEVELS > 3 */
651
652#endif /* __ASSEMBLY__ */
653
454/* 654/*
455 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD] 655
456 * 656 *
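
With the 32- and 64-bit copies of these walkers now consolidated into pgtable.h, the index arithmetic is worth spelling out once: each level extracts a 9-bit slice of the virtual address (on x86-64 with 4K pages and 512-entry tables). A standalone check, with the shift constants assumed here rather than pulled from the headers:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PUD_SHIFT	30
#define PTRS_PER_PTE	512
#define PTRS_PER_PMD	512
#define PTRS_PER_PUD	512

static unsigned int pte_index(unsigned long a) { return (a >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); }
static unsigned int pmd_index(unsigned long a) { return (a >> PMD_SHIFT) & (PTRS_PER_PMD - 1); }
static unsigned int pud_index(unsigned long a) { return (a >> PUD_SHIFT) & (PTRS_PER_PUD - 1); }

int main(void)
{
	unsigned long addr = 0x7f1234567000UL;

	printf("pud=%u pmd=%u pte=%u\n",
	       pud_index(addr), pmd_index(addr), pte_index(addr));
	return 0;
}
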
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 72b020deb46b..1952bb762aac 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -85,55 +85,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
85/* The boot page tables (all created as a single array) */ 85/* The boot page tables (all created as a single array) */
86extern unsigned long pg0[]; 86extern unsigned long pg0[];
87 87
88#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
89
90/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
91#define pmd_none(x) (!(unsigned long)pmd_val((x)))
92#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT)
93#define pmd_bad(x) ((pmd_val(x) & (PTE_FLAGS_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
94
95#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
96
97#ifdef CONFIG_X86_PAE 88#ifdef CONFIG_X86_PAE
98# include <asm/pgtable-3level.h> 89# include <asm/pgtable-3level.h>
99#else 90#else
100# include <asm/pgtable-2level.h> 91# include <asm/pgtable-2level.h>
101#endif 92#endif
102 93
103/*
104 * Conversion functions: convert a page and protection to a page entry,
105 * and a page entry and page directory to the page they refer to.
106 */
107#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
108
109
110static inline int pud_large(pud_t pud) { return 0; }
111
112/*
113 * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
114 *
115 * this macro returns the index of the entry in the pmd page which would
116 * control the given virtual address
117 */
118#define pmd_index(address) \
119 (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
120
121/*
122 * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
123 *
124 * this macro returns the index of the entry in the pte page which would
125 * control the given virtual address
126 */
127#define pte_index(address) \
128 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
129#define pte_offset_kernel(dir, address) \
130 ((pte_t *)pmd_page_vaddr(*(dir)) + pte_index((address)))
131
132#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
133
134#define pmd_page_vaddr(pmd) \
135 ((unsigned long)__va(pmd_val((pmd)) & PTE_PFN_MASK))
136
137#if defined(CONFIG_HIGHPTE) 94#if defined(CONFIG_HIGHPTE)
138#define pte_offset_map(dir, address) \ 95#define pte_offset_map(dir, address) \
139 ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) + \ 96 ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) + \
@@ -176,7 +133,4 @@ do { \
176#define kern_addr_valid(kaddr) (0) 133#define kern_addr_valid(kaddr) (0)
177#endif 134#endif
178 135
179#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
180 remap_pfn_range(vma, vaddr, pfn, size, prot)
181
182#endif /* _ASM_X86_PGTABLE_32_H */ 136#endif /* _ASM_X86_PGTABLE_32_H */
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 1df9637dfda3..1c4e247c51fd 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -66,9 +66,6 @@ extern void paging_init(void);
66 printk("%s:%d: bad pgd %p(%016lx).\n", \ 66 printk("%s:%d: bad pgd %p(%016lx).\n", \
67 __FILE__, __LINE__, &(e), pgd_val(e)) 67 __FILE__, __LINE__, &(e), pgd_val(e))
68 68
69#define pgd_none(x) (!pgd_val(x))
70#define pud_none(x) (!pud_val(x))
71
72struct mm_struct; 69struct mm_struct;
73 70
74void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte); 71void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
@@ -133,8 +130,6 @@ static inline void native_pgd_clear(pgd_t *pgd)
133 native_set_pgd(pgd, native_make_pgd(0)); 130 native_set_pgd(pgd, native_make_pgd(0));
134} 131}
135 132
136#define pte_same(a, b) ((a).pte == (b).pte)
137
138#endif /* !__ASSEMBLY__ */ 133#endif /* !__ASSEMBLY__ */
139 134
140#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) 135#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
@@ -155,26 +150,6 @@ static inline void native_pgd_clear(pgd_t *pgd)
155 150
156#ifndef __ASSEMBLY__ 151#ifndef __ASSEMBLY__
157 152
158static inline int pgd_bad(pgd_t pgd)
159{
160 return (pgd_val(pgd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
161}
162
163static inline int pud_bad(pud_t pud)
164{
165 return (pud_val(pud) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
166}
167
168static inline int pmd_bad(pmd_t pmd)
169{
170 return (pmd_val(pmd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
171}
172
173#define pte_none(x) (!pte_val((x)))
174#define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))
175
176#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */
177
178/* 153/*
179 * Conversion functions: convert a page and protection to a page entry, 154 * Conversion functions: convert a page and protection to a page entry,
180 * and a page entry and page directory to the page they refer to. 155 * and a page entry and page directory to the page they refer to.
@@ -183,41 +158,12 @@ static inline int pmd_bad(pmd_t pmd)
183/* 158/*
184 * Level 4 access. 159 * Level 4 access.
185 */ 160 */
186#define pgd_page_vaddr(pgd) \
187 ((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_PFN_MASK))
188#define pgd_page(pgd) (pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT))
189#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
190static inline int pgd_large(pgd_t pgd) { return 0; } 161static inline int pgd_large(pgd_t pgd) { return 0; }
191#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE) 162#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
192 163
193/* PUD - Level3 access */ 164/* PUD - Level3 access */
194/* to find an entry in a page-table-directory. */
195#define pud_page_vaddr(pud) \
196 ((unsigned long)__va(pud_val((pud)) & PHYSICAL_PAGE_MASK))
197#define pud_page(pud) (pfn_to_page(pud_val((pud)) >> PAGE_SHIFT))
198#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
199#define pud_offset(pgd, address) \
200 ((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address)))
201#define pud_present(pud) (pud_val((pud)) & _PAGE_PRESENT)
202
203static inline int pud_large(pud_t pte)
204{
205 return (pud_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
206 (_PAGE_PSE | _PAGE_PRESENT);
207}
208 165
209/* PMD - Level 2 access */ 166/* PMD - Level 2 access */
210#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_PFN_MASK))
211#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
212
213#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
214#define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \
215 pmd_index(address))
216#define pmd_none(x) (!pmd_val((x)))
217#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT)
218#define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot))))
219#define pmd_pfn(x) ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
220
221#define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT) 167#define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
222#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | \ 168#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | \
223 _PAGE_FILE }) 169 _PAGE_FILE })
@@ -225,13 +171,6 @@ static inline int pud_large(pud_t pte)
225 171
226/* PTE - Level 1 access. */ 172/* PTE - Level 1 access. */
227 173
228/* page, protection -> pte */
229#define mk_pte(page, pgprot) pfn_pte(page_to_pfn((page)), (pgprot))
230
231#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
232#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
233 pte_index((address)))
234
235/* x86-64 always has all page tables mapped. */ 174/* x86-64 always has all page tables mapped. */
236#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address)) 175#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
237#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address)) 176#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
@@ -265,9 +204,6 @@ extern int direct_gbpages;
265extern int kern_addr_valid(unsigned long addr); 204extern int kern_addr_valid(unsigned long addr);
266extern void cleanup_highmap(void); 205extern void cleanup_highmap(void);
267 206
268#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
269 remap_pfn_range(vma, vaddr, pfn, size, prot)
270
271#define HAVE_ARCH_UNMAPPED_AREA 207#define HAVE_ARCH_UNMAPPED_AREA
272#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN 208#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
273 209
diff --git a/arch/x86/include/asm/prctl.h b/arch/x86/include/asm/prctl.h
index a8894647dd9a..3ac5032fae09 100644
--- a/arch/x86/include/asm/prctl.h
+++ b/arch/x86/include/asm/prctl.h
@@ -6,8 +6,4 @@
6#define ARCH_GET_FS 0x1003 6#define ARCH_GET_FS 0x1003
7#define ARCH_GET_GS 0x1004 7#define ARCH_GET_GS 0x1004
8 8
9#ifdef CONFIG_X86_64
10extern long sys_arch_prctl(int, unsigned long);
11#endif /* CONFIG_X86_64 */
12
13#endif /* _ASM_X86_PRCTL_H */ 9#endif /* _ASM_X86_PRCTL_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c15766a2969f..a0133838b67c 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -73,7 +73,7 @@ struct cpuinfo_x86 {
73 char pad0; 73 char pad0;
74#else 74#else
75 /* Number of 4K pages in DTLB/ITLB combined(in pages): */ 75 /* Number of 4K pages in DTLB/ITLB combined(in pages): */
76 int x86_tlbsize; 76 int x86_tlbsize;
77 __u8 x86_virt_bits; 77 __u8 x86_virt_bits;
78 __u8 x86_phys_bits; 78 __u8 x86_phys_bits;
79#endif 79#endif
@@ -393,16 +393,14 @@ union irq_stack_union {
393}; 393};
394 394
395DECLARE_PER_CPU(union irq_stack_union, irq_stack_union); 395DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
396DECLARE_PER_CPU(char *, irq_stack_ptr); 396DECLARE_INIT_PER_CPU(irq_stack_union);
397 397
398static inline void load_gs_base(int cpu) 398DECLARE_PER_CPU(char *, irq_stack_ptr);
399{ 399#else /* X86_64 */
400 /* Memory clobbers used to order pda/percpu accesses */ 400#ifdef CONFIG_CC_STACKPROTECTOR
401 mb(); 401DECLARE_PER_CPU(unsigned long, stack_canary);
402 wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
403 mb();
404}
405#endif 402#endif
403#endif /* X86_64 */
406 404
407extern void print_cpu_info(struct cpuinfo_x86 *); 405extern void print_cpu_info(struct cpuinfo_x86 *);
408extern unsigned int xstate_size; 406extern unsigned int xstate_size;
@@ -776,9 +774,9 @@ extern int sysenter_setup(void);
776extern struct desc_ptr early_gdt_descr; 774extern struct desc_ptr early_gdt_descr;
777 775
778extern void cpu_set_gdt(int); 776extern void cpu_set_gdt(int);
779extern void switch_to_new_gdt(void); 777extern void switch_to_new_gdt(int);
778extern void load_percpu_segment(int);
780extern void cpu_init(void); 779extern void cpu_init(void);
781extern void init_gdt(int cpu);
782 780
783static inline unsigned long get_debugctlmsr(void) 781static inline unsigned long get_debugctlmsr(void)
784{ 782{
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index d6a22f92ba77..49fb3ecf3bb3 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -18,11 +18,7 @@ extern void syscall32_cpu_init(void);
18 18
19extern void check_efer(void); 19extern void check_efer(void);
20 20
21#ifdef CONFIG_X86_BIOS_REBOOT
22extern int reboot_force; 21extern int reboot_force;
23#else
24static const int reboot_force = 0;
25#endif
26 22
27long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); 23long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
28 24
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 6d34d954c228..e304b66abeea 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -28,7 +28,7 @@ struct pt_regs {
28 int xds; 28 int xds;
29 int xes; 29 int xes;
30 int xfs; 30 int xfs;
31 /* int gs; */ 31 int xgs;
32 long orig_eax; 32 long orig_eax;
33 long eip; 33 long eip;
34 int xcs; 34 int xcs;
@@ -50,7 +50,7 @@ struct pt_regs {
50 unsigned long ds; 50 unsigned long ds;
51 unsigned long es; 51 unsigned long es;
52 unsigned long fs; 52 unsigned long fs;
53 /* int gs; */ 53 unsigned long gs;
54 unsigned long orig_ax; 54 unsigned long orig_ax;
55 unsigned long ip; 55 unsigned long ip;
56 unsigned long cs; 56 unsigned long cs;
diff --git a/arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h b/arch/x86/include/asm/rdc321x_defs.h
index c8e9c8bed3d0..c8e9c8bed3d0 100644
--- a/arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h
+++ b/arch/x86/include/asm/rdc321x_defs.h
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 1dc1b51ac623..14e0ed86a6f9 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -61,7 +61,7 @@
61 * 61 *
62 * 26 - ESPFIX small SS 62 * 26 - ESPFIX small SS
63 * 27 - per-cpu [ offset to per-cpu data area ] 63 * 27 - per-cpu [ offset to per-cpu data area ]
64 * 28 - unused 64 * 28 - stack_canary-20 [ for stack protector ]
65 * 29 - unused 65 * 29 - unused
66 * 30 - unused 66 * 30 - unused
67 * 31 - TSS for double fault handler 67 * 31 - TSS for double fault handler
@@ -95,6 +95,13 @@
95#define __KERNEL_PERCPU 0 95#define __KERNEL_PERCPU 0
96#endif 96#endif
97 97
98#define GDT_ENTRY_STACK_CANARY (GDT_ENTRY_KERNEL_BASE + 16)
99#ifdef CONFIG_CC_STACKPROTECTOR
100#define __KERNEL_STACK_CANARY (GDT_ENTRY_STACK_CANARY * 8)
101#else
102#define __KERNEL_STACK_CANARY 0
103#endif
104
98#define GDT_ENTRY_DOUBLEFAULT_TSS 31 105#define GDT_ENTRY_DOUBLEFAULT_TSS 31
99 106
100/* 107/*
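The selector math behind __KERNEL_STACK_CANARY is plain x86 segmentation: a selector is the GDT index times 8 (table indicator and RPL both zero), and the "stack_canary-20" note in the GDT layout comment means the segment base sits 20 bytes below the per-cpu stack_canary variable, so gcc's fixed %gs:20 access lands exactly on it. A small sketch with a hypothetical canary address and an assumed GDT_ENTRY_KERNEL_BASE of 12:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned gdt_entry_kernel_base = 12;    /* assumed example value */
        unsigned gdt_entry_stack_canary = gdt_entry_kernel_base + 16;
        unsigned selector = gdt_entry_stack_canary * 8; /* TI = 0, RPL = 0 */

        uintptr_t stack_canary = 0xc1a0b000;    /* hypothetical per-cpu address */
        uintptr_t seg_base = stack_canary - 20; /* so %gs:20 == &stack_canary */

        printf("selector 0x%x, %%gs:20 -> 0x%lx (== &stack_canary)\n",
               selector, (unsigned long)(seg_base + 20));
        return 0;
}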
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 536949749bc2..45b40278b582 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -1,6 +1,8 @@
1#ifndef _ASM_X86_SETUP_H 1#ifndef _ASM_X86_SETUP_H
2#define _ASM_X86_SETUP_H 2#define _ASM_X86_SETUP_H
3 3
4#ifdef __KERNEL__
5
4#define COMMAND_LINE_SIZE 2048 6#define COMMAND_LINE_SIZE 2048
5 7
6#ifndef __ASSEMBLY__ 8#ifndef __ASSEMBLY__
@@ -8,10 +10,8 @@
8/* Interrupt control for vSMPowered x86_64 systems */ 10/* Interrupt control for vSMPowered x86_64 systems */
9void vsmp_init(void); 11void vsmp_init(void);
10 12
11
12void setup_bios_corruption_check(void); 13void setup_bios_corruption_check(void);
13 14
14
15#ifdef CONFIG_X86_VISWS 15#ifdef CONFIG_X86_VISWS
16extern void visws_early_detect(void); 16extern void visws_early_detect(void);
17extern int is_visws_box(void); 17extern int is_visws_box(void);
@@ -43,7 +43,7 @@ struct x86_quirks {
43 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name); 43 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
44 void (*mpc_oem_pci_bus)(struct mpc_bus *m); 44 void (*mpc_oem_pci_bus)(struct mpc_bus *m);
45 void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable, 45 void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable,
46 unsigned short oemsize); 46 unsigned short oemsize);
47 int (*setup_ioapic_ids)(void); 47 int (*setup_ioapic_ids)(void);
48 int (*update_genapic)(void); 48 int (*update_genapic)(void);
49}; 49};
@@ -56,8 +56,6 @@ extern unsigned long saved_video_mode;
56#endif 56#endif
57#endif /* __ASSEMBLY__ */ 57#endif /* __ASSEMBLY__ */
58 58
59#ifdef __KERNEL__
60
61#ifdef __i386__ 59#ifdef __i386__
62 60
63#include <linux/pfn.h> 61#include <linux/pfn.h>
diff --git a/arch/x86/include/asm/mach-default/setup_arch.h b/arch/x86/include/asm/setup_arch.h
index 38846208b548..38846208b548 100644
--- a/arch/x86/include/asm/mach-default/setup_arch.h
+++ b/arch/x86/include/asm/setup_arch.h
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 45ef8a1b9d7c..47d0e21f2b9e 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -182,28 +182,9 @@ static inline int logical_smp_processor_id(void)
182 return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); 182 return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
183} 183}
184 184
185#include <mach_apicdef.h>
186static inline unsigned int read_apic_id(void)
187{
188 unsigned int reg;
189
190 reg = *(u32 *)(APIC_BASE + APIC_ID);
191
192 return GET_APIC_ID(reg);
193}
194#endif 185#endif
195 186
196
197# if defined(APIC_DEFINITION) || defined(CONFIG_X86_64)
198extern int hard_smp_processor_id(void); 187extern int hard_smp_processor_id(void);
199# else
200#include <mach_apicdef.h>
201static inline int hard_smp_processor_id(void)
202{
203 /* we don't want to mark this access volatile - bad code generation */
204 return read_apic_id();
205}
206# endif /* APIC_DEFINITION */
207 188
208#else /* CONFIG_X86_LOCAL_APIC */ 189#else /* CONFIG_X86_LOCAL_APIC */
209 190
diff --git a/arch/x86/include/asm/mach-default/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h
index 23bf52103b89..1def60114906 100644
--- a/arch/x86/include/asm/mach-default/smpboot_hooks.h
+++ b/arch/x86/include/asm/smpboot_hooks.h
@@ -13,10 +13,10 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
13 CMOS_WRITE(0xa, 0xf); 13 CMOS_WRITE(0xa, 0xf);
14 local_flush_tlb(); 14 local_flush_tlb();
15 pr_debug("1.\n"); 15 pr_debug("1.\n");
16 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) = 16 *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_high)) =
17 start_eip >> 4; 17 start_eip >> 4;
18 pr_debug("2.\n"); 18 pr_debug("2.\n");
19 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 19 *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_low)) =
20 start_eip & 0xf; 20 start_eip & 0xf;
21 pr_debug("3.\n"); 21 pr_debug("3.\n");
22} 22}
@@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
34 */ 34 */
35 CMOS_WRITE(0, 0xf); 35 CMOS_WRITE(0, 0xf);
36 36
37 *((volatile long *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0; 37 *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
38} 38}
39 39
40static inline void __init smpboot_setup_io_apic(void) 40static inline void __init smpboot_setup_io_apic(void)
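The warm-reset vector written above stores the trampoline entry point as a real-mode segment:offset pair: the word at trampoline_phys_high gets start_eip >> 4 (the segment) and the word at trampoline_phys_low gets start_eip & 0xf (the offset), which the CPU recombines as segment * 16 + offset on startup. A standalone sketch of that encoding, with a hypothetical trampoline address:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned long start_eip = 0x9a000; /* hypothetical trampoline address */
        unsigned short seg = start_eip >> 4;  /* stored at ..._phys_high */
        unsigned short off = start_eip & 0xf; /* stored at ..._phys_low  */

        /* real-mode address reconstruction: segment * 16 + offset */
        assert(seg * 16UL + off == start_eip);
        printf("%04x:%04x -> 0x%lx\n", seg, off, seg * 16UL + off);
        return 0;
}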
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 8247e94ac6b1..3a5696656680 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -172,70 +172,8 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
172 return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1; 172 return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
173} 173}
174 174
175#ifdef CONFIG_PARAVIRT 175#ifndef CONFIG_PARAVIRT
176/*
177 * Define virtualization-friendly old-style lock byte lock, for use in
178 * pv_lock_ops if desired.
179 *
180 * This differs from the pre-2.6.24 spinlock by always using xchgb
181 * rather than decb to take the lock; this allows it to use a
182 * zero-initialized lock structure. It also maintains a 1-byte
183 * contention counter, so that we can implement
184 * __byte_spin_is_contended.
185 */
186struct __byte_spinlock {
187 s8 lock;
188 s8 spinners;
189};
190
191static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
192{
193 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
194 return bl->lock != 0;
195}
196
197static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
198{
199 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
200 return bl->spinners != 0;
201}
202
203static inline void __byte_spin_lock(raw_spinlock_t *lock)
204{
205 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
206 s8 val = 1;
207
208 asm("1: xchgb %1, %0\n"
209 " test %1,%1\n"
210 " jz 3f\n"
211 " " LOCK_PREFIX "incb %2\n"
212 "2: rep;nop\n"
213 " cmpb $1, %0\n"
214 " je 2b\n"
215 " " LOCK_PREFIX "decb %2\n"
216 " jmp 1b\n"
217 "3:"
218 : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
219}
220
221static inline int __byte_spin_trylock(raw_spinlock_t *lock)
222{
223 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
224 u8 old = 1;
225
226 asm("xchgb %1,%0"
227 : "+m" (bl->lock), "+q" (old) : : "memory");
228 176
229 return old == 0;
230}
231
232static inline void __byte_spin_unlock(raw_spinlock_t *lock)
233{
234 struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
235 smp_wmb();
236 bl->lock = 0;
237}
238#else /* !CONFIG_PARAVIRT */
239static inline int __raw_spin_is_locked(raw_spinlock_t *lock) 177static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
240{ 178{
241 return __ticket_spin_is_locked(lock); 179 return __ticket_spin_is_locked(lock);
@@ -268,7 +206,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
268 __raw_spin_lock(lock); 206 __raw_spin_lock(lock);
269} 207}
270 208
271#endif /* CONFIG_PARAVIRT */ 209#endif
272 210
273static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) 211static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
274{ 212{
@@ -330,8 +268,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
330{ 268{
331 atomic_t *count = (atomic_t *)lock; 269 atomic_t *count = (atomic_t *)lock;
332 270
333 atomic_dec(count); 271 if (atomic_dec_return(count) >= 0)
334 if (atomic_read(count) >= 0)
335 return 1; 272 return 1;
336 atomic_inc(count); 273 atomic_inc(count);
337 return 0; 274 return 0;
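The __raw_read_trylock() change above closes a small race: the old atomic_dec()/atomic_read() pair let another CPU modify the counter between the decrement and the read, so the test could see a value the decrement never produced. atomic_dec_return() reports the result of the decrement itself. A user-space model of the fixed pattern using C11 atomics (names here are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdbool.h>

static bool read_trylock(atomic_int *count)
{
        /* atomic_fetch_sub() returns the pre-decrement value, so
         * (old - 1) is what the kernel's atomic_dec_return() yields. */
        if (atomic_fetch_sub(count, 1) - 1 >= 0)
                return true;            /* got a reader slot */
        atomic_fetch_add(count, 1);     /* undo: lock is write-held */
        return false;
}

int main(void)
{
        atomic_int count = 1;           /* one reader slot free */

        return read_trylock(&count) ? 0 : 1;
}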
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 36a700acaf2b..c2d742c6e15f 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -1,8 +1,54 @@
1/*
2 * GCC stack protector support.
3 *
 4 * Stack protector works by putting a predefined pattern at the start of
 5 * the stack frame and verifying that it hasn't been overwritten when
 6 * returning from the function. The pattern is called the stack canary,
 7 * and unfortunately gcc requires it to be at a fixed offset from %gs.
 8 * On x86_64 the offset is 40 bytes; on x86_32 it is 20 bytes. x86_64
 9 * and x86_32 use segment registers differently and thus handle this
 10 * requirement differently.
11 *
12 * On x86_64, %gs is shared by percpu area and stack canary. All
13 * percpu symbols are zero based and %gs points to the base of percpu
14 * area. The first occupant of the percpu area is always
15 * irq_stack_union which contains stack_canary at offset 40. Userland
16 * %gs is always saved and restored on kernel entry and exit using
17 * swapgs, so stack protector doesn't add any complexity there.
18 *
 19 * On x86_32, it's slightly more complicated. As in x86_64, %gs is
 20 * used for userland TLS. Unfortunately, some processors are much
 21 * slower at loading segment registers with a different value when
 22 * entering and leaving the kernel, so the kernel uses %fs for the
 23 * percpu area and manages %gs lazily so that %gs is switched only
 24 * when necessary, usually during task switch.
25 *
26 * As gcc requires the stack canary at %gs:20, %gs can't be managed
27 * lazily if stack protector is enabled, so the kernel saves and
28 * restores userland %gs on kernel entry and exit. This behavior is
29 * controlled by CONFIG_X86_32_LAZY_GS and accessors are defined in
30 * system.h to hide the details.
31 */
32
1#ifndef _ASM_STACKPROTECTOR_H 33#ifndef _ASM_STACKPROTECTOR_H
2#define _ASM_STACKPROTECTOR_H 1 34#define _ASM_STACKPROTECTOR_H 1
3 35
36#ifdef CONFIG_CC_STACKPROTECTOR
37
4#include <asm/tsc.h> 38#include <asm/tsc.h>
5#include <asm/processor.h> 39#include <asm/processor.h>
40#include <asm/percpu.h>
41#include <asm/system.h>
42#include <asm/desc.h>
43#include <linux/random.h>
44
45/*
 46 * 24-byte read-only segment initializer for the stack canary. The
 47 * linker can't handle the address bit shifting, so the address is set
 48 * in head_32 for the boot CPU and in setup_per_cpu_areas() for others.
49 */
50#define GDT_STACK_CANARY_INIT \
51 [GDT_ENTRY_STACK_CANARY] = { { { 0x00000018, 0x00409000 } } },
6 52
7/* 53/*
8 * Initialize the stackprotector canary value. 54 * Initialize the stackprotector canary value.
@@ -15,12 +61,9 @@ static __always_inline void boot_init_stack_canary(void)
15 u64 canary; 61 u64 canary;
16 u64 tsc; 62 u64 tsc;
17 63
18 /* 64#ifdef CONFIG_X86_64
19 * Build time only check to make sure the stack_canary is at
20 * offset 40 in the pda; this is a gcc ABI requirement
21 */
22 BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40); 65 BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
23 66#endif
24 /* 67 /*
25 * We both use the random pool and the current TSC as a source 68 * We both use the random pool and the current TSC as a source
26 * of randomness. The TSC only matters for very early init, 69 * of randomness. The TSC only matters for very early init,
@@ -32,7 +75,50 @@ static __always_inline void boot_init_stack_canary(void)
32 canary += tsc + (tsc << 32UL); 75 canary += tsc + (tsc << 32UL);
33 76
34 current->stack_canary = canary; 77 current->stack_canary = canary;
78#ifdef CONFIG_X86_64
35 percpu_write(irq_stack_union.stack_canary, canary); 79 percpu_write(irq_stack_union.stack_canary, canary);
80#else
81 percpu_write(stack_canary, canary);
82#endif
36} 83}
37 84
85static inline void setup_stack_canary_segment(int cpu)
86{
87#ifdef CONFIG_X86_32
88 unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu) - 20;
89 struct desc_struct *gdt_table = get_cpu_gdt_table(cpu);
90 struct desc_struct desc;
91
92 desc = gdt_table[GDT_ENTRY_STACK_CANARY];
93 desc.base0 = canary & 0xffff;
94 desc.base1 = (canary >> 16) & 0xff;
95 desc.base2 = (canary >> 24) & 0xff;
96 write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S);
97#endif
98}
99
100static inline void load_stack_canary_segment(void)
101{
102#ifdef CONFIG_X86_32
103 asm("mov %0, %%gs" : : "r" (__KERNEL_STACK_CANARY) : "memory");
104#endif
105}
106
107#else /* CC_STACKPROTECTOR */
108
109#define GDT_STACK_CANARY_INIT
110
111/* dummy boot_init_stack_canary() is defined in linux/stackprotector.h */
112
113static inline void setup_stack_canary_segment(int cpu)
114{ }
115
116static inline void load_stack_canary_segment(void)
117{
118#ifdef CONFIG_X86_32
119 asm volatile ("mov %0, %%gs" : : "r" (0));
38#endif 120#endif
121}
122
123#endif /* CC_STACKPROTECTOR */
124#endif /* _ASM_STACKPROTECTOR_H */
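setup_stack_canary_segment() above scatters the 32-bit canary segment base across the base0/base1/base2 fields of the GDT descriptor. A standalone sketch of that split and its reassembly, with a hypothetical base address and field names mirroring struct desc_struct:

#include <assert.h>
#include <stdint.h>

struct desc_fields {
        uint16_t base0;         /* base bits 0..15  */
        uint8_t  base1;         /* base bits 16..23 */
        uint8_t  base2;         /* base bits 24..31 */
};

int main(void)
{
        uint32_t canary_base = 0xc1a0afec; /* hypothetical &stack_canary - 20 */
        struct desc_fields d = {
                .base0 = canary_base & 0xffff,
                .base1 = (canary_base >> 16) & 0xff,
                .base2 = (canary_base >> 24) & 0xff,
        };

        /* reassembling the fields recovers the original base */
        assert(((uint32_t)d.base2 << 24 | (uint32_t)d.base1 << 16 | d.base0)
               == canary_base);
        return 0;
}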
diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h
deleted file mode 100644
index 93d2c8667cfe..000000000000
--- a/arch/x86/include/asm/summit/apic.h
+++ /dev/null
@@ -1,202 +0,0 @@
1#ifndef __ASM_SUMMIT_APIC_H
2#define __ASM_SUMMIT_APIC_H
3
4#include <asm/smp.h>
5#include <linux/gfp.h>
6
7#define esr_disable (1)
8#define NO_BALANCE_IRQ (0)
9
10/* In clustered mode, the high nibble of APIC ID is a cluster number.
11 * The low nibble is a 4-bit bitmap. */
12#define XAPIC_DEST_CPUS_SHIFT 4
13#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
14#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
15
16#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
17
18static inline const cpumask_t *target_cpus(void)
19{
 20 /* CPU_MASK_ALL (0xff) has undefined behaviour with
 21 * dest_LowestPrio mode logical clustered apic interrupt routing.
 22 * Just start on cpu 0; IRQ balancing will spread the load.
23 */
24 return &cpumask_of_cpu(0);
25}
26
27#define INT_DELIVERY_MODE (dest_LowestPrio)
28#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
29
30static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
31{
32 return 0;
33}
34
35/* we don't use the phys_cpu_present_map to indicate apicid presence */
36static inline unsigned long check_apicid_present(int bit)
37{
38 return 1;
39}
40
41#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
42
43extern u8 cpu_2_logical_apicid[];
44
45static inline void init_apic_ldr(void)
46{
47 unsigned long val, id;
48 int count = 0;
49 u8 my_id = (u8)hard_smp_processor_id();
50 u8 my_cluster = (u8)apicid_cluster(my_id);
51#ifdef CONFIG_SMP
52 u8 lid;
53 int i;
54
55 /* Create logical APIC IDs by counting CPUs already in cluster. */
56 for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
57 lid = cpu_2_logical_apicid[i];
58 if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
59 ++count;
60 }
61#endif
 62 /* We only have a 4-bit-wide bitmap in cluster mode. If a deranged
 63 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
64 BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
65 id = my_cluster | (1UL << count);
66 apic_write(APIC_DFR, APIC_DFR_VALUE);
67 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
68 val |= SET_APIC_LOGICAL_ID(id);
69 apic_write(APIC_LDR, val);
70}
71
72static inline int multi_timer_check(int apic, int irq)
73{
74 return 0;
75}
76
77static inline int apic_id_registered(void)
78{
79 return 1;
80}
81
82static inline void setup_apic_routing(void)
83{
84 printk("Enabling APIC mode: Summit. Using %d I/O APICs\n",
85 nr_ioapics);
86}
87
88static inline int apicid_to_node(int logical_apicid)
89{
90#ifdef CONFIG_SMP
91 return apicid_2_node[hard_smp_processor_id()];
92#else
93 return 0;
94#endif
95}
96
97/* Mapping from cpu number to logical apicid */
98static inline int cpu_to_logical_apicid(int cpu)
99{
100#ifdef CONFIG_SMP
101 if (cpu >= nr_cpu_ids)
102 return BAD_APICID;
103 return (int)cpu_2_logical_apicid[cpu];
104#else
105 return logical_smp_processor_id();
106#endif
107}
108
109static inline int cpu_present_to_apicid(int mps_cpu)
110{
111 if (mps_cpu < nr_cpu_ids)
112 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
113 else
114 return BAD_APICID;
115}
116
117static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map)
118{
119 /* For clustered we don't have a good way to do this yet - hack */
120 return physids_promote(0x0F);
121}
122
123static inline physid_mask_t apicid_to_cpu_present(int apicid)
124{
125 return physid_mask_of_physid(0);
126}
127
128static inline void setup_portio_remap(void)
129{
130}
131
132static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
133{
134 return 1;
135}
136
137static inline void enable_apic_mode(void)
138{
139}
140
141static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
142{
143 int num_bits_set;
144 int cpus_found = 0;
145 int cpu;
146 int apicid;
147
148 num_bits_set = cpus_weight(*cpumask);
149 /* Return id to all */
150 if (num_bits_set >= nr_cpu_ids)
151 return (int) 0xFF;
152 /*
 153 * The cpus in the mask must all be on the same apic cluster. If they
 154 * are not on the same apicid cluster, return the default value of TARGET_CPUS.
155 */
156 cpu = first_cpu(*cpumask);
157 apicid = cpu_to_logical_apicid(cpu);
158 while (cpus_found < num_bits_set) {
159 if (cpu_isset(cpu, *cpumask)) {
160 int new_apicid = cpu_to_logical_apicid(cpu);
161 if (apicid_cluster(apicid) !=
162 apicid_cluster(new_apicid)){
163 printk ("%s: Not a valid mask!\n", __func__);
164 return 0xFF;
165 }
166 apicid = apicid | new_apicid;
167 cpus_found++;
168 }
169 cpu++;
170 }
171 return apicid;
172}
173
174static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
175 const struct cpumask *andmask)
176{
177 int apicid = cpu_to_logical_apicid(0);
178 cpumask_var_t cpumask;
179
180 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
181 return apicid;
182
183 cpumask_and(cpumask, inmask, andmask);
184 cpumask_and(cpumask, cpumask, cpu_online_mask);
185 apicid = cpu_mask_to_apicid(cpumask);
186
187 free_cpumask_var(cpumask);
188 return apicid;
189}
190
191/* cpuid returns the value latched in the HW at reset, not the APIC ID
192 * register's value. For any box whose BIOS changes APIC IDs, like
193 * clustered APIC systems, we must use hard_smp_processor_id.
194 *
195 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
196 */
197static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
198{
199 return hard_smp_processor_id() >> index_msb;
200}
201
202#endif /* __ASM_SUMMIT_APIC_H */
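For reference, the clustered logical APIC ID layout the deleted Summit code relied on packs a cluster number into the high nibble and a one-hot position bitmap into the low nibble, which is why init_apic_ldr() built the ID as my_cluster | (1UL << count) and why a fifth CPU in one cluster tripped the BUG_ON. A small sketch of that encoding with hypothetical values:

#include <stdio.h>

#define XAPIC_DEST_CPUS_SHIFT   4
#define XAPIC_DEST_CPUS_MASK    ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)

int main(void)
{
        unsigned cluster = 2;   /* hypothetical cluster number */
        unsigned slot    = 3;   /* fourth CPU seen in this cluster */
        unsigned id = (cluster << XAPIC_DEST_CPUS_SHIFT) | (1u << slot);

        /* note: apicid_cluster() above keeps the high nibble in place;
         * this shifts it down only for readable output */
        printf("logical id 0x%02x: cluster %u, bitmap 0x%x\n", id,
               (id & XAPIC_DEST_CLUSTER_MASK) >> XAPIC_DEST_CPUS_SHIFT,
               id & XAPIC_DEST_CPUS_MASK);
        return 0;
}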
diff --git a/arch/x86/include/asm/summit/apicdef.h b/arch/x86/include/asm/summit/apicdef.h
deleted file mode 100644
index f3fbca1f61c1..000000000000
--- a/arch/x86/include/asm/summit/apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef __ASM_SUMMIT_APICDEF_H
2#define __ASM_SUMMIT_APICDEF_H
3
4#define APIC_ID_MASK (0xFF<<24)
5
6static inline unsigned get_apic_id(unsigned long x)
7{
8 return (x>>24)&0xFF;
9}
10
11#define GET_APIC_ID(x) get_apic_id(x)
12
13#endif
diff --git a/arch/x86/include/asm/summit/ipi.h b/arch/x86/include/asm/summit/ipi.h
deleted file mode 100644
index a8a2c24f50cc..000000000000
--- a/arch/x86/include/asm/summit/ipi.h
+++ /dev/null
@@ -1,26 +0,0 @@
1#ifndef __ASM_SUMMIT_IPI_H
2#define __ASM_SUMMIT_IPI_H
3
4void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
5void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
6
7static inline void send_IPI_mask(const cpumask_t *mask, int vector)
8{
9 send_IPI_mask_sequence(mask, vector);
10}
11
12static inline void send_IPI_allbutself(int vector)
13{
14 cpumask_t mask = cpu_online_map;
15 cpu_clear(smp_processor_id(), mask);
16
17 if (!cpus_empty(mask))
18 send_IPI_mask(&mask, vector);
19}
20
21static inline void send_IPI_all(int vector)
22{
23 send_IPI_mask(&cpu_online_map, vector);
24}
25
26#endif /* __ASM_SUMMIT_IPI_H */
diff --git a/arch/x86/include/asm/summit/mpparse.h b/arch/x86/include/asm/summit/mpparse.h
deleted file mode 100644
index 380e86c02363..000000000000
--- a/arch/x86/include/asm/summit/mpparse.h
+++ /dev/null
@@ -1,109 +0,0 @@
1#ifndef __ASM_SUMMIT_MPPARSE_H
2#define __ASM_SUMMIT_MPPARSE_H
3
4#include <asm/tsc.h>
5
6extern int use_cyclone;
7
8#ifdef CONFIG_X86_SUMMIT_NUMA
9extern void setup_summit(void);
10#else
11#define setup_summit() {}
12#endif
13
14static inline int mps_oem_check(struct mpc_table *mpc, char *oem,
15 char *productid)
16{
17 if (!strncmp(oem, "IBM ENSW", 8) &&
18 (!strncmp(productid, "VIGIL SMP", 9)
19 || !strncmp(productid, "EXA", 3)
20 || !strncmp(productid, "RUTHLESS SMP", 12))){
21 mark_tsc_unstable("Summit based system");
22 use_cyclone = 1; /*enable cyclone-timer*/
23 setup_summit();
24 return 1;
25 }
26 return 0;
27}
28
29/* Hook from generic ACPI tables.c */
30static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
31{
32 if (!strncmp(oem_id, "IBM", 3) &&
33 (!strncmp(oem_table_id, "SERVIGIL", 8)
34 || !strncmp(oem_table_id, "EXA", 3))){
35 mark_tsc_unstable("Summit based system");
36 use_cyclone = 1; /*enable cyclone-timer*/
37 setup_summit();
38 return 1;
39 }
40 return 0;
41}
42
43struct rio_table_hdr {
44 unsigned char version; /* Version number of this data structure */
45 /* Version 3 adds chassis_num & WP_index */
46 unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil) */
47 unsigned char num_rio_dev; /* # of RIO I/O devices (Cyclones and Winnipegs) */
48} __attribute__((packed));
49
50struct scal_detail {
51 unsigned char node_id; /* Scalability Node ID */
52 unsigned long CBAR; /* Address of 1MB register space */
53 unsigned char port0node; /* Node ID port connected to: 0xFF=None */
54 unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */
55 unsigned char port1node; /* Node ID port connected to: 0xFF = None */
56 unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */
57 unsigned char port2node; /* Node ID port connected to: 0xFF = None */
58 unsigned char port2port; /* Port num port connected to: 0,1,2, or 0xFF=None */
59 unsigned char chassis_num; /* 1 based Chassis number (1 = boot node) */
60} __attribute__((packed));
61
62struct rio_detail {
63 unsigned char node_id; /* RIO Node ID */
64 unsigned long BBAR; /* Address of 1MB register space */
65 unsigned char type; /* Type of device */
66 unsigned char owner_id; /* For WPEG: Node ID of Cyclone that owns this WPEG*/
67 /* For CYC: Node ID of Twister that owns this CYC */
68 unsigned char port0node; /* Node ID port connected to: 0xFF=None */
69 unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */
70 unsigned char port1node; /* Node ID port connected to: 0xFF=None */
71 unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */
72 unsigned char first_slot; /* For WPEG: Lowest slot number below this WPEG */
73 /* For CYC: 0 */
74 unsigned char status; /* For WPEG: Bit 0 = 1 : the XAPIC is used */
75 /* = 0 : the XAPIC is not used, ie:*/
76 /* ints fwded to another XAPIC */
77 /* Bits1:7 Reserved */
78 /* For CYC: Bits0:7 Reserved */
79 unsigned char WP_index; /* For WPEG: WPEG instance index - lower ones have */
80 /* lower slot numbers/PCI bus numbers */
81 /* For CYC: No meaning */
82 unsigned char chassis_num; /* 1 based Chassis number */
83 /* For LookOut WPEGs this field indicates the */
84 /* Expansion Chassis #, enumerated from Boot */
85 /* Node WPEG external port, then Boot Node CYC */
86 /* external port, then Next Vigil chassis WPEG */
87 /* external port, etc. */
88 /* Shared Lookouts have only 1 chassis number (the */
89 /* first one assigned) */
90} __attribute__((packed));
91
92
93typedef enum {
94 CompatTwister = 0, /* Compatibility Twister */
95 AltTwister = 1, /* Alternate Twister of internal 8-way */
96 CompatCyclone = 2, /* Compatibility Cyclone */
97 AltCyclone = 3, /* Alternate Cyclone of internal 8-way */
98 CompatWPEG = 4, /* Compatibility WPEG */
99 AltWPEG = 5, /* Second Planar WPEG */
100 LookOutAWPEG = 6, /* LookOut WPEG */
101 LookOutBWPEG = 7, /* LookOut WPEG */
102} node_type;
103
104static inline int is_WPEG(struct rio_detail *rio){
105 return (rio->type == CompatWPEG || rio->type == AltWPEG ||
106 rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
107}
108
109#endif /* __ASM_SUMMIT_MPPARSE_H */
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index c0b0bda754ee..68b1be10cfad 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -29,21 +29,21 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *);
29/* X86_32 only */ 29/* X86_32 only */
30#ifdef CONFIG_X86_32 30#ifdef CONFIG_X86_32
31/* kernel/process_32.c */ 31/* kernel/process_32.c */
32asmlinkage int sys_fork(struct pt_regs); 32int sys_fork(struct pt_regs *);
33asmlinkage int sys_clone(struct pt_regs); 33int sys_clone(struct pt_regs *);
34asmlinkage int sys_vfork(struct pt_regs); 34int sys_vfork(struct pt_regs *);
35asmlinkage int sys_execve(struct pt_regs); 35int sys_execve(struct pt_regs *);
36 36
37/* kernel/signal_32.c */ 37/* kernel/signal_32.c */
38asmlinkage int sys_sigsuspend(int, int, old_sigset_t); 38asmlinkage int sys_sigsuspend(int, int, old_sigset_t);
39asmlinkage int sys_sigaction(int, const struct old_sigaction __user *, 39asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
40 struct old_sigaction __user *); 40 struct old_sigaction __user *);
41asmlinkage int sys_sigaltstack(unsigned long); 41int sys_sigaltstack(struct pt_regs *);
42asmlinkage unsigned long sys_sigreturn(unsigned long); 42unsigned long sys_sigreturn(struct pt_regs *);
43asmlinkage int sys_rt_sigreturn(unsigned long); 43long sys_rt_sigreturn(struct pt_regs *);
44 44
45/* kernel/ioport.c */ 45/* kernel/ioport.c */
46asmlinkage long sys_iopl(unsigned long); 46long sys_iopl(struct pt_regs *);
47 47
48/* kernel/sys_i386_32.c */ 48/* kernel/sys_i386_32.c */
49asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long, 49asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
@@ -59,8 +59,8 @@ struct oldold_utsname;
59asmlinkage int sys_olduname(struct oldold_utsname __user *); 59asmlinkage int sys_olduname(struct oldold_utsname __user *);
60 60
61/* kernel/vm86_32.c */ 61/* kernel/vm86_32.c */
62asmlinkage int sys_vm86old(struct pt_regs); 62int sys_vm86old(struct pt_regs *);
63asmlinkage int sys_vm86(struct pt_regs); 63int sys_vm86(struct pt_regs *);
64 64
65#else /* CONFIG_X86_32 */ 65#else /* CONFIG_X86_32 */
66 66
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index 2fcc70bc85f3..c00bfdbdd456 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -23,6 +23,20 @@ struct task_struct *__switch_to(struct task_struct *prev,
23 23
24#ifdef CONFIG_X86_32 24#ifdef CONFIG_X86_32
25 25
26#ifdef CONFIG_CC_STACKPROTECTOR
27#define __switch_canary \
28 "movl %P[task_canary](%[next]), %%ebx\n\t" \
29 "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
30#define __switch_canary_oparam \
31 , [stack_canary] "=m" (per_cpu_var(stack_canary))
32#define __switch_canary_iparam \
33 , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
34#else /* CC_STACKPROTECTOR */
35#define __switch_canary
36#define __switch_canary_oparam
37#define __switch_canary_iparam
38#endif /* CC_STACKPROTECTOR */
39
26/* 40/*
27 * Saving eflags is important. It switches not only IOPL between tasks, 41 * Saving eflags is important. It switches not only IOPL between tasks,
28 * it also protects other tasks from NT leaking through sysenter etc. 42 * it also protects other tasks from NT leaking through sysenter etc.
@@ -44,6 +58,7 @@ do { \
44 "movl %[next_sp],%%esp\n\t" /* restore ESP */ \ 58 "movl %[next_sp],%%esp\n\t" /* restore ESP */ \
45 "movl $1f,%[prev_ip]\n\t" /* save EIP */ \ 59 "movl $1f,%[prev_ip]\n\t" /* save EIP */ \
46 "pushl %[next_ip]\n\t" /* restore EIP */ \ 60 "pushl %[next_ip]\n\t" /* restore EIP */ \
61 __switch_canary \
47 "jmp __switch_to\n" /* regparm call */ \ 62 "jmp __switch_to\n" /* regparm call */ \
48 "1:\t" \ 63 "1:\t" \
49 "popl %%ebp\n\t" /* restore EBP */ \ 64 "popl %%ebp\n\t" /* restore EBP */ \
@@ -58,6 +73,8 @@ do { \
58 "=b" (ebx), "=c" (ecx), "=d" (edx), \ 73 "=b" (ebx), "=c" (ecx), "=d" (edx), \
59 "=S" (esi), "=D" (edi) \ 74 "=S" (esi), "=D" (edi) \
60 \ 75 \
76 __switch_canary_oparam \
77 \
61 /* input parameters: */ \ 78 /* input parameters: */ \
62 : [next_sp] "m" (next->thread.sp), \ 79 : [next_sp] "m" (next->thread.sp), \
63 [next_ip] "m" (next->thread.ip), \ 80 [next_ip] "m" (next->thread.ip), \
@@ -66,6 +83,8 @@ do { \
66 [prev] "a" (prev), \ 83 [prev] "a" (prev), \
67 [next] "d" (next) \ 84 [next] "d" (next) \
68 \ 85 \
86 __switch_canary_iparam \
87 \
69 : /* reloaded segment registers */ \ 88 : /* reloaded segment registers */ \
70 "memory"); \ 89 "memory"); \
71} while (0) 90} while (0)
@@ -111,16 +130,16 @@ do { \
111 "movq "__percpu_arg([current_task])",%%rsi\n\t" \ 130 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
112 __switch_canary \ 131 __switch_canary \
113 "movq %P[thread_info](%%rsi),%%r8\n\t" \ 132 "movq %P[thread_info](%%rsi),%%r8\n\t" \
114 LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
115 "movq %%rax,%%rdi\n\t" \ 133 "movq %%rax,%%rdi\n\t" \
116 "jc ret_from_fork\n\t" \ 134 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
135 "jnz ret_from_fork\n\t" \
117 RESTORE_CONTEXT \ 136 RESTORE_CONTEXT \
118 : "=a" (last) \ 137 : "=a" (last) \
119 __switch_canary_oparam \ 138 __switch_canary_oparam \
120 : [next] "S" (next), [prev] "D" (prev), \ 139 : [next] "S" (next), [prev] "D" (prev), \
121 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \ 140 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
122 [ti_flags] "i" (offsetof(struct thread_info, flags)), \ 141 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
123 [tif_fork] "i" (TIF_FORK), \ 142 [_tif_fork] "i" (_TIF_FORK), \
124 [thread_info] "i" (offsetof(struct task_struct, stack)), \ 143 [thread_info] "i" (offsetof(struct task_struct, stack)), \
125 [current_task] "m" (per_cpu_var(current_task)) \ 144 [current_task] "m" (per_cpu_var(current_task)) \
126 __switch_canary_iparam \ 145 __switch_canary_iparam \
@@ -182,6 +201,25 @@ extern void native_load_gs_index(unsigned);
182#define savesegment(seg, value) \ 201#define savesegment(seg, value) \
183 asm("mov %%" #seg ",%0":"=r" (value) : : "memory") 202 asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
184 203
204/*
205 * x86_32 user gs accessors.
206 */
207#ifdef CONFIG_X86_32
208#ifdef CONFIG_X86_32_LAZY_GS
209#define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;})
210#define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v))
211#define task_user_gs(tsk) ((tsk)->thread.gs)
212#define lazy_save_gs(v) savesegment(gs, (v))
213#define lazy_load_gs(v) loadsegment(gs, (v))
214#else /* X86_32_LAZY_GS */
215#define get_user_gs(regs) (u16)((regs)->gs)
216#define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0)
217#define task_user_gs(tsk) (task_pt_regs(tsk)->gs)
218#define lazy_save_gs(v) do { } while (0)
219#define lazy_load_gs(v) do { } while (0)
220#endif /* X86_32_LAZY_GS */
221#endif /* X86_32 */
222
185static inline unsigned long get_limit(unsigned long segment) 223static inline unsigned long get_limit(unsigned long segment)
186{ 224{
187 unsigned long __limit; 225 unsigned long __limit;
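The get_user_gs()/lazy_save_gs() accessors added above boil down to reading or writing the %gs selector with a plain mov, via the savesegment()/loadsegment() helpers. A user-space sketch of the read side, assuming an x86 target and gcc-style inline asm:

#include <stdio.h>

/* user-space analogue of savesegment(gs, v) */
static unsigned short save_gs(void)
{
        unsigned short v;

        asm volatile("mov %%gs, %0" : "=r" (v));
        return v;
}

int main(void)
{
        printf("current %%gs selector: 0x%04x\n", save_gs());
        return 0;
}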
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index f38488989db7..ca7310e02446 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -40,6 +40,7 @@ struct thread_info {
40 */ 40 */
41 __u8 supervisor_stack[0]; 41 __u8 supervisor_stack[0];
42#endif 42#endif
43 int uaccess_err;
43}; 44};
44 45
45#define INIT_THREAD_INFO(tsk) \ 46#define INIT_THREAD_INFO(tsk) \
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 10022ed3a4b6..77cfb2cfb386 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -74,6 +74,8 @@ static inline const struct cpumask *cpumask_of_node(int node)
74 return &node_to_cpumask_map[node]; 74 return &node_to_cpumask_map[node];
75} 75}
76 76
77static inline void setup_node_to_cpumask_map(void) { }
78
77#else /* CONFIG_X86_64 */ 79#else /* CONFIG_X86_64 */
78 80
79/* Mappings between node number and cpus on that node. */ 81/* Mappings between node number and cpus on that node. */
@@ -120,6 +122,8 @@ static inline cpumask_t node_to_cpumask(int node)
120 122
121#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ 123#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
122 124
125extern void setup_node_to_cpumask_map(void);
126
123/* 127/*
124 * Replace default node_to_cpumask_ptr with optimized version 128 * Replace default node_to_cpumask_ptr with optimized version
125 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" 129 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
@@ -218,6 +222,8 @@ static inline int node_to_first_cpu(int node)
218 return first_cpu(cpu_online_map); 222 return first_cpu(cpu_online_map);
219} 223}
220 224
225static inline void setup_node_to_cpumask_map(void) { }
226
221/* 227/*
222 * Replace default node_to_cpumask_ptr with optimized version 228 * Replace default node_to_cpumask_ptr with optimized version
223 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" 229 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index cf3bb053da0b..0d5342515b86 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -41,7 +41,7 @@ dotraplinkage void do_int3(struct pt_regs *, long);
41dotraplinkage void do_overflow(struct pt_regs *, long); 41dotraplinkage void do_overflow(struct pt_regs *, long);
42dotraplinkage void do_bounds(struct pt_regs *, long); 42dotraplinkage void do_bounds(struct pt_regs *, long);
43dotraplinkage void do_invalid_op(struct pt_regs *, long); 43dotraplinkage void do_invalid_op(struct pt_regs *, long);
44dotraplinkage void do_device_not_available(struct pt_regs); 44dotraplinkage void do_device_not_available(struct pt_regs *, long);
45dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long); 45dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long);
46dotraplinkage void do_invalid_TSS(struct pt_regs *, long); 46dotraplinkage void do_invalid_TSS(struct pt_regs *, long);
47dotraplinkage void do_segment_not_present(struct pt_regs *, long); 47dotraplinkage void do_segment_not_present(struct pt_regs *, long);
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 4340055b7559..b685ece89d5c 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -121,7 +121,7 @@ extern int __get_user_bad(void);
121 121
122#define __get_user_x(size, ret, x, ptr) \ 122#define __get_user_x(size, ret, x, ptr) \
123 asm volatile("call __get_user_" #size \ 123 asm volatile("call __get_user_" #size \
124 : "=a" (ret),"=d" (x) \ 124 : "=a" (ret), "=d" (x) \
125 : "0" (ptr)) \ 125 : "0" (ptr)) \
126 126
127/* Careful: we have to cast the result to the type of the pointer 127/* Careful: we have to cast the result to the type of the pointer
@@ -181,12 +181,12 @@ extern int __get_user_bad(void);
181 181
182#define __put_user_x(size, x, ptr, __ret_pu) \ 182#define __put_user_x(size, x, ptr, __ret_pu) \
183 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \ 183 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
184 :"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") 184 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
185 185
186 186
187 187
188#ifdef CONFIG_X86_32 188#ifdef CONFIG_X86_32
189#define __put_user_u64(x, addr, err) \ 189#define __put_user_asm_u64(x, addr, err, errret) \
190 asm volatile("1: movl %%eax,0(%2)\n" \ 190 asm volatile("1: movl %%eax,0(%2)\n" \
191 "2: movl %%edx,4(%2)\n" \ 191 "2: movl %%edx,4(%2)\n" \
192 "3:\n" \ 192 "3:\n" \
@@ -197,14 +197,24 @@ extern int __get_user_bad(void);
197 _ASM_EXTABLE(1b, 4b) \ 197 _ASM_EXTABLE(1b, 4b) \
198 _ASM_EXTABLE(2b, 4b) \ 198 _ASM_EXTABLE(2b, 4b) \
199 : "=r" (err) \ 199 : "=r" (err) \
200 : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err)) 200 : "A" (x), "r" (addr), "i" (errret), "0" (err))
201
202#define __put_user_asm_ex_u64(x, addr) \
203 asm volatile("1: movl %%eax,0(%1)\n" \
204 "2: movl %%edx,4(%1)\n" \
205 "3:\n" \
206 _ASM_EXTABLE(1b, 2b - 1b) \
207 _ASM_EXTABLE(2b, 3b - 2b) \
208 : : "A" (x), "r" (addr))
201 209
202#define __put_user_x8(x, ptr, __ret_pu) \ 210#define __put_user_x8(x, ptr, __ret_pu) \
203 asm volatile("call __put_user_8" : "=a" (__ret_pu) \ 211 asm volatile("call __put_user_8" : "=a" (__ret_pu) \
204 : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") 212 : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
205#else 213#else
206#define __put_user_u64(x, ptr, retval) \ 214#define __put_user_asm_u64(x, ptr, retval, errret) \
207 __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT) 215 __put_user_asm(x, ptr, retval, "q", "", "Zr", errret)
216#define __put_user_asm_ex_u64(x, addr) \
217 __put_user_asm_ex(x, addr, "q", "", "Zr")
208#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu) 218#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
209#endif 219#endif
210 220
@@ -276,10 +286,32 @@ do { \
276 __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \ 286 __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
277 break; \ 287 break; \
278 case 4: \ 288 case 4: \
279 __put_user_asm(x, ptr, retval, "l", "k", "ir", errret);\ 289 __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
280 break; \ 290 break; \
281 case 8: \ 291 case 8: \
282 __put_user_u64((__typeof__(*ptr))(x), ptr, retval); \ 292 __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \
293 errret); \
294 break; \
295 default: \
296 __put_user_bad(); \
297 } \
298} while (0)
299
300#define __put_user_size_ex(x, ptr, size) \
301do { \
302 __chk_user_ptr(ptr); \
303 switch (size) { \
304 case 1: \
305 __put_user_asm_ex(x, ptr, "b", "b", "iq"); \
306 break; \
307 case 2: \
308 __put_user_asm_ex(x, ptr, "w", "w", "ir"); \
309 break; \
310 case 4: \
311 __put_user_asm_ex(x, ptr, "l", "k", "ir"); \
312 break; \
313 case 8: \
314 __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
283 break; \ 315 break; \
284 default: \ 316 default: \
285 __put_user_bad(); \ 317 __put_user_bad(); \
@@ -311,9 +343,12 @@ do { \
311 343
312#ifdef CONFIG_X86_32 344#ifdef CONFIG_X86_32
313#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad() 345#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
346#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
314#else 347#else
315#define __get_user_asm_u64(x, ptr, retval, errret) \ 348#define __get_user_asm_u64(x, ptr, retval, errret) \
316 __get_user_asm(x, ptr, retval, "q", "", "=r", errret) 349 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
350#define __get_user_asm_ex_u64(x, ptr) \
351 __get_user_asm_ex(x, ptr, "q", "", "=r")
317#endif 352#endif
318 353
319#define __get_user_size(x, ptr, size, retval, errret) \ 354#define __get_user_size(x, ptr, size, retval, errret) \
@@ -350,6 +385,33 @@ do { \
350 : "=r" (err), ltype(x) \ 385 : "=r" (err), ltype(x) \
351 : "m" (__m(addr)), "i" (errret), "0" (err)) 386 : "m" (__m(addr)), "i" (errret), "0" (err))
352 387
388#define __get_user_size_ex(x, ptr, size) \
389do { \
390 __chk_user_ptr(ptr); \
391 switch (size) { \
392 case 1: \
393 __get_user_asm_ex(x, ptr, "b", "b", "=q"); \
394 break; \
395 case 2: \
396 __get_user_asm_ex(x, ptr, "w", "w", "=r"); \
397 break; \
398 case 4: \
399 __get_user_asm_ex(x, ptr, "l", "k", "=r"); \
400 break; \
401 case 8: \
402 __get_user_asm_ex_u64(x, ptr); \
403 break; \
404 default: \
405 (x) = __get_user_bad(); \
406 } \
407} while (0)
408
409#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
410 asm volatile("1: mov"itype" %1,%"rtype"0\n" \
411 "2:\n" \
412 _ASM_EXTABLE(1b, 2b - 1b) \
413 : ltype(x) : "m" (__m(addr)))
414
353#define __put_user_nocheck(x, ptr, size) \ 415#define __put_user_nocheck(x, ptr, size) \
354({ \ 416({ \
355 int __pu_err; \ 417 int __pu_err; \
@@ -385,6 +447,26 @@ struct __large_struct { unsigned long buf[100]; };
385 _ASM_EXTABLE(1b, 3b) \ 447 _ASM_EXTABLE(1b, 3b) \
386 : "=r"(err) \ 448 : "=r"(err) \
387 : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err)) 449 : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
450
451#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
452 asm volatile("1: mov"itype" %"rtype"0,%1\n" \
453 "2:\n" \
454 _ASM_EXTABLE(1b, 2b - 1b) \
455 : : ltype(x), "m" (__m(addr)))
456
457/*
458 * uaccess_try and catch
459 */
460#define uaccess_try do { \
461 int prev_err = current_thread_info()->uaccess_err; \
462 current_thread_info()->uaccess_err = 0; \
463 barrier();
464
465#define uaccess_catch(err) \
466 (err) |= current_thread_info()->uaccess_err; \
467 current_thread_info()->uaccess_err = prev_err; \
468} while (0)
469
388/** 470/**
389 * __get_user: - Get a simple variable from user space, with less checking. 471 * __get_user: - Get a simple variable from user space, with less checking.
390 * @x: Variable to store result. 472 * @x: Variable to store result.
@@ -408,6 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
408 490
409#define __get_user(x, ptr) \ 491#define __get_user(x, ptr) \
410 __get_user_nocheck((x), (ptr), sizeof(*(ptr))) 492 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
493
411/** 494/**
412 * __put_user: - Write a simple value into user space, with less checking. 495 * __put_user: - Write a simple value into user space, with less checking.
413 * @x: Value to copy to user space. 496 * @x: Value to copy to user space.
@@ -435,6 +518,45 @@ struct __large_struct { unsigned long buf[100]; };
435#define __put_user_unaligned __put_user 518#define __put_user_unaligned __put_user
436 519
437/* 520/*
521 * {get|put}_user_try and catch
522 *
523 * get_user_try {
524 * get_user_ex(...);
525 * } get_user_catch(err)
526 */
527#define get_user_try uaccess_try
528#define get_user_catch(err) uaccess_catch(err)
529
530#define get_user_ex(x, ptr) do { \
531 unsigned long __gue_val; \
532 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
533 (x) = (__force __typeof__(*(ptr)))__gue_val; \
534} while (0)
535
536#ifdef CONFIG_X86_WP_WORKS_OK
537
538#define put_user_try uaccess_try
539#define put_user_catch(err) uaccess_catch(err)
540
541#define put_user_ex(x, ptr) \
542 __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
543
544#else /* !CONFIG_X86_WP_WORKS_OK */
545
546#define put_user_try do { \
547 int __uaccess_err = 0;
548
549#define put_user_catch(err) \
550 (err) |= __uaccess_err; \
551} while (0)
552
553#define put_user_ex(x, ptr) do { \
554 __uaccess_err |= __put_user(x, ptr); \
555} while (0)
556
557#endif /* CONFIG_X86_WP_WORKS_OK */
558
559/*
438 * movsl can be slow when source and dest are not both 8-byte aligned 560 * movsl can be slow when source and dest are not both 8-byte aligned
439 */ 561 */
440#ifdef CONFIG_X86_INTEL_USERCOPY 562#ifdef CONFIG_X86_INTEL_USERCOPY
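The uaccess_try/uaccess_catch machinery added above lets a run of get_user_ex()/put_user_ex() accesses skip per-access error checks: a fault records itself in thread_info->uaccess_err via the exception table, and the catch block collects the error once. A user-space model of that control flow, with the fault handler stood in for by the accessor itself (all names here are illustrative, and the kernel version additionally inserts a compiler barrier):

#include <stdio.h>

static __thread int uaccess_err;

#define uaccess_try do {                                \
        int prev_err = uaccess_err;                     \
        uaccess_err = 0;

#define uaccess_catch(err)                              \
        (err) |= uaccess_err;                           \
        uaccess_err = prev_err;                         \
} while (0)

/* Model accessor: "faults" (sets the flag) when src is NULL. */
static void model_get_user(int *dst, const int *src)
{
        if (!src) {
                uaccess_err = -14;      /* -EFAULT */
                return;
        }
        *dst = *src;
}

int main(void)
{
        int a = 0, b = 0, err = 0, v = 42;

        uaccess_try {
                model_get_user(&a, &v);
                model_get_user(&b, NULL);       /* this one faults */
        } uaccess_catch(err);

        printf("a=%d b=%d err=%d\n", a, b, err);
        return 0;
}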
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index 8ac1d7e312f3..8242bf965812 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -3,6 +3,9 @@
3 3
4enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; 4enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
5 5
6struct cpumask;
7struct mm_struct;
8
6#ifdef CONFIG_X86_UV 9#ifdef CONFIG_X86_UV
7 10
8extern enum uv_system_type get_uv_system_type(void); 11extern enum uv_system_type get_uv_system_type(void);
diff --git a/arch/x86/include/asm/voyager.h b/arch/x86/include/asm/voyager.h
index b3e647307625..c1635d43616f 100644
--- a/arch/x86/include/asm/voyager.h
+++ b/arch/x86/include/asm/voyager.h
@@ -527,3 +527,45 @@ extern void voyager_smp_intr_init(void);
527#define VOYAGER_PSI_SUBREAD 2 527#define VOYAGER_PSI_SUBREAD 2
528#define VOYAGER_PSI_SUBWRITE 3 528#define VOYAGER_PSI_SUBWRITE 3
529extern void voyager_cat_psi(__u8, __u16, __u8 *); 529extern void voyager_cat_psi(__u8, __u16, __u8 *);
530
531/* These define the CPIs we use in linux */
532#define VIC_CPI_LEVEL0 0
533#define VIC_CPI_LEVEL1 1
534/* now the fake CPIs */
535#define VIC_TIMER_CPI 2
536#define VIC_INVALIDATE_CPI 3
537#define VIC_RESCHEDULE_CPI 4
538#define VIC_ENABLE_IRQ_CPI 5
539#define VIC_CALL_FUNCTION_CPI 6
540#define VIC_CALL_FUNCTION_SINGLE_CPI 7
541
542/* Now the QIC CPIs: Since we don't need the two initial levels,
543 * these are 2 less than the VIC CPIs */
544#define QIC_CPI_OFFSET 1
545#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET)
546#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
547#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
548#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
549#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
550#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
551
552#define VIC_START_FAKE_CPI VIC_TIMER_CPI
553#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI
554
555/* this is the SYS_INT CPI. */
556#define VIC_SYS_INT 8
557#define VIC_CMN_INT 15
558
559/* This is the boot CPI for alternate processors. It gets overwritten
560 * by the above once the system has activated all available processors */
561#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0
562#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8)
563
564extern asmlinkage void vic_cpi_interrupt(void);
565extern asmlinkage void vic_sys_interrupt(void);
566extern asmlinkage void vic_cmn_interrupt(void);
567extern asmlinkage void qic_timer_interrupt(void);
568extern asmlinkage void qic_invalidate_interrupt(void);
569extern asmlinkage void qic_reschedule_interrupt(void);
570extern asmlinkage void qic_enable_irq_interrupt(void);
571extern asmlinkage void qic_call_function_interrupt(void);
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index 19144184983a..1df35417c412 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -15,10 +15,4 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
15 return raw_irqs_disabled_flags(regs->flags); 15 return raw_irqs_disabled_flags(regs->flags);
16} 16}
17 17
18static inline void xen_do_IRQ(int irq, struct pt_regs *regs)
19{
20 regs->orig_ax = ~irq;
21 do_IRQ(regs);
22}
23
24#endif /* _ASM_X86_XEN_EVENTS_H */ 18#endif /* _ASM_X86_XEN_EVENTS_H */