about summary refs log tree commit diff stats
path: root/arch/x86/include/asm/pda.h
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2008-12-31 02:31:57 -0500
committerIngo Molnar <mingo@elte.hu>2008-12-31 02:31:57 -0500
commita9de18eb761f7c1c860964b2e5addc1a35c7e861 (patch)
tree886e75fdfd09690cd262ca69cb7f5d1d42b48602 /arch/x86/include/asm/pda.h
parentb2aaf8f74cdc84a9182f6cabf198b7763bcb9d40 (diff)
parent6a94cb73064c952255336cc57731904174b2c58f (diff)
Merge branch 'linus' into stackprotector
Conflicts: arch/x86/include/asm/pda.h kernel/fork.c
Diffstat (limited to 'arch/x86/include/asm/pda.h')
-rw-r--r--arch/x86/include/asm/pda.h137
1 files changed, 137 insertions, 0 deletions
diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h
new file mode 100644
index 000000000000..3fea2fdb3302
--- /dev/null
+++ b/arch/x86/include/asm/pda.h
@@ -0,0 +1,137 @@
1#ifndef _ASM_X86_PDA_H
2#define _ASM_X86_PDA_H
3
4#ifndef __ASSEMBLY__
5#include <linux/stddef.h>
6#include <linux/types.h>
7#include <linux/cache.h>
8#include <asm/page.h>
9
/* Per processor datastructure. %gs points to it while the kernel runs */
struct x8664_pda {
	/* Field order is ABI: the numeric comments are byte offsets that
	   assembly and gcc depend on -- do not reorder or insert fields
	   before stack_canary. */
	struct task_struct *pcurrent;	/* 0 Current process */
	unsigned long data_offset;	/* 8 Per cpu data offset from linker
					   address */
	unsigned long kernelstack;	/* 16 top of kernel stack for current */
	unsigned long oldrsp;		/* 24 user rsp for system call */
	int irqcount;			/* 32 Irq nesting counter. Starts -1 */
	unsigned int cpunumber;		/* 36 Logical CPU number */
	unsigned long stack_canary;	/* 40 stack canary value */
					/* gcc-ABI: this canary MUST be at
					   offset 40!!! */
	char *irqstackptr;		/* per-CPU IRQ stack pointer --
					   NOTE(review): presumably the stack
					   top; confirm against its setup */
	short nodenumber;		/* number of current node (32k max) */
	short in_bootmem;		/* pda lives in bootmem */
	unsigned int __softirq_pending;	/* pending-softirq bitmask for this
					   CPU -- TODO confirm bit layout */
	unsigned int __nmi_count;	/* number of NMIs on this CPU */
	short mmu_state;		/* lazy-TLB state -- NOTE(review):
					   values defined elsewhere; verify */
	short isidle;			/* idle flag -- presumably nonzero
					   while the CPU idles; set elsewhere */
	struct mm_struct *active_mm;	/* mm loaded on this CPU -- TODO
					   confirm lazy-TLB semantics */
	/* Per-CPU interrupt statistics; incremented by handlers defined
	   outside this header. */
	unsigned apic_timer_irqs;
	unsigned irq0_irqs;
	unsigned irq_resched_count;
	unsigned irq_call_count;
	unsigned irq_tlb_count;
	unsigned irq_thermal_count;
	unsigned irq_threshold_count;
	unsigned irq_spurious_count;
} ____cacheline_aligned_in_smp;		/* one cacheline per CPU on SMP */
39
/* Array of per-CPU PDA pointers, indexed by logical CPU number; the array
   itself is defined and populated elsewhere. */
extern struct x8664_pda **_cpu_pda;
/* Set up the PDA for the given CPU -- defined elsewhere (early boot code). */
extern void pda_init(int);

/* Access CPU i's PDA from any CPU (plain memory access, not via %gs). */
#define cpu_pda(i) (_cpu_pda[i])
44
/*
 * There is no fast way to get the base address of the PDA, all the accesses
 * have to mention %fs/%gs. So it needs to be done this Torvaldian way.
 */
/* Never defined anywhere: referencing it for an unsupported field size turns
   a bad pda_to_op/pda_from_op use into a link-time error. */
extern void __bad_pda_field(void) __attribute__((noreturn));

/*
 * proxy_pda doesn't actually exist, but tell gcc it is accessed for
 * all PDA accesses so it gets read/write dependencies right.
 */
extern struct x8664_pda _proxy_pda;

/* Byte offset of a field within struct x8664_pda; used as the %gs
   displacement in the accessor macros below. */
#define pda_offset(field) offsetof(struct x8664_pda, field)
58
/*
 * pda_to_op - apply a store/RMW instruction to a PDA field through %gs.
 * @op:    instruction mnemonic without size suffix ("mov", "add", ...)
 * @field: field name inside struct x8664_pda
 * @val:   value operand, type-checked against the field's declared type
 *
 * The w/l/q size suffix is picked from sizeof the field; any other size
 * becomes a link error via __bad_pda_field().  _proxy_pda is named as an
 * in/out operand only so gcc orders this access against other PDA
 * accesses -- the real data lives at %gs:pda_offset(field).
 *
 * The 8-byte case uses the "e" constraint (32-bit sign-extended
 * immediate) instead of "i": q-suffixed instructions to memory cannot
 * encode a full 64-bit immediate (only movabs to a register can), so a
 * plain "i" would let gcc emit an unencodable operand.  This matches
 * percpu_to_op() in asm/percpu.h.
 */
#define pda_to_op(op, field, val)					\
do {									\
	typedef typeof(_proxy_pda.field) T__;				\
	if (0) { T__ tmp__; tmp__ = (val); }	/* type checking */	\
	switch (sizeof(_proxy_pda.field)) {				\
	case 2:								\
		asm(op "w %1,%%gs:%c2" :				\
		    "+m" (_proxy_pda.field) :				\
		    "ri" ((T__)val),					\
		    "i" (pda_offset(field)));				\
		break;							\
	case 4:								\
		asm(op "l %1,%%gs:%c2" :				\
		    "+m" (_proxy_pda.field) :				\
		    "ri" ((T__)val),					\
		    "i" (pda_offset(field)));				\
		break;							\
	case 8:								\
		asm(op "q %1,%%gs:%c2" :				\
		    "+m" (_proxy_pda.field) :				\
		    "re" ((T__)val),					\
		    "i" (pda_offset(field)));				\
		break;							\
	default:							\
		__bad_pda_field();					\
	}								\
} while (0)
86
/*
 * pda_from_op - read a PDA field through %gs and yield its value.
 * @op:    instruction mnemonic without size suffix (normally "mov")
 * @field: field name inside struct x8664_pda
 *
 * Statement expression: evaluates to the field's value with its declared
 * type.  The w/l/q size suffix is picked from sizeof the field; any other
 * size becomes a link error via __bad_pda_field().  _proxy_pda appears as
 * an input operand only to give gcc a read dependency on the PDA -- the
 * real access goes through the %gs segment base.
 */
#define pda_from_op(op, field)		\
({					\
	typeof(_proxy_pda.field) ret__;	\
	switch (sizeof(_proxy_pda.field)) {	\
	case 2:				\
		asm(op "w %%gs:%c1,%0" : \
		    "=r" (ret__) :	\
		    "i" (pda_offset(field)), \
		    "m" (_proxy_pda.field)); \
		break;			\
	case 4:				\
		asm(op "l %%gs:%c1,%0":	\
		    "=r" (ret__):	\
		    "i" (pda_offset(field)), \
		    "m" (_proxy_pda.field)); \
		break;			\
	case 8:				\
		asm(op "q %%gs:%c1,%0":	\
		    "=r" (ret__) :	\
		    "i" (pda_offset(field)), \
		    "m" (_proxy_pda.field)); \
		break;			\
	default:			\
		__bad_pda_field();	\
	}				\
	ret__;				\
})
114
/* Typed accessors for the current CPU's PDA, all via the %gs segment.
   The to_op variants are single instructions but NOT atomic against other
   CPUs; each CPU only touches its own PDA. */
#define read_pda(field)		pda_from_op("mov", field)
#define write_pda(field, val)	pda_to_op("mov", field, val)
#define add_pda(field, val)	pda_to_op("add", field, val)
#define sub_pda(field, val)	pda_to_op("sub", field, val)
#define or_pda(field, val)	pda_to_op("or", field, val)
120
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
/* Clear bit 'bit' in the PDA field and yield its previous state:
   btr latches the old bit value in CF, then sbb %0,%0 expands CF into
   old__ (0 if the bit was clear, -1/all-ones if it was set), so callers
   test the result for truth, not for the value 1. */
#define test_and_clear_bit_pda(bit, field)			\
({								\
	int old__;						\
	asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"		\
		     : "=r" (old__), "+m" (_proxy_pda.field)	\
		     : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
	old__;							\
})
130
#endif	/* !__ASSEMBLY__ */

/* Deliberately outside the !__ASSEMBLY__ block so assembly code can include
   this header for the constant.
   NOTE(review): 5*8 reads as five 8-byte slots, not the offset of any pda
   field above -- confirm its meaning against the users of PDA_STACKOFFSET. */
#define PDA_STACKOFFSET (5*8)

/* Copy the current task's canary into the PDA so %gs:40 matches it;
   'current' must be in scope at the point of use (supplied elsewhere). */
#define refresh_stack_canary() write_pda(stack_canary, current->stack_canary)

#endif /* _ASM_X86_PDA_H */