Diffstat (limited to 'include')
-rw-r--r--  include/asm-x86_64/segment.h    5
-rw-r--r--  include/asm-x86_64/smp.h       12
-rw-r--r--  include/asm-x86_64/vsyscall.h   9
-rw-r--r--  include/linux/getcpu.h         16
4 files changed, 37 insertions, 5 deletions
diff --git a/include/asm-x86_64/segment.h b/include/asm-x86_64/segment.h
index d4bed33fb32c..334ddcdd8f92 100644
--- a/include/asm-x86_64/segment.h
+++ b/include/asm-x86_64/segment.h
@@ -20,15 +20,16 @@
 #define __USER_CS 0x33	/* 6*8+3 */
 #define __USER32_DS __USER_DS
 
-#define GDT_ENTRY_TLS 1
 #define GDT_ENTRY_TSS 8	/* needs two entries */
 #define GDT_ENTRY_LDT 10	/* needs two entries */
 #define GDT_ENTRY_TLS_MIN 12
 #define GDT_ENTRY_TLS_MAX 14
-/* 15 free */
 
 #define GDT_ENTRY_TLS_ENTRIES 3
 
+#define GDT_ENTRY_PER_CPU 15	/* Abused to load per CPU data from limit */
+#define __PER_CPU_SEG	(GDT_ENTRY_PER_CPU * 8 + 3)
+
 /* TLS indexes for 64bit - hardcoded in arch_prctl */
 #define FS_TLS 0
 #define GS_TLS 1
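Note: GDT_ENTRY_PER_CPU repurposes the previously free descriptor 15 so that user space can read a per-CPU value from the segment limit with an unprivileged LSL instruction. Below is a minimal user-space sketch of that read, not part of the patch; it assumes the kernel packs cpu | (node << 12) into the limit (the packing is done elsewhere in the patch, not in this header), so it only returns meaningful data on a kernel with this change applied.

/*
 * User-space sketch (not part of the patch): read the per-CPU GDT entry's
 * limit with LSL.  Assumes the kernel stores cpu | (node << 12) in the
 * limit of GDT_ENTRY_PER_CPU; on kernels without this patch descriptor 15
 * is unused and the LSL result is meaningless.
 */
#include <stdio.h>

#define GDT_ENTRY_PER_CPU 15
#define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)

static unsigned long read_per_cpu_limit(void)
{
	unsigned long p;

	/* LSL loads the segment limit; no privileged instruction needed */
	asm("lsl %1,%0" : "=r" (p) : "r" ((unsigned long)__PER_CPU_SEG));
	return p;
}

int main(void)
{
	unsigned long p = read_per_cpu_limit();

	/* assumed encoding: low 12 bits = cpu, remaining bits = node */
	printf("cpu=%lu node=%lu\n", p & 0xfff, p >> 12);
	return 0;
}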
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index 6805e1feb300..d61547fd833b 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -133,13 +133,19 @@ static __inline int logical_smp_processor_id(void)
 	/* we don't want to mark this access volatile - bad code generation */
 	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
 }
-#endif
 
 #ifdef CONFIG_SMP
 #define cpu_physical_id(cpu)	x86_cpu_to_apicid[cpu]
 #else
 #define cpu_physical_id(cpu)	boot_cpu_id
-#endif
-
+static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
+					void *info, int retry, int wait)
+{
+	/* Disable interrupts here? */
+	func(info);
+	return 0;
+}
+#endif /* !CONFIG_SMP */
+#endif /* !__ASSEMBLY */
 #endif
 
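Note: the hunk above gives uniprocessor builds a smp_call_function_single() stub so callers can unconditionally ask for a function to run on a given CPU; with only one CPU the callback simply runs locally. The stand-alone sketch below mirrors that calling convention for illustration only; set_mode() and the mode value are made-up stand-ins, not code from the patch.

/*
 * Stand-alone sketch of the uniprocessor smp_call_function_single() stub
 * added above: with one CPU the callback runs locally and 0 is returned.
 */
#include <stdio.h>

static inline int smp_call_function_single(int cpuid, void (*func)(void *info),
					   void *info, int retry, int wait)
{
	(void)cpuid; (void)retry; (void)wait;	/* ignored on UP */
	func(info);				/* run it right here */
	return 0;
}

static void set_mode(void *info)
{
	printf("would switch vgetcpu mode to %d on this cpu\n", *(int *)info);
}

int main(void)
{
	int mode = 1;	/* e.g. VGETCPU_RDTSCP in the patch */

	return smp_call_function_single(0, set_mode, &mode, 0, 1);
}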
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h
index 146b24402a5f..2281e9399b96 100644
--- a/include/asm-x86_64/vsyscall.h
+++ b/include/asm-x86_64/vsyscall.h
@@ -4,6 +4,7 @@
 enum vsyscall_num {
 	__NR_vgettimeofday,
 	__NR_vtime,
+	__NR_vgetcpu,
 };
 
 #define VSYSCALL_START (-10UL << 20)
@@ -15,6 +16,7 @@ enum vsyscall_num {
 #include <linux/seqlock.h>
 
 #define __section_vxtime __attribute__ ((unused, __section__ (".vxtime"), aligned(16)))
+#define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
 #define __section_wall_jiffies __attribute__ ((unused, __section__ (".wall_jiffies"), aligned(16)))
 #define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
 #define __section_sys_tz __attribute__ ((unused, __section__ (".sys_tz"), aligned(16)))
@@ -26,6 +28,9 @@ enum vsyscall_num {
 #define VXTIME_HPET	2
 #define VXTIME_PMTMR	3
 
+#define VGETCPU_RDTSCP	1
+#define VGETCPU_LSL	2
+
 struct vxtime_data {
 	long hpet_address;	/* HPET base address */
 	int last;
@@ -40,6 +45,7 @@ struct vxtime_data {
 
 /* vsyscall space (readonly) */
 extern struct vxtime_data __vxtime;
+extern int __vgetcpu_mode;
 extern struct timespec __xtime;
 extern volatile unsigned long __jiffies;
 extern unsigned long __wall_jiffies;
@@ -48,6 +54,7 @@ extern seqlock_t __xtime_lock;
 
 /* kernel space (writeable) */
 extern struct vxtime_data vxtime;
+extern int vgetcpu_mode;
 extern unsigned long wall_jiffies;
 extern struct timezone sys_tz;
 extern int sysctl_vsyscall;
@@ -55,6 +62,8 @@ extern seqlock_t xtime_lock;
 
 extern int sysctl_vsyscall;
 
+extern void vsyscall_set_cpu(int cpu);
+
 #define ARCH_HAVE_XTIME_LOCK 1
 
 #endif /* __KERNEL__ */
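Note: VGETCPU_RDTSCP and VGETCPU_LSL select how the vgetcpu vsyscall discovers the current CPU: via the RDTSCP instruction (reading IA32_TSC_AUX) when available, or via LSL on the per-CPU GDT segment otherwise, with vgetcpu_mode recording the choice made at boot. The sketch below is a stand-alone approximation of that mode switch, not the patch's vsyscall.c; the cpu | (node << 12) packing and the use of IA32_TSC_AUX are assumptions consistent with the LSL example earlier.

/*
 * Stand-alone approximation of a vgetcpu() built on the two modes declared
 * above.  VGETCPU_RDTSCP reads IA32_TSC_AUX via RDTSCP; VGETCPU_LSL reads
 * the per-CPU GDT segment limit.  The cpu | (node << 12) packing is an
 * assumption; this is not the patch's vsyscall.c.
 */
#include <stdio.h>

#define VGETCPU_RDTSCP	1
#define VGETCPU_LSL	2

#define GDT_ENTRY_PER_CPU 15
#define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)

static int vgetcpu_mode = VGETCPU_LSL;	/* the kernel picks this at boot */

static int vgetcpu(unsigned *cpu, unsigned *node)
{
	unsigned long p;

	if (vgetcpu_mode == VGETCPU_RDTSCP) {
		unsigned a, d, c;

		/* ECX holds whatever the kernel wrote to IA32_TSC_AUX */
		asm volatile("rdtscp" : "=a" (a), "=d" (d), "=c" (c));
		(void)a; (void)d;
		p = c;
	} else {
		/* fallback: limit of the per-CPU GDT descriptor */
		asm("lsl %1,%0" : "=r" (p) : "r" ((unsigned long)__PER_CPU_SEG));
	}
	if (cpu)
		*cpu = p & 0xfff;
	if (node)
		*node = p >> 12;
	return 0;
}

int main(void)
{
	unsigned cpu, node;

	vgetcpu(&cpu, &node);
	printf("cpu=%u node=%u\n", cpu, node);
	return 0;
}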
diff --git a/include/linux/getcpu.h b/include/linux/getcpu.h
new file mode 100644
index 000000000000..031ed3780e45
--- /dev/null
+++ b/include/linux/getcpu.h
@@ -0,0 +1,16 @@
+#ifndef _LINUX_GETCPU_H
+#define _LINUX_GETCPU_H 1
+
+/* Cache for getcpu() to speed it up. Results might be up to a jiffie
+   out of date, but will be faster.
+   User programs should not refer to the contents of this structure.
+   It is only a cache for vgetcpu(). It might change in future kernels.
+   The user program must store this information per thread (__thread).
+   If you want 100% accurate information pass NULL instead. */
+struct getcpu_cache {
+	unsigned long t0;
+	unsigned long t1;
+	unsigned long res[4];
+};
+
+#endif
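Note: struct getcpu_cache is the user-visible cache handed to the new vgetcpu vsyscall; its t0/t1/res fields are opaque to user programs. The sketch below shows the intended usage pattern of a per-thread (__thread) cache. The 1024-byte vsyscall slot size and the long vgetcpu(unsigned *, unsigned *, struct getcpu_cache *) prototype are assumptions based on the declarations in this diff, and the example needs kernel headers that already contain this new file.

/*
 * Sketch of the intended user-side usage: keep one getcpu_cache per thread
 * and pass it to the vgetcpu vsyscall so repeated calls can reuse results
 * for up to a jiffie.  Slot size and prototype are assumptions.
 */
#include <stdio.h>
#include <linux/getcpu.h>

#define __NR_vgetcpu	2	/* from enum vsyscall_num in this diff */
#define VSYSCALL_ADDR(nr)	((-10UL << 20) + 1024UL * (nr))	/* assumed slot size */

typedef long (*vgetcpu_t)(unsigned *cpu, unsigned *node,
			  struct getcpu_cache *tcache);

static __thread struct getcpu_cache cache;	/* one cache per thread */

int main(void)
{
	vgetcpu_t vgetcpu = (vgetcpu_t)VSYSCALL_ADDR(__NR_vgetcpu);
	unsigned cpu, node;

	if (vgetcpu(&cpu, &node, &cache) == 0)
		printf("running on cpu %u, node %u\n", cpu, node);
	return 0;
}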