Diffstat (limited to 'include/asm-x86/desc_64.h')
-rw-r--r--  include/asm-x86/desc_64.h  203
1 files changed, 0 insertions, 203 deletions
diff --git a/include/asm-x86/desc_64.h b/include/asm-x86/desc_64.h
index 7d9c938e69fd..8b137891791f 100644
--- a/include/asm-x86/desc_64.h
+++ b/include/asm-x86/desc_64.h
@@ -1,204 +1 @@
/* Written 2000 by Andi Kleen */
#ifndef __ARCH_DESC_H
#define __ARCH_DESC_H

#include <linux/threads.h>
#include <asm/ldt.h>

#ifndef __ASSEMBLY__

#include <linux/string.h>
#include <linux/smp.h>
#include <asm/desc_defs.h>

#include <asm/segment.h>
#include <asm/mmu.h>

extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];

#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
#define clear_LDT() asm volatile("lldt %w0"::"r" (0))

static inline unsigned long __store_tr(void)
{
        unsigned long tr;

        asm volatile ("str %w0":"=r" (tr));
        return tr;
}

#define store_tr(tr) (tr) = __store_tr()

/*
 * This is the ldt that every process will get unless we need
 * something other than this.
 */
extern struct desc_struct default_ldt[];
extern struct gate_struct idt_table[];
extern struct desc_ptr cpu_gdt_descr[];

/* the cpu gdt accessor */
#define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address)

static inline void load_gdt(const struct desc_ptr *ptr)
{
        asm volatile("lgdt %w0"::"m" (*ptr));
}

static inline void store_gdt(struct desc_ptr *ptr)
{
        asm("sgdt %w0":"=m" (*ptr));
}

static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)
{
        struct gate_struct s;
        s.offset_low = PTR_LOW(func);
        s.segment = __KERNEL_CS;
        s.ist = ist;
        s.p = 1;
        s.dpl = dpl;
        s.zero0 = 0;
        s.zero1 = 0;
        s.type = type;
        s.offset_middle = PTR_MIDDLE(func);
        s.offset_high = PTR_HIGH(func);
        /* does not need to be atomic because it is only done once at setup time */
        memcpy(adr, &s, 16);
}

static inline void set_intr_gate(int nr, void *func)
{
        BUG_ON((unsigned)nr > 0xFF);
        _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0);
}

static inline void set_intr_gate_ist(int nr, void *func, unsigned ist)
{
        BUG_ON((unsigned)nr > 0xFF);
        _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist);
}

static inline void set_system_gate(int nr, void *func)
{
        BUG_ON((unsigned)nr > 0xFF);
        _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
}

static inline void set_system_gate_ist(int nr, void *func, unsigned ist)
{
        _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist);
}

static inline void load_idt(const struct desc_ptr *ptr)
{
        asm volatile("lidt %w0"::"m" (*ptr));
}

static inline void store_idt(struct desc_ptr *dtr)
{
        asm("sidt %w0":"=m" (*dtr));
}

static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type,
                                         unsigned size)
{
        struct ldttss_desc d;
        memset(&d, 0, sizeof(d));
        d.limit0 = size & 0xFFFF;
        d.base0 = PTR_LOW(tss);
        d.base1 = PTR_MIDDLE(tss) & 0xFF;
        d.type = type;
        d.p = 1;
        d.limit1 = (size >> 16) & 0xF;
        d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF;
        d.base3 = PTR_HIGH(tss);
        memcpy(ptr, &d, 16);
}

static inline void set_tss_desc(unsigned cpu, void *addr)
{
        /*
         * sizeof(unsigned long) coming from an extra "long" at the end
         * of the iobitmap. See tss_struct definition in processor.h
         *
         * -1? seg base+limit should be pointing to the address of the
         * last valid byte
         */
        set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_TSS],
                              (unsigned long)addr, DESC_TSS,
                              IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
}

static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
{
        set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_LDT], (unsigned long)addr,
                              DESC_LDT, size * 8 - 1);
}

#define LDT_entry_a(info) \
        ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
/* Don't allow setting of the lm bit. It is useless anyways because
   64bit system calls require __USER_CS. */
#define LDT_entry_b(info) \
        (((info)->base_addr & 0xff000000) | \
        (((info)->base_addr & 0x00ff0000) >> 16) | \
        ((info)->limit & 0xf0000) | \
        (((info)->read_exec_only ^ 1) << 9) | \
        ((info)->contents << 10) | \
        (((info)->seg_not_present ^ 1) << 15) | \
        ((info)->seg_32bit << 22) | \
        ((info)->limit_in_pages << 23) | \
        ((info)->useable << 20) | \
        /* ((info)->lm << 21) | */ \
        0x7000)

#define LDT_empty(info) (\
        (info)->base_addr == 0 && \
        (info)->limit == 0 && \
        (info)->contents == 0 && \
        (info)->read_exec_only == 1 && \
        (info)->seg_32bit == 0 && \
        (info)->limit_in_pages == 0 && \
        (info)->seg_not_present == 1 && \
        (info)->useable == 0 && \
        (info)->lm == 0)

static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
{
        unsigned int i;
        u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN);

        for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
                gdt[i] = t->tls_array[i];
}

/*
 * load one particular LDT into the current CPU
 */
static inline void load_LDT_nolock (mm_context_t *pc, int cpu)
{
        int count = pc->size;

        if (likely(!count)) {
                clear_LDT();
                return;
        }

        set_ldt_desc(cpu, pc->ldt, count);
        load_LDT_desc();
}

static inline void load_LDT(mm_context_t *pc)
{
        int cpu = get_cpu();
        load_LDT_nolock(pc, cpu);
        put_cpu();
}

extern struct desc_ptr idt_descr;

#endif /* !__ASSEMBLY__ */

#endif
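
As a rough, standalone sketch of the 16-byte interrupt-gate layout that the removed _set_gate() builds field by field (offset split into low/middle/high parts, code-segment selector, IST index, type, DPL and present bit), the following user-space C program packs the same fields into two 64-bit words by hand. It is illustrative only: the helper name pack_gate, the selector and handler-address values, and the EXAMPLE_* constants are invented for the example and are not kernel API.

/*
 * Illustrative user-space sketch of the 64-bit IDT gate layout that
 * _set_gate() fills in via struct gate_struct. Names and sample values
 * are made up for the example; this is not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_KERNEL_CS      0x10    /* assumed stand-in for __KERNEL_CS   */
#define EXAMPLE_GATE_INTERRUPT 0xE     /* interrupt-gate type, cf. GATE_INTERRUPT */

/* Pack an interrupt/trap gate into its two 64-bit halves. */
static void pack_gate(uint64_t out[2], uint64_t func, unsigned type,
                      unsigned dpl, unsigned ist, unsigned selector)
{
        out[0] = (func & 0xFFFFULL)                      /* offset 15..0  (PTR_LOW)     */
               | ((uint64_t)selector << 16)              /* code segment selector       */
               | ((uint64_t)(ist & 0x7) << 32)           /* IST index                   */
               | ((uint64_t)(type & 0xF) << 40)          /* gate type                   */
               | ((uint64_t)(dpl & 0x3) << 45)           /* descriptor privilege level  */
               | (1ULL << 47)                            /* present bit                 */
               | (((func >> 16) & 0xFFFFULL) << 48);     /* offset 31..16 (PTR_MIDDLE)  */
        out[1] = func >> 32;                             /* offset 63..32 (PTR_HIGH); upper word reserved */
}

int main(void)
{
        uint64_t gate[2];
        uint64_t fake_handler = 0xffffffff81234560ULL;   /* made-up handler address */

        pack_gate(gate, fake_handler, EXAMPLE_GATE_INTERRUPT, 0, 0, EXAMPLE_KERNEL_CS);
        printf("gate word 0: %#018llx\n", (unsigned long long)gate[0]);
        printf("gate word 1: %#018llx\n", (unsigned long long)gate[1]);
        return 0;
}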