about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorAlexander van Heukelum <heukelum@fastmail.fm>2008-09-30 12:41:35 -0400
committerIngo Molnar <mingo@elte.hu>2008-10-13 04:33:19 -0400
commitae82157b3d8bb4902f76b56c7450a945288786ac (patch)
tree5148bdb03b75278e1bb9c28458baf8e9df3e623b /arch
parenta28680b4b821a262fd3b5e57a28c148b5f9e662a (diff)
x86, traps, i386: factor out lazy io-bitmap copy
x86_64 does not do the lazy io-bitmap dance. Putting it in its own function makes i386's do_general_protection look much more like x86_64's.

Signed-off-by: Alexander van Heukelum <heukelum@fastmail.fm>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/traps_32.c	| 76 ++++++++++++++++++++++++++++++++++++++++++++----------------------------
1 file changed, 43 insertions(+), 33 deletions(-)
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index ce1063c141fc..0206c915748c 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -95,6 +95,47 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err)
 		die(str, regs, err);
 }
 
+/*
+ * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
+ * invalid offset set (the LAZY one) and the faulting thread has
+ * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS,
+ * we set the offset field correctly and return 1.
+ */
+static int lazy_iobitmap_copy(void)
+{
+	struct thread_struct *thread;
+	struct tss_struct *tss;
+	int cpu;
+
+	cpu = get_cpu();
+	tss = &per_cpu(init_tss, cpu);
+	thread = &current->thread;
+
+	if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
+	    thread->io_bitmap_ptr) {
+		memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
+		       thread->io_bitmap_max);
+		/*
+		 * If the previously set map was extending to higher ports
+		 * than the current one, pad extra space with 0xff (no access).
+		 */
+		if (thread->io_bitmap_max < tss->io_bitmap_max) {
+			memset((char *) tss->io_bitmap +
+				thread->io_bitmap_max, 0xff,
+				tss->io_bitmap_max - thread->io_bitmap_max);
+		}
+		tss->io_bitmap_max = thread->io_bitmap_max;
+		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
+		tss->io_bitmap_owner = thread;
+		put_cpu();
+
+		return 1;
+	}
+	put_cpu();
+
+	return 0;
+}
+
 static void __kprobes
 do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 	long error_code, siginfo_t *info)
@@ -187,44 +228,13 @@ void __kprobes
 do_general_protection(struct pt_regs *regs, long error_code)
 {
 	struct task_struct *tsk;
-	struct thread_struct *thread;
-	struct tss_struct *tss;
-	int cpu;
 
 	conditional_sti(regs);
 
-	cpu = get_cpu();
-	tss = &per_cpu(init_tss, cpu);
-	thread = &current->thread;
-
-	/*
-	 * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
-	 * invalid offset set (the LAZY one) and the faulting thread has
-	 * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS
-	 * and we set the offset field correctly. Then we let the CPU to
-	 * restart the faulting instruction.
-	 */
-	if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
-	    thread->io_bitmap_ptr) {
-		memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
-		       thread->io_bitmap_max);
-		/*
-		 * If the previously set map was extending to higher ports
-		 * than the current one, pad extra space with 0xff (no access).
-		 */
-		if (thread->io_bitmap_max < tss->io_bitmap_max) {
-			memset((char *) tss->io_bitmap +
-				thread->io_bitmap_max, 0xff,
-				tss->io_bitmap_max - thread->io_bitmap_max);
-		}
-		tss->io_bitmap_max = thread->io_bitmap_max;
-		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
-		tss->io_bitmap_owner = thread;
-		put_cpu();
-
+	if (lazy_iobitmap_copy()) {
+		/* restart the faulting instruction */
 		return;
 	}
-	put_cpu();
 
 	if (regs->flags & X86_VM_MASK)
 		goto gp_in_vm86;