author     Harvey Harrison <harvey.harrison@gmail.com>  2008-01-30 07:33:00 -0500
committer  Ingo Molnar <mingo@elte.hu>                  2008-01-30 07:33:00 -0500
commit     1379a5ce3ffc549a7ff3daffc49c5e1c372717a3 (patch)
tree       9138e94f173c6ce3bb267ca89888c72cf0a17614 /arch
parent     3d97775a80a03013abe1fd681620925f884ad18a (diff)
x86: move get_segment_eip() to step.c
get_segment_eip() has similarities to convert_rip_to_linear(), and is used
in a similar context.  Move get_segment_eip to step.c to allow easier
consolidation.

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
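The comment block being moved spells out the calling contract: on any segment problem the helper returns a value strictly greater than the limit it stores through *eip_limit, so a caller needs only a single ip-versus-limit comparison rather than a separate error code. A throwaway user-space sketch of that contract (plain C, illustrative only; toy_segment_eip is a made-up stand-in, not kernel code):

#include <stdio.h>

/* Toy stand-in mirroring the error contract described in the comment:
 * on any segment problem it returns a value strictly greater than the
 * limit it reports, so one comparison covers every failure mode. */
static unsigned long toy_segment_eip(int segment_ok, unsigned long ip,
				     unsigned long *limit)
{
	if (!segment_ok) {
		*limit = 0;
		return 1;		/* 1 > 0, so ip > *limit for the caller */
	}
	*limit = 0xffffffffUL;		/* flat code segment: limit is the ceiling */
	return ip;
}

int main(void)
{
	unsigned long limit;
	unsigned long ip = toy_segment_eip(0, 0x08048000UL, &limit);

	if (ip > limit)
		printf("segment check failed, skip instruction decode\n");
	else
		printf("decode instruction bytes up to 0x%lx\n", limit);
	return 0;
}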
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/step.c  | 81
-rw-r--r--  arch/x86/mm/fault_32.c  | 77
-rw-r--r--  arch/x86/mm/fault_64.c  | 77
3 files changed, 81 insertions(+), 154 deletions(-)
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index 21ea22fda5fc..5884dd485db8 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -6,6 +6,87 @@
 #include <linux/ptrace.h>
 
 #ifdef CONFIG_X86_32
+#include <linux/uaccess.h>
+
+#include <asm/desc.h>
+
+/*
+ * Return EIP plus the CS segment base. The segment limit is also
+ * adjusted, clamped to the kernel/user address space (whichever is
+ * appropriate), and returned in *eip_limit.
+ *
+ * The segment is checked, because it might have been changed by another
+ * task between the original faulting instruction and here.
+ *
+ * If CS is no longer a valid code segment, or if EIP is beyond the
+ * limit, or if it is a kernel address when CS is not a kernel segment,
+ * then the returned value will be greater than *eip_limit.
+ *
+ * This is slow, but is very rarely executed.
+ */
+unsigned long get_segment_eip(struct pt_regs *regs,
+			      unsigned long *eip_limit)
+{
+	unsigned long ip = regs->ip;
+	unsigned seg = regs->cs & 0xffff;
+	u32 seg_ar, seg_limit, base, *desc;
+
+	/* Unlikely, but must come before segment checks. */
+	if (unlikely(regs->flags & VM_MASK)) {
+		base = seg << 4;
+		*eip_limit = base + 0xffff;
+		return base + (ip & 0xffff);
+	}
+
+	/* The standard kernel/user address space limit. */
+	*eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;
+
+	/* By far the most common cases. */
+	if (likely(SEGMENT_IS_FLAT_CODE(seg)))
+		return ip;
+
+	/* Check the segment exists, is within the current LDT/GDT size,
+	   that kernel/user (ring 0..3) has the appropriate privilege,
+	   that it's a code segment, and get the limit. */
+	__asm__("larl %3,%0; lsll %3,%1"
+		: "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
+	if ((~seg_ar & 0x9800) || ip > seg_limit) {
+		*eip_limit = 0;
+		return 1;	 /* So that returned ip > *eip_limit. */
+	}
+
+	/* Get the GDT/LDT descriptor base.
+	   When you look for races in this code remember that
+	   LDT and other horrors are only used in user space. */
+	if (seg & (1<<2)) {
+		/* Must lock the LDT while reading it. */
+		mutex_lock(&current->mm->context.lock);
+		desc = current->mm->context.ldt;
+		desc = (void *)desc + (seg & ~7);
+	} else {
+		/* Must disable preemption while reading the GDT. */
+		desc = (u32 *)get_cpu_gdt_table(get_cpu());
+		desc = (void *)desc + (seg & ~7);
+	}
+
+	/* Decode the code segment base from the descriptor */
+	base = get_desc_base((struct desc_struct *)desc);
+
+	if (seg & (1<<2))
+		mutex_unlock(&current->mm->context.lock);
+	else
+		put_cpu();
+
+	/* Adjust EIP and segment limit, and clamp at the kernel limit.
+	   It's legitimate for segments to wrap at 0xffffffff. */
+	seg_limit += base;
+	if (seg_limit < *eip_limit && seg_limit >= base)
+		*eip_limit = seg_limit;
+	return ip + base;
+}
+#endif
+
+#ifdef CONFIG_X86_32
 static
 #endif
 unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs)
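The descriptor lookup above relies on the layout of an x86 segment selector: bits 1:0 hold the RPL, bit 2 is the table indicator (0 = GDT, 1 = LDT), and seg & ~7 is then the byte offset of the 8-byte descriptor within the chosen table; the VM_MASK branch instead forms a real-mode address as (seg << 4) + (ip & 0xffff) with a 64KB limit. A small stand-alone illustration of both calculations (plain C, example selector values only, not kernel code):

#include <stdio.h>

/* Decode a protected-mode segment selector the same way the moved code
 * does: RPL in bits 1:0, table indicator in bit 2, and seg & ~7 as the
 * byte offset of the 8-byte descriptor.  Selector values are examples. */
static void decode_selector(unsigned int seg)
{
	unsigned int rpl   = seg & 3;
	unsigned int table = (seg >> 2) & 1;	/* 0 = GDT, 1 = LDT */
	unsigned int off   = seg & ~7u;

	printf("selector 0x%04x: RPL=%u table=%s descriptor offset=0x%x\n",
	       seg, rpl, table ? "LDT" : "GDT", off);
}

int main(void)
{
	unsigned int seg, ip, base;

	decode_selector(0x73);		/* a GDT selector, RPL 3 */
	decode_selector(0x0f);		/* an LDT selector, RPL 3 */

	/* Real-mode/vm86 address formation used by the VM_MASK branch. */
	seg = 0x1234;
	ip = 0x5678;
	base = seg << 4;
	printf("real mode %04x:%04x -> base 0x%05x, eip 0x%05x, limit 0x%05x\n",
	       seg, ip, base, base + (ip & 0xffffu), base + 0xffffu);
	return 0;
}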
diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c
index 421e276770ad..b92922a1d65f 100644
--- a/arch/x86/mm/fault_32.c
+++ b/arch/x86/mm/fault_32.c
@@ -61,83 +61,6 @@ static inline int notify_page_fault(struct pt_regs *regs)
 #endif
 }
 
-#ifdef CONFIG_X86_32
-/*
- * Return EIP plus the CS segment base. The segment limit is also
- * adjusted, clamped to the kernel/user address space (whichever is
- * appropriate), and returned in *eip_limit.
- *
- * The segment is checked, because it might have been changed by another
- * task between the original faulting instruction and here.
- *
- * If CS is no longer a valid code segment, or if EIP is beyond the
- * limit, or if it is a kernel address when CS is not a kernel segment,
- * then the returned value will be greater than *eip_limit.
- *
- * This is slow, but is very rarely executed.
- */
-static inline unsigned long get_segment_eip(struct pt_regs *regs,
-					    unsigned long *eip_limit)
-{
-	unsigned long ip = regs->ip;
-	unsigned seg = regs->cs & 0xffff;
-	u32 seg_ar, seg_limit, base, *desc;
-
-	/* Unlikely, but must come before segment checks. */
-	if (unlikely(regs->flags & VM_MASK)) {
-		base = seg << 4;
-		*eip_limit = base + 0xffff;
-		return base + (ip & 0xffff);
-	}
-
-	/* The standard kernel/user address space limit. */
-	*eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;
-
-	/* By far the most common cases. */
-	if (likely(SEGMENT_IS_FLAT_CODE(seg)))
-		return ip;
-
-	/* Check the segment exists, is within the current LDT/GDT size,
-	   that kernel/user (ring 0..3) has the appropriate privilege,
-	   that it's a code segment, and get the limit. */
-	__asm__ ("larl %3,%0; lsll %3,%1"
-		 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
-	if ((~seg_ar & 0x9800) || ip > seg_limit) {
-		*eip_limit = 0;
-		return 1;	 /* So that returned ip > *eip_limit. */
-	}
-
-	/* Get the GDT/LDT descriptor base.
-	   When you look for races in this code remember that
-	   LDT and other horrors are only used in user space. */
-	if (seg & (1<<2)) {
-		/* Must lock the LDT while reading it. */
-		mutex_lock(&current->mm->context.lock);
-		desc = current->mm->context.ldt;
-		desc = (void *)desc + (seg & ~7);
-	} else {
-		/* Must disable preemption while reading the GDT. */
-		desc = (u32 *)get_cpu_gdt_table(get_cpu());
-		desc = (void *)desc + (seg & ~7);
-	}
-
-	/* Decode the code segment base from the descriptor */
-	base = get_desc_base((struct desc_struct *)desc);
-
-	if (seg & (1<<2))
-		mutex_unlock(&current->mm->context.lock);
-	else
-		put_cpu();
-
-	/* Adjust EIP and segment limit, and clamp at the kernel limit.
-	   It's legitimate for segments to wrap at 0xffffffff. */
-	seg_limit += base;
-	if (seg_limit < *eip_limit && seg_limit >= base)
-		*eip_limit = seg_limit;
-	return ip + base;
-}
-#endif
-
 /*
  * X86_32
  * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
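A note on the magic constant in the larl/lsll check being moved, as read here rather than stated by the patch: lar places the descriptor's access byte in bits 8-15 of its result, so 0x9800 covers the present bit (15), the code/data descriptor-type bit (12) and the executable bit (11), and (~seg_ar & 0x9800) is non-zero unless the selector names a present code segment. A stand-alone rendition of that mask test (plain C, illustrative access-rights values):

#include <stdio.h>

/* Access-rights word as lar reports it: the descriptor's access byte
 * lands in bits 8-15, so bit 15 = present, bit 12 = code/data type (S),
 * bit 11 = executable.  0x9800 selects exactly those three bits. */
#define SEG_CHECK_MASK 0x9800u

static int is_present_code_segment(unsigned int seg_ar)
{
	return (~seg_ar & SEG_CHECK_MASK) == 0;
}

int main(void)
{
	/* 0x9b00: present code segment (access byte 0x9b in bits 8-15).
	 * 0x9300: present data segment (not executable) -> rejected.
	 * 0x1b00: executable but not present -> rejected. */
	printf("0x9b00 -> %s\n", is_present_code_segment(0x9b00) ? "code" : "reject");
	printf("0x9300 -> %s\n", is_present_code_segment(0x9300) ? "code" : "reject");
	printf("0x1b00 -> %s\n", is_present_code_segment(0x1b00) ? "code" : "reject");
	return 0;
}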
diff --git a/arch/x86/mm/fault_64.c b/arch/x86/mm/fault_64.c
index 95f142f5b5cc..e82832961d72 100644
--- a/arch/x86/mm/fault_64.c
+++ b/arch/x86/mm/fault_64.c
@@ -64,83 +64,6 @@ static inline int notify_page_fault(struct pt_regs *regs)
 #endif
 }
 
-#ifdef CONFIG_X86_32
-/*
- * Return EIP plus the CS segment base. The segment limit is also
- * adjusted, clamped to the kernel/user address space (whichever is
- * appropriate), and returned in *eip_limit.
- *
- * The segment is checked, because it might have been changed by another
- * task between the original faulting instruction and here.
- *
- * If CS is no longer a valid code segment, or if EIP is beyond the
- * limit, or if it is a kernel address when CS is not a kernel segment,
- * then the returned value will be greater than *eip_limit.
- *
- * This is slow, but is very rarely executed.
- */
-static inline unsigned long get_segment_eip(struct pt_regs *regs,
-					    unsigned long *eip_limit)
-{
-	unsigned long ip = regs->ip;
-	unsigned seg = regs->cs & 0xffff;
-	u32 seg_ar, seg_limit, base, *desc;
-
-	/* Unlikely, but must come before segment checks. */
-	if (unlikely(regs->flags & VM_MASK)) {
-		base = seg << 4;
-		*eip_limit = base + 0xffff;
-		return base + (ip & 0xffff);
-	}
-
-	/* The standard kernel/user address space limit. */
-	*eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;
-
-	/* By far the most common cases. */
-	if (likely(SEGMENT_IS_FLAT_CODE(seg)))
-		return ip;
-
-	/* Check the segment exists, is within the current LDT/GDT size,
-	   that kernel/user (ring 0..3) has the appropriate privilege,
-	   that it's a code segment, and get the limit. */
-	__asm__("larl %3,%0; lsll %3,%1"
-		: "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
-	if ((~seg_ar & 0x9800) || ip > seg_limit) {
-		*eip_limit = 0;
-		return 1;	 /* So that returned ip > *eip_limit. */
-	}
-
-	/* Get the GDT/LDT descriptor base.
-	   When you look for races in this code remember that
-	   LDT and other horrors are only used in user space. */
-	if (seg & (1<<2)) {
-		/* Must lock the LDT while reading it. */
-		mutex_lock(&current->mm->context.lock);
-		desc = current->mm->context.ldt;
-		desc = (void *)desc + (seg & ~7);
-	} else {
-		/* Must disable preemption while reading the GDT. */
-		desc = (u32 *)get_cpu_gdt_table(get_cpu());
-		desc = (void *)desc + (seg & ~7);
-	}
-
-	/* Decode the code segment base from the descriptor */
-	base = get_desc_base((struct desc_struct *)desc);
-
-	if (seg & (1<<2))
-		mutex_unlock(&current->mm->context.lock);
-	else
-		put_cpu();
-
-	/* Adjust EIP and segment limit, and clamp at the kernel limit.
-	   It's legitimate for segments to wrap at 0xffffffff. */
-	seg_limit += base;
-	if (seg_limit < *eip_limit && seg_limit >= base)
-		*eip_limit = seg_limit;
-	return ip + base;
-}
-#endif
-
 /*
  * X86_32
  * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
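Finally, the clamping at the end of the helper rebases the raw segment limit onto the linear address space and keeps the kernel/user ceiling whenever the rebased limit is larger or wrapped past 0xffffffff. A stand-alone rendition of that arithmetic (plain C; the base, limit and ceiling values below are made up for illustration, with 0xc0000000 standing in for a typical 32-bit user address-space limit):

#include <stdio.h>

/* Mirror of the final clamp in get_segment_eip(): rebase the segment
 * limit onto the linear address space, but keep the kernel/user ceiling
 * if the rebased limit is larger or wrapped around. */
static unsigned long clamp_limit(unsigned long base, unsigned long seg_limit,
				 unsigned long space_limit)
{
	unsigned long linear_limit = seg_limit + base;

	if (linear_limit < space_limit && linear_limit >= base)
		return linear_limit;
	return space_limit;
}

int main(void)
{
	unsigned long user_ceiling = 0xc0000000UL;	/* illustrative ceiling */

	/* Small segment well below the ceiling: its own limit wins. */
	printf("0x%lx\n", clamp_limit(0x10000000UL, 0x0000ffffUL, user_ceiling));
	/* Large segment reaching past the ceiling: the ceiling wins. */
	printf("0x%lx\n", clamp_limit(0x10000000UL, 0xefffffffUL, user_ceiling));
	return 0;
}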