aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/mm/fault_32.c
diff options
context:
space:
mode:
authorHarvey Harrison <harvey.harrison@gmail.com>2008-01-30 07:33:00 -0500
committerIngo Molnar <mingo@elte.hu>2008-01-30 07:33:00 -0500
commit1379a5ce3ffc549a7ff3daffc49c5e1c372717a3 (patch)
tree9138e94f173c6ce3bb267ca89888c72cf0a17614 /arch/x86/mm/fault_32.c
parent3d97775a80a03013abe1fd681620925f884ad18a (diff)
x86: move get_segment_eip() to step.c
get_segment_eip has similarities to convert_rip_to_linear(), and is used in a similar context. Move get_segment_eip to step.c to allow easier consolidation. Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com> Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/mm/fault_32.c')
-rw-r--r--arch/x86/mm/fault_32.c77
1 file changed, 0 insertions, 77 deletions
diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c
index 421e276770ad..b92922a1d65f 100644
--- a/arch/x86/mm/fault_32.c
+++ b/arch/x86/mm/fault_32.c
@@ -61,83 +61,6 @@ static inline int notify_page_fault(struct pt_regs *regs)
61#endif 61#endif
62} 62}
63 63
64#ifdef CONFIG_X86_32
65/*
66 * Return EIP plus the CS segment base. The segment limit is also
67 * adjusted, clamped to the kernel/user address space (whichever is
68 * appropriate), and returned in *eip_limit.
69 *
70 * The segment is checked, because it might have been changed by another
71 * task between the original faulting instruction and here.
72 *
73 * If CS is no longer a valid code segment, or if EIP is beyond the
74 * limit, or if it is a kernel address when CS is not a kernel segment,
75 * then the returned value will be greater than *eip_limit.
76 *
77 * This is slow, but is very rarely executed.
78 */
79static inline unsigned long get_segment_eip(struct pt_regs *regs,
80 unsigned long *eip_limit)
81{
82 unsigned long ip = regs->ip;
83 unsigned seg = regs->cs & 0xffff;
84 u32 seg_ar, seg_limit, base, *desc;
85
86 /* Unlikely, but must come before segment checks. */
87 if (unlikely(regs->flags & VM_MASK)) {
 /* NOTE(review): VM_MASK presumably marks vm86 mode — real-mode style
    segmentation: linear base = selector << 4, fixed 64K limit, and
    only the low 16 bits of IP are meaningful. Confirm VM_MASK semantics. */
88 base = seg << 4;
89 *eip_limit = base + 0xffff;
90 return base + (ip & 0xffff);
91 }
92
93 /* The standard kernel/user address space limit. */
94 *eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;
95
96 /* By far the most common cases. */
97 if (likely(SEGMENT_IS_FLAT_CODE(seg)))
98 return ip;
99
100 /* Check the segment exists, is within the current LDT/GDT size,
101 that kernel/user (ring 0..3) has the appropriate privilege,
102 that it's a code segment, and get the limit. */
 /* seg_ar is tied to the "0" (zero) input: if LAR faults the selector
    check, the destination is left unchanged, so seg_ar stays 0 and the
    test below rejects it — NOTE(review): relies on LAR/LSL leaving the
    destination untouched on failure; confirm against the Intel SDM. */
103 __asm__ ("larl %3,%0; lsll %3,%1"
104 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
 /* 0x9800 = present (P), non-system (S) and executable bits of the
    access-rights word; all must be set for a usable code segment —
    NOTE(review): verify the bit layout against the SDM. */
105 if ((~seg_ar & 0x9800) || ip > seg_limit) {
106 *eip_limit = 0;
107 return 1; /* So that returned ip > *eip_limit. */
108 }
109
110 /* Get the GDT/LDT descriptor base.
111 When you look for races in this code remember that
112 LDT and other horrors are only used in user space. */
 /* Selector bit 2 is the TI (table indicator) bit: 1 = LDT, 0 = GDT;
    (seg & ~7) strips TI/RPL to get the byte offset into the table. */
113 if (seg & (1<<2)) {
114 /* Must lock the LDT while reading it. */
115 mutex_lock(&current->mm->context.lock);
116 desc = current->mm->context.ldt;
117 desc = (void *)desc + (seg & ~7);
118 } else {
119 /* Must disable preemption while reading the GDT. */
120 desc = (u32 *)get_cpu_gdt_table(get_cpu());
121 desc = (void *)desc + (seg & ~7);
122 }
123
124 /* Decode the code segment base from the descriptor */
125 base = get_desc_base((struct desc_struct *)desc);
126
 /* Release in the same order: LDT mutex, or the get_cpu() preemption
    hold taken on the GDT path above. */
127 if (seg & (1<<2))
128 mutex_unlock(&current->mm->context.lock);
129 else
130 put_cpu();
131
132 /* Adjust EIP and segment limit, and clamp at the kernel limit.
133 It's legitimate for segments to wrap at 0xffffffff. */
 /* seg_limit >= base detects unsigned wraparound of base + limit; a
    wrapped segment keeps the earlier USER_DS/KERNEL_DS clamp instead. */
134 seg_limit += base;
135 if (seg_limit < *eip_limit && seg_limit >= base)
136 *eip_limit = seg_limit;
137 return ip + base;
138}
139#endif
140
141/* 64/*
142 * X86_32 65 * X86_32
143 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch. 66 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.