diff options
author | Harvey Harrison <harvey.harrison@gmail.com> | 2008-01-30 07:33:12 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-01-30 07:33:12 -0500 |
commit | f2857ce92023409df1544737d5b3499b4630a183 (patch) | |
tree | df2c5bd9ce155950c0f48c1126b2fdf2db4b64d4 | |
parent | 37cd9cf3dafed82f7cf905785883300f6ff7c818 (diff) |
x86: remove last user of get_segment_eip
is_prefetch was the last user of get_segment_eip and only on
X86_32. This function returned the faulting instruction's
address and set the upper segment limit.
Instead, use the convert_ip_to_linear helper and rely on
probe_kernel_address to do the segment checks, which were
already done everywhere the segment limit was being checked
on X86_32.
Remove get_segment_eip as well.
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r-- | arch/x86/kernel/step.c | 84 | ||||
-rw-r--r-- | arch/x86/mm/fault_32.c | 15 | ||||
-rw-r--r-- | arch/x86/mm/fault_64.c | 15 | ||||
-rw-r--r-- | include/asm-x86/ptrace.h | 6 |
4 files changed, 8 insertions, 112 deletions
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index 80b37181a42b..2ef1a5f8d675 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c | |||
@@ -5,90 +5,6 @@ | |||
5 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
6 | #include <linux/ptrace.h> | 6 | #include <linux/ptrace.h> |
7 | 7 | ||
8 | #ifdef CONFIG_X86_32 | ||
9 | #include <linux/uaccess.h> | ||
10 | |||
11 | #include <asm/desc.h> | ||
12 | |||
13 | /* | ||
14 | * Return EIP plus the CS segment base. The segment limit is also | ||
15 | * adjusted, clamped to the kernel/user address space (whichever is | ||
16 | * appropriate), and returned in *eip_limit. | ||
17 | * | ||
18 | * The segment is checked, because it might have been changed by another | ||
19 | * task between the original faulting instruction and here. | ||
20 | * | ||
21 | * If CS is no longer a valid code segment, or if EIP is beyond the | ||
22 | * limit, or if it is a kernel address when CS is not a kernel segment, | ||
23 | * then the returned value will be greater than *eip_limit. | ||
24 | * | ||
25 | * This is slow, but is very rarely executed. | ||
26 | */ | ||
27 | unsigned long get_segment_eip(struct pt_regs *regs, | ||
28 | unsigned long *eip_limit) | ||
29 | { | ||
30 | unsigned long ip = regs->ip; | ||
31 | unsigned seg = regs->cs & 0xffff; | ||
32 | u32 seg_ar, seg_limit, base, *desc; | ||
33 | |||
34 | /* Unlikely, but must come before segment checks. */ | ||
35 | if (unlikely(regs->flags & VM_MASK)) { | ||
36 | base = seg << 4; | ||
37 | *eip_limit = base + 0xffff; | ||
38 | return base + (ip & 0xffff); | ||
39 | } | ||
40 | |||
41 | /* The standard kernel/user address space limit. */ | ||
42 | *eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg; | ||
43 | |||
44 | /* By far the most common cases. */ | ||
45 | if (likely(SEGMENT_IS_FLAT_CODE(seg))) | ||
46 | return ip; | ||
47 | |||
48 | /* Check the segment exists, is within the current LDT/GDT size, | ||
49 | that kernel/user (ring 0..3) has the appropriate privilege, | ||
50 | that it's a code segment, and get the limit. */ | ||
51 | __asm__("larl %3,%0; lsll %3,%1" | ||
52 | : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg)); | ||
53 | if ((~seg_ar & 0x9800) || ip > seg_limit) { | ||
54 | *eip_limit = 0; | ||
55 | return 1; /* So that returned ip > *eip_limit. */ | ||
56 | } | ||
57 | |||
58 | /* Get the GDT/LDT descriptor base. | ||
59 | When you look for races in this code remember that | ||
60 | LDT and other horrors are only used in user space. */ | ||
61 | if (seg & (1<<2)) { | ||
62 | /* Must lock the LDT while reading it. */ | ||
63 | mutex_lock(¤t->mm->context.lock); | ||
64 | desc = current->mm->context.ldt; | ||
65 | desc = (void *)desc + (seg & ~7); | ||
66 | } else { | ||
67 | /* Must disable preemption while reading the GDT. */ | ||
68 | desc = (u32 *)get_cpu_gdt_table(get_cpu()); | ||
69 | desc = (void *)desc + (seg & ~7); | ||
70 | } | ||
71 | |||
72 | /* Decode the code segment base from the descriptor */ | ||
73 | base = get_desc_base((struct desc_struct *)desc); | ||
74 | |||
75 | if (seg & (1<<2)) | ||
76 | mutex_unlock(¤t->mm->context.lock); | ||
77 | else | ||
78 | put_cpu(); | ||
79 | |||
80 | /* Adjust EIP and segment limit, and clamp at the kernel limit. | ||
81 | It's legitimate for segments to wrap at 0xffffffff. */ | ||
82 | seg_limit += base; | ||
83 | if (seg_limit < *eip_limit && seg_limit >= base) | ||
84 | *eip_limit = seg_limit; | ||
85 | return ip + base; | ||
86 | } | ||
87 | #endif | ||
88 | |||
89 | #ifdef CONFIG_X86_32 | ||
90 | static | ||
91 | #endif | ||
92 | unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) | 8 | unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) |
93 | { | 9 | { |
94 | unsigned long addr, seg; | 10 | unsigned long addr, seg; |
diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c index b4d19c2d4f05..36cb67e02b04 100644 --- a/arch/x86/mm/fault_32.c +++ b/arch/x86/mm/fault_32.c | |||
@@ -81,7 +81,6 @@ static int is_prefetch(struct pt_regs *regs, unsigned long addr, | |||
81 | unsigned char *max_instr; | 81 | unsigned char *max_instr; |
82 | 82 | ||
83 | #ifdef CONFIG_X86_32 | 83 | #ifdef CONFIG_X86_32 |
84 | unsigned long limit; | ||
85 | if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD && | 84 | if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD && |
86 | boot_cpu_data.x86 >= 6)) { | 85 | boot_cpu_data.x86 >= 6)) { |
87 | /* Catch an obscure case of prefetch inside an NX page. */ | 86 | /* Catch an obscure case of prefetch inside an NX page. */ |
@@ -90,30 +89,23 @@ static int is_prefetch(struct pt_regs *regs, unsigned long addr, | |||
90 | } else { | 89 | } else { |
91 | return 0; | 90 | return 0; |
92 | } | 91 | } |
93 | instr = (unsigned char *)get_segment_eip(regs, &limit); | ||
94 | #else | 92 | #else |
95 | /* If it was a exec fault ignore */ | 93 | /* If it was a exec fault ignore */ |
96 | if (error_code & PF_INSTR) | 94 | if (error_code & PF_INSTR) |
97 | return 0; | 95 | return 0; |
98 | instr = (unsigned char __user *)convert_ip_to_linear(current, regs); | ||
99 | #endif | 96 | #endif |
100 | 97 | ||
98 | instr = (unsigned char *)convert_ip_to_linear(current, regs); | ||
101 | max_instr = instr + 15; | 99 | max_instr = instr + 15; |
102 | 100 | ||
103 | #ifdef CONFIG_X86_64 | ||
104 | if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE) | 101 | if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE) |
105 | return 0; | 102 | return 0; |
106 | #endif | ||
107 | 103 | ||
108 | while (scan_more && instr < max_instr) { | 104 | while (scan_more && instr < max_instr) { |
109 | unsigned char opcode; | 105 | unsigned char opcode; |
110 | unsigned char instr_hi; | 106 | unsigned char instr_hi; |
111 | unsigned char instr_lo; | 107 | unsigned char instr_lo; |
112 | 108 | ||
113 | #ifdef CONFIG_X86_32 | ||
114 | if (instr > (unsigned char *)limit) | ||
115 | break; | ||
116 | #endif | ||
117 | if (probe_kernel_address(instr, opcode)) | 109 | if (probe_kernel_address(instr, opcode)) |
118 | break; | 110 | break; |
119 | 111 | ||
@@ -155,10 +147,7 @@ static int is_prefetch(struct pt_regs *regs, unsigned long addr, | |||
155 | case 0x00: | 147 | case 0x00: |
156 | /* Prefetch instruction is 0x0F0D or 0x0F18 */ | 148 | /* Prefetch instruction is 0x0F0D or 0x0F18 */ |
157 | scan_more = 0; | 149 | scan_more = 0; |
158 | #ifdef CONFIG_X86_32 | 150 | |
159 | if (instr > (unsigned char *)limit) | ||
160 | break; | ||
161 | #endif | ||
162 | if (probe_kernel_address(instr, opcode)) | 151 | if (probe_kernel_address(instr, opcode)) |
163 | break; | 152 | break; |
164 | prefetch = (instr_lo == 0xF) && | 153 | prefetch = (instr_lo == 0xF) && |
diff --git a/arch/x86/mm/fault_64.c b/arch/x86/mm/fault_64.c index d519b41f1962..80f8436ac8b2 100644 --- a/arch/x86/mm/fault_64.c +++ b/arch/x86/mm/fault_64.c | |||
@@ -84,7 +84,6 @@ static int is_prefetch(struct pt_regs *regs, unsigned long addr, | |||
84 | unsigned char *max_instr; | 84 | unsigned char *max_instr; |
85 | 85 | ||
86 | #ifdef CONFIG_X86_32 | 86 | #ifdef CONFIG_X86_32 |
87 | unsigned long limit; | ||
88 | if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD && | 87 | if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD && |
89 | boot_cpu_data.x86 >= 6)) { | 88 | boot_cpu_data.x86 >= 6)) { |
90 | /* Catch an obscure case of prefetch inside an NX page. */ | 89 | /* Catch an obscure case of prefetch inside an NX page. */ |
@@ -93,30 +92,23 @@ static int is_prefetch(struct pt_regs *regs, unsigned long addr, | |||
93 | } else { | 92 | } else { |
94 | return 0; | 93 | return 0; |
95 | } | 94 | } |
96 | instr = (unsigned char *)get_segment_eip(regs, &limit); | ||
97 | #else | 95 | #else |
98 | /* If it was a exec fault ignore */ | 96 | /* If it was a exec fault ignore */ |
99 | if (error_code & PF_INSTR) | 97 | if (error_code & PF_INSTR) |
100 | return 0; | 98 | return 0; |
101 | instr = (unsigned char __user *)convert_ip_to_linear(current, regs); | ||
102 | #endif | 99 | #endif |
103 | 100 | ||
101 | instr = (unsigned char *)convert_ip_to_linear(current, regs); | ||
104 | max_instr = instr + 15; | 102 | max_instr = instr + 15; |
105 | 103 | ||
106 | #ifdef CONFIG_X86_64 | ||
107 | if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE) | 104 | if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE) |
108 | return 0; | 105 | return 0; |
109 | #endif | ||
110 | 106 | ||
111 | while (scan_more && instr < max_instr) { | 107 | while (scan_more && instr < max_instr) { |
112 | unsigned char opcode; | 108 | unsigned char opcode; |
113 | unsigned char instr_hi; | 109 | unsigned char instr_hi; |
114 | unsigned char instr_lo; | 110 | unsigned char instr_lo; |
115 | 111 | ||
116 | #ifdef CONFIG_X86_32 | ||
117 | if (instr > (unsigned char *)limit) | ||
118 | break; | ||
119 | #endif | ||
120 | if (probe_kernel_address(instr, opcode)) | 112 | if (probe_kernel_address(instr, opcode)) |
121 | break; | 113 | break; |
122 | 114 | ||
@@ -158,10 +150,7 @@ static int is_prefetch(struct pt_regs *regs, unsigned long addr, | |||
158 | case 0x00: | 150 | case 0x00: |
159 | /* Prefetch instruction is 0x0F0D or 0x0F18 */ | 151 | /* Prefetch instruction is 0x0F0D or 0x0F18 */ |
160 | scan_more = 0; | 152 | scan_more = 0; |
161 | #ifdef CONFIG_X86_32 | 153 | |
162 | if (instr > (unsigned char *)limit) | ||
163 | break; | ||
164 | #endif | ||
165 | if (probe_kernel_address(instr, opcode)) | 154 | if (probe_kernel_address(instr, opcode)) |
166 | break; | 155 | break; |
167 | prefetch = (instr_lo == 0xF) && | 156 | prefetch = (instr_lo == 0xF) && |
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h index 35c103714906..ee4b595e1ccc 100644 --- a/include/asm-x86/ptrace.h +++ b/include/asm-x86/ptrace.h | |||
@@ -70,6 +70,10 @@ struct pt_regs { | |||
70 | #include <asm/segment.h> | 70 | #include <asm/segment.h> |
71 | 71 | ||
72 | struct task_struct; | 72 | struct task_struct; |
73 | |||
74 | extern unsigned long | ||
75 | convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs); | ||
76 | |||
73 | extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code); | 77 | extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code); |
74 | 78 | ||
75 | /* | 79 | /* |
@@ -184,8 +188,6 @@ convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs); | |||
184 | 188 | ||
185 | #ifdef __KERNEL__ | 189 | #ifdef __KERNEL__ |
186 | 190 | ||
187 | unsigned long get_segment_eip(struct pt_regs *regs, unsigned long *eip_limit); | ||
188 | |||
189 | /* | 191 | /* |
190 | * These are defined as per linux/ptrace.h, which see. | 192 | * These are defined as per linux/ptrace.h, which see. |
191 | */ | 193 | */ |