 arch/powerpc/Kconfig                     |    1
 arch/powerpc/include/asm/asm-compat.h    |    2
 arch/powerpc/include/asm/cputable.h      |    4
 arch/powerpc/include/asm/hw_breakpoint.h |   74
 arch/powerpc/include/asm/ppc-opcode.h    |    7
 arch/powerpc/include/asm/processor.h     |    8
 arch/powerpc/kernel/Makefile             |    1
 arch/powerpc/kernel/exceptions-64s.S     |    1
 arch/powerpc/kernel/hw_breakpoint.c      |  364
 arch/powerpc/kernel/machine_kexec_64.c   |    3
 arch/powerpc/kernel/process.c            |   14
 arch/powerpc/kernel/ptrace.c             |   64
 arch/powerpc/kernel/signal.c             |    3
 arch/powerpc/kernel/traps.c              |    8
 arch/powerpc/lib/Makefile                |    5
 arch/powerpc/lib/ldstfp.S                |  375
 arch/powerpc/lib/sstep.c                 | 1514
 kernel/hw_breakpoint.c                   |   12
 18 files changed, 2408 insertions(+), 52 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 6506bf4fbff1..89bc2e895390 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -141,6 +141,7 @@ config PPC
 	select GENERIC_ATOMIC64 if PPC32
 	select HAVE_PERF_EVENTS
 	select HAVE_REGS_AND_STACK_ACCESS_API
+	select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
 
 config EARLY_PRINTK
 	bool
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index 2048a6aeea91..decad950f11a 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -30,6 +30,7 @@
 #define PPC_STLCX	stringify_in_c(stdcx.)
 #define PPC_CNTLZL	stringify_in_c(cntlzd)
 #define PPC_LR_STKOFF	16
+#define PPC_MIN_STKFRM	112
 
 /* Move to CR, single-entry optimized version. Only available
  * on POWER4 and later.
@@ -55,6 +56,7 @@
 #define PPC_CNTLZL	stringify_in_c(cntlzw)
 #define PPC_MTOCRF	stringify_in_c(mtcrf)
 #define PPC_LR_STKOFF	4
+#define PPC_MIN_STKFRM	16
 
 #endif
 
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index b0b21134f61a..5e2e2cfcc81b 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -517,6 +517,10 @@ static inline int cpu_has_feature(unsigned long feature)
 		& feature);
 }
 
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+#define HBP_NUM 1
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
new file mode 100644
index 000000000000..1c33ec17ca36
--- /dev/null
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -0,0 +1,74 @@
+/*
+ * PowerPC BookIII S hardware breakpoint definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright 2010, IBM Corporation.
+ * Author: K.Prasad <prasad@linux.vnet.ibm.com>
+ *
+ */
+
+#ifndef _PPC_BOOK3S_64_HW_BREAKPOINT_H
+#define _PPC_BOOK3S_64_HW_BREAKPOINT_H
+
+#ifdef	__KERNEL__
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+
+struct arch_hw_breakpoint {
+	bool		extraneous_interrupt;
+	u8		len; /* length of the target data symbol */
+	int		type;
+	unsigned long	address;
+};
+
+#include <linux/kdebug.h>
+#include <asm/reg.h>
+#include <asm/system.h>
+
+struct perf_event;
+struct pmu;
+struct perf_sample_data;
+
+#define HW_BREAKPOINT_ALIGN 0x7
+/* Maximum permissible length of any HW Breakpoint */
+#define HW_BREAKPOINT_LEN 0x8
+
+extern int hw_breakpoint_slots(int type);
+extern int arch_bp_generic_fields(int type, int *gen_bp_type);
+extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
+extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+						unsigned long val, void *data);
+int arch_install_hw_breakpoint(struct perf_event *bp);
+void arch_uninstall_hw_breakpoint(struct perf_event *bp);
+void hw_breakpoint_pmu_read(struct perf_event *bp);
+extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
+
+extern struct pmu perf_ops_bp;
+extern void ptrace_triggered(struct perf_event *bp, int nmi,
+			struct perf_sample_data *data, struct pt_regs *regs);
+static inline void hw_breakpoint_disable(void)
+{
+	set_dabr(0);
+}
+extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs);
+
+#else	/* CONFIG_HAVE_HW_BREAKPOINT */
+static inline void hw_breakpoint_disable(void) { }
+static inline void thread_change_pc(struct task_struct *tsk,
+					struct pt_regs *regs) { }
+#endif	/* CONFIG_HAVE_HW_BREAKPOINT */
+#endif	/* __KERNEL__ */
+#endif	/* _PPC_BOOK3S_64_HW_BREAKPOINT_H */
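
[Note: the constants above feed straight into the generic perf breakpoint layer.
As a rough in-kernel usage sketch -- not part of this patch, modeled on
samples/hw_breakpoint/data_breakpoint.c, with kallsyms_lookup_name() and the
watched symbol name as placeholder assumptions -- a Book3S watchpoint would be
requested like this:

	#include <linux/hw_breakpoint.h>
	#include <linux/perf_event.h>
	#include <linux/kallsyms.h>

	static void wp_handler(struct perf_event *bp, int nmi,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
	{
		/* runs after the access, per trigger-after-execute policy */
		printk(KERN_INFO "watched symbol was touched\n");
	}

	static struct perf_event **wp;

	static int __init wp_init(void)
	{
		struct perf_event_attr attr;

		hw_breakpoint_init(&attr);
		attr.bp_addr = kallsyms_lookup_name("pid_max"); /* placeholder */
		attr.bp_len  = HW_BREAKPOINT_LEN_4;	/* <= HW_BREAKPOINT_LEN */
		attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;

		wp = register_wide_hw_breakpoint(&attr, wp_handler);
		return IS_ERR((void __force *)wp) ? PTR_ERR((void __force *)wp) : 0;
	}

With HBP_NUM == 1 there is a single DABR per CPU, so only one such breakpoint
can be live at a time.]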
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index d553bbeb726c..43adc8b819ed 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -52,13 +52,17 @@
 #define PPC_INST_WAIT			0x7c00007c
 #define PPC_INST_TLBIVAX		0x7c000624
 #define PPC_INST_TLBSRX_DOT		0x7c0006a5
+#define PPC_INST_XXLOR			0xf0000510
 
 /* macros to insert fields into opcodes */
 #define __PPC_RA(a)	(((a) & 0x1f) << 16)
 #define __PPC_RB(b)	(((b) & 0x1f) << 11)
 #define __PPC_RS(s)	(((s) & 0x1f) << 21)
 #define __PPC_RT(s)	__PPC_RS(s)
+#define __PPC_XA(a)	((((a) & 0x1f) << 16) | (((a) & 0x20) >> 3))
+#define __PPC_XB(b)	((((b) & 0x1f) << 11) | (((b) & 0x20) >> 4))
 #define __PPC_XS(s)	((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5))
+#define __PPC_XT(s)	__PPC_XS(s)
 #define __PPC_T_TLB(t)	(((t) & 0x3) << 21)
 #define __PPC_WC(w)	(((w) & 0x3) << 21)
 /*
@@ -106,9 +110,12 @@
  * the 128 bit load store instructions based on that.
  */
 #define VSX_XX1(s, a, b)	(__PPC_XS(s) | __PPC_RA(a) | __PPC_RB(b))
+#define VSX_XX3(t, a, b)	(__PPC_XT(t) | __PPC_XA(a) | __PPC_XB(b))
 #define STXVD2X(s, a, b)	stringify_in_c(.long PPC_INST_STXVD2X | \
 					       VSX_XX1((s), (a), (b)))
 #define LXVD2X(s, a, b)		stringify_in_c(.long PPC_INST_LXVD2X | \
 					       VSX_XX1((s), (a), (b)))
+#define XXLOR(t, a, b)		stringify_in_c(.long PPC_INST_XXLOR | \
+					       VSX_XX3((t), (a), (b)))
 
 #endif /* _ASM_POWERPC_PPC_OPCODE_H */
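
[Note: the split fields exist because VSX registers carry 6-bit numbers while
the classic register fields hold only 5 bits; the sixth bit lands in a
low-order bit of the opcode word. A hand check of the arithmetic, shown purely
as illustration:

	/* Encode XXLOR vs40,vs40,vs40 using the macros above. */
	unsigned int t  = 40;	/* VSX register number, 0-63 */
	unsigned int xt = ((t & 0x1f) << 21) | ((t & 0x20) >> 5); /* __PPC_XT */
	unsigned int xa = ((t & 0x1f) << 16) | ((t & 0x20) >> 3); /* __PPC_XA */
	unsigned int xb = ((t & 0x1f) << 11) | ((t & 0x20) >> 4); /* __PPC_XB */
	unsigned int insn = 0xf0000510 | xt | xa | xb;	/* = 0xf1084517 */
]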
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 7492fe8ad6e4..19c05b0f74be 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -209,6 +209,14 @@ struct thread_struct {
 #ifdef CONFIG_PPC64
 	unsigned long	start_tb;	/* Start purr when proc switched in */
 	unsigned long	accum_tb;	/* Total accumilated purr for process */
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+	struct perf_event *ptrace_bps[HBP_NUM];
+	/*
+	 * Helps identify source of single-step exception and subsequent
+	 * hw-breakpoint enablement
+	 */
+	struct perf_event *last_hit_ubp;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
 #endif
 	unsigned long	dabr;		/* Data address breakpoint register */
 #ifdef CONFIG_ALTIVEC
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 58d0572de6f9..01006040f59e 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -34,6 +34,7 @@ obj-y += vdso32/
 obj-$(CONFIG_PPC64)		+= setup_64.o sys_ppc32.o \
 				   signal_64.o ptrace32.o \
 				   paca.o nvram_64.o firmware.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 obj-$(CONFIG_PPC_BOOK3S_64)	+= cpu_setup_ppc970.o cpu_setup_pa6t.o
 obj64-$(CONFIG_RELOCATABLE)	+= reloc_64.o
 obj-$(CONFIG_PPC_BOOK3E_64)	+= exceptions-64e.o
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 3e423fbad6bc..f53029a01554 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -828,6 +828,7 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
 
 /* We have a data breakpoint exception - handle it */
 handle_dabr_fault:
+	bl	.save_nvgprs
 	ld	r4,_DAR(r1)
 	ld	r5,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
new file mode 100644
index 000000000000..5ecd0401cdb1
--- /dev/null
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -0,0 +1,364 @@
+/*
+ * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
+ * using the CPU's debug registers. Derived from
+ * "arch/x86/kernel/hw_breakpoint.c"
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright 2010 IBM Corporation
+ * Author: K.Prasad <prasad@linux.vnet.ibm.com>
+ *
+ */
+
+#include <linux/hw_breakpoint.h>
+#include <linux/notifier.h>
+#include <linux/kprobes.h>
+#include <linux/percpu.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+
+#include <asm/hw_breakpoint.h>
+#include <asm/processor.h>
+#include <asm/sstep.h>
+#include <asm/uaccess.h>
+
+/*
+ * Stores the breakpoints currently in use on each breakpoint address
+ * register for every cpu
+ */
+static DEFINE_PER_CPU(struct perf_event *, bp_per_reg);
+
+/*
+ * Returns total number of data or instruction breakpoints available.
+ */
+int hw_breakpoint_slots(int type)
+{
+	if (type == TYPE_DATA)
+		return HBP_NUM;
+	return 0;		/* no instruction breakpoints available */
+}
+
+/*
+ * Install a perf counter breakpoint.
+ *
+ * We seek a free debug address register and use it for this
+ * breakpoint.
+ *
+ * Atomic: we hold the counter->ctx->lock and we only handle variables
+ * and registers local to this cpu.
+ */
+int arch_install_hw_breakpoint(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	struct perf_event **slot = &__get_cpu_var(bp_per_reg);
+
+	*slot = bp;
+
+	/*
+	 * Do not install DABR values if the instruction must be single-stepped.
+	 * If so, DABR will be populated in single_step_dabr_instruction().
+	 */
+	if (current->thread.last_hit_ubp != bp)
+		set_dabr(info->address | info->type | DABR_TRANSLATION);
+
+	return 0;
+}
+
+/*
+ * Uninstall the breakpoint contained in the given counter.
+ *
+ * First we search the debug address register it uses and then we disable
+ * it.
+ *
+ * Atomic: we hold the counter->ctx->lock and we only handle variables
+ * and registers local to this cpu.
+ */
+void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+{
+	struct perf_event **slot = &__get_cpu_var(bp_per_reg);
+
+	if (*slot != bp) {
+		WARN_ONCE(1, "Can't find the breakpoint");
+		return;
+	}
+
+	*slot = NULL;
+	set_dabr(0);
+}
+
+/*
+ * Perform cleanup of arch-specific counters during unregistration
+ * of the perf-event
+ */
+void arch_unregister_hw_breakpoint(struct perf_event *bp)
+{
+	/*
+	 * If the breakpoint is unregistered between a hw_breakpoint_handler()
+	 * and the single_step_dabr_instruction(), then cleanup the breakpoint
+	 * restoration variables to prevent dangling pointers.
+	 */
+	if (bp->ctx->task)
+		bp->ctx->task->thread.last_hit_ubp = NULL;
+}
+
+/*
+ * Check for virtual address in kernel space.
+ */
+int arch_check_bp_in_kernelspace(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+	return is_kernel_addr(info->address);
+}
+
+int arch_bp_generic_fields(int type, int *gen_bp_type)
+{
+	switch (type) {
+	case DABR_DATA_READ:
+		*gen_bp_type = HW_BREAKPOINT_R;
+		break;
+	case DABR_DATA_WRITE:
+		*gen_bp_type = HW_BREAKPOINT_W;
+		break;
+	case (DABR_DATA_WRITE | DABR_DATA_READ):
+		*gen_bp_type = (HW_BREAKPOINT_W | HW_BREAKPOINT_R);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * Validate the arch-specific HW Breakpoint register settings
+ */
+int arch_validate_hwbkpt_settings(struct perf_event *bp)
+{
+	int ret = -EINVAL;
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+	if (!bp)
+		return ret;
+
+	switch (bp->attr.bp_type) {
+	case HW_BREAKPOINT_R:
+		info->type = DABR_DATA_READ;
+		break;
+	case HW_BREAKPOINT_W:
+		info->type = DABR_DATA_WRITE;
+		break;
+	case HW_BREAKPOINT_R | HW_BREAKPOINT_W:
+		info->type = (DABR_DATA_READ | DABR_DATA_WRITE);
+		break;
+	default:
+		return ret;
+	}
+
+	info->address = bp->attr.bp_addr;
+	info->len = bp->attr.bp_len;
+
+	/*
+	 * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
+	 * and breakpoint addresses are aligned to nearest double-word
+	 * HW_BREAKPOINT_ALIGN by rounding off to the lower address, the
+	 * 'symbolsize' should satisfy the check below.
+	 */
+	if (info->len >
+	    (HW_BREAKPOINT_LEN - (info->address & HW_BREAKPOINT_ALIGN)))
+		return -EINVAL;
+	return 0;
+}
+
+/*
+ * Restores the breakpoint on the debug registers.
+ * Invoke this function if it is known that the execution context is
+ * about to change to cause loss of MSR_SE settings.
+ */
+void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
+{
+	struct arch_hw_breakpoint *info;
+
+	if (likely(!tsk->thread.last_hit_ubp))
+		return;
+
+	info = counter_arch_bp(tsk->thread.last_hit_ubp);
+	regs->msr &= ~MSR_SE;
+	set_dabr(info->address | info->type | DABR_TRANSLATION);
+	tsk->thread.last_hit_ubp = NULL;
+}
+
+/*
+ * Handle debug exception notifications.
+ */
+int __kprobes hw_breakpoint_handler(struct die_args *args)
+{
+	int rc = NOTIFY_STOP;
+	struct perf_event *bp;
+	struct pt_regs *regs = args->regs;
+	int stepped = 1;
+	struct arch_hw_breakpoint *info;
+	unsigned int instr;
+	unsigned long dar = regs->dar;
+
+	/* Disable breakpoints during exception handling */
+	set_dabr(0);
+
+	/*
+	 * The counter may be concurrently released but that can only
+	 * occur from a call_rcu() path. We can then safely fetch
+	 * the breakpoint, use its callback, touch its counter
+	 * while we are in an rcu_read_lock() path.
+	 */
+	rcu_read_lock();
+
+	bp = __get_cpu_var(bp_per_reg);
+	if (!bp)
+		goto out;
+	info = counter_arch_bp(bp);
+
+	/*
+	 * Return early after invoking user-callback function without restoring
+	 * DABR if the breakpoint is from ptrace which always operates in
+	 * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
+	 * generated in do_dabr().
+	 */
+	if (bp->overflow_handler == ptrace_triggered) {
+		perf_bp_event(bp, regs);
+		rc = NOTIFY_DONE;
+		goto out;
+	}
+
+	/*
+	 * Verify if dar lies within the address range occupied by the symbol
+	 * being watched to filter extraneous exceptions.  If it doesn't,
+	 * we still need to single-step the instruction, but we don't
+	 * generate an event.
+	 */
+	info->extraneous_interrupt = !((bp->attr.bp_addr <= dar) &&
+			(dar - bp->attr.bp_addr < bp->attr.bp_len));
+
+	/* Do not emulate user-space instructions, instead single-step them */
+	if (user_mode(regs)) {
+		bp->ctx->task->thread.last_hit_ubp = bp;
+		regs->msr |= MSR_SE;
+		goto out;
+	}
+
+	stepped = 0;
+	instr = 0;
+	if (!__get_user_inatomic(instr, (unsigned int *) regs->nip))
+		stepped = emulate_step(regs, instr);
+
+	/*
+	 * emulate_step() could not execute it. We've failed in reliably
+	 * handling the hw-breakpoint. Unregister it and throw a warning
+	 * message to let the user know about it.
+	 */
+	if (!stepped) {
+		WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
+			"0x%lx will be disabled.", info->address);
+		perf_event_disable(bp);
+		goto out;
+	}
+	/*
+	 * As a policy, the callback is invoked in a 'trigger-after-execute'
+	 * fashion
+	 */
+	if (!info->extraneous_interrupt)
+		perf_bp_event(bp, regs);
+
+	set_dabr(info->address | info->type | DABR_TRANSLATION);
+out:
+	rcu_read_unlock();
+	return rc;
+}
+
+/*
+ * Handle single-step exceptions following a DABR hit.
+ */
+int __kprobes single_step_dabr_instruction(struct die_args *args)
+{
+	struct pt_regs *regs = args->regs;
+	struct perf_event *bp = NULL;
+	struct arch_hw_breakpoint *bp_info;
+
+	bp = current->thread.last_hit_ubp;
+	/*
+	 * Check if we are single-stepping as a result of a
+	 * previous HW Breakpoint exception
+	 */
+	if (!bp)
+		return NOTIFY_DONE;
+
+	bp_info = counter_arch_bp(bp);
+
+	/*
+	 * We shall invoke the user-defined callback function in the single
+	 * stepping handler to conform to 'trigger-after-execute' semantics
+	 */
+	if (!bp_info->extraneous_interrupt)
+		perf_bp_event(bp, regs);
+
+	set_dabr(bp_info->address | bp_info->type | DABR_TRANSLATION);
+	current->thread.last_hit_ubp = NULL;
+
+	/*
+	 * If the process was being single-stepped by ptrace, let the
+	 * other single-step actions occur (e.g. generate SIGTRAP).
+	 */
+	if (test_thread_flag(TIF_SINGLESTEP))
+		return NOTIFY_DONE;
+
+	return NOTIFY_STOP;
+}
+
+/*
+ * Handle debug exception notifications.
+ */
+int __kprobes hw_breakpoint_exceptions_notify(
+		struct notifier_block *unused, unsigned long val, void *data)
+{
+	int ret = NOTIFY_DONE;
+
+	switch (val) {
+	case DIE_DABR_MATCH:
+		ret = hw_breakpoint_handler(data);
+		break;
+	case DIE_SSTEP:
+		ret = single_step_dabr_instruction(data);
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Release the user breakpoints used by ptrace
+ */
+void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+	struct thread_struct *t = &tsk->thread;
+
+	unregister_hw_breakpoint(t->ptrace_bps[0]);
+	t->ptrace_bps[0] = NULL;
+}
+
+void hw_breakpoint_pmu_read(struct perf_event *bp)
+{
+	/* TODO */
+}
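
[Note: throughout the handlers above, set_dabr() receives the watch address
with the control bits OR-ed into the low-order bits. A sketch of the layout as
this patch uses it (the bit definitions come from asm/reg.h; shown only for
orientation, not as new code for the kernel):

	/*
	 * DABR value: doubleword-aligned address in the upper bits, plus
	 * DABR_TRANSLATION (match translated addresses only) and the
	 * read/write enables in the low three bits.
	 */
	static unsigned long dabr_value(unsigned long addr, int type)
	{
		return (addr & ~(unsigned long)HW_BREAKPOINT_ALIGN)
			| (type & (DABR_DATA_READ | DABR_DATA_WRITE))
			| DABR_TRANSLATION;
	}
]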
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index ed31a29c4ff7..de6b70eac51d 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -25,6 +25,7 @@
 #include <asm/sections.h>	/* _end */
 #include <asm/prom.h>
 #include <asm/smp.h>
+#include <asm/hw_breakpoint.h>
 
 int default_machine_kexec_prepare(struct kimage *image)
 {
@@ -165,6 +166,7 @@ static void kexec_smp_down(void *arg)
 	while(kexec_all_irq_disabled == 0)
 		cpu_relax();
 	mb(); /* make sure all irqs are disabled before this */
+	hw_breakpoint_disable();
 	/*
 	 * Now every CPU has IRQs off, we can clear out any pending
 	 * IPIs and be sure that no more will come in after this.
@@ -180,6 +182,7 @@ static void kexec_prepare_cpus_wait(int wait_state)
 {
 	int my_cpu, i, notified=-1;
 
+	hw_breakpoint_disable();
 	my_cpu = get_cpu();
 	/* Make sure each CPU has atleast made it to the state we need */
 	for_each_online_cpu(i) {
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 773424df828a..1e78453645be 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -37,6 +37,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/personality.h>
 #include <linux/random.h>
+#include <linux/hw_breakpoint.h>
 
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
@@ -462,8 +463,14 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 	switch_booke_debug_regs(&new->thread);
 #else
+/*
+ * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
+ * schedule DABR
+ */
+#ifndef CONFIG_HAVE_HW_BREAKPOINT
 	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
 		set_dabr(new->thread.dabr);
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
 #endif
 
 
@@ -642,7 +649,11 @@ void flush_thread(void)
 {
 	discard_lazy_cpu_state();
 
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+	flush_ptrace_hw_breakpoint(current);
+#else /* CONFIG_HAVE_HW_BREAKPOINT */
 	set_debug_reg_defaults(&current->thread);
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
 }
 
 void
647 658
648void 659void
@@ -660,6 +671,9 @@ void prepare_to_copy(struct task_struct *tsk)
 	flush_altivec_to_thread(current);
 	flush_vsx_to_thread(current);
 	flush_spe_to_thread(current);
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+	flush_ptrace_hw_breakpoint(tsk);
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
 }
 
 /*
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 7a0c0199ea28..11f3cd9c832f 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -32,6 +32,8 @@
 #ifdef CONFIG_PPC32
 #include <linux/module.h>
 #endif
+#include <linux/hw_breakpoint.h>
+#include <linux/perf_event.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -866,9 +868,34 @@ void user_disable_single_step(struct task_struct *task)
 	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
 }
 
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+void ptrace_triggered(struct perf_event *bp, int nmi,
+		      struct perf_sample_data *data, struct pt_regs *regs)
+{
+	struct perf_event_attr attr;
+
+	/*
+	 * Disable the breakpoint request here since ptrace has defined a
+	 * one-shot behaviour for breakpoint exceptions in PPC64.
+	 * The SIGTRAP signal is generated automatically for us in do_dabr().
+	 * We don't have to do anything about that here
+	 */
+	attr = bp->attr;
+	attr.disabled = true;
+	modify_user_hw_breakpoint(bp, &attr);
+}
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
 int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
 			unsigned long data)
 {
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+	int ret;
+	struct thread_struct *thread = &(task->thread);
+	struct perf_event *bp;
+	struct perf_event_attr attr;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
 	/* For ppc64 we support one DABR and no IABR's at the moment (ppc64).
 	 * For embedded processors we support one DAC and no IAC's at the
 	 * moment.
@@ -896,6 +923,43 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
 	/* Ensure breakpoint translation bit is set */
 	if (data && !(data & DABR_TRANSLATION))
 		return -EIO;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+	bp = thread->ptrace_bps[0];
+	if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) {
+		if (bp) {
+			unregister_hw_breakpoint(bp);
+			thread->ptrace_bps[0] = NULL;
+		}
+		return 0;
+	}
+	if (bp) {
+		attr = bp->attr;
+		attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN;
+		arch_bp_generic_fields(data &
+					(DABR_DATA_WRITE | DABR_DATA_READ),
+							&attr.bp_type);
+		ret = modify_user_hw_breakpoint(bp, &attr);
+		if (ret)
+			return ret;
+		thread->ptrace_bps[0] = bp;
+		thread->dabr = data;
+		return 0;
+	}
+
+	/* Create a new breakpoint request if one doesn't exist already */
+	hw_breakpoint_init(&attr);
+	attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN;
+	arch_bp_generic_fields(data & (DABR_DATA_WRITE | DABR_DATA_READ),
+							&attr.bp_type);
+
+	thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
+						ptrace_triggered, task);
+	if (IS_ERR(bp)) {
+		thread->ptrace_bps[0] = NULL;
+		return PTR_ERR(bp);
+	}
+
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
 
 	/* Move contents to the DABR register */
 	task->thread.dabr = data;
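
[Note: nothing changes for user space -- a debugger still programs the
watchpoint through PTRACE_SET_DEBUGREG, and the request is now backed by a
perf breakpoint behind the scenes. A minimal tracer sketch, with error
handling omitted and the numeric control bits mirroring asm/reg.h
(DABR_TRANSLATION = 4, DABR_DATA_WRITE = 2, DABR_DATA_READ = 1 -- treat these
as assumptions to be checked against the headers):

	#include <sys/ptrace.h>
	#include <sys/types.h>

	/* one-shot read/write watchpoint on `addr` in traced child `pid` */
	static long set_watchpoint(pid_t pid, unsigned long addr)
	{
		unsigned long dabr = (addr & ~7UL)	/* doubleword aligned */
			| 4UL	/* translation bit, required by the -EIO check */
			| 2UL | 1UL;	/* write | read */

		return ptrace(PTRACE_SET_DEBUGREG, pid, 0, dabr);
	}

The child stops with SIGTRAP on the first access; per ptrace_triggered() above
the kernel then disables the breakpoint, so the tracer must re-arm it to catch
further accesses.]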
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index a0afb555a7c9..7109f5b1baa8 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -11,6 +11,7 @@
 
 #include <linux/tracehook.h>
 #include <linux/signal.h>
+#include <asm/hw_breakpoint.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 
@@ -149,6 +150,8 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
 	if (current->thread.dabr)
 		set_dabr(current->thread.dabr);
 #endif
+	/* Re-enable the breakpoints for the signal stack */
+	thread_change_pc(current, regs);
 
 	if (is32) {
 		if (ka.sa.sa_flags & SA_SIGINFO)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 25fc33984c2b..e5fe5a8522a6 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -688,7 +688,7 @@ void RunModeException(struct pt_regs *regs)
 
 void __kprobes single_step_exception(struct pt_regs *regs)
 {
-	regs->msr &= ~(MSR_SE | MSR_BE);	/* Turn off 'trace' bits */
+	clear_single_step(regs);
 
 	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
 					5, SIGTRAP) == NOTIFY_STOP)
@@ -707,10 +707,8 @@ void __kprobes single_step_exception(struct pt_regs *regs)
  */
 static void emulate_single_step(struct pt_regs *regs)
 {
-	if (single_stepping(regs)) {
-		clear_single_step(regs);
-		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
-	}
+	if (single_stepping(regs))
+		single_step_exception(regs);
 }
 
 static inline int __parse_fpscr(unsigned long fpscr)
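
[Note: this rework matters because the hw-breakpoint core listens for
single-step completion on the die-notifier chain; by funnelling
emulate_single_step() through single_step_exception(), every completed step
now raises DIE_SSTEP so single_step_dabr_instruction() can re-arm the DABR.
The generic code hooks the chain roughly like this (paraphrased from
kernel/hw_breakpoint.c, not part of this diff):

	static struct notifier_block hw_breakpoint_exceptions_nb = {
		.notifier_call	= hw_breakpoint_exceptions_notify,
		/* we need to be notified first */
		.priority	= 0x7fffffff,
	};

	static int __init init_hw_breakpoint(void)
	{
		return register_die_notifier(&hw_breakpoint_exceptions_nb);
	}
]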
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 111da1c03a11..5bb89c828070 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -18,8 +18,9 @@ obj-$(CONFIG_HAS_IOMEM) += devres.o
 
 obj-$(CONFIG_PPC64)	+= copypage_64.o copyuser_64.o \
 			   memcpy_64.o usercopy_64.o mem_64.o string.o
-obj-$(CONFIG_XMON)	+= sstep.o
-obj-$(CONFIG_KPROBES)	+= sstep.o
+obj-$(CONFIG_XMON)	+= sstep.o ldstfp.o
+obj-$(CONFIG_KPROBES)	+= sstep.o ldstfp.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= sstep.o ldstfp.o
 
 ifeq ($(CONFIG_PPC64),y)
 obj-$(CONFIG_SMP)	+= locks.o
diff --git a/arch/powerpc/lib/ldstfp.S b/arch/powerpc/lib/ldstfp.S
new file mode 100644
index 000000000000..f6448636baf5
--- /dev/null
+++ b/arch/powerpc/lib/ldstfp.S
@@ -0,0 +1,375 @@
+/*
+ * Floating-point, VMX/Altivec and VSX loads and stores
+ * for use in instruction emulation.
+ *
+ * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+#include <asm/ppc-opcode.h>
+#include <asm/reg.h>
+#include <asm/asm-offsets.h>
+#include <linux/errno.h>
+
+#define STKFRM	(PPC_MIN_STKFRM + 16)
+
+	.macro	extab	instr,handler
+	.section __ex_table,"a"
+	PPC_LONG \instr,\handler
+	.previous
+	.endm
+
+	.macro	inst32	op
+reg = 0
+	.rept	32
+20:	\op	reg,0,r4
+	b	3f
+	extab	20b,99f
+reg = reg + 1
+	.endr
+	.endm
+
+/* Get the contents of frN into fr0; N is in r3. */
+_GLOBAL(get_fpr)
+	mflr	r0
+	rlwinm	r3,r3,3,0xf8
+	bcl	20,31,1f
+	blr			/* fr0 is already in fr0 */
+	nop
+reg = 1
+	.rept	31
+	fmr	fr0,reg
+	blr
+reg = reg + 1
+	.endr
+1:	mflr	r5
+	add	r5,r3,r5
+	mtctr	r5
+	mtlr	r0
+	bctr
+
+/* Put the contents of fr0 into frN; N is in r3. */
+_GLOBAL(put_fpr)
+	mflr	r0
+	rlwinm	r3,r3,3,0xf8
+	bcl	20,31,1f
+	blr			/* fr0 is already in fr0 */
+	nop
+reg = 1
+	.rept	31
+	fmr	reg,fr0
+	blr
+reg = reg + 1
+	.endr
+1:	mflr	r5
+	add	r5,r3,r5
+	mtctr	r5
+	mtlr	r0
+	bctr
+
+/* Load FP reg N from float at *p.  N is in r3, p in r4. */
+_GLOBAL(do_lfs)
+	PPC_STLU r1,-STKFRM(r1)
+	mflr	r0
+	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mfmsr	r6
+	ori	r7,r6,MSR_FP
+	cmpwi	cr7,r3,0
+	mtmsrd	r7
+	isync
+	beq	cr7,1f
+	stfd	fr0,STKFRM-16(r1)
+1:	li	r9,-EFAULT
+2:	lfs	fr0,0(r4)
+	li	r9,0
+3:	bl	put_fpr
+	beq	cr7,4f
+	lfd	fr0,STKFRM-16(r1)
+4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mtlr	r0
+	mtmsrd	r6
+	isync
+	mr	r3,r9
+	addi	r1,r1,STKFRM
+	blr
+	extab	2b,3b
+
+/* Load FP reg N from double at *p.  N is in r3, p in r4. */
+_GLOBAL(do_lfd)
+	PPC_STLU r1,-STKFRM(r1)
+	mflr	r0
+	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mfmsr	r6
+	ori	r7,r6,MSR_FP
+	cmpwi	cr7,r3,0
+	mtmsrd	r7
+	isync
+	beq	cr7,1f
+	stfd	fr0,STKFRM-16(r1)
+1:	li	r9,-EFAULT
+2:	lfd	fr0,0(r4)
+	li	r9,0
+3:	beq	cr7,4f
+	bl	put_fpr
+	lfd	fr0,STKFRM-16(r1)
+4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mtlr	r0
+	mtmsrd	r6
+	isync
+	mr	r3,r9
+	addi	r1,r1,STKFRM
+	blr
+	extab	2b,3b
+
+/* Store FP reg N to float at *p.  N is in r3, p in r4. */
+_GLOBAL(do_stfs)
+	PPC_STLU r1,-STKFRM(r1)
+	mflr	r0
+	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mfmsr	r6
+	ori	r7,r6,MSR_FP
+	cmpwi	cr7,r3,0
+	mtmsrd	r7
+	isync
+	beq	cr7,1f
+	stfd	fr0,STKFRM-16(r1)
+	bl	get_fpr
+1:	li	r9,-EFAULT
+2:	stfs	fr0,0(r4)
+	li	r9,0
+3:	beq	cr7,4f
+	lfd	fr0,STKFRM-16(r1)
+4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mtlr	r0
+	mtmsrd	r6
+	isync
+	mr	r3,r9
+	addi	r1,r1,STKFRM
+	blr
+	extab	2b,3b
+
+/* Store FP reg N to double at *p.  N is in r3, p in r4. */
+_GLOBAL(do_stfd)
+	PPC_STLU r1,-STKFRM(r1)
+	mflr	r0
+	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mfmsr	r6
+	ori	r7,r6,MSR_FP
+	cmpwi	cr7,r3,0
+	mtmsrd	r7
+	isync
+	beq	cr7,1f
+	stfd	fr0,STKFRM-16(r1)
+	bl	get_fpr
+1:	li	r9,-EFAULT
+2:	stfd	fr0,0(r4)
+	li	r9,0
+3:	beq	cr7,4f
+	lfd	fr0,STKFRM-16(r1)
+4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mtlr	r0
+	mtmsrd	r6
+	isync
+	mr	r3,r9
+	addi	r1,r1,STKFRM
+	blr
+	extab	2b,3b
+
+#ifdef CONFIG_ALTIVEC
+/* Get the contents of vrN into vr0; N is in r3. */
+_GLOBAL(get_vr)
+	mflr	r0
+	rlwinm	r3,r3,3,0xf8
+	bcl	20,31,1f
+	blr			/* vr0 is already in vr0 */
+	nop
+reg = 1
+	.rept	31
+	vor	vr0,reg,reg	/* assembler doesn't know vmr? */
+	blr
+reg = reg + 1
+	.endr
+1:	mflr	r5
+	add	r5,r3,r5
+	mtctr	r5
+	mtlr	r0
+	bctr
+
+/* Put the contents of vr0 into vrN; N is in r3. */
+_GLOBAL(put_vr)
+	mflr	r0
+	rlwinm	r3,r3,3,0xf8
+	bcl	20,31,1f
+	blr			/* vr0 is already in vr0 */
+	nop
+reg = 1
+	.rept	31
+	vor	reg,vr0,vr0
+	blr
+reg = reg + 1
+	.endr
+1:	mflr	r5
+	add	r5,r3,r5
+	mtctr	r5
+	mtlr	r0
+	bctr
+
+/* Load vector reg N from *p.  N is in r3, p in r4. */
+_GLOBAL(do_lvx)
+	PPC_STLU r1,-STKFRM(r1)
+	mflr	r0
+	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mfmsr	r6
+	oris	r7,r6,MSR_VEC@h
+	cmpwi	cr7,r3,0
+	li	r8,STKFRM-16
+	mtmsrd	r7
+	isync
+	beq	cr7,1f
+	stvx	vr0,r1,r8
+1:	li	r9,-EFAULT
+2:	lvx	vr0,0,r4
+	li	r9,0
+3:	beq	cr7,4f
+	bl	put_vr
+	lvx	vr0,r1,r8
+4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mtlr	r0
+	mtmsrd	r6
+	isync
+	mr	r3,r9
+	addi	r1,r1,STKFRM
+	blr
+	extab	2b,3b
+
+/* Store vector reg N to *p.  N is in r3, p in r4. */
+_GLOBAL(do_stvx)
+	PPC_STLU r1,-STKFRM(r1)
+	mflr	r0
+	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mfmsr	r6
+	oris	r7,r6,MSR_VEC@h
+	cmpwi	cr7,r3,0
+	li	r8,STKFRM-16
+	mtmsrd	r7
+	isync
+	beq	cr7,1f
+	stvx	vr0,r1,r8
+	bl	get_vr
+1:	li	r9,-EFAULT
+2:	stvx	vr0,0,r4
+	li	r9,0
+3:	beq	cr7,4f
+	lvx	vr0,r1,r8
+4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mtlr	r0
+	mtmsrd	r6
+	isync
+	mr	r3,r9
+	addi	r1,r1,STKFRM
+	blr
+	extab	2b,3b
+#endif /* CONFIG_ALTIVEC */
+
+#ifdef CONFIG_VSX
+/* Get the contents of vsrN into vsr0; N is in r3. */
+_GLOBAL(get_vsr)
+	mflr	r0
+	rlwinm	r3,r3,3,0x1f8
+	bcl	20,31,1f
+	blr			/* vsr0 is already in vsr0 */
+	nop
+reg = 1
+	.rept	63
+	XXLOR(0,reg,reg)
+	blr
+reg = reg + 1
+	.endr
+1:	mflr	r5
+	add	r5,r3,r5
+	mtctr	r5
+	mtlr	r0
+	bctr
+
+/* Put the contents of vsr0 into vsrN; N is in r3. */
+_GLOBAL(put_vsr)
+	mflr	r0
+	rlwinm	r3,r3,3,0x1f8
+	bcl	20,31,1f
+	blr			/* vsr0 is already in vsr0 */
+	nop
+reg = 1
+	.rept	63
+	XXLOR(reg,0,0)
+	blr
+reg = reg + 1
+	.endr
+1:	mflr	r5
+	add	r5,r3,r5
+	mtctr	r5
+	mtlr	r0
+	bctr
+
+/* Load VSX reg N from vector doubleword *p.  N is in r3, p in r4. */
+_GLOBAL(do_lxvd2x)
+	PPC_STLU r1,-STKFRM(r1)
+	mflr	r0
+	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mfmsr	r6
+	oris	r7,r6,MSR_VSX@h
+	cmpwi	cr7,r3,0
+	li	r8,STKFRM-16
+	mtmsrd	r7
+	isync
+	beq	cr7,1f
+	STXVD2X(0,r1,r8)
+1:	li	r9,-EFAULT
+2:	LXVD2X(0,0,r4)
+	li	r9,0
+3:	beq	cr7,4f
+	bl	put_vsr
+	LXVD2X(0,r1,r8)
+4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mtlr	r0
+	mtmsrd	r6
+	isync
+	mr	r3,r9
+	addi	r1,r1,STKFRM
+	blr
+	extab	2b,3b
+
+/* Store VSX reg N to vector doubleword *p.  N is in r3, p in r4. */
+_GLOBAL(do_stxvd2x)
+	PPC_STLU r1,-STKFRM(r1)
+	mflr	r0
+	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mfmsr	r6
+	oris	r7,r6,MSR_VSX@h
+	cmpwi	cr7,r3,0
+	li	r8,STKFRM-16
+	mtmsrd	r7
+	isync
+	beq	cr7,1f
+	STXVD2X(0,r1,r8)
+	bl	get_vsr
+1:	li	r9,-EFAULT
+2:	STXVD2X(0,0,r4)
+	li	r9,0
+3:	beq	cr7,4f
+	LXVD2X(0,r1,r8)
+4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mtlr	r0
+	mtmsrd	r6
+	isync
+	mr	r3,r9
+	addi	r1,r1,STKFRM
+	blr
+	extab	2b,3b
+
+#endif /* CONFIG_VSX */
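
[Note: get_fpr/put_fpr and friends cannot index the register file from C, so
each routine materializes one two-instruction stub per register (a move plus a
blr, 8 bytes per pair) and computes a branch target from the register number:
rlwinm multiplies N by 8, and bcl 20,31,1f captures the table's base address.
A C analogue of the dispatch, for illustration only:

	/* branch into a table of fixed-size stubs: stub N at base + N * 8,
	 * just as get_fpr does with mtctr/bctr */
	typedef void (*stub_fn)(void);

	static void dispatch(void *stub_base, int n)
	{
		/* rlwinm r3,r3,3,0xf8  ==  (n & 0x1f) << 3 */
		((stub_fn)((char *)stub_base + ((n & 0x1f) << 3)))();
	}
]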
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 13b7d54f185b..e0a9858d537e 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -13,6 +13,8 @@
 #include <linux/ptrace.h>
 #include <asm/sstep.h>
 #include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/cputable.h>
 
 extern char system_call_common[];
 
@@ -23,6 +25,23 @@ extern char system_call_common[];
 #define MSR_MASK	0x87c0ffff
 #endif
 
+/* Bits in XER */
+#define XER_SO		0x80000000U
+#define XER_OV		0x40000000U
+#define XER_CA		0x20000000U
+
+/*
+ * Functions in ldstfp.S
+ */
+extern int do_lfs(int rn, unsigned long ea);
+extern int do_lfd(int rn, unsigned long ea);
+extern int do_stfs(int rn, unsigned long ea);
+extern int do_stfd(int rn, unsigned long ea);
+extern int do_lvx(int rn, unsigned long ea);
+extern int do_stvx(int rn, unsigned long ea);
+extern int do_lxvd2x(int rn, unsigned long ea);
+extern int do_stxvd2x(int rn, unsigned long ea);
+
 /*
  * Determine whether a conditional branch instruction would branch.
  */
@@ -46,16 +65,499 @@ static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
 	return 1;
 }
 
+
+static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
+{
+	if (!user_mode(regs))
+		return 1;
+	return __access_ok(ea, nb, USER_DS);
+}
+
+/*
+ * Calculate effective address for a D-form instruction
+ */
+static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
+{
+	int ra;
+	unsigned long ea;
+
+	ra = (instr >> 16) & 0x1f;
+	ea = (signed short) instr;		/* sign-extend */
+	if (ra) {
+		ea += regs->gpr[ra];
+		if (instr & 0x04000000)		/* update forms */
+			regs->gpr[ra] = ea;
+	}
+#ifdef __powerpc64__
+	if (!(regs->msr & MSR_SF))
+		ea &= 0xffffffffUL;
+#endif
+	return ea;
+}
+
+#ifdef __powerpc64__
+/*
+ * Calculate effective address for a DS-form instruction
+ */
+static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
+{
+	int ra;
+	unsigned long ea;
+
+	ra = (instr >> 16) & 0x1f;
+	ea = (signed short) (instr & ~3);	/* sign-extend */
+	if (ra) {
+		ea += regs->gpr[ra];
+		if ((instr & 3) == 1)		/* update forms */
+			regs->gpr[ra] = ea;
+	}
+	if (!(regs->msr & MSR_SF))
+		ea &= 0xffffffffUL;
+	return ea;
+}
+#endif /* __powerpc64 */
+
+/*
+ * Calculate effective address for an X-form instruction
+ */
+static unsigned long __kprobes xform_ea(unsigned int instr, struct pt_regs *regs,
+					int do_update)
+{
+	int ra, rb;
+	unsigned long ea;
+
+	ra = (instr >> 16) & 0x1f;
+	rb = (instr >> 11) & 0x1f;
+	ea = regs->gpr[rb];
+	if (ra) {
+		ea += regs->gpr[ra];
+		if (do_update)		/* update forms */
+			regs->gpr[ra] = ea;
+	}
+#ifdef __powerpc64__
+	if (!(regs->msr & MSR_SF))
+		ea &= 0xffffffffUL;
+#endif
+	return ea;
+}
+
+/*
+ * Return the largest power of 2, not greater than sizeof(unsigned long),
+ * such that x is a multiple of it.
+ */
+static inline unsigned long max_align(unsigned long x)
+{
+	x |= sizeof(unsigned long);
+	return x & -x;		/* isolates rightmost bit */
+}
+
+
+static inline unsigned long byterev_2(unsigned long x)
+{
+	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
+}
+
+static inline unsigned long byterev_4(unsigned long x)
+{
+	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
+		((x & 0xff00) << 8) | ((x & 0xff) << 24);
+}
+
+#ifdef __powerpc64__
+static inline unsigned long byterev_8(unsigned long x)
+{
+	return (byterev_4(x) << 32) | byterev_4(x >> 32);
+}
+#endif
+
+static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
+				      int nb)
+{
+	int err = 0;
+	unsigned long x = 0;
+
+	switch (nb) {
+	case 1:
+		err = __get_user(x, (unsigned char __user *) ea);
+		break;
+	case 2:
+		err = __get_user(x, (unsigned short __user *) ea);
+		break;
+	case 4:
+		err = __get_user(x, (unsigned int __user *) ea);
+		break;
+#ifdef __powerpc64__
+	case 8:
+		err = __get_user(x, (unsigned long __user *) ea);
+		break;
+#endif
+	}
+	if (!err)
+		*dest = x;
+	return err;
+}
+
+static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
+					int nb, struct pt_regs *regs)
+{
+	int err;
+	unsigned long x, b, c;
+
+	/* unaligned, do this in pieces */
+	x = 0;
+	for (; nb > 0; nb -= c) {
+		c = max_align(ea);
+		if (c > nb)
+			c = max_align(nb);
+		err = read_mem_aligned(&b, ea, c);
+		if (err)
+			return err;
+		x = (x << (8 * c)) + b;
+		ea += c;
+	}
+	*dest = x;
+	return 0;
+}
+
+/*
+ * Read memory at address ea for nb bytes, return 0 for success
+ * or -EFAULT if an error occurred.
+ */
+static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
+			      struct pt_regs *regs)
+{
+	if (!address_ok(regs, ea, nb))
+		return -EFAULT;
+	if ((ea & (nb - 1)) == 0)
+		return read_mem_aligned(dest, ea, nb);
+	return read_mem_unaligned(dest, ea, nb, regs);
+}
+
+static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
+				       int nb)
+{
+	int err = 0;
+
+	switch (nb) {
+	case 1:
+		err = __put_user(val, (unsigned char __user *) ea);
+		break;
+	case 2:
+		err = __put_user(val, (unsigned short __user *) ea);
+		break;
+	case 4:
+		err = __put_user(val, (unsigned int __user *) ea);
+		break;
+#ifdef __powerpc64__
+	case 8:
+		err = __put_user(val, (unsigned long __user *) ea);
+		break;
+#endif
+	}
+	return err;
+}
+
+static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
+					 int nb, struct pt_regs *regs)
+{
+	int err;
+	unsigned long c;
+
+	/* unaligned or little-endian, do this in pieces */
+	for (; nb > 0; nb -= c) {
+		c = max_align(ea);
+		if (c > nb)
+			c = max_align(nb);
+		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
+		if (err)
+			return err;
+		ea += c;	/* advance by the chunk just written, as in read_mem_unaligned() */
+	}
+	return 0;
+}
+
+/*
+ * Write memory at address ea for nb bytes, return 0 for success
+ * or -EFAULT if an error occurred.
+ */
+static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
+			       struct pt_regs *regs)
+{
+	if (!address_ok(regs, ea, nb))
+		return -EFAULT;
+	if ((ea & (nb - 1)) == 0)
+		return write_mem_aligned(val, ea, nb);
+	return write_mem_unaligned(val, ea, nb, regs);
+}
+
 /*
- * Emulate instructions that cause a transfer of control.
+ * Check the address and alignment, and call func to do the actual
+ * load or store.
+ */
+static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
+				unsigned long ea, int nb,
+				struct pt_regs *regs)
+{
+	int err;
+	unsigned long val[sizeof(double) / sizeof(long)];
+	unsigned long ptr;
+
+	if (!address_ok(regs, ea, nb))
+		return -EFAULT;
+	if ((ea & 3) == 0)
+		return (*func)(rn, ea);
+	ptr = (unsigned long) &val[0];
+	if (sizeof(unsigned long) == 8 || nb == 4) {
+		err = read_mem_unaligned(&val[0], ea, nb, regs);
+		ptr += sizeof(unsigned long) - nb;
+	} else {
+		/* reading a double on 32-bit */
+		err = read_mem_unaligned(&val[0], ea, 4, regs);
+		if (!err)
+			err = read_mem_unaligned(&val[1], ea + 4, 4, regs);
+	}
+	if (err)
+		return err;
+	return (*func)(rn, ptr);
+}
+
+static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
+				 unsigned long ea, int nb,
+				 struct pt_regs *regs)
+{
+	int err;
+	unsigned long val[sizeof(double) / sizeof(long)];
+	unsigned long ptr;
+
+	if (!address_ok(regs, ea, nb))
+		return -EFAULT;
+	if ((ea & 3) == 0)
+		return (*func)(rn, ea);
+	ptr = (unsigned long) &val[0];
+	if (sizeof(unsigned long) == 8 || nb == 4) {
+		ptr += sizeof(unsigned long) - nb;
+		err = (*func)(rn, ptr);
+		if (err)
+			return err;
+		err = write_mem_unaligned(val[0], ea, nb, regs);
+	} else {
+		/* writing a double on 32-bit */
+		err = (*func)(rn, ptr);
+		if (err)
+			return err;
+		err = write_mem_unaligned(val[0], ea, 4, regs);
+		if (!err)
+			err = write_mem_unaligned(val[1], ea + 4, 4, regs);
+	}
+	return err;
+}
+
+#ifdef CONFIG_ALTIVEC
+/* For Altivec/VMX, no need to worry about alignment */
+static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
+				 unsigned long ea, struct pt_regs *regs)
+{
+	if (!address_ok(regs, ea & ~0xfUL, 16))
+		return -EFAULT;
+	return (*func)(rn, ea);
+}
+
+static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
+				  unsigned long ea, struct pt_regs *regs)
+{
+	if (!address_ok(regs, ea & ~0xfUL, 16))
+		return -EFAULT;
+	return (*func)(rn, ea);
+}
+#endif /* CONFIG_ALTIVEC */
+
+#ifdef CONFIG_VSX
+static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
+				 unsigned long ea, struct pt_regs *regs)
+{
+	int err;
+	unsigned long val[2];
+
+	if (!address_ok(regs, ea, 16))
+		return -EFAULT;
+	if ((ea & 3) == 0)
+		return (*func)(rn, ea);
+	err = read_mem_unaligned(&val[0], ea, 8, regs);
+	if (!err)
+		err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
+	if (!err)
+		err = (*func)(rn, (unsigned long) &val[0]);
+	return err;
+}
+
+static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
+				  unsigned long ea, struct pt_regs *regs)
+{
+	int err;
+	unsigned long val[2];
+
+	if (!address_ok(regs, ea, 16))
+		return -EFAULT;
+	if ((ea & 3) == 0)
+		return (*func)(rn, ea);
+	err = (*func)(rn, (unsigned long) &val[0]);
+	if (err)
+		return err;
+	err = write_mem_unaligned(val[0], ea, 8, regs);
+	if (!err)
+		err = write_mem_unaligned(val[1], ea + 8, 8, regs);
+	return err;
+}
+#endif /* CONFIG_VSX */
+
+#define __put_user_asmx(x, addr, err, op, cr)		\
+	__asm__ __volatile__(				\
+		"1:	" op " %2,0,%3\n"		\
+		"	mfcr	%1\n"			\
+		"2:\n"					\
+		".section .fixup,\"ax\"\n"		\
+		"3:	li	%0,%4\n"		\
+		"	b	2b\n"			\
+		".previous\n"				\
+		".section __ex_table,\"a\"\n"		\
+			PPC_LONG_ALIGN "\n"		\
+			PPC_LONG "1b,3b\n"		\
+		".previous"				\
+		: "=r" (err), "=r" (cr)			\
+		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
+
+#define __get_user_asmx(x, addr, err, op)		\
+	__asm__ __volatile__(				\
+		"1:	"op" %1,0,%2\n"			\
+		"2:\n"					\
+		".section .fixup,\"ax\"\n"		\
+		"3:	li	%0,%3\n"		\
+		"	b	2b\n"			\
+		".previous\n"				\
+		".section __ex_table,\"a\"\n"		\
+			PPC_LONG_ALIGN "\n"		\
+			PPC_LONG "1b,3b\n"		\
+		".previous"				\
+		: "=r" (err), "=r" (x)			\
+		: "r" (addr), "i" (-EFAULT), "0" (err))
+
+#define __cacheop_user_asmx(addr, err, op)		\
+	__asm__ __volatile__(				\
+		"1:	"op" 0,%1\n"			\
+		"2:\n"					\
+		".section .fixup,\"ax\"\n"		\
+		"3:	li	%0,%3\n"		\
+		"	b	2b\n"			\
+		".previous\n"				\
+		".section __ex_table,\"a\"\n"		\
+			PPC_LONG_ALIGN "\n"		\
+			PPC_LONG "1b,3b\n"		\
+		".previous"				\
+		: "=r" (err)				\
+		: "r" (addr), "i" (-EFAULT), "0" (err))
+
+static void __kprobes set_cr0(struct pt_regs *regs, int rd)
+{
+	long val = regs->gpr[rd];
+
+	regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
+#ifdef __powerpc64__
+	if (!(regs->msr & MSR_SF))
+		val = (int) val;
+#endif
+	if (val < 0)
+		regs->ccr |= 0x80000000;
+	else if (val > 0)
+		regs->ccr |= 0x40000000;
+	else
+		regs->ccr |= 0x20000000;
+}
+
+static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
+				     unsigned long val1, unsigned long val2,
+				     unsigned long carry_in)
+{
+	unsigned long val = val1 + val2;
+
+	if (carry_in)
+		++val;
+	regs->gpr[rd] = val;
+#ifdef __powerpc64__
+	if (!(regs->msr & MSR_SF)) {
+		val = (unsigned int) val;
+		val1 = (unsigned int) val1;
+	}
+#endif
+	if (val < val1 || (carry_in && val == val1))
+		regs->xer |= XER_CA;
+	else
+		regs->xer &= ~XER_CA;
+}
+
+static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
+				    int crfld)
+{
+	unsigned int crval, shift;
+
+	crval = (regs->xer >> 31) & 1;		/* get SO bit */
+	if (v1 < v2)
+		crval |= 8;
+	else if (v1 > v2)
+		crval |= 4;
+	else
+		crval |= 2;
+	shift = (7 - crfld) * 4;
+	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
+}
+
+static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
+				      unsigned long v2, int crfld)
+{
+	unsigned int crval, shift;
+
+	crval = (regs->xer >> 31) & 1;		/* get SO bit */
+	if (v1 < v2)
+		crval |= 8;
+	else if (v1 > v2)
+		crval |= 4;
+	else
+		crval |= 2;
+	shift = (7 - crfld) * 4;
+	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
+}
+
+/*
+ * Elements of 32-bit rotate and mask instructions.
+ */
+#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
+			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
+#ifdef __powerpc64__
+#define MASK64_L(mb)	(~0UL >> (mb))
+#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
+#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
+#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
+#else
+#define DATA32(x)	(x)
+#endif
+#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
+
+/*
+ * Emulate instructions that cause a transfer of control,
+ * loads and stores, and a few other instructions.
  * Returns 1 if the step was emulated, 0 if not,
  * or -1 if the instruction is one that should not be stepped,
  * such as an rfid, or a mtmsrd that would clear MSR_RI.
  */
 int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 {
-	unsigned int opcode, rs, rb, rd, spr;
+	unsigned int opcode, ra, rb, rd, spr, u;
 	unsigned long int imm;
+	unsigned long int val, val2;
+	unsigned long int ea;
+	unsigned int cr, mb, me, sh;
+	int err;
+	unsigned long old_ra;
+	long ival;
 
 	opcode = instr >> 26;
 	switch (opcode) {
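
[Note: the 32-bit rotate-and-mask cases below lean on DATA32()/ROTATE()/MASK32()
from the helper block above: DATA32() duplicates the 32-bit image into both
halves of a 64-bit register so a 64-bit rotate behaves like rlwinm's 32-bit
rotate, and MASK32(mb, me) builds the IBM-bit-numbered mask arithmetically. A
worked check, values computed by hand and shown only as illustration:

	/* rlwinm rA,rS,16,0,15: rotate left 16, keep bits 0..15 */
	unsigned long rs  = 0x12345678;
	unsigned long val = DATA32(rs);		/* 0x1234567812345678 */
	unsigned long res = ROTATE(val, 16) & MASK32(0, 15);
	/* ROTATE(val,16) = 0x5678123456781234, MASK32(0,15) = 0xffff0000,
	 * so res = 0x56780000 -- exactly what rlwinm leaves in rA */
]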
@@ -78,7 +580,13 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 		 * entry code works.  If that is changed, this will
 		 * need to be changed also.
 		 */
+		if (regs->gpr[0] == 0x1ebe &&
+		    cpu_has_feature(CPU_FTR_REAL_LE)) {
+			regs->msr ^= MSR_LE;
+			goto instr_done;
+		}
 		regs->gpr[9] = regs->gpr[13];
+		regs->gpr[10] = MSR_KERNEL;
 		regs->gpr[11] = regs->nip + 4;
 		regs->gpr[12] = regs->msr & MSR_MASK;
 		regs->gpr[13] = (unsigned long) get_paca();
@@ -102,9 +610,9 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 		regs->nip = imm;
 		return 1;
 	case 19:
-		switch (instr & 0x7fe) {
-		case 0x20:	/* bclr */
-		case 0x420:	/* bcctr */
+		switch ((instr >> 1) & 0x3ff) {
+		case 16:	/* bclr */
+		case 528:	/* bcctr */
 			imm = (instr & 0x400)? regs->ctr: regs->link;
 			regs->nip += 4;
 			if ((regs->msr & MSR_SF) == 0) {
@@ -116,30 +624,233 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
116 if (branch_taken(instr, regs)) 624 if (branch_taken(instr, regs))
117 regs->nip = imm; 625 regs->nip = imm;
118 return 1; 626 return 1;
119 case 0x24: /* rfid, scary */ 627
628 case 18: /* rfid, scary */
120 return -1; 629 return -1;
630
631 case 150: /* isync */
632 isync();
633 goto instr_done;
634
635 case 33: /* crnor */
636 case 129: /* crandc */
637 case 193: /* crxor */
638 case 225: /* crnand */
639 case 257: /* crand */
640 case 289: /* creqv */
641 case 417: /* crorc */
642 case 449: /* cror */
643 ra = (instr >> 16) & 0x1f;
644 rb = (instr >> 11) & 0x1f;
645 rd = (instr >> 21) & 0x1f;
646 ra = (regs->ccr >> (31 - ra)) & 1;
647 rb = (regs->ccr >> (31 - rb)) & 1;
648 val = (instr >> (6 + ra * 2 + rb)) & 1;
649 regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
650 (val << (31 - rd));
651 goto instr_done;
652 }
653 break;
654 case 31:
655 switch ((instr >> 1) & 0x3ff) {
656 case 598: /* sync */
657#ifdef __powerpc64__
658 switch ((instr >> 21) & 3) {
659 case 1: /* lwsync */
660 asm volatile("lwsync" : : : "memory");
661 goto instr_done;
662 case 2: /* ptesync */
663 asm volatile("ptesync" : : : "memory");
664 goto instr_done;
665 }
666#endif
667 mb();
668 goto instr_done;
669
670 case 854: /* eieio */
671 eieio();
672 goto instr_done;
673 }
674 break;
675 }
676
677 /* Following cases refer to regs->gpr[], so we need all regs */
678 if (!FULL_REGS(regs))
679 return 0;
680
681 rd = (instr >> 21) & 0x1f;
682 ra = (instr >> 16) & 0x1f;
683 rb = (instr >> 11) & 0x1f;
684
685 switch (opcode) {
686 case 7: /* mulli */
687 regs->gpr[rd] = regs->gpr[ra] * (short) instr;
688 goto instr_done;
689
690 case 8: /* subfic */
691 imm = (short) instr;
692 add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
693 goto instr_done;
694
695 case 10: /* cmpli */
696 imm = (unsigned short) instr;
697 val = regs->gpr[ra];
698#ifdef __powerpc64__
699 if ((rd & 1) == 0)
700 val = (unsigned int) val;
701#endif
702 do_cmp_unsigned(regs, val, imm, rd >> 2);
703 goto instr_done;
704
705 case 11: /* cmpi */
706 imm = (short) instr;
707 val = regs->gpr[ra];
708#ifdef __powerpc64__
709 if ((rd & 1) == 0)
710 val = (int) val;
711#endif
712 do_cmp_signed(regs, val, imm, rd >> 2);
713 goto instr_done;
714
715 case 12: /* addic */
716 imm = (short) instr;
717 add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
718 goto instr_done;
719
720 case 13: /* addic. */
721 imm = (short) instr;
722 add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
723 set_cr0(regs, rd);
724 goto instr_done;
725
726 case 14: /* addi */
727 imm = (short) instr;
728 if (ra)
729 imm += regs->gpr[ra];
730 regs->gpr[rd] = imm;
731 goto instr_done;
732
733 case 15: /* addis */
734 imm = ((short) instr) << 16;
735 if (ra)
736 imm += regs->gpr[ra];
737 regs->gpr[rd] = imm;
738 goto instr_done;
739
740 case 20: /* rlwimi */
741 mb = (instr >> 6) & 0x1f;
742 me = (instr >> 1) & 0x1f;
743 val = DATA32(regs->gpr[rd]);
744 imm = MASK32(mb, me);
745 regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
746 goto logical_done;
747
748 case 21: /* rlwinm */
749 mb = (instr >> 6) & 0x1f;
750 me = (instr >> 1) & 0x1f;
751 val = DATA32(regs->gpr[rd]);
752 regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
753 goto logical_done;
754
755 case 23: /* rlwnm */
756 mb = (instr >> 6) & 0x1f;
757 me = (instr >> 1) & 0x1f;
758 rb = regs->gpr[rb] & 0x1f;
759 val = DATA32(regs->gpr[rd]);
760 regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
761 goto logical_done;
762
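The three rlw* cases above are all rotate-then-mask: the low 32 bits are rotated left and MASK32(mb, me) supplies a wrapping mask in PowerPC's big-endian bit numbering. A worked example with local stand-ins for the kernel's ROTATE()/MASK32() macros:

#include <stdio.h>

/* Local stand-ins for the kernel macros, same semantics assumed. */
static unsigned int rotl32(unsigned int x, unsigned int n)
{
	return n ? (x << n) | (x >> (32 - n)) : x;
}

/* Mask of 1s from big-endian bit mb through me, wrapping if mb > me. */
static unsigned int mask32(unsigned int mb, unsigned int me)
{
	unsigned int m = 0xffffffffu >> mb;
	unsigned int e = 0xffffffffu << (31 - me);
	return mb <= me ? m & e : m | e;
}

int main(void)
{
	/* rlwinm rA,rS,8,24,31 extracts the top byte of rS:
	 * rotate left 8, keep big-endian bits 24-31 (the low byte).
	 */
	unsigned int rs = 0xaabbccdd;
	printf("0x%x\n", rotl32(rs, 8) & mask32(24, 31));	/* 0xaa */
	return 0;
}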
763 case 24: /* ori */
764 imm = (unsigned short) instr;
765 regs->gpr[ra] = regs->gpr[rd] | imm;
766 goto instr_done;
767
768 case 25: /* oris */
769 imm = (unsigned short) instr;
770 regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
771 goto instr_done;
772
773 case 26: /* xori */
774 imm = (unsigned short) instr;
775 regs->gpr[ra] = regs->gpr[rd] ^ imm;
776 goto instr_done;
777
778 case 27: /* xoris */
779 imm = (unsigned short) instr;
780 regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
781 goto instr_done;
782
783 case 28: /* andi. */
784 imm = (unsigned short) instr;
785 regs->gpr[ra] = regs->gpr[rd] & imm;
786 set_cr0(regs, ra);
787 goto instr_done;
788
789 case 29: /* andis. */
790 imm = (unsigned short) instr;
791 regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
792 set_cr0(regs, ra);
793 goto instr_done;
794
795#ifdef __powerpc64__
796 case 30: /* rld* */
797 mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
798 val = regs->gpr[rd];
799 if ((instr & 0x10) == 0) {
800 sh = rb | ((instr & 2) << 4);
801 val = ROTATE(val, sh);
802 switch ((instr >> 2) & 3) {
803 case 0: /* rldicl */
804 regs->gpr[ra] = val & MASK64_L(mb);
805 goto logical_done;
806 case 1: /* rldicr */
807 regs->gpr[ra] = val & MASK64_R(mb);
808 goto logical_done;
809 case 2: /* rldic */
810 regs->gpr[ra] = val & MASK64(mb, 63 - sh);
811 goto logical_done;
812 case 3: /* rldimi */
813 imm = MASK64(mb, 63 - sh);
814 regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
815 (val & imm);
816 goto logical_done;
817 }
818 } else {
819 sh = regs->gpr[rb] & 0x3f;
820 val = ROTATE(val, sh);
821 switch ((instr >> 1) & 7) {
822 case 0: /* rldcl */
823 regs->gpr[ra] = val & MASK64_L(mb);
824 goto logical_done;
825 case 1: /* rldcr */
826 regs->gpr[ra] = val & MASK64_R(mb);
827 goto logical_done;
828 }
121 } 829 }
830#endif
831
122 case 31: 832 case 31:
123 rd = (instr >> 21) & 0x1f; 833 switch ((instr >> 1) & 0x3ff) {
124 switch (instr & 0x7fe) { 834 case 83: /* mfmsr */
125 case 0xa6: /* mfmsr */ 835 if (regs->msr & MSR_PR)
836 break;
126 regs->gpr[rd] = regs->msr & MSR_MASK; 837 regs->gpr[rd] = regs->msr & MSR_MASK;
127 regs->nip += 4; 838 goto instr_done;
128 if ((regs->msr & MSR_SF) == 0) 839 case 146: /* mtmsr */
129 regs->nip &= 0xffffffffUL; 840 if (regs->msr & MSR_PR)
130 return 1; 841 break;
131 case 0x124: /* mtmsr */
132 imm = regs->gpr[rd]; 842 imm = regs->gpr[rd];
133 if ((imm & MSR_RI) == 0) 843 if ((imm & MSR_RI) == 0)
134 /* can't step mtmsr that would clear MSR_RI */ 844 /* can't step mtmsr that would clear MSR_RI */
135 return -1; 845 return -1;
136 regs->msr = imm; 846 regs->msr = imm;
137 regs->nip += 4; 847 goto instr_done;
138 return 1;
139#ifdef CONFIG_PPC64 848#ifdef CONFIG_PPC64
140 case 0x164: /* mtmsrd */ 849 case 178: /* mtmsrd */
141 /* only MSR_EE and MSR_RI get changed if bit 15 set */ 850 /* only MSR_EE and MSR_RI get changed if bit 15 set */
142 /* mtmsrd doesn't change MSR_HV and MSR_ME */ 851 /* mtmsrd doesn't change MSR_HV and MSR_ME */
852 if (regs->msr & MSR_PR)
853 break;
143 imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL; 854 imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL;
144 imm = (regs->msr & MSR_MASK & ~imm) 855 imm = (regs->msr & MSR_MASK & ~imm)
145 | (regs->gpr[rd] & imm); 856 | (regs->gpr[rd] & imm);
@@ -147,57 +858,770 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
147 /* can't step mtmsrd that would clear MSR_RI */ 858 /* can't step mtmsrd that would clear MSR_RI */
148 return -1; 859 return -1;
149 regs->msr = imm; 860 regs->msr = imm;
150 regs->nip += 4; 861 goto instr_done;
151 if ((imm & MSR_SF) == 0)
152 regs->nip &= 0xffffffffUL;
153 return 1;
154#endif 862#endif
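The two mtmsrd masks above fall straight out of the MSR bit assignments: with bit 15 (the L field) set, only MSR_EE (0x8000) and MSR_RI (0x2) are writable, giving 0x8002; otherwise everything except MSR_HV (bit 60) and MSR_ME (0x1000) is writable, giving 0xefffffffffffefffUL. A quick check using the standard 64-bit MSR bit values:

#include <assert.h>

#define MSR_EE	0x8000UL
#define MSR_RI	0x0002UL
#define MSR_ME	0x1000UL
#define MSR_HV	(1UL << 60)

int main(void)
{
	/* L=1 mask: only EE and RI may change. */
	assert((MSR_EE | MSR_RI) == 0x8002UL);
	/* L=0 mask: everything except HV and ME may change. */
	assert(~(MSR_HV | MSR_ME) == 0xefffffffffffefffUL);
	return 0;
}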
155 case 0x26: /* mfcr */ 863 case 19: /* mfcr */
156 regs->gpr[rd] = regs->ccr; 864 regs->gpr[rd] = regs->ccr;
157 regs->gpr[rd] &= 0xffffffffUL; 865 regs->gpr[rd] &= 0xffffffffUL;
158 goto mtspr_out; 866 goto instr_done;
159 case 0x2a6: /* mfspr */ 867
868 case 144: /* mtcrf */
869 imm = 0xf0000000UL;
870 val = regs->gpr[rd];
871 for (sh = 0; sh < 8; ++sh) {
872 if (instr & (0x80000 >> sh))
873 regs->ccr = (regs->ccr & ~imm) |
874 (val & imm);
875 imm >>= 4;
876 }
877 goto instr_done;
878
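mtcrf's loop walks the 8-bit FXM field one CR nibble at a time, copying a 4-bit field from the source register only where the corresponding FXM bit is set. A rough userspace equivalent, function name illustrative:

#include <stdio.h>

/* Update only the CR fields selected by the 8-bit fxm mask,
 * mirroring the emulator's loop.
 */
static unsigned int apply_mtcrf(unsigned int ccr, unsigned int fxm,
				unsigned int val)
{
	unsigned int imm = 0xf0000000u;

	for (int sh = 0; sh < 8; ++sh, imm >>= 4)
		if (fxm & (0x80 >> sh))
			ccr = (ccr & ~imm) | (val & imm);
	return ccr;
}

int main(void)
{
	/* fxm = 0x80 selects CR0 only: the top nibble comes from val. */
	printf("0x%08x\n", apply_mtcrf(0x12345678, 0x80, 0xf0000000));
	return 0;	/* prints 0xf2345678 */
}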
879 case 339: /* mfspr */
160 spr = (instr >> 11) & 0x3ff; 880 spr = (instr >> 11) & 0x3ff;
161 switch (spr) { 881 switch (spr) {
162 case 0x20: /* mfxer */ 882 case 0x20: /* mfxer */
163 regs->gpr[rd] = regs->xer; 883 regs->gpr[rd] = regs->xer;
164 regs->gpr[rd] &= 0xffffffffUL; 884 regs->gpr[rd] &= 0xffffffffUL;
165 goto mtspr_out; 885 goto instr_done;
166 case 0x100: /* mflr */ 886 case 0x100: /* mflr */
167 regs->gpr[rd] = regs->link; 887 regs->gpr[rd] = regs->link;
168 goto mtspr_out; 888 goto instr_done;
169 case 0x120: /* mfctr */ 889 case 0x120: /* mfctr */
170 regs->gpr[rd] = regs->ctr; 890 regs->gpr[rd] = regs->ctr;
171 goto mtspr_out; 891 goto instr_done;
172 }
173 break;
174 case 0x378: /* orx */
175 if (instr & 1)
176 break;
177 rs = (instr >> 21) & 0x1f;
178 rb = (instr >> 11) & 0x1f;
179 if (rs == rb) { /* mr */
180 rd = (instr >> 16) & 0x1f;
181 regs->gpr[rd] = regs->gpr[rs];
182 goto mtspr_out;
183 } 892 }
184 break; 893 break;
185 case 0x3a6: /* mtspr */ 894
895 case 467: /* mtspr */
186 spr = (instr >> 11) & 0x3ff; 896 spr = (instr >> 11) & 0x3ff;
187 switch (spr) { 897 switch (spr) {
188 case 0x20: /* mtxer */ 898 case 0x20: /* mtxer */
189 regs->xer = (regs->gpr[rd] & 0xffffffffUL); 899 regs->xer = (regs->gpr[rd] & 0xffffffffUL);
190 goto mtspr_out; 900 goto instr_done;
191 case 0x100: /* mtlr */ 901 case 0x100: /* mtlr */
192 regs->link = regs->gpr[rd]; 902 regs->link = regs->gpr[rd];
193 goto mtspr_out; 903 goto instr_done;
194 case 0x120: /* mtctr */ 904 case 0x120: /* mtctr */
195 regs->ctr = regs->gpr[rd]; 905 regs->ctr = regs->gpr[rd];
196mtspr_out: 906 goto instr_done;
197 regs->nip += 4;
198 return 1;
199 } 907 }
908 break;
909
910/*
911 * Compare instructions
912 */
913 case 0: /* cmp */
914 val = regs->gpr[ra];
915 val2 = regs->gpr[rb];
916#ifdef __powerpc64__
917 if ((rd & 1) == 0) {
918 /* word (32-bit) compare */
919 val = (int) val;
920 val2 = (int) val2;
921 }
922#endif
923 do_cmp_signed(regs, val, val2, rd >> 2);
924 goto instr_done;
925
926 case 32: /* cmpl */
927 val = regs->gpr[ra];
928 val2 = regs->gpr[rb];
929#ifdef __powerpc64__
930 if ((rd & 1) == 0) {
931 /* word (32-bit) compare */
932 val = (unsigned int) val;
933 val2 = (unsigned int) val2;
934 }
935#endif
936 do_cmp_unsigned(regs, val, val2, rd >> 2);
937 goto instr_done;
938
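For both compares, rd holds the BF field, so rd >> 2 selects the destination CR field while the low bit is the L (word/doubleword) flag. Assuming the conventional CR layout with field 0 in the top nibble (the exact helper body is not shown in this hunk), the field arithmetic amounts to:

#include <stdio.h>

/* Sketch of placing a compare result in CR field crfd; not the
 * kernel's do_cmp_signed() itself.
 */
static unsigned int set_cr_field(unsigned int ccr, int crfd,
				 unsigned int bits)
{
	int shift = (7 - crfd) * 4;

	return (ccr & ~(0xfu << shift)) | (bits << shift);
}

int main(void)
{
	long a = -5, b = 3;
	unsigned int lt_gt_eq = a < b ? 8 : a > b ? 4 : 2;

	/* cmpd cr7,rA,rB with a < b: CR7 = 0b1000 (LT), so 0x00000008. */
	printf("0x%08x\n", set_cr_field(0, 7, lt_gt_eq));
	return 0;
}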
939/*
940 * Arithmetic instructions
941 */
942 case 8: /* subfc */
943 add_with_carry(regs, rd, ~regs->gpr[ra],
944 regs->gpr[rb], 1);
945 goto arith_done;
946#ifdef __powerpc64__
947 case 9: /* mulhdu */
948 asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
949 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
950 goto arith_done;
951#endif
952 case 10: /* addc */
953 add_with_carry(regs, rd, regs->gpr[ra],
954 regs->gpr[rb], 0);
955 goto arith_done;
956
957 case 11: /* mulhwu */
958 asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
959 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
960 goto arith_done;
961
962 case 40: /* subf */
963 regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
964 goto arith_done;
965#ifdef __powerpc64__
966 case 73: /* mulhd */
967 asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
968 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
969 goto arith_done;
970#endif
971 case 75: /* mulhw */
972 asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
973 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
974 goto arith_done;
975
976 case 104: /* neg */
977 regs->gpr[rd] = -regs->gpr[ra];
978 goto arith_done;
979
980 case 136: /* subfe */
981 add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
982 regs->xer & XER_CA);
983 goto arith_done;
984
985 case 138: /* adde */
986 add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
987 regs->xer & XER_CA);
988 goto arith_done;
989
990 case 200: /* subfze */
991 add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
992 regs->xer & XER_CA);
993 goto arith_done;
994
995 case 202: /* addze */
996 add_with_carry(regs, rd, regs->gpr[ra], 0L,
997 regs->xer & XER_CA);
998 goto arith_done;
999
1000 case 232: /* subfme */
1001 add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
1002 regs->xer & XER_CA);
1003 goto arith_done;
1004#ifdef __powerpc64__
1005 case 233: /* mulld */
1006 regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];
1007 goto arith_done;
1008#endif
1009 case 234: /* addme */
1010 add_with_carry(regs, rd, regs->gpr[ra], -1L,
1011 regs->xer & XER_CA);
1012 goto arith_done;
1013
1014 case 235: /* mullw */
1015 regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
1016 (unsigned int) regs->gpr[rb];
1017 goto arith_done;
1018
1019 case 266: /* add */
1020 regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
1021 goto arith_done;
1022#ifdef __powerpc64__
1023 case 457: /* divdu */
1024 regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];
1025 goto arith_done;
1026#endif
1027 case 459: /* divwu */
1028 regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
1029 (unsigned int) regs->gpr[rb];
1030 goto arith_done;
1031#ifdef __powerpc64__
1032 case 489: /* divd */
1033 regs->gpr[rd] = (long int) regs->gpr[ra] /
1034 (long int) regs->gpr[rb];
1035 goto arith_done;
1036#endif
1037 case 491: /* divw */
1038 regs->gpr[rd] = (int) regs->gpr[ra] /
1039 (int) regs->gpr[rb];
1040 goto arith_done;
1041
1042
1043/*
1044 * Logical instructions
1045 */
1046 case 26: /* cntlzw */
1047 asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
1048 "r" (regs->gpr[rd]));
1049 goto logical_done;
1050#ifdef __powerpc64__
1051 case 58: /* cntlzd */
1052 asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
1053 "r" (regs->gpr[rd]));
1054 goto logical_done;
1055#endif
1056 case 28: /* and */
1057 regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
1058 goto logical_done;
1059
1060 case 60: /* andc */
1061 regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
1062 goto logical_done;
1063
1064 case 124: /* nor */
1065 regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
1066 goto logical_done;
1067
1068 case 284: /* eqv */
1069 regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
1070 goto logical_done;
1071
1072 case 316: /* xor */
1073 regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
1074 goto logical_done;
1075
1076 case 412: /* orc */
1077 regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
1078 goto logical_done;
1079
1080 case 444: /* or */
1081 regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];
1082 goto logical_done;
1083
1084 case 476: /* nand */
1085 regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);
1086 goto logical_done;
1087
1088 case 922: /* extsh */
1089 regs->gpr[ra] = (signed short) regs->gpr[rd];
1090 goto logical_done;
1091
1092 case 954: /* extsb */
1093 regs->gpr[ra] = (signed char) regs->gpr[rd];
1094 goto logical_done;
1095#ifdef __powerpc64__
1096 case 986: /* extsw */
1097 regs->gpr[ra] = (signed int) regs->gpr[rd];
1098 goto logical_done;
1099#endif
1100
1101/*
1102 * Shift instructions
1103 */
1104 case 24: /* slw */
1105 sh = regs->gpr[rb] & 0x3f;
1106 if (sh < 32)
1107 regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
1108 else
1109 regs->gpr[ra] = 0;
1110 goto logical_done;
1111
1112 case 536: /* srw */
1113 sh = regs->gpr[rb] & 0x3f;
1114 if (sh < 32)
1115 regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;
1116 else
1117 regs->gpr[ra] = 0;
1118 goto logical_done;
1119
1120 case 792: /* sraw */
1121 sh = regs->gpr[rb] & 0x3f;
1122 ival = (signed int) regs->gpr[rd];
1123 regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
1124 if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
1125 regs->xer |= XER_CA;
1126 else
1127 regs->xer &= ~XER_CA;
1128 goto logical_done;
1129
1130 case 824: /* srawi */
1131 sh = rb;
1132 ival = (signed int) regs->gpr[rd];
1133 regs->gpr[ra] = ival >> sh;
1134 if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
1135 regs->xer |= XER_CA;
1136 else
1137 regs->xer &= ~XER_CA;
1138 goto logical_done;
1139
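The CA rule for the algebraic shifts (set only when the source is negative and a 1 bit was shifted out) exists so that sraw/srawi followed by addze gives a signed divide by a power of two that rounds toward zero. A small demonstration of that idiom:

#include <assert.h>
#include <stdio.h>

/* srawi-style divide of x by 2^sh (sh < 31 here): shift right
 * algebraic, then add back the carry as addze would, rounding
 * toward zero exactly like C division.
 */
static int sra_div(int x, int sh)
{
	int q = x >> sh;
	int ca = (x < 0) && ((x & ((1 << sh) - 1)) != 0);

	return q + ca;
}

int main(void)
{
	assert(sra_div(-7, 1) == -3);	/* -7 / 2, rounded toward 0 */
	assert(sra_div(-8, 2) == -2);	/* exact: no bits lost, no carry */
	printf("ok\n");
	return 0;
}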
1140#ifdef __powerpc64__
1141 case 27: /* sld */
1142 sh = regs->gpr[rb] & 0x7f;
1143 if (sh < 64)
1144 regs->gpr[ra] = regs->gpr[rd] << sh;
1145 else
1146 regs->gpr[ra] = 0;
1147 goto logical_done;
1148
1149 case 539: /* srd */
1150 sh = regs->gpr[rb] & 0x7f;
1151 if (sh < 64)
1152 regs->gpr[ra] = regs->gpr[rd] >> sh;
1153 else
1154 regs->gpr[ra] = 0;
1155 goto logical_done;
1156
1157 case 794: /* srad */
1158 sh = regs->gpr[rb] & 0x7f;
1159 ival = (signed long int) regs->gpr[rd];
1160 regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
1161 if (ival < 0 && (sh >= 64 || (ival & ((1UL << sh) - 1)) != 0))
1162 regs->xer |= XER_CA;
1163 else
1164 regs->xer &= ~XER_CA;
1165 goto logical_done;
1166
1167 case 826: /* sradi with sh_5 = 0 */
1168 case 827: /* sradi with sh_5 = 1 */
1169 sh = rb | ((instr & 2) << 4);
1170 ival = (signed long int) regs->gpr[rd];
1171 regs->gpr[ra] = ival >> sh;
1172 if (ival < 0 && (ival & ((1UL << sh) - 1)) != 0)
1173 regs->xer |= XER_CA;
1174 else
1175 regs->xer &= ~XER_CA;
1176 goto logical_done;
1177#endif /* __powerpc64__ */
1178
1179/*
1180 * Cache instructions
1181 */
1182 case 54: /* dcbst */
1183 ea = xform_ea(instr, regs, 0);
1184 if (!address_ok(regs, ea, 8))
1185 return 0;
1186 err = 0;
1187 __cacheop_user_asmx(ea, err, "dcbst");
1188 if (err)
1189 return 0;
1190 goto instr_done;
1191
1192 case 86: /* dcbf */
1193 ea = xform_ea(instr, regs, 0);
1194 if (!address_ok(regs, ea, 8))
1195 return 0;
1196 err = 0;
1197 __cacheop_user_asmx(ea, err, "dcbf");
1198 if (err)
1199 return 0;
1200 goto instr_done;
1201
1202 case 246: /* dcbtst */
1203 if (rd == 0) {
1204 ea = xform_ea(instr, regs, 0);
1205 prefetchw((void *) ea);
1206 }
1207 goto instr_done;
1208
1209 case 278: /* dcbt */
1210 if (rd == 0) {
1211 ea = xform_ea(instr, regs, 0);
1212 prefetch((void *) ea);
1213 }
1214 goto instr_done;
1215
200 } 1216 }
1217 break;
201 } 1218 }
202 return 0; 1219
1220 /*
1221 * Following cases are for loads and stores, so bail out
1222 * if we're in little-endian mode.
1223 */
1224 if (regs->msr & MSR_LE)
1225 return 0;
1226
1227 /*
1228 * Save register RA in case it's an update form load or store
1229 * and the access faults.
1230 */
1231 old_ra = regs->gpr[ra];
1232
1233 switch (opcode) {
1234 case 31:
1235 u = instr & 0x40;
1236 switch ((instr >> 1) & 0x3ff) {
1237 case 20: /* lwarx */
1238 ea = xform_ea(instr, regs, 0);
1239 if (ea & 3)
1240 break; /* can't handle misaligned */
1241 err = -EFAULT;
1242 if (!address_ok(regs, ea, 4))
1243 goto ldst_done;
1244 err = 0;
1245 __get_user_asmx(val, ea, err, "lwarx");
1246 if (!err)
1247 regs->gpr[rd] = val;
1248 goto ldst_done;
1249
1250 case 150: /* stwcx. */
1251 ea = xform_ea(instr, regs, 0);
1252 if (ea & 3)
1253 break; /* can't handle misaligned */
1254 err = -EFAULT;
1255 if (!address_ok(regs, ea, 4))
1256 goto ldst_done;
1257 err = 0;
1258 __put_user_asmx(regs->gpr[rd], ea, err, "stwcx.", cr);
1259 if (!err)
1260 regs->ccr = (regs->ccr & 0x0fffffff) |
1261 (cr & 0xe0000000) |
1262 ((regs->xer >> 3) & 0x10000000);
1263 goto ldst_done;
1264
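stwcx./stdcx. are the only stores here that write CR0: the inline asm hands back the real CR, the emulator keeps its LT/GT/EQ bits (cr & 0xe0000000) and splices SO in from the saved XER (XER bit 31 shifted down 3 lands on CR bit 28). Just the bit surgery, with made-up input values:

#include <stdio.h>

#define XER_SO 0x80000000u

int main(void)
{
	unsigned int ccr = 0x12345678;	/* pre-existing CR image */
	unsigned int cr = 0x20000000;	/* CR from stwcx.: EQ set */
	unsigned int xer = XER_SO;	/* summary overflow pending */

	/* Keep CR1-CR7, take LT/GT/EQ from the asm, SO from XER. */
	ccr = (ccr & 0x0fffffff) | (cr & 0xe0000000)
	      | ((xer >> 3) & 0x10000000);
	printf("0x%08x\n", ccr);	/* 0x32345678: EQ|SO in CR0 */
	return 0;
}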
1265#ifdef __powerpc64__
1266 case 84: /* ldarx */
1267 ea = xform_ea(instr, regs, 0);
1268 if (ea & 7)
1269 break; /* can't handle misaligned */
1270 err = -EFAULT;
1271 if (!address_ok(regs, ea, 8))
1272 goto ldst_done;
1273 err = 0;
1274 __get_user_asmx(val, ea, err, "ldarx");
1275 if (!err)
1276 regs->gpr[rd] = val;
1277 goto ldst_done;
1278
1279 case 214: /* stdcx. */
1280 ea = xform_ea(instr, regs, 0);
1281 if (ea & 7)
1282 break; /* can't handle misaligned */
1283 err = -EFAULT;
1284 if (!address_ok(regs, ea, 8))
1285 goto ldst_done;
1286 err = 0;
1287 __put_user_asmx(regs->gpr[rd], ea, err, "stdcx.", cr);
1288 if (!err)
1289 regs->ccr = (regs->ccr & 0x0fffffff) |
1290 (cr & 0xe0000000) |
1291 ((regs->xer >> 3) & 0x10000000);
1292 goto ldst_done;
1293
1294 case 21: /* ldx */
1295 case 53: /* ldux */
1296 err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
1297 8, regs);
1298 goto ldst_done;
1299#endif
1300
1301 case 23: /* lwzx */
1302 case 55: /* lwzux */
1303 err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
1304 4, regs);
1305 goto ldst_done;
1306
1307 case 87: /* lbzx */
1308 case 119: /* lbzux */
1309 err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
1310 1, regs);
1311 goto ldst_done;
1312
1313#ifdef CONFIG_ALTIVEC
1314 case 103: /* lvx */
1315 case 359: /* lvxl */
1316 if (!(regs->msr & MSR_VEC))
1317 break;
1318 ea = xform_ea(instr, regs, 0);
1319 err = do_vec_load(rd, do_lvx, ea, regs);
1320 goto ldst_done;
1321
1322 case 231: /* stvx */
1323 case 487: /* stvxl */
1324 if (!(regs->msr & MSR_VEC))
1325 break;
1326 ea = xform_ea(instr, regs, 0);
1327 err = do_vec_store(rd, do_stvx, ea, regs);
1328 goto ldst_done;
1329#endif /* CONFIG_ALTIVEC */
1330
1331#ifdef __powerpc64__
1332 case 149: /* stdx */
1333 case 181: /* stdux */
1334 val = regs->gpr[rd];
1335 err = write_mem(val, xform_ea(instr, regs, u), 8, regs);
1336 goto ldst_done;
1337#endif
1338
1339 case 151: /* stwx */
1340 case 183: /* stwux */
1341 val = regs->gpr[rd];
1342 err = write_mem(val, xform_ea(instr, regs, u), 4, regs);
1343 goto ldst_done;
1344
1345 case 215: /* stbx */
1346 case 247: /* stbux */
1347 val = regs->gpr[rd];
1348 err = write_mem(val, xform_ea(instr, regs, u), 1, regs);
1349 goto ldst_done;
1350
1351 case 279: /* lhzx */
1352 case 311: /* lhzux */
1353 err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
1354 2, regs);
1355 goto ldst_done;
1356
1357#ifdef __powerpc64__
1358 case 341: /* lwax */
1359 case 373: /* lwaux */
1360 err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
1361 4, regs);
1362 if (!err)
1363 regs->gpr[rd] = (signed int) regs->gpr[rd];
1364 goto ldst_done;
1365#endif
1366
1367 case 343: /* lhax */
1368 case 375: /* lhaux */
1369 err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
1370 2, regs);
1371 if (!err)
1372 regs->gpr[rd] = (signed short) regs->gpr[rd];
1373 goto ldst_done;
1374
1375 case 407: /* sthx */
1376 case 439: /* sthux */
1377 val = regs->gpr[rd];
1378 err = write_mem(val, xform_ea(instr, regs, u), 2, regs);
1379 goto ldst_done;
1380
1381#ifdef __powerpc64__
1382 case 532: /* ldbrx */
1383 err = read_mem(&val, xform_ea(instr, regs, 0), 8, regs);
1384 if (!err)
1385 regs->gpr[rd] = byterev_8(val);
1386 goto ldst_done;
1387
1388#endif
1389
1390 case 534: /* lwbrx */
1391 err = read_mem(&val, xform_ea(instr, regs, 0), 4, regs);
1392 if (!err)
1393 regs->gpr[rd] = byterev_4(val);
1394 goto ldst_done;
1395
1396 case 535: /* lfsx */
1397 case 567: /* lfsux */
1398 if (!(regs->msr & MSR_FP))
1399 break;
1400 ea = xform_ea(instr, regs, u);
1401 err = do_fp_load(rd, do_lfs, ea, 4, regs);
1402 goto ldst_done;
1403
1404 case 599: /* lfdx */
1405 case 631: /* lfdux */
1406 if (!(regs->msr & MSR_FP))
1407 break;
1408 ea = xform_ea(instr, regs, u);
1409 err = do_fp_load(rd, do_lfd, ea, 8, regs);
1410 goto ldst_done;
1411
1412 case 663: /* stfsx */
1413 case 695: /* stfsux */
1414 if (!(regs->msr & MSR_FP))
1415 break;
1416 ea = xform_ea(instr, regs, u);
1417 err = do_fp_store(rd, do_stfs, ea, 4, regs);
1418 goto ldst_done;
1419
1420 case 727: /* stfdx */
1421 case 759: /* stfdux */
1422 if (!(regs->msr & MSR_FP))
1423 break;
1424 ea = xform_ea(instr, regs, u);
1425 err = do_fp_store(rd, do_stfd, ea, 8, regs);
1426 goto ldst_done;
1427
1428#ifdef __powerpc64__
1429 case 660: /* stdbrx */
1430 val = byterev_8(regs->gpr[rd]);
1431 err = write_mem(val, xform_ea(instr, regs, 0), 8, regs);
1432 goto ldst_done;
1433
1434#endif
1435 case 662: /* stwbrx */
1436 val = byterev_4(regs->gpr[rd]);
1437 err = write_mem(val, xform_ea(instr, regs, 0), 4, regs);
1438 goto ldst_done;
1439
1440 case 790: /* lhbrx */
1441 err = read_mem(&val, xform_ea(instr, regs, 0), 2, regs);
1442 if (!err)
1443 regs->gpr[rd] = byterev_2(val);
1444 goto ldst_done;
1445
1446 case 918: /* sthbrx */
1447 val = byterev_2(regs->gpr[rd]);
1448 err = write_mem(val, xform_ea(instr, regs, 0), 2, regs);
1449 goto ldst_done;
1450
1451#ifdef CONFIG_VSX
1452 case 844: /* lxvd2x */
1453 case 876: /* lxvd2ux */
1454 if (!(regs->msr & MSR_VSX))
1455 break;
1456 rd |= (instr & 1) << 5;
1457 ea = xform_ea(instr, regs, u);
1458 err = do_vsx_load(rd, do_lxvd2x, ea, regs);
1459 goto ldst_done;
1460
1461 case 972: /* stxvd2x */
1462 case 1004: /* stxvd2ux */
1463 if (!(regs->msr & MSR_VSX))
1464 break;
1465 rd |= (instr & 1) << 5;
1466 ea = xform_ea(instr, regs, u);
1467 err = do_vsx_store(rd, do_stxvd2x, ea, regs);
1468 goto ldst_done;
1469
1470#endif /* CONFIG_VSX */
1471 }
1472 break;
1473
1474 case 32: /* lwz */
1475 case 33: /* lwzu */
1476 err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 4, regs);
1477 goto ldst_done;
1478
1479 case 34: /* lbz */
1480 case 35: /* lbzu */
1481 err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 1, regs);
1482 goto ldst_done;
1483
1484 case 36: /* stw */
1485 case 37: /* stwu */
1486 val = regs->gpr[rd];
1487 err = write_mem(val, dform_ea(instr, regs), 4, regs);
1488 goto ldst_done;
1489
1490 case 38: /* stb */
1491 case 39: /* stbu */
1492 val = regs->gpr[rd];
1493 err = write_mem(val, dform_ea(instr, regs), 1, regs);
1494 goto ldst_done;
1495
1496 case 40: /* lhz */
1497 case 41: /* lhzu */
1498 err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
1499 goto ldst_done;
1500
1501 case 42: /* lha */
1502 case 43: /* lhau */
1503 err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
1504 if (!err)
1505 regs->gpr[rd] = (signed short) regs->gpr[rd];
1506 goto ldst_done;
1507
1508 case 44: /* sth */
1509 case 45: /* sthu */
1510 val = regs->gpr[rd];
1511 err = write_mem(val, dform_ea(instr, regs), 2, regs);
1512 goto ldst_done;
1513
1514 case 46: /* lmw */
1515 ra = (instr >> 16) & 0x1f;
1516 if (ra >= rd)
1517 break; /* invalid form, ra in range to load */
1518 ea = dform_ea(instr, regs);
1519 do {
1520 err = read_mem(&regs->gpr[rd], ea, 4, regs);
1521 if (err)
1522 return 0;
1523 ea += 4;
1524 } while (++rd < 32);
1525 goto instr_done;
1526
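lmw rD,d(rA) fills rD through r31 from consecutive words, which is why the form with rA inside the destination range is rejected above: the base register could be overwritten before the sequence finishes. A toy model of the valid case:

#include <stdio.h>

int main(void)
{
	unsigned int mem[3] = { 1, 2, 3 };	/* words at ea, ea+4, ea+8 */
	unsigned long gpr[32] = { 0 };
	int rd = 29, ra = 3;

	/* lmw r29,0(r3): fill r29..r31 from successive words. */
	if (ra < rd)	/* valid form: base register not in loaded range */
		for (int i = 0; rd < 32; ++rd, ++i)
			gpr[rd] = mem[i];
	printf("%lu %lu %lu\n", gpr[29], gpr[30], gpr[31]);	/* 1 2 3 */
	return 0;
}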
1527 case 47: /* stmw */
1528 ea = dform_ea(instr, regs);
1529 do {
1530 err = write_mem(regs->gpr[rd], ea, 4, regs);
1531 if (err)
1532 return 0;
1533 ea += 4;
1534 } while (++rd < 32);
1535 goto instr_done;
1536
1537 case 48: /* lfs */
1538 case 49: /* lfsu */
1539 if (!(regs->msr & MSR_FP))
1540 break;
1541 ea = dform_ea(instr, regs);
1542 err = do_fp_load(rd, do_lfs, ea, 4, regs);
1543 goto ldst_done;
1544
1545 case 50: /* lfd */
1546 case 51: /* lfdu */
1547 if (!(regs->msr & MSR_FP))
1548 break;
1549 ea = dform_ea(instr, regs);
1550 err = do_fp_load(rd, do_lfd, ea, 8, regs);
1551 goto ldst_done;
1552
1553 case 52: /* stfs */
1554 case 53: /* stfsu */
1555 if (!(regs->msr & MSR_FP))
1556 break;
1557 ea = dform_ea(instr, regs);
1558 err = do_fp_store(rd, do_stfs, ea, 4, regs);
1559 goto ldst_done;
1560
1561 case 54: /* stfd */
1562 case 55: /* stfdu */
1563 if (!(regs->msr & MSR_FP))
1564 break;
1565 ea = dform_ea(instr, regs);
1566 err = do_fp_store(rd, do_stfd, ea, 8, regs);
1567 goto ldst_done;
1568
1569#ifdef __powerpc64__
1570 case 58: /* ld[u], lwa */
1571 switch (instr & 3) {
1572 case 0: /* ld */
1573 err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
1574 8, regs);
1575 goto ldst_done;
1576 case 1: /* ldu */
1577 err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
1578 8, regs);
1579 goto ldst_done;
1580 case 2: /* lwa */
1581 err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
1582 4, regs);
1583 if (!err)
1584 regs->gpr[rd] = (signed int) regs->gpr[rd];
1585 goto ldst_done;
1586 }
1587 break;
1588
1589 case 62: /* std[u] */
1590 val = regs->gpr[rd];
1591 switch (instr & 3) {
1592 case 0: /* std */
1593 err = write_mem(val, dsform_ea(instr, regs), 8, regs);
1594 goto ldst_done;
1595 case 1: /* stdu */
1596 err = write_mem(val, dsform_ea(instr, regs), 8, regs);
1597 goto ldst_done;
1598 }
1599 break;
1600#endif /* __powerpc64__ */
1601
1602 }
1603 err = -EINVAL;
1604
1605 ldst_done:
1606 if (err) {
1607 regs->gpr[ra] = old_ra;
1608 return 0; /* invoke DSI if -EFAULT? */
1609 }
1610 instr_done:
1611 regs->nip += 4;
1612#ifdef __powerpc64__
1613 if ((regs->msr & MSR_SF) == 0)
1614 regs->nip &= 0xffffffffUL;
1615#endif
1616 return 1;
1617
1618 logical_done:
1619 if (instr & 1)
1620 set_cr0(regs, ra);
1621 goto instr_done;
1622
1623 arith_done:
1624 if (instr & 1)
1625 set_cr0(regs, rd);
1626 goto instr_done;
203} 1627}
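The return contract of emulate_step() is unchanged by all of the above: 1 means the instruction was emulated and regs->nip advanced, 0 means the caller must execute or single-step it for real, and -1 marks instructions that cannot be stepped safely (rfid, or an mtmsr[d] that would clear MSR_RI). A hypothetical caller, sketched in the style of the hw_breakpoint handler this series adds; names and shape assumed, not quoted from the patch:

/* Hypothetical caller sketch, not from the patch. */
static void step_over_breakpoint(struct pt_regs *regs, unsigned int instr)
{
	int ret = emulate_step(regs, instr);

	if (ret > 0)
		return;		/* emulated; nip already points past it */

	/*
	 * ret == 0: not emulated, fall back to a real single step via
	 * MSR_SE. ret < 0 would mean the instruction must not be
	 * stepped at all.
	 */
	if (ret == 0)
		regs->msr |= MSR_SE;
}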
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 7a56b22e0602..71ed3ce29e12 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -242,6 +242,17 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
242} 242}
243 243
244/* 244/*
245 * Function to perform processor-specific cleanup during unregistration
246 */
247__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
248{
249 /*
250 * A weak stub function here for those archs that don't define
251 * it inside arch/.../kernel/hw_breakpoint.c
252 */
253}
254
255/*
245 * Constraints to check before allowing this new breakpoint counter: 256 * Constraints to check before allowing this new breakpoint counter:
246 * 257 *
247 * == Non-pinned counter == (Considered as pinned for now) 258 * == Non-pinned counter == (Considered as pinned for now)
@@ -339,6 +350,7 @@ void release_bp_slot(struct perf_event *bp)
339{ 350{
340 mutex_lock(&nr_bp_mutex); 351 mutex_lock(&nr_bp_mutex);
341 352
353 arch_unregister_hw_breakpoint(bp);
342 __release_bp_slot(bp); 354 __release_bp_slot(bp);
343 355
344 mutex_unlock(&nr_bp_mutex); 356 mutex_unlock(&nr_bp_mutex);
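Since arch_unregister_hw_breakpoint() is declared __weak, an architecture overrides it simply by providing a strong definition of the same name; the linker prefers it over the stub, with no registration step needed. The powerpc definition added elsewhere in this series clears the thread's cached breakpoint pointer, roughly:

/* Indicative shape of the strong override in
 * arch/powerpc/kernel/hw_breakpoint.c; see that file for the exact body.
 */
void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * If the breakpoint is unregistered between hitting it and
	 * single-stepping over it, drop the thread's cached pointer
	 * so it is never dereferenced after the event is gone.
	 */
	if (bp->ctx->task)
		bp->ctx->task->thread.last_hit_ubp = NULL;
}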