path: root/arch/sparc/kernel/unaligned_64.c
Diffstat (limited to 'arch/sparc/kernel/unaligned_64.c')
-rw-r--r--  arch/sparc/kernel/unaligned_64.c  |  27
1 file changed, 10 insertions(+), 17 deletions(-)
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index b2b019ea8caa..76e4ac1a13e1 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -22,6 +22,7 @@
 #include <linux/bitops.h>
 #include <linux/perf_event.h>
 #include <linux/ratelimit.h>
+#include <linux/bitops.h>
 #include <asm/fpumacro.h>
 
 enum direction {
@@ -317,7 +318,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 
 	addr = compute_effective_address(regs, insn,
 					 ((insn >> 25) & 0x1f));
-	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
+	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
 	switch (asi) {
 	case ASI_NL:
 	case ASI_AIUPL:
@@ -373,18 +374,13 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 	}
 }
 
-static char popc_helper[] = {
-0, 1, 1, 2, 1, 2, 2, 3,
-1, 2, 2, 3, 2, 3, 3, 4,
-};
-
 int handle_popc(u32 insn, struct pt_regs *regs)
 {
-	u64 value;
-	int ret, i, rd = ((insn >> 25) & 0x1f);
 	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
+	int ret, rd = ((insn >> 25) & 0x1f);
+	u64 value;
 
-	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 	if (insn & 0x2000) {
 		maybe_flush_windows(0, 0, rd, from_kernel);
 		value = sign_extend_imm13(insn);
@@ -392,10 +388,7 @@ int handle_popc(u32 insn, struct pt_regs *regs)
 		maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
 		value = fetch_reg(insn & 0x1f, regs);
 	}
-	for (ret = 0, i = 0; i < 16; i++) {
-		ret += popc_helper[value & 0xf];
-		value >>= 4;
-	}
+	ret = hweight64(value);
 	if (rd < 16) {
 		if (rd)
 			regs->u_regs[rd] = ret;
@@ -431,7 +424,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
 	int asi = decode_asi(insn, regs);
 	int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
 
-	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
 	save_and_clear_fpu();
 	current_thread_info()->xfsr[0] &= ~0x1c000;
@@ -554,7 +547,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs)
 	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
 	unsigned long *reg;
 
-	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
 	maybe_flush_windows(0, 0, rd, from_kernel);
 	reg = fetch_reg_addr(rd, regs);
@@ -586,7 +579,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 
 	if (tstate & TSTATE_PRIV)
 		die_if_kernel("lddfmna from kernel", regs);
-	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar);
+	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
 	if (test_thread_flag(TIF_32BIT))
 		pc = (u32)pc;
 	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
@@ -647,7 +640,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 
 	if (tstate & TSTATE_PRIV)
 		die_if_kernel("stdfmna from kernel", regs);
-	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar);
+	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
 	if (test_thread_flag(TIF_32BIT))
 		pc = (u32)pc;
 	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
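
Note: the handle_popc() hunks above replace the hand-rolled nibble lookup table with the kernel's generic hweight64() population-count helper (hence the added <linux/bitops.h> include), while the remaining hunks simply track a perf_sw_event() prototype change by dropping the call's third argument. As a minimal userspace sketch of why the popc change is behavior-preserving, the removed table walk and a compiler builtin (standing in here for hweight64(), which is kernel-only) return the same bit count for any 64-bit value:

/* Sketch only: the popc_helper table walk removed by this diff versus a
 * compiler builtin used as a stand-in for the kernel's hweight64(). */
#include <assert.h>
#include <stdint.h>

static const char popc_helper[16] = {
	0, 1, 1, 2, 1, 2, 2, 3,
	1, 2, 2, 3, 2, 3, 3, 4,
};

/* Old approach: sum the popcount of each of the value's 16 nibbles. */
static int popc_table(uint64_t value)
{
	int ret, i;

	for (ret = 0, i = 0; i < 16; i++) {
		ret += popc_helper[value & 0xf];
		value >>= 4;
	}
	return ret;
}

int main(void)
{
	uint64_t v = 0xdeadbeefcafef00dULL;

	/* Both paths must agree on the number of set bits. */
	assert(popc_table(v) == __builtin_popcountll(v));
	return 0;
}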