Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--	arch/arm/kernel/calls.S          |  3 +++
-rw-r--r--	arch/arm/kernel/entry-common.S   |  4 ++++
-rw-r--r--	arch/arm/kernel/etm.c            |  2 +-
-rw-r--r--	arch/arm/kernel/kprobes-decode.c |  7 ++++---
-rw-r--r--	arch/arm/kernel/perf_event.c     | 12 ++++++------
5 files changed, 18 insertions(+), 10 deletions(-)
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index afeb71fa72cb..5c26eccef998 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -376,6 +376,9 @@
 		CALL(sys_perf_event_open)
 /* 365 */	CALL(sys_recvmmsg)
 		CALL(sys_accept4)
+		CALL(sys_fanotify_init)
+		CALL(sys_fanotify_mark)
+		CALL(sys_prlimit64)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
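The three new entries are absorbed by the padding logic visible in the context lines: ((NR_syscalls + 3) & ~3) rounds the syscall count up to the next multiple of 4, so the table stays aligned however many calls are appended. A minimal sketch of that arithmetic in C; the count 369 and the helper name pad_entries are illustrative, not taken from the kernel headers:

#include <stdio.h>

/* Same rounding trick as the .equ in calls.S above: round n up to a
 * multiple of 4; the difference is the number of pad slots needed. */
static unsigned int pad_entries(unsigned int nr_syscalls)
{
	return ((nr_syscalls + 3) & ~3u) - nr_syscalls;
}

int main(void)
{
	unsigned int nr = 369;	/* hypothetical NR_syscalls */

	printf("NR_syscalls=%u -> %u padding slot(s)\n", nr, pad_entries(nr));
	return 0;
}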
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index f05a35a59694..7885722bdf4e 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -48,6 +48,8 @@ work_pending:
 	beq	no_work_pending
 	mov	r0, sp				@ 'regs'
 	mov	r2, why				@ 'syscall'
+	tst	r1, #_TIF_SIGPENDING		@ delivering a signal?
+	movne	why, #0				@ prevent further restarts
 	bl	do_notify_resume
 	b	ret_slow_syscall		@ Check work again
 
@@ -418,11 +420,13 @@ ENDPROC(sys_clone_wrapper)
 
 sys_sigreturn_wrapper:
 		add	r0, sp, #S_OFF
+		mov	why, #0		@ prevent syscall restart handling
 		b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)
 
 sys_rt_sigreturn_wrapper:
 		add	r0, sp, #S_OFF
+		mov	why, #0		@ prevent syscall restart handling
 		b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
 
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 56418f98cd01..33c7077174db 100644
--- a/arch/arm/kernel/etm.c
+++ b/arch/arm/kernel/etm.c
@@ -230,7 +230,7 @@ static void etm_dump(void)
 	etb_lock(t);
 }
 
-static void sysrq_etm_dump(int key, struct tty_struct *tty)
+static void sysrq_etm_dump(int key)
 {
 	dev_dbg(tracer.dev, "Dumping ETB buffer\n");
 	etm_dump();
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
index 8bccbfa693ff..2c1f0050c9c4 100644
--- a/arch/arm/kernel/kprobes-decode.c
+++ b/arch/arm/kernel/kprobes-decode.c
@@ -1162,11 +1162,12 @@ space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 {
 	/*
 	 * MSR   : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx
-	 * Undef : cccc 0011 0x00 xxxx xxxx xxxx xxxx xxxx
+	 * Undef : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx
 	 * ALU op with S bit and Rd == 15 :
 	 *	   cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx
 	 */
-	if ((insn & 0x0f900000) == 0x03200000 ||	/* MSR & Undef */
+	if ((insn & 0x0fb00000) == 0x03200000 ||	/* MSR */
+	    (insn & 0x0ff00000) == 0x03400000 ||	/* Undef */
 	    (insn & 0x0e10f000) == 0x0210f000)		/* ALU s-bit, R15 */
 		return INSN_REJECTED;
 
@@ -1177,7 +1178,7 @@ space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 	 * *S (bit 20) updates condition codes
 	 * ADC/SBC/RSC reads the C flag
 	 */
-	insn &= 0xfff00fff;	/* Rn = r0, Rd = r0 */
+	insn &= 0xffff0fff;	/* Rd = r0 */
 	asi->insn[0] = insn;
 	asi->insn_handler = (insn & (1 << 20)) ?	/* S-bit */
 			emulate_alu_imm_rwflags : emulate_alu_imm_rflags;
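The split rejection test can be exercised on its own. A small, self-contained sketch of the new decode conditions follows; the sample opcodes are illustrative and not taken from the patch (0xe321f013 encodes MSR CPSR_c, #0x13, and 0xe3a00001 encodes MOV r0, #1):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the corrected test in space_cccc_001x(): MSR and the 0x34
 * "Undef" bit pattern are now matched by separate masks. */
static int rejected(uint32_t insn)
{
	return (insn & 0x0fb00000) == 0x03200000 ||	/* MSR */
	       (insn & 0x0ff00000) == 0x03400000 ||	/* Undef */
	       (insn & 0x0e10f000) == 0x0210f000;	/* ALU S-bit, Rd == r15 */
}

int main(void)
{
	printf("0xe321f013 (MSR) rejected: %d\n", rejected(0xe321f013));	/* prints 1 */
	printf("0xe3a00001 (MOV) rejected: %d\n", rejected(0xe3a00001));	/* prints 0 */
	return 0;
}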
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 417c392ddf1c..ecbb0288e5dd 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -319,8 +319,8 @@ validate_event(struct cpu_hw_events *cpuc,
 {
 	struct hw_perf_event fake_event = event->hw;
 
-	if (event->pmu && event->pmu != &pmu)
-		return 0;
+	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+		return 1;
 
 	return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
 }
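The effect of the reworked guard can be sketched outside the kernel: an event owned by another PMU, or one already switched off, now reports "fits" (1) instead of "does not fit" (0), so a group mixing hardware and software events is no longer rejected outright. Everything below is an illustrative stand-in for the kernel structures, not the real perf API:

#include <stdio.h>

#define PERF_EVENT_STATE_OFF	(-1)	/* stand-in for the kernel enum value */

struct fake_pmu { int unused; };
struct fake_event {
	const struct fake_pmu *pmu;	/* owning PMU */
	int state;
};

static const struct fake_pmu cpu_pmu;	/* stand-in for the ARM &pmu */

/* Returns 1 when the event does not block the group; the real code
 * would go on to ask armpmu->get_event_idx() whether a counter is free. */
static int validate_event(const struct fake_event *event)
{
	if (event->pmu != &cpu_pmu || event->state <= PERF_EVENT_STATE_OFF)
		return 1;

	return 1;	/* pretend a hardware counter was available */
}

int main(void)
{
	struct fake_event sw_event = { .pmu = NULL, .state = 1 };

	printf("software event blocks group: %d\n", !validate_event(&sw_event));	/* prints 0 */
	return 0;
}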
@@ -1041,8 +1041,8 @@ armv6pmu_handle_irq(int irq_num,
 	/*
 	 * Handle the pending perf events.
 	 *
-	 * Note: this call *must* be run with interrupts enabled. For
-	 * platforms that can have the PMU interrupts raised as a PMI, this
+	 * Note: this call *must* be run with interrupts disabled. For
+	 * platforms that can have the PMU interrupts raised as an NMI, this
 	 * will not work.
 	 */
 	perf_event_do_pending();
@@ -2017,8 +2017,8 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	/*
 	 * Handle the pending perf events.
 	 *
-	 * Note: this call *must* be run with interrupts enabled. For
-	 * platforms that can have the PMU interrupts raised as a PMI, this
+	 * Note: this call *must* be run with interrupts disabled. For
+	 * platforms that can have the PMU interrupts raised as an NMI, this
 	 * will not work.
 	 */
 	perf_event_do_pending();