author		Thomas Gleixner <tglx@linutronix.de>	2016-09-20 17:20:32 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2016-09-20 17:20:32 -0400
commit		464b5847e61085f81bb99ce48eb427a0dc7617dc
tree		805c97855a9a13c06910687bbbbe3eb7bc371902 /kernel
parent		0a30d69195604f136a4e3bfaf453f742e583ce95
parent		e875bd66dfb68f4e898e9a43ef42858c504a7f23
Merge branch 'irq/urgent' into irq/core
Merge urgent fixes so pending patches for 4.9 can be applied.
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/events/core.c		47
-rw-r--r--	kernel/events/ring_buffer.c	15
-rw-r--r--	kernel/irq/chip.c		 8
-rw-r--r--	kernel/sched/core.c		22
4 files changed, 76 insertions, 16 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 3cfabdf7b942..a54f2c2cdb20 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2496,11 +2496,11 @@ static int __perf_event_stop(void *info)
 	return 0;
 }
 
-static int perf_event_restart(struct perf_event *event)
+static int perf_event_stop(struct perf_event *event, int restart)
 {
 	struct stop_event_data sd = {
 		.event = event,
-		.restart = 1,
+		.restart = restart,
 	};
 	int ret = 0;
 
@@ -3549,10 +3549,18 @@ static int perf_event_read(struct perf_event *event, bool group)
 			.group = group,
 			.ret = 0,
 		};
-		ret = smp_call_function_single(event->oncpu, __perf_event_read, &data, 1);
-		/* The event must have been read from an online CPU: */
-		WARN_ON_ONCE(ret);
-		ret = ret ? : data.ret;
+		/*
+		 * Purposely ignore the smp_call_function_single() return
+		 * value.
+		 *
+		 * If event->oncpu isn't a valid CPU it means the event got
+		 * scheduled out and that will have updated the event count.
+		 *
+		 * Therefore, either way, we'll have an up-to-date event count
+		 * after this.
+		 */
+		(void)smp_call_function_single(event->oncpu, __perf_event_read, &data, 1);
+		ret = data.ret;
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
 		struct perf_event_context *ctx = event->ctx;
 		unsigned long flags;
@@ -4837,6 +4845,19 @@ static void ring_buffer_attach(struct perf_event *event,
 		spin_unlock_irqrestore(&rb->event_lock, flags);
 	}
 
+	/*
+	 * Avoid racing with perf_mmap_close(AUX): stop the event
+	 * before swizzling the event::rb pointer; if it's getting
+	 * unmapped, its aux_mmap_count will be 0 and it won't
+	 * restart. See the comment in __perf_pmu_output_stop().
+	 *
+	 * Data will inevitably be lost when set_output is done in
+	 * mid-air, but then again, whoever does it like this is
+	 * not in for the data anyway.
+	 */
+	if (has_aux(event))
+		perf_event_stop(event, 0);
+
 	rcu_assign_pointer(event->rb, rb);
 
 	if (old_rb) {
@@ -6112,7 +6133,7 @@ static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
 	raw_spin_unlock_irqrestore(&ifh->lock, flags);
 
 	if (restart)
-		perf_event_restart(event);
+		perf_event_stop(event, 1);
 }
 
 void perf_event_exec(void)
@@ -6156,7 +6177,13 @@ static void __perf_event_output_stop(struct perf_event *event, void *data)
 
 	/*
 	 * In case of inheritance, it will be the parent that links to the
-	 * ring-buffer, but it will be the child that's actually using it:
+	 * ring-buffer, but it will be the child that's actually using it.
+	 *
+	 * We are using event::rb to determine if the event should be stopped,
+	 * however this may race with ring_buffer_attach() (through set_output),
+	 * which will make us skip the event that actually needs to be stopped.
+	 * So ring_buffer_attach() has to stop an aux event before re-assigning
+	 * its rb pointer.
 	 */
 	if (rcu_dereference(parent->rb) == rb)
 		ro->err = __perf_event_stop(&sd);
@@ -6670,7 +6697,7 @@ static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
 	raw_spin_unlock_irqrestore(&ifh->lock, flags);
 
 	if (restart)
-		perf_event_restart(event);
+		perf_event_stop(event, 1);
 }
 
 /*
@@ -7859,7 +7886,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
 	mmput(mm);
 
 restart:
-	perf_event_restart(event);
+	perf_event_stop(event, 1);
 }
 
 /*
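
The two comments added above (in ring_buffer_attach() and __perf_event_output_stop()) describe an ordering rule: the output-stop path only stops events it still finds pointing at the old ring buffer, so set_output must stop an AUX event before re-assigning event::rb. Below is a minimal userspace sketch of that rule, with hypothetical names and C11 atomics standing in for the kernel primitives; it is an analogy, not perf code.

/* build: cc -std=c11 -o swizzle swizzle.c */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct buffer { const char *name; };

struct writer {
	_Atomic(struct buffer *) rb;	/* which buffer the writer targets (~ event::rb) */
	atomic_bool running;
};

/* "perf_mmap_close" side: stop writers still attached to 'old' */
static void close_buffer(struct writer *w, struct buffer *old)
{
	if (atomic_load(&w->rb) == old)		/* misses w if rb was already swizzled */
		atomic_store(&w->running, false);
}

/* "ring_buffer_attach" side: stop first, then re-point */
static void attach(struct writer *w, struct buffer *newbuf)
{
	atomic_store(&w->running, false);	/* ~ perf_event_stop(event, 0) */
	atomic_store(&w->rb, newbuf);
}

int main(void)
{
	struct buffer oldbuf = { "old" }, newbuf = { "new" };
	struct writer w = { .rb = &oldbuf, .running = true };

	/*
	 * If attach() swizzled w.rb before stopping the writer, a concurrent
	 * close_buffer(&w, &oldbuf) could miss it and leave it running
	 * against a buffer that is being torn down.
	 */
	attach(&w, &newbuf);
	close_buffer(&w, &oldbuf);

	printf("writer targets %s, running=%d\n",
	       atomic_load(&w.rb)->name, atomic_load(&w.running));
	return 0;
}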
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index ae9b90dc9a5a..257fa460b846 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -330,15 +330,22 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
 	if (!rb)
 		return NULL;
 
-	if (!rb_has_aux(rb) || !atomic_inc_not_zero(&rb->aux_refcount))
+	if (!rb_has_aux(rb))
 		goto err;
 
 	/*
-	 * If rb::aux_mmap_count is zero (and rb_has_aux() above went through),
-	 * the aux buffer is in perf_mmap_close(), about to get freed.
+	 * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
+	 * about to get freed, so we leave immediately.
+	 *
+	 * Checking rb::aux_mmap_count and rb::refcount has to be done in
+	 * the same order, see perf_mmap_close. Otherwise we end up freeing
+	 * aux pages in this path, which is a bug, because in_atomic().
 	 */
 	if (!atomic_read(&rb->aux_mmap_count))
-		goto err_put;
+		goto err;
+
+	if (!atomic_inc_not_zero(&rb->aux_refcount))
+		goto err;
 
 	/*
 	 * Nesting is not supported for AUX area, make sure nested
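
The reordered checks above separate "is the mapping still alive?" (aux_mmap_count) from "take a reference" (atomic_inc_not_zero(&rb->aux_refcount)), and do them in the same order as perf_mmap_close(), so this interrupt-context path never ends up holding the last reference and freeing AUX pages while in_atomic(). A rough userspace sketch of the inc-not-zero idiom itself, assuming C11 atomics and hypothetical names:

/* build: cc -std=c11 -o refcnt refcnt.c */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Userspace stand-in for the kernel's atomic_inc_not_zero(): take a
 * reference only if the object still has one, so a dying object
 * (count already 0) is never resurrected.
 */
static bool inc_not_zero(atomic_int *refcount)
{
	int old = atomic_load(refcount);

	do {
		if (old == 0)
			return false;	/* object is going away, don't touch it */
	} while (!atomic_compare_exchange_weak(refcount, &old, old + 1));

	return true;
}

int main(void)
{
	atomic_int ref = 1;

	printf("got ref: %d\n", inc_not_zero(&ref));	/* 1: count went 1 -> 2 */
	atomic_store(&ref, 0);
	printf("got ref: %d\n", inc_not_zero(&ref));	/* 0: count already 0 */
	return 0;
}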
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 18f29586f230..d8dfdc630b7e 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -829,6 +829,8 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
 	desc->name = name;
 
 	if (handle != handle_bad_irq && is_chained) {
+		unsigned int type = irqd_get_trigger_type(&desc->irq_data);
+
 		/*
 		 * We're about to start this interrupt immediately,
 		 * hence the need to set the trigger configuration.
@@ -837,8 +839,10 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
 		 * chained interrupt. Reset it immediately because we
 		 * do know better.
 		 */
-		__irq_set_trigger(desc, irqd_get_trigger_type(&desc->irq_data));
-		desc->handle_irq = handle;
+		if (type != IRQ_TYPE_NONE) {
+			__irq_set_trigger(desc, type);
+			desc->handle_irq = handle;
+		}
 
 		irq_settings_set_noprobe(desc);
 		irq_settings_set_norequest(desc);
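
The guard added above only programs a trigger when the recorded type carries real information; writing back IRQ_TYPE_NONE would otherwise clobber a configuration that was already correct. A trivial stand-alone sketch of that guard pattern, with hypothetical names unrelated to the genirq API:

/* build: cc -std=c11 -o trigger trigger.c */
#include <stdio.h>

/* "none" means "no information", so applying it must not overwrite a valid setting. */
enum trigger { TRIGGER_NONE, TRIGGER_EDGE_RISING, TRIGGER_LEVEL_HIGH };

struct line { enum trigger trigger; };

static void install_handler(struct line *l, enum trigger requested)
{
	/* mirrors: if (type != IRQ_TYPE_NONE) __irq_set_trigger(desc, type); */
	if (requested != TRIGGER_NONE)
		l->trigger = requested;
}

int main(void)
{
	struct line l = { .trigger = TRIGGER_LEVEL_HIGH };	/* configured earlier */

	install_handler(&l, TRIGGER_NONE);	/* must not clobber the existing trigger */
	printf("trigger is still %d\n", l.trigger);
	return 0;
}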
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2a906f20fba7..44817c640e99 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2016,6 +2016,28 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	success = 1; /* we're going to change ->state */
 	cpu = task_cpu(p);
 
+	/*
+	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
+	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
+	 * in smp_cond_load_acquire() below.
+	 *
+	 * sched_ttwu_pending()			try_to_wake_up()
+	 *   [S] p->on_rq = 1;			[L] P->state
+	 *       UNLOCK rq->lock  -----.
+	 *                              \
+	 *                               +---   RMB
+	 * schedule()                   /
+	 *       LOCK rq->lock    -----'
+	 *       UNLOCK rq->lock
+	 *
+	 * [task p]
+	 *   [S] p->state = UNINTERRUPTIBLE	[L] p->on_rq
+	 *
+	 * Pairs with the UNLOCK+LOCK on rq->lock from the
+	 * last wakeup of our task and the schedule that got our task
+	 * current.
+	 */
+	smp_rmb();
 	if (p->on_rq && ttwu_remote(p, wake_flags))
 		goto stat;
 
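
The new smp_rmb() orders the p->state load before the p->on_rq load, pairing with the write ordering the other side gets from the UNLOCK+LOCK on rq->lock. A loose userspace analogy of that load/store pairing, assuming C11 fences and hypothetical names (this is not scheduler code):

/* build: cc -std=c11 -pthread -o rmb rmb.c */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

/*
 * Two flags published by the "sleeping" side and read by the "waking"
 * side. The read-side fence plays the role of smp_rmb() (keep the two
 * loads in order); the release fence on the writer side stands in for
 * the UNLOCK+LOCK ordering it pairs with.
 */
static atomic_int state;	/* ~ p->state (1 == "about to sleep") */
static atomic_int on_rq;	/* ~ p->on_rq (1 == "still on a runqueue") */

static void *task_side(void *arg)
{
	(void)arg;
	atomic_store_explicit(&on_rq, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* ~ UNLOCK rq->lock */
	atomic_store_explicit(&state, 1, memory_order_relaxed);
	return NULL;
}

static void *waker_side(void *arg)
{
	(void)arg;
	if (atomic_load_explicit(&state, memory_order_relaxed)) {
		/* ~ smp_rmb(): the on_rq load must not float above the state load */
		atomic_thread_fence(memory_order_acquire);
		/* having seen state == 1, we are guaranteed to see on_rq == 1 here */
		printf("saw state=1, on_rq=%d\n",
		       atomic_load_explicit(&on_rq, memory_order_relaxed));
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, task_side, NULL);
	pthread_create(&b, NULL, waker_side, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}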