Diffstat (limited to 'kernel')
 kernel/dma/debug.c          |  2 +-
 kernel/events/core.c        | 52 +++++++++++++++++++++++++++++++++++++++++++++++++++---------
 kernel/events/ring_buffer.c |  4 ++--
 kernel/irq/chip.c           |  4 ++++
 kernel/irq/irqdesc.c        |  1 +
 kernel/locking/lockdep.c    | 29 ++++++++++++-----------------
 kernel/sched/fair.c         |  6 +++---
 kernel/time/alarmtimer.c    |  2 +-
 kernel/trace/trace.c        |  6 +++++-
 9 files changed, 72 insertions(+), 34 deletions(-)
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 45d51e8e26f6..a218e43cc382 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -706,7 +706,7 @@ static struct dma_debug_entry *dma_entry_alloc(void)
 #ifdef CONFIG_STACKTRACE
 	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
 	entry->stacktrace.entries = entry->st_entries;
-	entry->stacktrace.skip = 2;
+	entry->stacktrace.skip = 1;
 	save_stack_trace(&entry->stacktrace);
 #endif
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 72d06e302e99..534e01e7bc36 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2009,8 +2009,8 @@ event_sched_out(struct perf_event *event,
 	event->pmu->del(event, 0);
 	event->oncpu = -1;
 
-	if (event->pending_disable) {
-		event->pending_disable = 0;
+	if (READ_ONCE(event->pending_disable) >= 0) {
+		WRITE_ONCE(event->pending_disable, -1);
 		state = PERF_EVENT_STATE_OFF;
 	}
 	perf_event_set_state(event, state);
@@ -2198,7 +2198,8 @@ EXPORT_SYMBOL_GPL(perf_event_disable);
 
 void perf_event_disable_inatomic(struct perf_event *event)
 {
-	event->pending_disable = 1;
+	WRITE_ONCE(event->pending_disable, smp_processor_id());
+	/* can fail, see perf_pending_event_disable() */
 	irq_work_queue(&event->pending);
 }
 
@@ -5810,10 +5811,45 @@ void perf_event_wakeup(struct perf_event *event)
 	}
 }
 
+static void perf_pending_event_disable(struct perf_event *event)
+{
+	int cpu = READ_ONCE(event->pending_disable);
+
+	if (cpu < 0)
+		return;
+
+	if (cpu == smp_processor_id()) {
+		WRITE_ONCE(event->pending_disable, -1);
+		perf_event_disable_local(event);
+		return;
+	}
+
+	/*
+	 *  CPU-A			CPU-B
+	 *
+	 *  perf_event_disable_inatomic()
+	 *    @pending_disable = CPU-A;
+	 *    irq_work_queue();
+	 *
+	 *  sched-out
+	 *    @pending_disable = -1;
+	 *
+	 *				sched-in
+	 *				perf_event_disable_inatomic()
+	 *				  @pending_disable = CPU-B;
+	 *				  irq_work_queue(); // FAILS
+	 *
+	 *  irq_work_run()
+	 *    perf_pending_event()
+	 *
+	 *  But the event runs on CPU-B and wants disabling there.
+	 */
+	irq_work_queue_on(&event->pending, cpu);
+}
+
 static void perf_pending_event(struct irq_work *entry)
 {
-	struct perf_event *event = container_of(entry,
-			struct perf_event, pending);
+	struct perf_event *event = container_of(entry, struct perf_event, pending);
 	int rctx;
 
 	rctx = perf_swevent_get_recursion_context();
@@ -5822,10 +5858,7 @@ static void perf_pending_event(struct irq_work *entry)
 	 * and we won't recurse 'further'.
 	 */
 
-	if (event->pending_disable) {
-		event->pending_disable = 0;
-		perf_event_disable_local(event);
-	}
+	perf_pending_event_disable(event);
 
 	if (event->pending_wakeup) {
 		event->pending_wakeup = 0;
@@ -10236,6 +10269,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
 
 	init_waitqueue_head(&event->waitq);
+	event->pending_disable = -1;
 	init_irq_work(&event->pending, perf_pending_event);
 
 	mutex_init(&event->mmap_mutex);
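
The hunks above replace the pending_disable boolean with the id of the CPU that must carry out the disable: -1 means nothing is pending, and when the irq_work fires on a different CPU, perf_pending_event_disable() re-queues it to the recorded CPU with irq_work_queue_on(). A minimal userspace sketch of that handoff, using C11 atomics in place of READ_ONCE()/WRITE_ONCE(); all names here are illustrative, not the kernel's:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int pending_disable = -1;		/* -1: nothing pending */

	static void request_disable(int this_cpu)
	{
		atomic_store(&pending_disable, this_cpu);	/* ~WRITE_ONCE() */
		/* kernel: irq_work_queue(&event->pending); can fail if queued */
	}

	static void pending_work(int this_cpu)
	{
		int cpu = atomic_load(&pending_disable);	/* ~READ_ONCE() */

		if (cpu < 0)
			return;				/* nothing pending */
		if (cpu == this_cpu) {
			atomic_store(&pending_disable, -1);
			printf("disable on CPU %d\n", this_cpu);
			return;
		}
		/* wrong CPU: kernel re-queues with irq_work_queue_on(..., cpu) */
		printf("re-queue to CPU %d\n", cpu);
	}

	int main(void)
	{
		request_disable(1);	/* disable requested while on CPU 1 */
		pending_work(0);	/* work fires on CPU 0: must re-queue */
		pending_work(1);	/* work fires on CPU 1: performs disable */
		return 0;
	}
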
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index a4047321d7d8..2545ac08cc77 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -392,7 +392,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
 	 * store that will be enabled on successful return
 	 */
 	if (!handle->size) { /* A, matches D */
-		event->pending_disable = 1;
+		event->pending_disable = smp_processor_id();
 		perf_output_wakeup(handle);
 		local_set(&rb->aux_nest, 0);
 		goto err_put;
@@ -480,7 +480,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
 
 	if (wakeup) {
 		if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
-			handle->event->pending_disable = 1;
+			handle->event->pending_disable = smp_processor_id();
 		perf_output_wakeup(handle);
 	}
 
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 3faef4a77f71..51128bea3846 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -1449,6 +1449,10 @@ int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
 int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
 {
 	data = data->parent_data;
+
+	if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
+		return 0;
+
 	if (data->chip->irq_set_wake)
 		return data->chip->irq_set_wake(data, on);
 
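
The added check makes the parent path honor IRQCHIP_SKIP_SET_WAKE: a parent chip that deliberately opts out of wake configuration must report success rather than fall through to the function's -ENOSYS tail. A hedged userspace sketch of the same decision logic; the struct and the flag's bit value are invented for illustration:

	#include <errno.h>
	#include <stddef.h>

	#define IRQCHIP_SKIP_SET_WAKE	(1u << 0)	/* illustrative bit value */

	struct chip_sketch {
		unsigned int flags;
		int (*irq_set_wake)(void *data, unsigned int on);
	};

	static int set_wake_parent_sketch(struct chip_sketch *chip, void *data,
					  unsigned int on)
	{
		if (chip->flags & IRQCHIP_SKIP_SET_WAKE)
			return 0;			/* opted out: success */
		if (chip->irq_set_wake)
			return chip->irq_set_wake(data, on);
		return -ENOSYS;				/* genuinely unsupported */
	}

	int main(void)
	{
		struct chip_sketch skip = { .flags = IRQCHIP_SKIP_SET_WAKE };

		/* without the flag check this would wrongly report -ENOSYS */
		return set_wake_parent_sketch(&skip, NULL, 1);
	}
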
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 13539e12cd80..9f8a709337cf 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -558,6 +558,7 @@ int __init early_irq_init(void)
 		alloc_masks(&desc[i], node);
 		raw_spin_lock_init(&desc[i].lock);
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
+		mutex_init(&desc[i].request_mutex);
 		desc_set_defaults(i, &desc[i], node, NULL, NULL);
 	}
 	return arch_early_irq_init();
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 34cdcbedda49..e16766ff184b 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4689,8 +4689,8 @@ static void free_zapped_rcu(struct rcu_head *ch)
 		return;
 
 	raw_local_irq_save(flags);
-	if (!graph_lock())
-		goto out_irq;
+	arch_spin_lock(&lockdep_lock);
+	current->lockdep_recursion = 1;
 
 	/* closed head */
 	pf = delayed_free.pf + (delayed_free.index ^ 1);
@@ -4702,8 +4702,8 @@ static void free_zapped_rcu(struct rcu_head *ch)
 	 */
 	call_rcu_zapped(delayed_free.pf + delayed_free.index);
 
-	graph_unlock();
-out_irq:
+	current->lockdep_recursion = 0;
+	arch_spin_unlock(&lockdep_lock);
 	raw_local_irq_restore(flags);
 }
 
@@ -4744,21 +4744,17 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 {
 	struct pending_free *pf;
 	unsigned long flags;
-	int locked;
 
 	init_data_structures_once();
 
 	raw_local_irq_save(flags);
-	locked = graph_lock();
-	if (!locked)
-		goto out_irq;
-
+	arch_spin_lock(&lockdep_lock);
+	current->lockdep_recursion = 1;
 	pf = get_pending_free();
 	__lockdep_free_key_range(pf, start, size);
 	call_rcu_zapped(pf);
-
-	graph_unlock();
-out_irq:
+	current->lockdep_recursion = 0;
+	arch_spin_unlock(&lockdep_lock);
 	raw_local_irq_restore(flags);
 
 	/*
@@ -4911,9 +4907,8 @@ void lockdep_unregister_key(struct lock_class_key *key)
 		return;
 
 	raw_local_irq_save(flags);
-	if (!graph_lock())
-		goto out_irq;
-
+	arch_spin_lock(&lockdep_lock);
+	current->lockdep_recursion = 1;
 	pf = get_pending_free();
 	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
 		if (k == key) {
@@ -4925,8 +4920,8 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	WARN_ON_ONCE(!found);
 	__lockdep_free_key_range(pf, key, 1);
 	call_rcu_zapped(pf);
-	graph_unlock();
-out_irq:
+	current->lockdep_recursion = 0;
+	arch_spin_unlock(&lockdep_lock);
 	raw_local_irq_restore(flags);
 
 	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
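
All of the lockdep hunks replace graph_lock()/graph_unlock() with an unconditional arch_spin_lock() on lockdep_lock plus a hand-rolled lockdep_recursion guard; graph_lock() fails once debug_locks has been cleared, and the zapped classes queued for freeing would then never be released. A userspace sketch of the adopted shape, with an atomic_flag standing in for the arch spinlock (names illustrative):

	#include <stdatomic.h>

	static atomic_flag lockdep_lock_sketch = ATOMIC_FLAG_INIT;
	static _Thread_local int lockdep_recursion_sketch;

	static void zap_pending_sketch(void)
	{
		/* take the lock unconditionally: cleanup must not be skipped */
		while (atomic_flag_test_and_set_explicit(&lockdep_lock_sketch,
							 memory_order_acquire))
			;			/* spin, like arch_spin_lock() */
		lockdep_recursion_sketch = 1;	/* keep lockdep out of itself */

		/* ... free the zapped lock classes here ... */

		lockdep_recursion_sketch = 0;
		atomic_flag_clear_explicit(&lockdep_lock_sketch,
					   memory_order_release);
	}

	int main(void)
	{
		zap_pending_sketch();
		return 0;
	}
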
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fdab7eb6f351..40bd1e27b1b7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7784,10 +7784,10 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
 	if (cfs_rq->last_h_load_update == now)
 		return;
 
-	cfs_rq->h_load_next = NULL;
+	WRITE_ONCE(cfs_rq->h_load_next, NULL);
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
-		cfs_rq->h_load_next = se;
+		WRITE_ONCE(cfs_rq->h_load_next, se);
 		if (cfs_rq->last_h_load_update == now)
 			break;
 	}
@@ -7797,7 +7797,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
 		cfs_rq->last_h_load_update = now;
 	}
 
-	while ((se = cfs_rq->h_load_next) != NULL) {
+	while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
 		load = cfs_rq->h_load;
 		load = div64_ul(load * se->avg.load_avg,
 				cfs_rq_load_avg(cfs_rq) + 1);
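
cfs_rq->h_load_next is a cursor written and read by concurrent, lockless walks, so the change pairs every store with WRITE_ONCE() and the load with READ_ONCE() to rule out torn or compiler-cached accesses to the pointer. A rough userspace analogue of the pairing using C11 relaxed atomics (the node type is illustrative):

	#include <stdatomic.h>
	#include <stddef.h>

	struct node {
		_Atomic(struct node *) next;	/* plays the role of h_load_next */
	};

	static void publish(struct node *parent, struct node *child)
	{
		/* like WRITE_ONCE(cfs_rq->h_load_next, se): one full store */
		atomic_store_explicit(&parent->next, child, memory_order_relaxed);
	}

	static struct node *follow(struct node *n)
	{
		/* like READ_ONCE(cfs_rq->h_load_next): one load, no tearing */
		return atomic_load_explicit(&n->next, memory_order_relaxed);
	}

	int main(void)
	{
		struct node a = { NULL }, b = { NULL };

		publish(&a, &b);
		return follow(&a) == &b ? 0 : 1;
	}
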
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 2c97e8c2d29f..0519a8805aab 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -594,7 +594,7 @@ static ktime_t alarm_timer_remaining(struct k_itimer *timr, ktime_t now)
 {
 	struct alarm *alarm = &timr->it.alarm.alarmtimer;
 
-	return ktime_sub(now, alarm->node.expires);
+	return ktime_sub(alarm->node.expires, now);
 }
 
 /**
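
The one-line fix inverts the subtraction: the time remaining until expiry is expires - now, so the old argument order handed back a negated ktime_t. A trivial check, with plain signed 64-bit nanoseconds standing in for ktime_t:

	#include <assert.h>
	#include <stdint.h>

	static int64_t remaining_ns(int64_t expires, int64_t now)
	{
		return expires - now;	/* ktime_sub(alarm->node.expires, now) */
	}

	int main(void)
	{
		/* expiry at 10s, now 4s: 6s remain; the old order gave -6s */
		assert(remaining_ns(10000000000LL, 4000000000LL) == 6000000000LL);
		return 0;
	}
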
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 21153e64bf1c..6c24755655c7 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -7041,12 +7041,16 @@ static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
 	buf->private = 0;
 }
 
-static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
+static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 				struct pipe_buffer *buf)
 {
 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
 
+	if (ref->ref > INT_MAX/2)
+		return false;
+
 	ref->ref++;
+	return true;
 }
 
 /* Pipe buffer operations for a buffer. */
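
buffer_pipe_buf_get() now reports failure through a bool return so the pipe code can refuse a new reference instead of letting ref wrap; the guard trips well before overflow, at INT_MAX/2. A small sketch of such a saturating get (the struct is a stand-in, not the kernel's buffer_ref):

	#include <limits.h>
	#include <stdbool.h>

	struct ref_sketch {
		int ref;
	};

	static bool ref_get(struct ref_sketch *r)
	{
		if (r->ref > INT_MAX / 2)
			return false;	/* refuse: caller must not use the buffer */
		r->ref++;
		return true;
	}

	int main(void)
	{
		struct ref_sketch r = { .ref = INT_MAX / 2 + 1 };

		return ref_get(&r) ? 1 : 0;	/* the get must be refused */
	}
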