Diffstat (limited to 'kernel')

-rw-r--r--  kernel/events/core.c           64
-rw-r--r--  kernel/events/hw_breakpoint.c  11
-rw-r--r--  kernel/sched/core.c            34
-rw-r--r--  kernel/sched/fair.c             9
-rw-r--r--  kernel/sched/rt.c               1
-rw-r--r--  kernel/sched/sched.h            1
-rw-r--r--  kernel/time/tick-sched.c        1
7 files changed, 61 insertions(+), 60 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b7935fcec7d9..7fee567153f0 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1253,7 +1253,7 @@ retry:
 /*
  * Cross CPU call to disable a performance event
  */
-static int __perf_event_disable(void *info)
+int __perf_event_disable(void *info)
 {
 	struct perf_event *event = info;
 	struct perf_event_context *ctx = event->ctx;
@@ -2935,12 +2935,12 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 /*
  * Called when the last reference to the file is gone.
  */
-static int perf_release(struct inode *inode, struct file *file)
+static void put_event(struct perf_event *event)
 {
-	struct perf_event *event = file->private_data;
 	struct task_struct *owner;
 
-	file->private_data = NULL;
+	if (!atomic_long_dec_and_test(&event->refcount))
+		return;
 
 	rcu_read_lock();
 	owner = ACCESS_ONCE(event->owner);
@@ -2975,7 +2975,13 @@ static int perf_release(struct inode *inode, struct file *file)
 		put_task_struct(owner);
 	}
 
-	return perf_event_release_kernel(event);
+	perf_event_release_kernel(event);
+}
+
+static int perf_release(struct inode *inode, struct file *file)
+{
+	put_event(file->private_data);
+	return 0;
 }
 
 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
@@ -3227,7 +3233,7 @@ unlock:
 
 static const struct file_operations perf_fops;
 
-static struct perf_event *perf_fget_light(int fd, int *fput_needed)
+static struct file *perf_fget_light(int fd, int *fput_needed)
 {
 	struct file *file;
 
@@ -3241,7 +3247,7 @@ static struct perf_event *perf_fget_light(int fd, int *fput_needed)
 		return ERR_PTR(-EBADF);
 	}
 
-	return file->private_data;
+	return file;
 }
 
 static int perf_event_set_output(struct perf_event *event,
@@ -3273,19 +3279,21 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 	case PERF_EVENT_IOC_SET_OUTPUT:
 	{
+		struct file *output_file = NULL;
 		struct perf_event *output_event = NULL;
 		int fput_needed = 0;
 		int ret;
 
 		if (arg != -1) {
-			output_event = perf_fget_light(arg, &fput_needed);
-			if (IS_ERR(output_event))
-				return PTR_ERR(output_event);
+			output_file = perf_fget_light(arg, &fput_needed);
+			if (IS_ERR(output_file))
+				return PTR_ERR(output_file);
+			output_event = output_file->private_data;
 		}
 
 		ret = perf_event_set_output(event, output_event);
 		if (output_event)
-			fput_light(output_event->filp, fput_needed);
+			fput_light(output_file, fput_needed);
 
 		return ret;
 	}
@@ -5950,6 +5958,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
 	mutex_init(&event->mmap_mutex);
 
+	atomic_long_set(&event->refcount, 1);
 	event->cpu = cpu;
 	event->attr = *attr;
 	event->group_leader = group_leader;
@@ -6260,12 +6269,12 @@ SYSCALL_DEFINE5(perf_event_open,
 		return event_fd;
 
 	if (group_fd != -1) {
-		group_leader = perf_fget_light(group_fd, &fput_needed);
-		if (IS_ERR(group_leader)) {
-			err = PTR_ERR(group_leader);
+		group_file = perf_fget_light(group_fd, &fput_needed);
+		if (IS_ERR(group_file)) {
+			err = PTR_ERR(group_file);
 			goto err_fd;
 		}
-		group_file = group_leader->filp;
+		group_leader = group_file->private_data;
 		if (flags & PERF_FLAG_FD_OUTPUT)
 			output_event = group_leader;
 		if (flags & PERF_FLAG_FD_NO_GROUP)
@@ -6402,7 +6411,6 @@ SYSCALL_DEFINE5(perf_event_open,
 		put_ctx(gctx);
 	}
 
-	event->filp = event_file;
 	WARN_ON_ONCE(ctx->parent_ctx);
 	mutex_lock(&ctx->mutex);
 
@@ -6496,7 +6504,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 		goto err_free;
 	}
 
-	event->filp = NULL;
 	WARN_ON_ONCE(ctx->parent_ctx);
 	mutex_lock(&ctx->mutex);
 	perf_install_in_context(ctx, event, cpu);
@@ -6578,7 +6585,7 @@ static void sync_child_event(struct perf_event *child_event,
 	 * Release the parent event, if this was the last
 	 * reference to it.
 	 */
-	fput(parent_event->filp);
+	put_event(parent_event);
 }
 
 static void
@@ -6654,9 +6661,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	 *
 	 *   __perf_event_exit_task()
 	 *     sync_child_event()
-	 *       fput(parent_event->filp)
-	 *         perf_release()
-	 *           mutex_lock(&ctx->mutex)
+	 *       put_event()
+	 *         mutex_lock(&ctx->mutex)
 	 *
 	 * But since its the parent context it won't be the same instance.
 	 */
@@ -6724,7 +6730,7 @@ static void perf_free_event(struct perf_event *event,
 	list_del_init(&event->child_list);
 	mutex_unlock(&parent->child_mutex);
 
-	fput(parent->filp);
+	put_event(parent);
 
 	perf_group_detach(event);
 	list_del_event(event, ctx);
@@ -6804,6 +6810,12 @@ inherit_event(struct perf_event *parent_event,
 					   NULL, NULL);
 	if (IS_ERR(child_event))
 		return child_event;
+
+	if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
+		free_event(child_event);
+		return NULL;
+	}
+
 	get_ctx(child_ctx);
 
 	/*
@@ -6845,14 +6857,6 @@ inherit_event(struct perf_event *parent_event,
 	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
 
 	/*
-	 * Get a reference to the parent filp - we will fput it
-	 * when the child event exits. This is safe to do because
-	 * we are in the parent and we know that the filp still
-	 * exists and has a nonzero count:
-	 */
-	atomic_long_inc(&parent_event->filp->f_count);
-
-	/*
 	 * Link this into the parent event's child list
 	 */
 	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
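Note: the core.c hunks above replace the old lifetime scheme, in which each perf_event pinned its file (event->filp) and was only torn down from perf_release(), with a dedicated refcount on the event itself: perf_event_alloc() starts it at 1, put_event() drops it and releases the event on the final put, and inherit_event() may only take a reference with atomic_long_inc_not_zero() while the parent is still alive. A minimal userspace sketch of that pattern follows; C11 atomics stand in for the kernel's atomic_long_t, and the type and helper names here are illustrative, not kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct event {
	atomic_long refcount;	/* plays the role of perf_event::refcount */
	/* ... event payload ... */
};

/* Allocation hands out the initial reference, as perf_event_alloc() now does. */
static struct event *event_alloc(void)
{
	struct event *e = calloc(1, sizeof(*e));

	if (e)
		atomic_init(&e->refcount, 1);
	return e;
}

/* Take a reference only while the object is still live (inc_not_zero). */
static bool event_get_unless_zero(struct event *e)
{
	long old = atomic_load(&e->refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&e->refcount, &old, old + 1))
			return true;
	}
	return false;
}

/* Drop a reference; only the final put releases the object (dec_and_test). */
static void event_put(struct event *e)
{
	if (atomic_fetch_sub(&e->refcount, 1) == 1)
		free(e);	/* stands in for perf_event_release_kernel() */
}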
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index bb38c4d3ee12..9a7b487c6fe2 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -453,7 +453,16 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att
 	int old_type = bp->attr.bp_type;
 	int err = 0;
 
-	perf_event_disable(bp);
+	/*
+	 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
+	 * will not be possible to raise IPIs that invoke __perf_event_disable.
+	 * So call the function directly after making sure we are targeting the
+	 * current task.
+	 */
+	if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
+		__perf_event_disable(bp);
+	else
+		perf_event_disable(bp);
 
 	bp->attr.bp_addr = attr->bp_addr;
 	bp->attr.bp_type = attr->bp_type;
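For context on the hw_breakpoint.c hunk: perf_event_disable() may need a cross-CPU call to reach the CPU where the event is scheduled, and such a call cannot be issued with interrupts off. When the breakpoint belongs to the current task, its context is active on this CPU, so the per-CPU helper can be called directly; this is also why core.c above drops the `static` from __perf_event_disable(). The added branch, restated here with comments purely for readability (not additional kernel code):

	/* IRQs off: an IPI to run __perf_event_disable() elsewhere is impossible. */
	if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
		__perf_event_disable(bp);	/* event context is active on this CPU */
	else
		perf_event_disable(bp);		/* normal path, may use a cross-CPU call */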
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fbf1fd098dc6..a4ea245f3d85 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5304,27 +5304,17 @@ void idle_task_exit(void)
 }
 
 /*
- * While a dead CPU has no uninterruptible tasks queued at this point,
- * it might still have a nonzero ->nr_uninterruptible counter, because
- * for performance reasons the counter is not stricly tracking tasks to
- * their home CPUs. So we just add the counter to another CPU's counter,
- * to keep the global sum constant after CPU-down:
- */
-static void migrate_nr_uninterruptible(struct rq *rq_src)
-{
-	struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
-
-	rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
-	rq_src->nr_uninterruptible = 0;
-}
-
-/*
- * remove the tasks which were accounted by rq from calc_load_tasks.
+ * Since this CPU is going 'away' for a while, fold any nr_active delta
+ * we might have. Assumes we're called after migrate_tasks() so that the
+ * nr_active count is stable.
+ *
+ * Also see the comment "Global load-average calculations".
  */
-static void calc_global_load_remove(struct rq *rq)
+static void calc_load_migrate(struct rq *rq)
 {
-	atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
-	rq->calc_load_active = 0;
+	long delta = calc_load_fold_active(rq);
+	if (delta)
+		atomic_long_add(delta, &calc_load_tasks);
 }
 
 /*
@@ -5352,9 +5342,6 @@ static void migrate_tasks(unsigned int dead_cpu)
 	 */
 	rq->stop = NULL;
 
-	/* Ensure any throttled groups are reachable by pick_next_task */
-	unthrottle_offline_cfs_rqs(rq);
-
 	for ( ; ; ) {
 		/*
 		 * There's this thread running, bail when that's the only
@@ -5618,8 +5605,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		BUG_ON(rq->nr_running != 1); /* the migration thread */
 		raw_spin_unlock_irqrestore(&rq->lock, flags);
 
-		migrate_nr_uninterruptible(rq);
-		calc_global_load_remove(rq);
+		calc_load_migrate(rq);
 		break;
 #endif
 	}
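The sched/core.c hunks swap two CPU-down helpers for one: instead of dumping the dead CPU's nr_uninterruptible onto another runqueue and subtracting its calc_load_active from the global sum, calc_load_migrate() folds whatever active-count delta the runqueue still holds into calc_load_tasks. A simplified, self-contained model of that folding is sketched below; the field and function names mirror the diff, but the bodies are illustrative rather than the kernel implementation.

#include <stdatomic.h>

static atomic_long calc_load_tasks;	/* global contribution to the load average */

struct rq {
	long nr_running;
	long nr_uninterruptible;
	long calc_load_active;		/* last value this rq folded into the global sum */
};

/* Return how much this rq's active count moved since it was last folded. */
static long calc_load_fold_active(struct rq *rq)
{
	long nr_active = rq->nr_running + rq->nr_uninterruptible;
	long delta = nr_active - rq->calc_load_active;

	rq->calc_load_active = nr_active;
	return delta;
}

/* Replacement for migrate_nr_uninterruptible() + calc_global_load_remove(). */
static void calc_load_migrate(struct rq *rq)
{
	long delta = calc_load_fold_active(rq);

	if (delta)
		atomic_fetch_add(&calc_load_tasks, delta);
}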
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c219bf8d704c..42d9df6a5ca4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2052,7 +2052,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 	hrtimer_cancel(&cfs_b->slack_timer);
 }
 
-void unthrottle_offline_cfs_rqs(struct rq *rq)
+static void unthrottle_offline_cfs_rqs(struct rq *rq)
 {
 	struct cfs_rq *cfs_rq;
 
@@ -2106,7 +2106,7 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
 	return NULL;
 }
 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
-void unthrottle_offline_cfs_rqs(struct rq *rq) {}
+static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
 
 #endif /* CONFIG_CFS_BANDWIDTH */
 
@@ -3658,7 +3658,6 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
  * @group: sched_group whose statistics are to be updated.
  * @load_idx: Load index of sched_domain of this_cpu for load calc.
  * @local_group: Does group contain this_cpu.
- * @cpus: Set of cpus considered for load balancing.
  * @balance: Should we balance.
  * @sgs: variable to hold the statistics for this group.
  */
@@ -3805,7 +3804,6 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 /**
  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
  * @env: The load balancing environment.
- * @cpus: Set of cpus considered for load balancing.
  * @balance: Should we balance.
  * @sds: variable to hold the statistics for this sched_domain.
  */
@@ -4956,6 +4954,9 @@ static void rq_online_fair(struct rq *rq)
 static void rq_offline_fair(struct rq *rq)
 {
 	update_sysctl();
+
+	/* Ensure any throttled groups are reachable by pick_next_task */
+	unthrottle_offline_cfs_rqs(rq);
 }
 
 #endif /* CONFIG_SMP */
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 944cb68420e9..e0b7ba9c040f 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -691,6 +691,7 @@ balanced:
 		 * runtime - in which case borrowing doesn't make sense.
 		 */
 		rt_rq->rt_runtime = RUNTIME_INF;
+		rt_rq->rt_throttled = 0;
 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 		raw_spin_unlock(&rt_b->rt_runtime_lock);
 	}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f6714d009e77..0848fa36c383 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1144,7 +1144,6 @@ extern void print_rt_stats(struct seq_file *m, int cpu);
 
 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
 extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
-extern void unthrottle_offline_cfs_rqs(struct rq *rq);
 
 extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 024540f97f74..3a9e5d5c1091 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -573,6 +573,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
 	tick_do_update_jiffies64(now);
 	update_cpu_load_nohz();
 
+	calc_load_exit_idle();
 	touch_softlockup_watchdog();
 	/*
 	 * Cancel the scheduled timer and restore the tick