Diffstat (limited to 'kernel')

 -rw-r--r--  kernel/events/core.c            |  8
 -rw-r--r--  kernel/events/uprobes.c         |  1
 -rw-r--r--  kernel/power/Kconfig            |  1
 -rw-r--r--  kernel/sched/core.c             | 71
 -rw-r--r--  kernel/sched/deadline.c         |  2
 -rw-r--r--  kernel/sched/fair.c             | 14
 -rw-r--r--  kernel/sched/idle_task.c        |  5
 -rw-r--r--  kernel/sched/rt.c               |  2
 -rw-r--r--  kernel/sched/sched.h            |  2
 -rw-r--r--  kernel/sched/stop_task.c        |  5
 -rw-r--r--  kernel/time/posix-cpu-timers.c  |  2

 11 files changed, 63 insertions, 50 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2b02c9fda790..1cd5eef1fcdd 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1562,8 +1562,10 @@ static void perf_remove_from_context(struct perf_event *event, bool detach_group
 
         if (!task) {
                 /*
-                 * Per cpu events are removed via an smp call and
-                 * the removal is always successful.
+                 * Per cpu events are removed via an smp call. The removal can
+                 * fail if the CPU is currently offline, but in that case we
+                 * already called __perf_remove_from_context from
+                 * perf_event_exit_cpu.
                  */
                 cpu_function_call(event->cpu, __perf_remove_from_context, &re);
                 return;
@@ -8117,7 +8119,7 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
 
 static void __perf_event_exit_context(void *__info)
 {
-        struct remove_event re = { .detach_group = false };
+        struct remove_event re = { .detach_group = true };
         struct perf_event_context *ctx = __info;
 
         perf_pmu_rotate_stop(ctx->pmu);
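
A note on the rewritten comment (a sketch of the surrounding logic, not part of the patch): cpu_function_call() does not run the callback on an offline CPU, it returns an error instead, and the caller deliberately ignores that error because, as the comment states, the hotplug path has already detached the context. In context, the pattern looks roughly like this:

        if (!task) {
                /*
                 * If event->cpu is offline, cpu_function_call() fails without
                 * running __perf_remove_from_context(); that is fine, because
                 * perf_event_exit_cpu() already removed the events there.
                 */
                cpu_function_call(event->cpu, __perf_remove_from_context, &re);
                return;
        }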
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 1d0af8a2c646..ed8f2cde34c5 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1640,7 +1640,6 @@ bool uprobe_deny_signal(void)
                 if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
                         utask->state = UTASK_SSTEP_TRAPPED;
                         set_tsk_thread_flag(t, TIF_UPROBE);
-                        set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
                 }
         }
 
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index bbef57f5bdfd..1eb7da7bc8e8 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -308,4 +308,3 @@ config PM_GENERIC_DOMAINS_OF
 
 config CPU_PM
         bool
-        depends on SUSPEND || CPU_IDLE
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 240157c13ddc..89e7283015a6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2475,44 +2475,6 @@ EXPORT_PER_CPU_SYMBOL(kstat);
 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
 
 /*
- * Return any ns on the sched_clock that have not yet been accounted in
- * @p in case that task is currently running.
- *
- * Called with task_rq_lock() held on @rq.
- */
-static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
-{
-        u64 ns = 0;
-
-        /*
-         * Must be ->curr _and_ ->on_rq. If dequeued, we would
-         * project cycles that may never be accounted to this
-         * thread, breaking clock_gettime().
-         */
-        if (task_current(rq, p) && task_on_rq_queued(p)) {
-                update_rq_clock(rq);
-                ns = rq_clock_task(rq) - p->se.exec_start;
-                if ((s64)ns < 0)
-                        ns = 0;
-        }
-
-        return ns;
-}
-
-unsigned long long task_delta_exec(struct task_struct *p)
-{
-        unsigned long flags;
-        struct rq *rq;
-        u64 ns = 0;
-
-        rq = task_rq_lock(p, &flags);
-        ns = do_task_delta_exec(p, rq);
-        task_rq_unlock(rq, p, &flags);
-
-        return ns;
-}
-
-/*
  * Return accounted runtime for the task.
  * In case the task is currently running, return the runtime plus current's
  * pending runtime that have not been accounted yet.
@@ -2521,7 +2483,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 {
         unsigned long flags;
         struct rq *rq;
-        u64 ns = 0;
+        u64 ns;
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
         /*
@@ -2540,7 +2502,16 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 #endif
 
         rq = task_rq_lock(p, &flags);
-        ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
+        /*
+         * Must be ->curr _and_ ->on_rq. If dequeued, we would
+         * project cycles that may never be accounted to this
+         * thread, breaking clock_gettime().
+         */
+        if (task_current(rq, p) && task_on_rq_queued(p)) {
+                update_rq_clock(rq);
+                p->sched_class->update_curr(rq);
+        }
+        ns = p->se.sum_exec_runtime;
         task_rq_unlock(rq, p, &flags);
 
         return ns;
@@ -2903,10 +2874,14 @@ asmlinkage __visible void __sched schedule_user(void)
          * or we have been woken up remotely but the IPI has not yet arrived,
          * we haven't yet exited the RCU idle mode. Do it here manually until
          * we find a better solution.
+         *
+         * NB: There are buggy callers of this function. Ideally we
+         * should warn if prev_state != IN_USER, but that will trigger
+         * too frequently to make sense yet.
          */
-        user_exit();
+        enum ctx_state prev_state = exception_enter();
         schedule();
-        user_enter();
+        exception_exit(prev_state);
 }
 #endif
 
@@ -6368,6 +6343,10 @@ static void sched_init_numa(void)
                 if (!sched_debug())
                         break;
         }
+
+        if (!level)
+                return;
+
         /*
          * 'level' contains the number of unique distances, excluding the
          * identity distance node_distance(i,i).
@@ -7444,8 +7423,12 @@ void sched_move_task(struct task_struct *tsk)
         if (unlikely(running))
                 put_prev_task(rq, tsk);
 
-        tg = container_of(task_css_check(tsk, cpu_cgrp_id,
-                                lockdep_is_held(&tsk->sighand->siglock)),
+        /*
+         * All callers are synchronized by task_rq_lock(); we do not use RCU
+         * which is pointless here. Thus, we pass "true" to task_css_check()
+         * to prevent lockdep warnings.
+         */
+        tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
                           struct task_group, css);
         tg = autogroup_task_group(tsk, tg);
         tsk->sched_task_group = tg;
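
Taken together, the core.c hunks above replace the do_task_delta_exec()/task_delta_exec() side channel with a single rule: a reader that needs an exact runtime first asks the task's scheduling class to account any pending delta, then reads p->se.sum_exec_runtime. Condensed from the task_sched_runtime() hunk (a sketch of the resulting read path, not additional code):

        rq = task_rq_lock(p, &flags);
        if (task_current(rq, p) && task_on_rq_queued(p)) {
                update_rq_clock(rq);
                /* fold the pending delta into p->se.sum_exec_runtime */
                p->sched_class->update_curr(rq);
        }
        ns = p->se.sum_exec_runtime;    /* now includes the accounted delta */
        task_rq_unlock(rq, p, &flags);

Because the delta is accounted rather than merely reported, every consumer of sum_exec_runtime (including the posix-cpu-timers change at the end of this series) observes the same value.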
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 5285332392d5..28fa9d9e9201 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1701,4 +1701,6 @@ const struct sched_class dl_sched_class = {
         .prio_changed = prio_changed_dl,
         .switched_from = switched_from_dl,
         .switched_to = switched_to_dl,
+
+        .update_curr = update_curr_dl,
 };
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 34baa60f8a7b..ef2b104b254c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -726,6 +726,11 @@ static void update_curr(struct cfs_rq *cfs_rq)
         account_cfs_rq_runtime(cfs_rq, delta_exec);
 }
 
+static void update_curr_fair(struct rq *rq)
+{
+        update_curr(cfs_rq_of(&rq->curr->se));
+}
+
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
@@ -1180,6 +1185,13 @@ static void task_numa_compare(struct task_numa_env *env,
         raw_spin_unlock_irq(&dst_rq->lock);
 
         /*
+         * Because we have preemption enabled we can get migrated around and
+         * end try selecting ourselves (current == env->p) as a swap candidate.
+         */
+        if (cur == env->p)
+                goto unlock;
+
+        /*
          * "imp" is the fault differential for the source task between the
          * source and destination node. Calculate the total differential for
          * the source task and potential destination task. The more negative
@@ -7949,6 +7961,8 @@ const struct sched_class fair_sched_class = {
 
         .get_rr_interval = get_rr_interval_fair,
 
+        .update_curr = update_curr_fair,
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
         .task_move_group = task_move_group_fair,
 #endif
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 67ad4e7f506a..c65dac8c97cd 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -75,6 +75,10 @@ static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task
         return 0;
 }
 
+static void update_curr_idle(struct rq *rq)
+{
+}
+
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
@@ -101,4 +105,5 @@ const struct sched_class idle_sched_class = {
 
         .prio_changed = prio_changed_idle,
         .switched_to = switched_to_idle,
+        .update_curr = update_curr_idle,
 };
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index d024e6ce30ba..20bca398084a 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2128,6 +2128,8 @@ const struct sched_class rt_sched_class = {
 
         .prio_changed = prio_changed_rt,
         .switched_to = switched_to_rt,
+
+        .update_curr = update_curr_rt,
 };
 
 #ifdef CONFIG_SCHED_DEBUG
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 24156c8434d1..2df8ef067cc5 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1135,6 +1135,8 @@ struct sched_class {
         unsigned int (*get_rr_interval) (struct rq *rq,
                                          struct task_struct *task);
 
+        void (*update_curr) (struct rq *rq);
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
         void (*task_move_group) (struct task_struct *p, int on_rq);
 #endif
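
With update_curr added to struct sched_class, every scheduling class is expected to supply the callback, since task_sched_runtime() invokes it unconditionally for a queued, running task. A minimal sketch of what a class must provide is below; update_curr_mine and my_sched_class are hypothetical names used only for illustration, and classes with nothing to account can use an empty stub, exactly as idle_sched_class and stop_sched_class do elsewhere in this diff:

        static void update_curr_mine(struct rq *rq)
        {
                /*
                 * Fold rq->curr's pending execution time into
                 * rq->curr->se.sum_exec_runtime, as update_curr_fair(),
                 * update_curr_rt() and update_curr_dl() do for their classes.
                 */
        }

        const struct sched_class my_sched_class = {
                /* ... the other callbacks ... */
                .update_curr = update_curr_mine,
        };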
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 67426e529f59..79ffec45a6ac 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -102,6 +102,10 @@ get_rr_interval_stop(struct rq *rq, struct task_struct *task)
         return 0;
 }
 
+static void update_curr_stop(struct rq *rq)
+{
+}
+
 /*
  * Simple, special scheduling class for the per-CPU stop tasks:
  */
@@ -128,4 +132,5 @@ const struct sched_class stop_sched_class = {
 
         .prio_changed = prio_changed_stop,
         .switched_to = switched_to_stop,
+        .update_curr = update_curr_stop,
 };
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 492b986195d5..a16b67859e2a 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -553,7 +553,7 @@ static int cpu_timer_sample_group(const clockid_t which_clock,
                 *sample = cputime_to_expires(cputime.utime);
                 break;
         case CPUCLOCK_SCHED:
-                *sample = cputime.sum_exec_runtime + task_delta_exec(p);
+                *sample = cputime.sum_exec_runtime;
                 break;
         }
         return 0;
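
Dropping task_delta_exec() here is safe because the CPUCLOCK_SCHED sample now comes from sum_exec_runtime, which the reworked task_sched_runtime() path keeps fully accounted. A small userspace sanity check for the property this is meant to preserve (process CPU time never appearing to go backwards); purely illustrative and not part of the patch:

        /* build: gcc -O2 -o cputime-check cputime-check.c */
        #include <stdio.h>
        #include <stdlib.h>
        #include <time.h>

        static long long to_ns(const struct timespec *ts)
        {
                return ts->tv_sec * 1000000000LL + ts->tv_nsec;
        }

        int main(void)
        {
                struct timespec prev, cur;
                volatile unsigned long spin = 0;
                int i;

                clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &prev);
                for (i = 0; i < 1000000; i++) {
                        spin += i;      /* burn a little CPU between samples */
                        clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cur);
                        if (to_ns(&cur) < to_ns(&prev)) {
                                fprintf(stderr, "CPU time went backwards at iteration %d\n", i);
                                return EXIT_FAILURE;
                        }
                        prev = cur;
                }
                puts("ok: CPU clock monotonic across 1000000 samples");
                return EXIT_SUCCESS;
        }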