Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c       |  2 +-
-rw-r--r--  kernel/sched/cputime.c    |  2 +-
-rw-r--r--  kernel/sched/deadline.c   |  2 +-
-rw-r--r--  kernel/sched/fair.c       |  8 ++++----
-rw-r--r--  kernel/sched/isolation.c  | 14 +++++++-------
-rw-r--r--  kernel/sched/sched.h      |  4 ++--
6 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8050f266751a..e4ca15d75541 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2857,7 +2857,7 @@ unsigned long nr_running(void)
  * preemption, thus the result might have a time-of-check-to-time-of-use
  * race. The caller is responsible to use it correctly, for example:
  *
- * - from a non-preemptable section (of course)
+ * - from a non-preemptible section (of course)
  *
  * - from a thread that is bound to a single CPU
  *
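
The hunk context suggests this comment documents single_task_running(), whose answer can go stale unless the caller pins the check down. As a hedged illustration of the first pattern the list recommends (the consumer function is hypothetical, not from this commit):

    /* Sketch: make the check-then-act sequence non-preemptible so the
     * CPU cannot change between the check and the use. */
    preempt_disable();
    if (single_task_running())
            kick_polling_fast_path();       /* hypothetical consumer */
    preempt_enable();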
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 0796f938c4f0..ba4a143bdcf3 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -525,7 +525,7 @@ void account_idle_ticks(unsigned long ticks)
 
 /*
  * Perform (stime * rtime) / total, but avoid multiplication overflow by
- * loosing precision when the numbers are big.
+ * losing precision when the numbers are big.
  */
 static u64 scale_stime(u64 stime, u64 rtime, u64 total)
 {
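
The trick the comment names can be sketched in plain C: when the 64-bit product would overflow, halve rtime and total together so their ratio, and thus the result, is approximately preserved. This is a simplified illustration, not the kernel's exact scale_stime(), which also rebalances bits between stime and rtime before dropping precision:

    #include <stdint.h>

    static uint64_t scale_approx(uint64_t stime, uint64_t rtime, uint64_t total)
    {
            /* Drop low-order bits only while stime * rtime would overflow. */
            while (stime && rtime > UINT64_MAX / stime) {
                    rtime >>= 1;
                    total >>= 1;
            }
            return total ? stime * rtime / total : 0;
    }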
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 470ba6b464fe..b32bc1f7cd14 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -727,7 +727,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
  * refill the runtime and set the deadline a period in the future,
  * because keeping the current (absolute) deadline of the task would
  * result in breaking guarantees promised to other tasks (refer to
- * Documentation/scheduler/sched-deadline.txt for more informations).
+ * Documentation/scheduler/sched-deadline.txt for more information).
  *
  * This function returns true if:
  *
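
The condition the comment goes on to state is the bandwidth (CBS) overflow check: a waking task may keep its old absolute deadline only if its residual bandwidth, runtime / (deadline - t), does not exceed its reservation. A hedged sketch of that test, cross-multiplied to avoid division (the kernel uses the relative deadline as the denominator and pre-shifts the operands so the products stay within 64 bits):

    #include <stdbool.h>
    #include <stdint.h>

    /* True if keeping the current deadline would break the reservation,
     * i.e. runtime / (deadline - t) > dl_runtime / dl_deadline. */
    static bool dl_overflow_sketch(uint64_t runtime, uint64_t deadline, uint64_t t,
                                   uint64_t dl_runtime, uint64_t dl_deadline)
    {
            return runtime * dl_deadline > dl_runtime * (deadline - t);
    }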
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e30dea59d215..fdc8356ea742 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -703,9 +703,9 @@ void init_entity_runnable_average(struct sched_entity *se)
         memset(sa, 0, sizeof(*sa));
 
         /*
-         * Tasks are intialized with full load to be seen as heavy tasks until
+         * Tasks are initialized with full load to be seen as heavy tasks until
          * they get a chance to stabilize to their real load level.
-         * Group entities are intialized with zero load to reflect the fact that
+         * Group entities are initialized with zero load to reflect the fact that
          * nothing has been attached to the task group yet.
          */
         if (entity_is_task(se))
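
The branch the hunk ends on completes roughly as follows in this era of fair.c (a hedged reconstruction, since the hunk truncates here and field names vary across versions): a new task starts at its full weight, while a group entity keeps the zeroed averages from the memset() above.

    if (entity_is_task(se))
            sa->load_avg = scale_load_down(se->load.weight);
    /* group entities: sa stays all-zero until something is attached */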
@@ -3976,8 +3976,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
         /*
          * When dequeuing a sched_entity, we must:
          * - Update loads to have both entity and cfs_rq synced with now.
-         * - Substract its load from the cfs_rq->runnable_avg.
-         * - Substract its previous weight from cfs_rq->load.weight.
+         * - Subtract its load from the cfs_rq->runnable_avg.
+         * - Subtract its previous weight from cfs_rq->load.weight.
          * - For group entity, update its weight to reflect the new share
          *   of its group cfs_rq.
          */
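
As a hedged outline, that checklist maps onto calls of roughly this shape in dequeue_entity() (names from this kernel generation, abbreviated and possibly differing in detail):

    update_load_avg(cfs_rq, se, UPDATE_TG);  /* sync entity and cfs_rq with now */
    dequeue_runnable_load_avg(cfs_rq, se);   /* subtract from runnable_avg */
    /* ... */
    account_entity_dequeue(cfs_rq, se);      /* subtract previous load.weight */
    /* ... */
    update_cfs_group(se);                    /* recompute the group entity's share */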
diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
index e6802181900f..81faddba9e20 100644
--- a/kernel/sched/isolation.c
+++ b/kernel/sched/isolation.c
@@ -8,14 +8,14 @@
  */
 #include "sched.h"
 
-DEFINE_STATIC_KEY_FALSE(housekeeping_overriden);
-EXPORT_SYMBOL_GPL(housekeeping_overriden);
+DEFINE_STATIC_KEY_FALSE(housekeeping_overridden);
+EXPORT_SYMBOL_GPL(housekeeping_overridden);
 static cpumask_var_t housekeeping_mask;
 static unsigned int housekeeping_flags;
 
 int housekeeping_any_cpu(enum hk_flags flags)
 {
-        if (static_branch_unlikely(&housekeeping_overriden))
+        if (static_branch_unlikely(&housekeeping_overridden))
                 if (housekeeping_flags & flags)
                         return cpumask_any_and(housekeeping_mask, cpu_online_mask);
         return smp_processor_id();
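
For orientation, a hedged example of how code elsewhere consults this API (illustrative call sites, not part of this commit; callers include <linux/sched/isolation.h>):

    /* Place miscellaneous unbound work on a housekeeping CPU: */
    int cpu = housekeeping_any_cpu(HK_FLAG_MISC);

    /* Or keep a kernel thread off the isolated CPUs entirely: */
    housekeeping_affine(current, HK_FLAG_MISC);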
@@ -24,7 +24,7 @@ EXPORT_SYMBOL_GPL(housekeeping_any_cpu);
 
 const struct cpumask *housekeeping_cpumask(enum hk_flags flags)
 {
-        if (static_branch_unlikely(&housekeeping_overriden))
+        if (static_branch_unlikely(&housekeeping_overridden))
                 if (housekeeping_flags & flags)
                         return housekeeping_mask;
         return cpu_possible_mask;
@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(housekeeping_cpumask);
 
 void housekeeping_affine(struct task_struct *t, enum hk_flags flags)
 {
-        if (static_branch_unlikely(&housekeeping_overriden))
+        if (static_branch_unlikely(&housekeeping_overridden))
                 if (housekeeping_flags & flags)
                         set_cpus_allowed_ptr(t, housekeeping_mask);
 }
@@ -41,7 +41,7 @@ EXPORT_SYMBOL_GPL(housekeeping_affine);
 
 bool housekeeping_test_cpu(int cpu, enum hk_flags flags)
 {
-        if (static_branch_unlikely(&housekeeping_overriden))
+        if (static_branch_unlikely(&housekeeping_overridden))
                 if (housekeeping_flags & flags)
                         return cpumask_test_cpu(cpu, housekeeping_mask);
         return true;
@@ -53,7 +53,7 @@ void __init housekeeping_init(void)
         if (!housekeeping_flags)
                 return;
 
-        static_branch_enable(&housekeeping_overriden);
+        static_branch_enable(&housekeeping_overridden);
 
         if (housekeeping_flags & HK_FLAG_TICK)
                 sched_tick_offload_init();
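
The housekeeping_overridden symbol being renamed throughout is a static key, so on kernels booted without isolation options every check above compiles to a patched-out no-op branch. The pattern in miniature (a self-contained sketch with hypothetical names):

    #include <linux/jump_label.h>

    DEFINE_STATIC_KEY_FALSE(my_key);                /* branch defaults to off */

    void hot_path(void)
    {
            if (static_branch_unlikely(&my_key))    /* near-free until enabled */
                    rare_slow_work();               /* hypothetical */
    }

    void __init my_setup(void)
    {
            static_branch_enable(&my_key);          /* patch the branch in once */
    }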
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 71cd8b710599..9bde60a11805 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -637,7 +637,7 @@ struct dl_rq {
         /*
          * Deadline values of the currently executing and the
          * earliest ready task on this rq. Caching these facilitates
-         * the decision wether or not a ready but not running task
+         * the decision whether or not a ready but not running task
          * should migrate somewhere else.
          */
         struct {
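
The struct the hunk cuts off holds those cached values; a hedged reconstruction from this kernel era:

    struct {
            u64 curr;       /* deadline of the currently running task */
            u64 next;       /* deadline of the earliest ready task */
    } earliest_dl;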
@@ -1434,7 +1434,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 #ifdef CONFIG_SMP
         /*
          * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
-         * successfuly executed on another CPU. We must ensure that updates of
+         * successfully executed on another CPU. We must ensure that updates of
          * per-task data have been completed by this moment.
          */
         smp_wmb();
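
This smp_wmb() is the writer half of a publish pattern: all per-task updates must become visible before the new ->cpu value does. A hedged sketch of the pairing (the reader side here is illustrative; in the scheduler the matching ordering comes from the locking in task_rq_lock()):

    /* writer (this function) */
    p->some_field = new_value;                      /* hypothetical update */
    smp_wmb();
    WRITE_ONCE(task_thread_info(p)->cpu, cpu);

    /* reader (another CPU) */
    c = READ_ONCE(task_thread_info(p)->cpu);
    smp_rmb();
    v = p->some_field;      /* sees new_value whenever c is the new cpu */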