 include/linux/sched.h           |  4 ++--
 include/linux/sched/isolation.h |  4 ++--
 include/linux/sched/mm.h        |  2 +-
 include/linux/sched/stat.h      |  2 +-
 kernel/sched/core.c             |  2 +-
 kernel/sched/cputime.c          |  2 +-
 kernel/sched/deadline.c         |  2 +-
 kernel/sched/fair.c             |  8 ++++----
 kernel/sched/isolation.c        | 14 +++++++-------
 kernel/sched/sched.h            |  4 ++--
 10 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 291a9bd5b97f..b8c7ba0e3796 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -176,7 +176,7 @@ struct task_group;
  * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
  *
  * However, with slightly different timing the wakeup TASK_RUNNING store can
- * also collide with the TASK_UNINTERRUPTIBLE store. Loosing that store is not
+ * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
  * a problem either because that will result in one extra go around the loop
  * and our @cond test will save the day.
  *
@@ -515,7 +515,7 @@ struct sched_dl_entity {
 
 	/*
 	 * Actual scheduling parameters. Initialized with the values above,
-	 * they are continously updated during task execution. Note that
+	 * they are continuously updated during task execution. Note that
 	 * the remaining runtime could be < 0 in case we are in overrun.
 	 */
 	s64			runtime;	/* Remaining runtime for this instance */
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
index 4a6582c27dea..b0fb1446fe04 100644
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -16,7 +16,7 @@ enum hk_flags {
 };
 
 #ifdef CONFIG_CPU_ISOLATION
-DECLARE_STATIC_KEY_FALSE(housekeeping_overriden);
+DECLARE_STATIC_KEY_FALSE(housekeeping_overridden);
 extern int housekeeping_any_cpu(enum hk_flags flags);
 extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags);
 extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags);
@@ -43,7 +43,7 @@ static inline void housekeeping_init(void) { }
 static inline bool housekeeping_cpu(int cpu, enum hk_flags flags)
 {
 #ifdef CONFIG_CPU_ISOLATION
-	if (static_branch_unlikely(&housekeeping_overriden))
+	if (static_branch_unlikely(&housekeeping_overridden))
 		return housekeeping_test_cpu(cpu, flags);
 #endif
 	return true;
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index aebb370a0006..3bfa6a0cbba4 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -153,7 +153,7 @@ static inline gfp_t current_gfp_context(gfp_t flags)
 {
 	/*
 	 * NOIO implies both NOIO and NOFS and it is a weaker context
-	 * so always make sure it makes precendence
+	 * so always make sure it makes precedence
 	 */
 	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
 		flags &= ~(__GFP_IO | __GFP_FS);
diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h
index f30954cc059d..568286411b43 100644
--- a/include/linux/sched/stat.h
+++ b/include/linux/sched/stat.h
@@ -8,7 +8,7 @@
  * Various counters maintained by the scheduler and fork(),
  * exposed via /proc, sys.c or used by drivers via these APIs.
  *
- * ( Note that all these values are aquired without locking,
+ * ( Note that all these values are acquired without locking,
  *   so they can only be relied on in narrow circumstances. )
  */
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8050f266751a..e4ca15d75541 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2857,7 +2857,7 @@ unsigned long nr_running(void)
  * preemption, thus the result might have a time-of-check-to-time-of-use
  * race. The caller is responsible to use it correctly, for example:
  *
- * - from a non-preemptable section (of course)
+ * - from a non-preemptible section (of course)
  *
  * - from a thread that is bound to a single CPU
  *
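
The comment above documents single_task_running(); its result is only meaningful when the caller cannot migrate between the check and the use. A hedged usage sketch following the first bullet (the caller and the work function are hypothetical, not part of this commit):

	preempt_disable();			/* stay on this CPU across check and use */
	if (single_task_running())
		do_polling_work();		/* hypothetical lightweight work */
	preempt_enable();
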
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 0796f938c4f0..ba4a143bdcf3 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -525,7 +525,7 @@ void account_idle_ticks(unsigned long ticks)
 
 /*
  * Perform (stime * rtime) / total, but avoid multiplication overflow by
- * loosing precision when the numbers are big.
+ * losing precision when the numbers are big.
  */
 static u64 scale_stime(u64 stime, u64 rtime, u64 total)
 {
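
The comment fixed above describes the overflow-avoidance strategy of scale_stime(): compute stime * rtime / total while dropping low-order bits when the 64-bit multiply would overflow. A minimal standalone sketch of that idea (an illustration of the approach named in the comment, not the kernel's exact implementation):

	#include <stdint.h>

	/* Scale stime by rtime/total; shift the operands down while the
	 * multiply would overflow 64 bits, trading precision for safety. */
	static uint64_t scale_stime_sketch(uint64_t stime, uint64_t rtime, uint64_t total)
	{
		while (rtime && stime > UINT64_MAX / rtime) {
			rtime >>= 1;
			total >>= 1;
		}
		return total ? stime * rtime / total : 0;
	}
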
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 470ba6b464fe..b32bc1f7cd14 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -727,7 +727,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
  * refill the runtime and set the deadline a period in the future,
  * because keeping the current (absolute) deadline of the task would
  * result in breaking guarantees promised to other tasks (refer to
- * Documentation/scheduler/sched-deadline.txt for more informations).
+ * Documentation/scheduler/sched-deadline.txt for more information).
  *
  * This function returns true if:
  *
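
The rule this comment describes — refill the runtime budget and set a fresh deadline one period in the future instead of keeping the stale absolute deadline — can be sketched in isolation (a simplified model with hypothetical field names, not the kernel's sched_dl_entity handling):

	struct dl_params { long long runtime, deadline, dl_runtime, dl_period; };

	static void replenish_sketch(struct dl_params *dl, long long now)
	{
		dl->runtime  = dl->dl_runtime;		/* refill the full budget */
		dl->deadline = now + dl->dl_period;	/* fresh deadline, one period out */
	}
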
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e30dea59d215..fdc8356ea742 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -703,9 +703,9 @@ void init_entity_runnable_average(struct sched_entity *se)
 	memset(sa, 0, sizeof(*sa));
 
 	/*
-	 * Tasks are intialized with full load to be seen as heavy tasks until
+	 * Tasks are initialized with full load to be seen as heavy tasks until
 	 * they get a chance to stabilize to their real load level.
-	 * Group entities are intialized with zero load to reflect the fact that
+	 * Group entities are initialized with zero load to reflect the fact that
 	 * nothing has been attached to the task group yet.
 	 */
 	if (entity_is_task(se))
@@ -3976,8 +3976,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	/*
 	 * When dequeuing a sched_entity, we must:
 	 * - Update loads to have both entity and cfs_rq synced with now.
-	 * - Substract its load from the cfs_rq->runnable_avg.
-	 * - Substract its previous weight from cfs_rq->load.weight.
+	 * - Subtract its load from the cfs_rq->runnable_avg.
+	 * - Subtract its previous weight from cfs_rq->load.weight.
 	 * - For group entity, update its weight to reflect the new share
 	 *   of its group cfs_rq.
 	 */
diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
index e6802181900f..81faddba9e20 100644
--- a/kernel/sched/isolation.c
+++ b/kernel/sched/isolation.c
@@ -8,14 +8,14 @@
  */
 #include "sched.h"
 
-DEFINE_STATIC_KEY_FALSE(housekeeping_overriden);
-EXPORT_SYMBOL_GPL(housekeeping_overriden);
+DEFINE_STATIC_KEY_FALSE(housekeeping_overridden);
+EXPORT_SYMBOL_GPL(housekeeping_overridden);
 static cpumask_var_t housekeeping_mask;
 static unsigned int housekeeping_flags;
 
 int housekeeping_any_cpu(enum hk_flags flags)
 {
-	if (static_branch_unlikely(&housekeeping_overriden))
+	if (static_branch_unlikely(&housekeeping_overridden))
 		if (housekeeping_flags & flags)
 			return cpumask_any_and(housekeeping_mask, cpu_online_mask);
 	return smp_processor_id();
@@ -24,7 +24,7 @@ EXPORT_SYMBOL_GPL(housekeeping_any_cpu);
 
 const struct cpumask *housekeeping_cpumask(enum hk_flags flags)
 {
-	if (static_branch_unlikely(&housekeeping_overriden))
+	if (static_branch_unlikely(&housekeeping_overridden))
 		if (housekeeping_flags & flags)
 			return housekeeping_mask;
 	return cpu_possible_mask;
@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(housekeeping_cpumask);
 
 void housekeeping_affine(struct task_struct *t, enum hk_flags flags)
 {
-	if (static_branch_unlikely(&housekeeping_overriden))
+	if (static_branch_unlikely(&housekeeping_overridden))
 		if (housekeeping_flags & flags)
 			set_cpus_allowed_ptr(t, housekeeping_mask);
 }
@@ -41,7 +41,7 @@ EXPORT_SYMBOL_GPL(housekeeping_affine);
 
 bool housekeeping_test_cpu(int cpu, enum hk_flags flags)
 {
-	if (static_branch_unlikely(&housekeeping_overriden))
+	if (static_branch_unlikely(&housekeeping_overridden))
 		if (housekeeping_flags & flags)
 			return cpumask_test_cpu(cpu, housekeeping_mask);
 	return true;
@@ -53,7 +53,7 @@ void __init housekeeping_init(void)
 	if (!housekeeping_flags)
 		return;
 
-	static_branch_enable(&housekeeping_overriden);
+	static_branch_enable(&housekeeping_overridden);
 
 	if (housekeeping_flags & HK_FLAG_TICK)
 		sched_tick_offload_init();
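
Callers normally go through the housekeeping helpers shown above rather than testing the housekeeping_overridden key directly; a hedged sketch of such a caller (the flag choice and function name are illustrative, not from this commit):

	static int pick_housekeeping_target(void)
	{
		/* Falls back to cpu_possible_mask when isolation is not enabled. */
		const struct cpumask *mask = housekeeping_cpumask(HK_FLAG_TICK);

		return cpumask_any_and(mask, cpu_online_mask);
	}
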
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 71cd8b710599..9bde60a11805 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -637,7 +637,7 @@ struct dl_rq {
 	/*
 	 * Deadline values of the currently executing and the
 	 * earliest ready task on this rq. Caching these facilitates
-	 * the decision wether or not a ready but not running task
+	 * the decision whether or not a ready but not running task
 	 * should migrate somewhere else.
 	 */
 	struct {
@@ -1434,7 +1434,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 #ifdef CONFIG_SMP
 	/*
 	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
-	 * successfuly executed on another CPU. We must ensure that updates of
+	 * successfully executed on another CPU. We must ensure that updates of
 	 * per-task data have been completed by this moment.
 	 */
 	smp_wmb();
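
The barrier this comment explains follows the usual publish pattern: complete the per-task updates, then make the new ->cpu value visible to remote CPUs. A hedged sketch (field and value names are illustrative, not the kernel's exact sequence):

	p->some_field = new_value;	/* hypothetical per-task update */
	smp_wmb();			/* order the update before publishing ->cpu */
	WRITE_ONCE(p->cpu, cpu);	/* a remote task_rq_lock() may act on this at once */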