Diffstat (limited to 'kernel/sched/sched.h')
 kernel/sched/sched.h | 136 ++++++++++++++++++++++++----------------------
 1 file changed, 69 insertions(+), 67 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c64fc5114004..055f935d4421 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2,6 +2,7 @@
 #include <linux/sched.h>
 #include <linux/sched/sysctl.h>
 #include <linux/sched/rt.h>
+#include <linux/u64_stats_sync.h>
 #include <linux/sched/deadline.h>
 #include <linux/binfmts.h>
 #include <linux/mutex.h>
@@ -15,6 +16,12 @@
 #include "cpudeadline.h"
 #include "cpuacct.h"
 
+#ifdef CONFIG_SCHED_DEBUG
+#define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
+#else
+#define SCHED_WARN_ON(x)	((void)(x))
+#endif
+
 struct rq;
 struct cpuidle_state;
 
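SCHED_WARN_ON() keeps one-shot warnings in debug builds while still evaluating its argument (so side effects survive) when CONFIG_SCHED_DEBUG is off. A minimal sketch of a call site; check_rq_cpu() is a hypothetical helper, not part of this patch:

    /* Warns once per boot if the rq does not belong to the running CPU. */
    static inline void check_rq_cpu(struct rq *rq)
    {
            SCHED_WARN_ON(cpu_of(rq) != smp_processor_id());
    }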
@@ -565,6 +572,8 @@ struct root_domain {
 	 */
 	cpumask_var_t rto_mask;
 	struct cpupri cpupri;
+
+	unsigned long max_cpu_capacity;
 };
 
 extern struct root_domain def_root_domain;
@@ -597,7 +606,6 @@ struct rq {
 #ifdef CONFIG_SMP
 	unsigned long last_load_update_tick;
 #endif /* CONFIG_SMP */
-	u64 nohz_stamp;
 	unsigned long nohz_flags;
 #endif /* CONFIG_NO_HZ_COMMON */
 #ifdef CONFIG_NO_HZ_FULL
@@ -723,6 +731,23 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
+
+#ifdef CONFIG_SCHED_SMT
+
+extern struct static_key_false sched_smt_present;
+
+extern void __update_idle_core(struct rq *rq);
+
+static inline void update_idle_core(struct rq *rq)
+{
+	if (static_branch_unlikely(&sched_smt_present))
+		__update_idle_core(rq);
+}
+
+#else
+static inline void update_idle_core(struct rq *rq) { }
+#endif
+
 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
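update_idle_core() is gated by the sched_smt_present static key, so on machines without SMT the test patches down to a runtime NOP. A hedged sketch of how such a key is typically defined and flipped when a sibling thread comes online; example_cpu_online() is hypothetical, the real enable site lives in the CPU hotplug path:

    #include <linux/jump_label.h>
    #include <linux/topology.h>

    DEFINE_STATIC_KEY_FALSE(sched_smt_present);

    /* Once any CPU reports an SMT sibling, flip the key so that
     * static_branch_unlikely() starts taking the out-of-line path. */
    static void example_cpu_online(int cpu)
    {
            if (cpumask_weight(topology_sibling_cpumask(cpu)) > 1)
                    static_branch_enable(&sched_smt_present);
    }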
@@ -857,8 +882,8 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
 DECLARE_PER_CPU(struct sched_domain *, sd_llc);
 DECLARE_PER_CPU(int, sd_llc_size);
 DECLARE_PER_CPU(int, sd_llc_id);
+DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
 DECLARE_PER_CPU(struct sched_domain *, sd_numa);
-DECLARE_PER_CPU(struct sched_domain *, sd_busy);
 DECLARE_PER_CPU(struct sched_domain *, sd_asym);
 
 struct sched_group_capacity {
@@ -870,10 +895,6 @@ struct sched_group_capacity {
 	unsigned int capacity;
 	unsigned long next_update;
 	int imbalance; /* XXX unrelated to capacity but shared group state */
-	/*
-	 * Number of busy cpus in this group.
-	 */
-	atomic_t nr_busy_cpus;
 
 	unsigned long cpumask[0]; /* iteration mask */
 };
@@ -1000,7 +1021,11 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 	 * per-task data have been completed by this moment.
 	 */
 	smp_wmb();
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	p->cpu = cpu;
+#else
 	task_thread_info(p)->cpu = cpu;
+#endif
 	p->wake_cpu = cpu;
 #endif
 }
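With CONFIG_THREAD_INFO_IN_TASK the cpu field lives in task_struct itself rather than in thread_info, hence the new #ifdef around the store. A sketch of the matching reader, modelled on task_cpu() (example_task_cpu() is a hypothetical name); it pairs with the smp_wmb() above:

    static inline unsigned int example_task_cpu(const struct task_struct *p)
    {
    #ifdef CONFIG_THREAD_INFO_IN_TASK
            return READ_ONCE(p->cpu);
    #else
            return task_thread_info(p)->cpu;
    #endif
    }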
@@ -1260,6 +1285,11 @@ static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 	prev->sched_class->put_prev_task(rq, prev);
 }
 
+static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
+{
+	curr->sched_class->set_curr_task(rq);
+}
+
 #define sched_class_highest (&stop_sched_class)
 #define for_each_class(class) \
 	for (class = sched_class_highest; class; class = class->next)
@@ -1290,7 +1320,7 @@ static inline void idle_set_state(struct rq *rq,
 
 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
 {
-	WARN_ON(!rcu_read_lock_held());
+	SCHED_WARN_ON(!rcu_read_lock_held());
 	return rq->idle_state;
 }
 #else
@@ -1710,52 +1740,28 @@ static inline void nohz_balance_exit_idle(unsigned int cpu) { }
 #endif
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+struct irqtime {
+	u64			hardirq_time;
+	u64			softirq_time;
+	u64			irq_start_time;
+	struct u64_stats_sync	sync;
+};
 
-DECLARE_PER_CPU(u64, cpu_hardirq_time);
-DECLARE_PER_CPU(u64, cpu_softirq_time);
-
-#ifndef CONFIG_64BIT
-DECLARE_PER_CPU(seqcount_t, irq_time_seq);
-
-static inline void irq_time_write_begin(void)
-{
-	__this_cpu_inc(irq_time_seq.sequence);
-	smp_wmb();
-}
-
-static inline void irq_time_write_end(void)
-{
-	smp_wmb();
-	__this_cpu_inc(irq_time_seq.sequence);
-}
+DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
 
 static inline u64 irq_time_read(int cpu)
 {
-	u64 irq_time;
-	unsigned seq;
+	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
+	unsigned int seq;
+	u64 total;
 
 	do {
-		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
-		irq_time = per_cpu(cpu_softirq_time, cpu) +
-			   per_cpu(cpu_hardirq_time, cpu);
-	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
-
-	return irq_time;
-}
-#else /* CONFIG_64BIT */
-static inline void irq_time_write_begin(void)
-{
-}
-
-static inline void irq_time_write_end(void)
-{
-}
+		seq = __u64_stats_fetch_begin(&irqtime->sync);
+		total = irqtime->softirq_time + irqtime->hardirq_time;
+	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
 
-static inline u64 irq_time_read(int cpu)
-{
-	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
+	return total;
 }
-#endif /* CONFIG_64BIT */
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 #ifdef CONFIG_CPU_FREQ
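The hand-rolled seqcount (plus its empty 64-bit variant) is replaced by the generic u64_stats_sync helpers, which likewise compile to plain loads and stores on 64-bit. A hedged sketch of the matching writer side, assuming the stock u64_stats API; the real accounting code lives in kernel/sched/cputime.c and example_account_irqtime() is a hypothetical name:

    static void example_account_irqtime(u64 delta, bool hardirq)
    {
            struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);

            /* Bracket the 64-bit updates so 32-bit readers retry. */
            u64_stats_update_begin(&irqtime->sync);
            if (hardirq)
                    irqtime->hardirq_time += delta;
            else
                    irqtime->softirq_time += delta;
            u64_stats_update_end(&irqtime->sync);
    }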
@@ -1763,27 +1769,13 @@ DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
 
 /**
  * cpufreq_update_util - Take a note about CPU utilization changes.
- * @time: Current time.
- * @util: Current utilization.
- * @max: Utilization ceiling.
+ * @rq: Runqueue to carry out the update for.
+ * @flags: Update reason flags.
  *
- * This function is called by the scheduler on every invocation of
- * update_load_avg() on the CPU whose utilization is being updated.
+ * This function is called by the scheduler on the CPU whose utilization is
+ * being updated.
  *
  * It can only be called from RCU-sched read-side critical sections.
- */
-static inline void cpufreq_update_util(u64 time, unsigned long util, unsigned long max)
-{
-	struct update_util_data *data;
-
-	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
-	if (data)
-		data->func(data, time, util, max);
-}
-
-/**
- * cpufreq_trigger_update - Trigger CPU performance state evaluation if needed.
- * @time: Current time.
  *
  * The way cpufreq is currently arranged requires it to evaluate the CPU
  * performance state (frequency/voltage) on a regular basis to prevent it from
@@ -1797,13 +1789,23 @@ static inline void cpufreq_update_util(u64 time, unsigned long util, unsigned lo
  * but that really is a band-aid. Going forward it should be replaced with
  * solutions targeted more specifically at RT and DL tasks.
  */
-static inline void cpufreq_trigger_update(u64 time)
+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
+{
+	struct update_util_data *data;
+
+	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
+	if (data)
+		data->func(data, rq_clock(rq), flags);
+}
+
+static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
 {
-	cpufreq_update_util(time, ULONG_MAX, 0);
+	if (cpu_of(rq) == smp_processor_id())
+		cpufreq_update_util(rq, flags);
 }
 #else
-static inline void cpufreq_update_util(u64 time, unsigned long util, unsigned long max) {}
-static inline void cpufreq_trigger_update(u64 time) {}
+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
+static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {}
 #endif /* CONFIG_CPU_FREQ */
 
 #ifdef arch_scale_freq_capacity
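After this change the callback receives the rq clock and the reason flags instead of precomputed util/max values, so governors pull utilization from the runqueue themselves. A hedged sketch of a consumer registering via cpufreq_add_update_util_hook() from kernel/sched/cpufreq.c; the my_* names are hypothetical:

    static struct update_util_data my_update_util;

    static void my_util_handler(struct update_util_data *data, u64 time,
                                unsigned int flags)
    {
            /* Kick a frequency re-evaluation here; utilization is now
             * read off the rq by the governor, not passed in. */
    }

    static void my_governor_start(int cpu)
    {
            cpufreq_add_update_util_hook(cpu, &my_update_util, my_util_handler);
    }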