path: root/include/linux/sched.h
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	52
1 file changed, 42 insertions, 10 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a781dec1cd0b..7cb07fd26680 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3,6 +3,8 @@
 
 #include <uapi/linux/sched.h>
 
+#include <linux/sched/prio.h>
+
 
 struct sched_param {
 	int sched_priority;
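
The header added here, <linux/sched/prio.h>, carries the nice/priority conversion macros that the task_nice() change at the bottom of this diff depends on. A sketch of the relevant definitions, paraphrased from that header (exact contents are an assumption):

#define MAX_NICE	19
#define MIN_NICE	-20
#define NICE_WIDTH	(MAX_NICE - MIN_NICE + 1)	/* 40 */

#define MAX_RT_PRIO	100
#define MAX_PRIO	(MAX_RT_PRIO + NICE_WIDTH)	/* 140 */
#define DEFAULT_PRIO	(MAX_RT_PRIO + NICE_WIDTH / 2)	/* 120 */

/* Convert user-nice values [ -20 ... 0 ... 19 ] to static priority
 * [ MAX_RT_PRIO..MAX_PRIO-1 ] and back. */
#define NICE_TO_PRIO(nice)	((nice) + DEFAULT_PRIO)
#define PRIO_TO_NICE(prio)	((prio) - DEFAULT_PRIO)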
@@ -27,7 +29,7 @@ struct sched_param {
 
 #include <asm/page.h>
 #include <asm/ptrace.h>
-#include <asm/cputime.h>
+#include <linux/cputime.h>
 
 #include <linux/smp.h>
 #include <linux/sem.h>
@@ -292,10 +294,14 @@ extern int runqueue_is_locked(int cpu);
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 extern void nohz_balance_enter_idle(int cpu);
 extern void set_cpu_sd_state_idle(void);
-extern int get_nohz_timer_target(void);
+extern int get_nohz_timer_target(int pinned);
 #else
 static inline void nohz_balance_enter_idle(int cpu) { }
 static inline void set_cpu_sd_state_idle(void) { }
+static inline int get_nohz_timer_target(int pinned)
+{
+	return smp_processor_id();
+}
 #endif
 
 /*
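
get_nohz_timer_target() grows a `pinned' argument: a pinned timer must fire on the CPU it was armed on, while an unpinned one may be steered to a busy CPU so an idle CPU is not woken just to service it. The new stub keeps non-SMP/non-NOHZ builds working by returning the current CPU. A hypothetical caller sketch (the helper name is invented for illustration):

#include <linux/sched.h>

/* Pick the CPU whose timer wheel should host a timer.  With
 * pinned == 1 the current CPU comes back; with pinned == 0 the
 * scheduler may nominate a busier CPU instead. */
static int pick_timer_cpu(int pinned)
{
	return get_nohz_timer_target(pinned);
}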
@@ -1077,6 +1083,7 @@ struct sched_entity {
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+	int			depth;
 	struct sched_entity	*parent;
 	/* rq on which this entity is (to be) queued: */
 	struct cfs_rq		*cfs_rq;
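
The new depth field caches how many levels below the root an entity sits in the task-group hierarchy. A sketch of the walk this enables, loosely modeled on find_matching_se() in kernel/sched/fair.c (simplified; the real code compares runqueues via helpers):

/* Bring two entities to the same hierarchy level, then climb in
 * lockstep until they share a cfs_rq.  The cached depth avoids
 * recomputing each side's level by chasing parent pointers. */
static void find_common_level(struct sched_entity **se,
			      struct sched_entity **pse)
{
	int se_depth = (*se)->depth;
	int pse_depth = (*pse)->depth;

	while (se_depth > pse_depth) {
		se_depth--;
		*se = (*se)->parent;
	}
	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = (*pse)->parent;
	}
	while ((*se)->cfs_rq != (*pse)->cfs_rq) {
		*se = (*se)->parent;
		*pse = (*pse)->parent;
	}
}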
@@ -1460,6 +1467,9 @@ struct task_struct {
 	struct mutex perf_event_mutex;
 	struct list_head perf_event_list;
 #endif
+#ifdef CONFIG_DEBUG_PREEMPT
+	unsigned long preempt_disable_ip;
+#endif
 #ifdef CONFIG_NUMA
 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
 	short il_next;
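
preempt_disable_ip records the instruction pointer of the outermost preempt_disable() call, so CONFIG_DEBUG_PREEMPT reports such as "scheduling while atomic" can name the site that left preemption off. A sketch of how a debug path might consume it (hypothetical helper, not from this patch):

#include <linux/kallsyms.h>
#include <linux/printk.h>
#include <linux/sched.h>

static void report_preempt_disable_site(struct task_struct *p)
{
#ifdef CONFIG_DEBUG_PREEMPT
	pr_err("Preemption disabled at:\n");
	print_ip_sym(p->preempt_disable_ip);	/* ip -> symbol+offset */
#endif
}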
@@ -1470,9 +1480,10 @@ struct task_struct {
 	unsigned int numa_scan_period;
 	unsigned int numa_scan_period_max;
 	int numa_preferred_nid;
-	int numa_migrate_deferred;
 	unsigned long numa_migrate_retry;
 	u64 node_stamp;			/* migration stamp */
+	u64 last_task_numa_placement;
+	u64 last_sum_exec_runtime;
 	struct callback_head numa_work;
 
 	struct list_head numa_entry;
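
The removed numa_migrate_deferred counter gives way to two timestamps: last_task_numa_placement and last_sum_exec_runtime let the placement code measure how much CPU time the task actually consumed between two placement passes, so fault statistics can be weighted by real activity. A simplified bookkeeping sketch (loosely modeled on task_numa_placement() in kernel/sched/fair.c; names are approximations):

/* Runtime consumed and wall-clock elapsed since the previous
 * placement pass; both counters are re-armed for the next pass. */
static u64 numa_runtime_delta(struct task_struct *p, u64 *period)
{
	u64 now = p->se.exec_start;
	u64 runtime = p->se.sum_exec_runtime;
	u64 delta = runtime - p->last_sum_exec_runtime;

	*period = now - p->last_task_numa_placement;
	p->last_sum_exec_runtime = runtime;
	p->last_task_numa_placement = now;
	return delta;
}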
@@ -1483,15 +1494,22 @@ struct task_struct {
 	 * Scheduling placement decisions are made based on the these counts.
 	 * The values remain static for the duration of a PTE scan
 	 */
-	unsigned long *numa_faults;
+	unsigned long *numa_faults_memory;
 	unsigned long total_numa_faults;
 
 	/*
 	 * numa_faults_buffer records faults per node during the current
-	 * scan window. When the scan completes, the counts in numa_faults
-	 * decay and these values are copied.
+	 * scan window. When the scan completes, the counts in
+	 * numa_faults_memory decay and these values are copied.
 	 */
-	unsigned long *numa_faults_buffer;
+	unsigned long *numa_faults_buffer_memory;
+
+	/*
+	 * Track the nodes the process was running on when a NUMA hinting
+	 * fault was incurred.
+	 */
+	unsigned long *numa_faults_cpu;
+	unsigned long *numa_faults_buffer_cpu;
 
 	/*
 	 * numa_faults_locality tracks if faults recorded during the last
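
The fault statistics are now split by flavor: the *_memory arrays count which nodes the faulting pages live on, the new *_cpu arrays count which nodes the task was running on when the hinting faults fired, and each flavor keeps a stable copy plus a per-scan-window buffer. One plausible layout is a single allocation carved into the four arrays, along these lines (a sketch, loosely modeled on task_numa_fault() in kernel/sched/fair.c):

#include <linux/slab.h>

static int numa_alloc_fault_stats(struct task_struct *p)
{
	/* Four arrays, each with two entries (private/shared) per node. */
	int size = sizeof(unsigned long) * 4 * (2 * nr_node_ids);

	p->numa_faults_memory = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!p->numa_faults_memory)
		return -ENOMEM;

	p->numa_faults_cpu = p->numa_faults_memory + 2 * nr_node_ids;
	p->numa_faults_buffer_memory = p->numa_faults_memory + 4 * nr_node_ids;
	p->numa_faults_buffer_cpu = p->numa_faults_memory + 6 * nr_node_ids;
	return 0;
}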
@@ -1596,8 +1614,8 @@ extern void task_numa_fault(int last_node, int node, int pages, int flags);
 extern pid_t task_numa_group_id(struct task_struct *p);
 extern void set_numabalancing_state(bool enabled);
 extern void task_numa_free(struct task_struct *p);
-
-extern unsigned int sysctl_numa_balancing_migrate_deferred;
+extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
+					int src_nid, int dst_cpu);
 #else
 static inline void task_numa_fault(int last_node, int node, int pages,
 				   int flags)
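
should_numa_migrate_memory() replaces the dropped sysctl_numa_balancing_migrate_deferred knob: instead of a global deferral count, the scheduler's per-task fault statistics now decide, page by page, whether migrating toward the faulting CPU is worthwhile. A hypothetical call-site sketch (helper name invented; the real caller sits in the memory-policy code):

#include <linux/sched.h>
#include <linux/topology.h>

/* Where should a page touched from dst_cpu live?  Returns the target
 * node, or -1 to leave the page on src_nid. */
static int pick_page_nid(struct task_struct *p, struct page *page,
			 int src_nid, int dst_cpu)
{
	if (!should_numa_migrate_memory(p, page, src_nid, dst_cpu))
		return -1;
	return cpu_to_node(dst_cpu);
}

The !CONFIG_NUMA_BALANCING stub added below simply answers true, so callers need no ifdefs.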
@@ -1613,6 +1631,11 @@ static inline void set_numabalancing_state(bool enabled)
 static inline void task_numa_free(struct task_struct *p)
 {
 }
+static inline bool should_numa_migrate_memory(struct task_struct *p,
+				struct page *page, int src_nid, int dst_cpu)
+{
+	return true;
+}
 #endif
 
 static inline struct pid *task_pid(struct task_struct *task)
@@ -2080,7 +2103,16 @@ static inline void sched_autogroup_exit(struct signal_struct *sig) { }
 extern bool yield_to(struct task_struct *p, bool preempt);
 extern void set_user_nice(struct task_struct *p, long nice);
 extern int task_prio(const struct task_struct *p);
-extern int task_nice(const struct task_struct *p);
+/**
+ * task_nice - return the nice value of a given task.
+ * @p: the task in question.
+ *
+ * Return: The nice value [ -20 ... 0 ... 19 ].
+ */
+static inline int task_nice(const struct task_struct *p)
+{
+	return PRIO_TO_NICE((p)->static_prio);
+}
 extern int can_nice(const struct task_struct *p, const int nice);
 extern int task_curr(const struct task_struct *p);
 extern int idle_cpu(int cpu);
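
task_nice() turns from an out-of-line function into a trivial static inline over PRIO_TO_NICE(). Worked through under the usual priority layout (DEFAULT_PRIO == 120, an assumption consistent with the <linux/sched/prio.h> sketch above):

#include <linux/printk.h>
#include <linux/sched.h>

static void show_nice(struct task_struct *p)
{
	/* PRIO_TO_NICE(prio) == prio - DEFAULT_PRIO, so:
	 *   static_prio 100 -> nice -20   (most favourable)
	 *   static_prio 120 -> nice   0   (default)
	 *   static_prio 139 -> nice  19   (least favourable) */
	pr_info("%s: nice %d\n", p->comm, task_nice(p));
}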