Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	41
1 file changed, 33 insertions, 8 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a781dec1cd0b..c49a2585ff7d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3,6 +3,8 @@
 
 #include <uapi/linux/sched.h>
 
+#include <linux/sched/prio.h>
+
 
 struct sched_param {
 	int sched_priority;
@@ -1077,6 +1079,7 @@ struct sched_entity {
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+	int			depth;
 	struct sched_entity	*parent;
 	/* rq on which this entity is (to be) queued: */
 	struct cfs_rq		*cfs_rq;
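
The new depth field records how deeply this sched_entity is nested in the group-scheduling hierarchy, which lets the fair-class code bring two entities to the same level before comparing them. A minimal sketch of the idea, assuming only the existing ->parent linkage (the helper name se_depth is illustrative, not part of this patch):

/* Walk the ->parent chain to find how deeply an entity is nested. */
static int se_depth(struct sched_entity *se)
{
	int depth = 0;

	for (; se->parent; se = se->parent)
		depth++;
	return depth;
}

Caching this count in the entity itself avoids rewalking the parent chain every time two entities have to be lined up.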
@@ -1470,9 +1473,10 @@ struct task_struct {
 	unsigned int numa_scan_period;
 	unsigned int numa_scan_period_max;
 	int numa_preferred_nid;
-	int numa_migrate_deferred;
 	unsigned long numa_migrate_retry;
 	u64 node_stamp;			/* migration stamp */
+	u64 last_task_numa_placement;
+	u64 last_sum_exec_runtime;
 	struct callback_head numa_work;
 
 	struct list_head numa_entry;
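
The two new u64 fields snapshot the clock and the task's accumulated runtime at the previous NUMA placement pass, so the next pass can tell how much of the elapsed window the task actually spent running. A hedged sketch of that bookkeeping (everything except the two fields added above is an assumption about the caller):

	/*
	 * Illustrative only: compute runtime and wall-clock deltas since
	 * the last placement decision, then advance the snapshots.
	 */
	runtime = p->se.sum_exec_runtime - p->last_sum_exec_runtime;
	period  = now - p->last_task_numa_placement;

	p->last_sum_exec_runtime    = p->se.sum_exec_runtime;
	p->last_task_numa_placement = now;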
@@ -1483,15 +1487,22 @@ struct task_struct {
 	 * Scheduling placement decisions are made based on the these counts.
 	 * The values remain static for the duration of a PTE scan
 	 */
-	unsigned long *numa_faults;
+	unsigned long *numa_faults_memory;
 	unsigned long total_numa_faults;
 
 	/*
 	 * numa_faults_buffer records faults per node during the current
-	 * scan window. When the scan completes, the counts in numa_faults
-	 * decay and these values are copied.
+	 * scan window. When the scan completes, the counts in
+	 * numa_faults_memory decay and these values are copied.
+	 */
+	unsigned long *numa_faults_buffer_memory;
+
+	/*
+	 * Track the nodes the process was running on when a NUMA hinting
+	 * fault was incurred.
 	 */
-	unsigned long *numa_faults_buffer;
+	unsigned long *numa_faults_cpu;
+	unsigned long *numa_faults_buffer_cpu;
 
 	/*
 	 * numa_faults_locality tracks if faults recorded during the last
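
Per the comments above, numa_faults_buffer_memory (and its new CPU-side counterpart) collects faults only for the scan window in flight; when the window closes, the long-lived counters decay and the buffered counts are folded in. A minimal sketch of that fold, with assumed helper and parameter names (this function is not part of the patch):

/* Illustrative: decay long-term fault counts and absorb the last window. */
static void fold_numa_faults(unsigned long *faults, unsigned long *buffer,
			     int nr_entries)
{
	int i;

	for (i = 0; i < nr_entries; i++) {
		faults[i] >>= 1;	/* decay the historical count */
		faults[i] += buffer[i];	/* add this window's faults */
		buffer[i]  = 0;		/* reset for the next scan window */
	}
}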
@@ -1596,8 +1607,8 @@ extern void task_numa_fault(int last_node, int node, int pages, int flags);
 extern pid_t task_numa_group_id(struct task_struct *p);
 extern void set_numabalancing_state(bool enabled);
 extern void task_numa_free(struct task_struct *p);
-
-extern unsigned int sysctl_numa_balancing_migrate_deferred;
+extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
+					int src_nid, int dst_cpu);
 #else
 static inline void task_numa_fault(int last_node, int node, int pages,
 				   int flags)
@@ -1613,6 +1624,11 @@ static inline void set_numabalancing_state(bool enabled)
 static inline void task_numa_free(struct task_struct *p)
 {
 }
+static inline bool should_numa_migrate_memory(struct task_struct *p,
+				struct page *page, int src_nid, int dst_cpu)
+{
+	return true;
+}
 #endif
 
 static inline struct pid *task_pid(struct task_struct *task)
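
The !CONFIG_NUMA_BALANCING stub returns true so callers can ask the scheduler whether a page should chase the task without wrapping the call in their own #ifdefs. A hedged sketch of the expected caller shape (maybe_misplaced_target and its arguments are assumptions, not code from this patch):

#include <linux/numa.h>

/*
 * Illustrative: pick the destination node for a page that took a NUMA
 * hinting fault, or NUMA_NO_NODE to leave it where it is.
 */
static int maybe_misplaced_target(struct task_struct *p, struct page *page,
				  int src_nid, int dst_cpu, int dst_nid)
{
	if (!should_numa_migrate_memory(p, page, src_nid, dst_cpu))
		return NUMA_NO_NODE;

	return dst_nid;
}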
@@ -2080,7 +2096,16 @@ static inline void sched_autogroup_exit(struct signal_struct *sig) { }
 extern bool yield_to(struct task_struct *p, bool preempt);
 extern void set_user_nice(struct task_struct *p, long nice);
 extern int task_prio(const struct task_struct *p);
-extern int task_nice(const struct task_struct *p);
+/**
+ * task_nice - return the nice value of a given task.
+ * @p: the task in question.
+ *
+ * Return: The nice value [ -20 ... 0 ... 19 ].
+ */
+static inline int task_nice(const struct task_struct *p)
+{
+	return PRIO_TO_NICE((p)->static_prio);
+}
 extern int can_nice(const struct task_struct *p, const int nice);
 extern int task_curr(const struct task_struct *p);
 extern int idle_cpu(int cpu);
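
task_nice() is now a trivial static inline, relying on the priority macros pulled in via the new <linux/sched/prio.h> include at the top of this diff. For reference, the mapping is a fixed offset of DEFAULT_PRIO; the macro bodies below are a sketch of how they stood around this series, not lines from this patch:

#define MAX_RT_PRIO		100
#define NICE_WIDTH		40
#define DEFAULT_PRIO		(MAX_RT_PRIO + NICE_WIDTH / 2)	/* 120 */
#define PRIO_TO_NICE(prio)	((prio) - DEFAULT_PRIO)
#define NICE_TO_PRIO(nice)	((nice) + DEFAULT_PRIO)

So a task at the default static_prio of 120 reports nice 0, and static_prio 100 corresponds to nice -20.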