diff options
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	52
1 files changed, 42 insertions, 10 deletions

diff --git a/include/linux/sched.h b/include/linux/sched.h
index a781dec1cd0b..7cb07fd26680 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3,6 +3,8 @@
 
 #include <uapi/linux/sched.h>
 
+#include <linux/sched/prio.h>
+
 
 struct sched_param {
 	int sched_priority;
@@ -27,7 +29,7 @@ struct sched_param {
 
 #include <asm/page.h>
 #include <asm/ptrace.h>
-#include <asm/cputime.h>
+#include <linux/cputime.h>
 
 #include <linux/smp.h>
 #include <linux/sem.h>
@@ -292,10 +294,14 @@ extern int runqueue_is_locked(int cpu);
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 extern void nohz_balance_enter_idle(int cpu);
 extern void set_cpu_sd_state_idle(void);
-extern int get_nohz_timer_target(void);
+extern int get_nohz_timer_target(int pinned);
 #else
 static inline void nohz_balance_enter_idle(int cpu) { }
 static inline void set_cpu_sd_state_idle(void) { }
+static inline int get_nohz_timer_target(int pinned)
+{
+	return smp_processor_id();
+}
 #endif
 
 /*
@@ -1077,6 +1083,7 @@ struct sched_entity {
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+	int			depth;
 	struct sched_entity	*parent;
 	/* rq on which this entity is (to be) queued: */
 	struct cfs_rq		*cfs_rq;
@@ -1460,6 +1467,9 @@ struct task_struct {
 	struct mutex perf_event_mutex;
 	struct list_head perf_event_list;
 #endif
+#ifdef CONFIG_DEBUG_PREEMPT
+	unsigned long preempt_disable_ip;
+#endif
 #ifdef CONFIG_NUMA
 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
 	short il_next;
@@ -1470,9 +1480,10 @@ struct task_struct {
 	unsigned int numa_scan_period;
 	unsigned int numa_scan_period_max;
 	int numa_preferred_nid;
-	int numa_migrate_deferred;
 	unsigned long numa_migrate_retry;
 	u64 node_stamp;			/* migration stamp */
+	u64 last_task_numa_placement;
+	u64 last_sum_exec_runtime;
 	struct callback_head numa_work;
 
 	struct list_head numa_entry;
@@ -1483,15 +1494,22 @@ struct task_struct {
 	 * Scheduling placement decisions are made based on the these counts.
 	 * The values remain static for the duration of a PTE scan
 	 */
-	unsigned long *numa_faults;
+	unsigned long *numa_faults_memory;
 	unsigned long total_numa_faults;
 
 	/*
 	 * numa_faults_buffer records faults per node during the current
-	 * scan window. When the scan completes, the counts in numa_faults
-	 * decay and these values are copied.
+	 * scan window. When the scan completes, the counts in
+	 * numa_faults_memory decay and these values are copied.
 	 */
-	unsigned long *numa_faults_buffer;
+	unsigned long *numa_faults_buffer_memory;
+
+	/*
+	 * Track the nodes the process was running on when a NUMA hinting
+	 * fault was incurred.
+	 */
+	unsigned long *numa_faults_cpu;
+	unsigned long *numa_faults_buffer_cpu;
 
 	/*
 	 * numa_faults_locality tracks if faults recorded during the last
@@ -1596,8 +1614,8 @@ extern void task_numa_fault(int last_node, int node, int pages, int flags);
 extern pid_t task_numa_group_id(struct task_struct *p);
 extern void set_numabalancing_state(bool enabled);
 extern void task_numa_free(struct task_struct *p);
-
-extern unsigned int sysctl_numa_balancing_migrate_deferred;
+extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
+				       int src_nid, int dst_cpu);
 #else
 static inline void task_numa_fault(int last_node, int node, int pages,
 				   int flags)
@@ -1613,6 +1631,11 @@ static inline void set_numabalancing_state(bool enabled)
 static inline void task_numa_free(struct task_struct *p)
 {
 }
+static inline bool should_numa_migrate_memory(struct task_struct *p,
+				struct page *page, int src_nid, int dst_cpu)
+{
+	return true;
+}
 #endif
 
 static inline struct pid *task_pid(struct task_struct *task)
@@ -2080,7 +2103,16 @@ static inline void sched_autogroup_exit(struct signal_struct *sig) { }
 extern bool yield_to(struct task_struct *p, bool preempt);
 extern void set_user_nice(struct task_struct *p, long nice);
 extern int task_prio(const struct task_struct *p);
-extern int task_nice(const struct task_struct *p);
+/**
+ * task_nice - return the nice value of a given task.
+ * @p: the task in question.
+ *
+ * Return: The nice value [ -20 ... 0 ... 19 ].
+ */
+static inline int task_nice(const struct task_struct *p)
+{
+	return PRIO_TO_NICE((p)->static_prio);
+}
 extern int can_nice(const struct task_struct *p, const int nice);
 extern int task_curr(const struct task_struct *p);
 extern int idle_cpu(int cpu);