Diffstat (limited to 'include/linux/sched.h')

-rw-r--r--	include/linux/sched.h	90
1 file changed, 26 insertions(+), 64 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 81a173c0897d..660c8ae93471 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -90,6 +90,7 @@ struct sched_param {
 #include <linux/latencytop.h>
 #include <linux/cred.h>
 #include <linux/llist.h>
+#include <linux/uidgid.h>
 
 #include <asm/processor.h>
 
@@ -728,8 +729,7 @@ struct user_struct {
 
 	/* Hash table maintenance information */
 	struct hlist_node uidhash_node;
-	uid_t uid;
-	struct user_namespace *user_ns;
+	kuid_t uid;
 
 #ifdef CONFIG_PERF_EVENTS
 	atomic_long_t locked_vm;
@@ -738,7 +738,7 @@ struct user_struct {
 
 extern int uids_sysfs_init(void);
 
-extern struct user_struct *find_user(uid_t);
+extern struct user_struct *find_user(kuid_t);
 
 extern struct user_struct root_user;
 #define INIT_USER (&root_user)
@@ -855,61 +855,14 @@ enum cpu_idle_type {
 #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
 #define SD_PREFER_LOCAL		0x0040	/* Prefer to keep tasks local to this domain */
 #define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
-#define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
 #define SD_ASYM_PACKING		0x0800	/* Place busy groups earlier in the domain */
 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
 #define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
 
-enum powersavings_balance_level {
-	POWERSAVINGS_BALANCE_NONE = 0,	/* No power saving load balance */
-	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
-					 * first for long running threads
-					 */
-	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
-					 * cpu package for power savings
-					 */
-	MAX_POWERSAVINGS_BALANCE_LEVELS
-};
-
-extern int sched_mc_power_savings, sched_smt_power_savings;
-
-static inline int sd_balance_for_mc_power(void)
-{
-	if (sched_smt_power_savings)
-		return SD_POWERSAVINGS_BALANCE;
-
-	if (!sched_mc_power_savings)
-		return SD_PREFER_SIBLING;
-
-	return 0;
-}
-
-static inline int sd_balance_for_package_power(void)
-{
-	if (sched_mc_power_savings | sched_smt_power_savings)
-		return SD_POWERSAVINGS_BALANCE;
-
-	return SD_PREFER_SIBLING;
-}
-
 extern int __weak arch_sd_sibiling_asym_packing(void);
 
-/*
- * Optimise SD flags for power savings:
- * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
- * Keep default SD flags if sched_{smt,mc}_power_saving=0
- */
-
-static inline int sd_power_saving_flags(void)
-{
-	if (sched_mc_power_savings | sched_smt_power_savings)
-		return SD_BALANCE_NEWIDLE;
-
-	return 0;
-}
-
 struct sched_group_power {
 	atomic_t ref;
 	/*
@@ -1341,16 +1294,13 @@ struct task_struct {
 					 * execve */
 	unsigned in_iowait:1;
 
+	/* task may not gain privileges */
+	unsigned no_new_privs:1;
 
 	/* Revert to default priority/policy when forking */
 	unsigned sched_reset_on_fork:1;
 	unsigned sched_contributes_to_load:1;
 
-#ifdef CONFIG_GENERIC_HARDIRQS
-	/* IRQ handler threads */
-	unsigned irq_thread:1;
-#endif
-
 	pid_t pid;
 	pid_t tgid;
 
@@ -1358,10 +1308,9 @@ struct task_struct {
 	/* Canary value for the -fstack-protector gcc feature */
 	unsigned long stack_canary;
 #endif
-
 	/*
 	 * pointers to (original) parent process, youngest child, younger sibling,
 	 * older sibling, respectively.  (p->father can be replaced with
 	 * p->real_parent->pid)
 	 */
 	struct task_struct __rcu *real_parent; /* real parent process */
@@ -1408,8 +1357,6 @@ struct task_struct {
 					 * credentials (COW) */
 	const struct cred __rcu *cred;	/* effective (overridable) subjective task
 					 * credentials (COW) */
-	struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
-
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
 				       it with task_lock())
@@ -1445,12 +1392,14 @@ struct task_struct {
 	int (*notifier)(void *priv);
 	void *notifier_data;
 	sigset_t *notifier_mask;
+	struct hlist_head task_works;
+
 	struct audit_context *audit_context;
 #ifdef CONFIG_AUDITSYSCALL
 	uid_t loginuid;
 	unsigned int sessionid;
 #endif
-	seccomp_t seccomp;
+	struct seccomp seccomp;
 
 /* Thread group tracking */
 	u32 parent_exec_id;
@@ -1617,6 +1566,10 @@ struct task_struct {
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	atomic_t ptrace_bp_refcnt;
 #endif
+#ifdef CONFIG_UPROBES
+	struct uprobe_task *utask;
+	int uprobe_srcu_id;
+#endif
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -1905,12 +1858,22 @@ static inline void rcu_copy_process(struct task_struct *p)
 	INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
+static inline void rcu_switch_from(struct task_struct *prev)
+{
+	if (prev->rcu_read_lock_nesting != 0)
+		rcu_preempt_note_context_switch();
+}
+
 #else
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
 }
 
+static inline void rcu_switch_from(struct task_struct *prev)
+{
+}
+
 #endif
 
 #ifdef CONFIG_SMP
@@ -1950,7 +1913,7 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
  */
 extern unsigned long long notrace sched_clock(void);
 /*
- * See the comment in kernel/sched_clock.c
+ * See the comment in kernel/sched/clock.c
  */
 extern u64 cpu_clock(int cpu);
 extern u64 local_clock(void);
@@ -2177,14 +2140,13 @@ extern struct task_struct *find_task_by_pid_ns(pid_t nr,
 extern void __set_special_pids(struct pid *pid);
 
 /* per-UID process charging. */
-extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
+extern struct user_struct * alloc_uid(kuid_t);
 static inline struct user_struct *get_uid(struct user_struct *u)
 {
 	atomic_inc(&u->__count);
 	return u;
 }
 extern void free_uid(struct user_struct *);
-extern void release_uids(struct user_namespace *ns);
 
 #include <asm/current.h>
 