Diffstat (limited to 'include/linux/sched.h')
 -rw-r--r--   include/linux/sched.h | 76
 1 file changed, 22 insertions(+), 54 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 81a173c0897d..f45c0b280b5d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -90,6 +90,7 @@ struct sched_param {
 #include <linux/latencytop.h>
 #include <linux/cred.h>
 #include <linux/llist.h>
+#include <linux/uidgid.h>
 
 #include <asm/processor.h>
 
@@ -728,8 +729,7 @@ struct user_struct {
 
 	/* Hash table maintenance information */
 	struct hlist_node uidhash_node;
-	uid_t uid;
-	struct user_namespace *user_ns;
+	kuid_t uid;
 
 #ifdef CONFIG_PERF_EVENTS
 	atomic_long_t locked_vm;
@@ -738,7 +738,7 @@ struct user_struct {
 
 extern int uids_sysfs_init(void);
 
-extern struct user_struct *find_user(uid_t);
+extern struct user_struct *find_user(kuid_t);
 
 extern struct user_struct root_user;
 #define INIT_USER (&root_user)
@@ -855,61 +855,14 @@ enum cpu_idle_type {
 #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
 #define SD_PREFER_LOCAL		0x0040	/* Prefer to keep tasks local to this domain */
 #define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
-#define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
 #define SD_ASYM_PACKING		0x0800	/* Place busy groups earlier in the domain */
 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
 #define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
 
-enum powersavings_balance_level {
-	POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
-	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
-					 * first for long running threads
-					 */
-	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
-					 * cpu package for power savings
-					 */
-	MAX_POWERSAVINGS_BALANCE_LEVELS
-};
-
-extern int sched_mc_power_savings, sched_smt_power_savings;
-
-static inline int sd_balance_for_mc_power(void)
-{
-	if (sched_smt_power_savings)
-		return SD_POWERSAVINGS_BALANCE;
-
-	if (!sched_mc_power_savings)
-		return SD_PREFER_SIBLING;
-
-	return 0;
-}
-
-static inline int sd_balance_for_package_power(void)
-{
-	if (sched_mc_power_savings | sched_smt_power_savings)
-		return SD_POWERSAVINGS_BALANCE;
-
-	return SD_PREFER_SIBLING;
-}
-
 extern int __weak arch_sd_sibiling_asym_packing(void);
 
-/*
- * Optimise SD flags for power savings:
- * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
- * Keep default SD flags if sched_{smt,mc}_power_saving=0
- */
-
-static inline int sd_power_saving_flags(void)
-{
-	if (sched_mc_power_savings | sched_smt_power_savings)
-		return SD_BALANCE_NEWIDLE;
-
-	return 0;
-}
-
 struct sched_group_power {
 	atomic_t ref;
 	/*
@@ -1341,6 +1294,8 @@ struct task_struct {
 				 * execve */
 	unsigned in_iowait:1;
 
+	/* task may not gain privileges */
+	unsigned no_new_privs:1;
 
 	/* Revert to default priority/policy when forking */
 	unsigned sched_reset_on_fork:1;
@@ -1450,7 +1405,7 @@ struct task_struct {
 	uid_t loginuid;
 	unsigned int sessionid;
 #endif
-	seccomp_t seccomp;
+	struct seccomp seccomp;
 
 /* Thread group tracking */
 	u32 parent_exec_id;
@@ -1617,6 +1572,10 @@ struct task_struct {
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	atomic_t ptrace_bp_refcnt;
 #endif
+#ifdef CONFIG_UPROBES
+	struct uprobe_task *utask;
+	int uprobe_srcu_id;
+#endif
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -1905,12 +1864,22 @@ static inline void rcu_copy_process(struct task_struct *p)
 	INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
+static inline void rcu_switch_from(struct task_struct *prev)
+{
+	if (prev->rcu_read_lock_nesting != 0)
+		rcu_preempt_note_context_switch();
+}
+
 #else
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
 }
 
+static inline void rcu_switch_from(struct task_struct *prev)
+{
+}
+
 #endif
 
 #ifdef CONFIG_SMP
@@ -1950,7 +1919,7 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
  */
 extern unsigned long long notrace sched_clock(void);
 /*
- * See the comment in kernel/sched_clock.c
+ * See the comment in kernel/sched/clock.c
  */
 extern u64 cpu_clock(int cpu);
 extern u64 local_clock(void);
@@ -2177,14 +2146,13 @@ extern struct task_struct *find_task_by_pid_ns(pid_t nr,
 extern void __set_special_pids(struct pid *pid);
 
 /* per-UID process charging. */
-extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
+extern struct user_struct * alloc_uid(kuid_t);
 static inline struct user_struct *get_uid(struct user_struct *u)
 {
 	atomic_inc(&u->__count);
 	return u;
 }
 extern void free_uid(struct user_struct *);
-extern void release_uids(struct user_namespace *ns);
 
 #include <asm/current.h>
 