Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h | 296
1 file changed, 76 insertions, 220 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e692a022527b..178a8d909f14 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -127,18 +127,6 @@ extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
 extern void proc_sched_set_task(struct task_struct *p);
 extern void
 print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
-#else
-static inline void
-proc_sched_show_task(struct task_struct *p, struct seq_file *m)
-{
-}
-static inline void proc_sched_set_task(struct task_struct *p)
-{
-}
-static inline void
-print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
-{
-}
 #endif
 
 /*
@@ -243,7 +231,7 @@ extern void init_idle_bootup_task(struct task_struct *idle);
 
 extern int runqueue_is_locked(int cpu);
 
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 extern void nohz_balance_enter_idle(int cpu);
 extern void set_cpu_sd_state_idle(void);
 extern int get_nohz_timer_target(void);
@@ -321,13 +309,10 @@ extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
 extern void schedule_preempt_disabled(void);
-extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
 
 struct nsproxy;
 struct user_namespace;
 
-#include <linux/aio.h>
-
 #ifdef CONFIG_MMU
 extern void arch_pick_mmap_layout(struct mm_struct *mm);
 extern unsigned long
@@ -527,7 +512,8 @@ struct signal_struct {
 	unsigned int has_child_subreaper:1;
 
 	/* POSIX.1b Interval Timers */
-	struct list_head posix_timers;
+	int posix_timer_id;
+	struct list_head posix_timers;
 
 	/* ITIMER_REAL timer for the process */
 	struct hrtimer real_timer;
@@ -571,7 +557,7 @@ struct signal_struct {
 	cputime_t utime, stime, cutime, cstime;
 	cputime_t gtime;
 	cputime_t cgtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	struct cputime prev_cputime;
 #endif
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -607,6 +593,7 @@ struct signal_struct {
 #endif
 #ifdef CONFIG_AUDIT
 	unsigned audit_tty;
+	unsigned audit_tty_log_passwd;
 	struct tty_audit_buf *tty_audit_buf;
 #endif
 #ifdef CONFIG_CGROUPS
@@ -638,6 +625,7 @@ struct signal_struct {
 #define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
 #define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
 #define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
+#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
 /*
  * Pending notifications to parent.
  */
@@ -769,31 +757,6 @@ enum cpu_idle_type {
 };
 
 /*
- * Increase resolution of nice-level calculations for 64-bit architectures.
- * The extra resolution improves shares distribution and load balancing of
- * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
- * hierarchies, especially on larger systems. This is not a user-visible change
- * and does not change the user-interface for setting shares/weights.
- *
- * We increase resolution only if we have enough bits to allow this increased
- * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
- * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
- * increased costs.
- */
-#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
-# define SCHED_LOAD_RESOLUTION	10
-# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
-# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
-#else
-# define SCHED_LOAD_RESOLUTION	0
-# define scale_load(w)		(w)
-# define scale_load_down(w)	(w)
-#endif
-
-#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
-#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
-
-/*
  * Increase resolution of cpu_power calculations
  */
 #define SCHED_POWER_SHIFT	10
@@ -818,62 +781,6 @@ enum cpu_idle_type {
 
 extern int __weak arch_sd_sibiling_asym_packing(void);
 
-struct sched_group_power {
-	atomic_t ref;
-	/*
-	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
-	 * single CPU.
-	 */
-	unsigned int power, power_orig;
-	unsigned long next_update;
-	/*
-	 * Number of busy cpus in this group.
-	 */
-	atomic_t nr_busy_cpus;
-
-	unsigned long cpumask[0]; /* iteration mask */
-};
-
-struct sched_group {
-	struct sched_group *next;	/* Must be a circular list */
-	atomic_t ref;
-
-	unsigned int group_weight;
-	struct sched_group_power *sgp;
-
-	/*
-	 * The CPUs this group covers.
-	 *
-	 * NOTE: this field is variable length. (Allocated dynamically
-	 * by attaching extra space to the end of the structure,
-	 * depending on how many CPUs the kernel has booted up with)
-	 */
-	unsigned long cpumask[0];
-};
-
-static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
-{
-	return to_cpumask(sg->cpumask);
-}
-
-/*
- * cpumask masking which cpus in the group are allowed to iterate up the domain
- * tree.
- */
-static inline struct cpumask *sched_group_mask(struct sched_group *sg)
-{
-	return to_cpumask(sg->sgp->cpumask);
-}
-
-/**
- * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
- * @group: The group whose first cpu is to be returned.
- */
-static inline unsigned int group_first_cpu(struct sched_group *group)
-{
-	return cpumask_first(sched_group_cpus(group));
-}
-
 struct sched_domain_attr {
 	int relax_domain_level;
 };
@@ -884,6 +791,8 @@ struct sched_domain_attr {
 
 extern int sched_domain_level_max;
 
+struct sched_group;
+
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent;	/* top domain must be null terminated */
@@ -900,6 +809,8 @@ struct sched_domain {
 	unsigned int wake_idx;
 	unsigned int forkexec_idx;
 	unsigned int smt_gain;
+
+	int nohz_idle;			/* NOHZ IDLE status */
 	int flags;			/* See SD_* */
 	int level;
 
@@ -972,18 +883,6 @@ extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
 
-/* Test a flag in parent sched domain */
-static inline int test_sd_parent(struct sched_domain *sd, int flag)
-{
-	if (sd->parent && (sd->parent->flags & flag))
-		return 1;
-
-	return 0;
-}
-
-unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
-unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
-
 bool cpus_share_cache(int this_cpu, int that_cpu);
 
 #else /* CONFIG_SMP */
@@ -1018,72 +917,6 @@ struct mempolicy;
 struct pipe_inode_info;
 struct uts_namespace;
 
-struct rq;
-struct sched_domain;
-
-/*
- * wake flags
- */
-#define WF_SYNC		0x01		/* waker goes to sleep after wakup */
-#define WF_FORK		0x02		/* child wakeup after fork */
-#define WF_MIGRATED	0x04		/* internal use, task got migrated */
-
-#define ENQUEUE_WAKEUP		1
-#define ENQUEUE_HEAD		2
-#ifdef CONFIG_SMP
-#define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
-#else
-#define ENQUEUE_WAKING		0
-#endif
-
-#define DEQUEUE_SLEEP		1
-
-struct sched_class {
-	const struct sched_class *next;
-
-	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
-	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
-	void (*yield_task) (struct rq *rq);
-	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
-
-	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
-
-	struct task_struct * (*pick_next_task) (struct rq *rq);
-	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
-
-#ifdef CONFIG_SMP
-	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
-	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
-
-	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
-	void (*post_schedule) (struct rq *this_rq);
-	void (*task_waking) (struct task_struct *task);
-	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
-
-	void (*set_cpus_allowed)(struct task_struct *p,
-				 const struct cpumask *newmask);
-
-	void (*rq_online)(struct rq *rq);
-	void (*rq_offline)(struct rq *rq);
-#endif
-
-	void (*set_curr_task) (struct rq *rq);
-	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
-	void (*task_fork) (struct task_struct *p);
-
-	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
-	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
-	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
-			     int oldprio);
-
-	unsigned int (*get_rr_interval) (struct rq *rq,
-					 struct task_struct *task);
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	void (*task_move_group) (struct task_struct *p, int on_rq);
-#endif
-};
-
 struct load_weight {
 	unsigned long weight, inv_weight;
 };
@@ -1275,8 +1108,10 @@ struct task_struct {
 	int exit_code, exit_signal;
 	int pdeath_signal;  /*  The signal sent when the parent dies  */
 	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
-	/* ??? */
+
+	/* Used for emulating ABI behavior of previous Linux versions */
 	unsigned int personality;
+
 	unsigned did_exec:1;
 	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
 				 * execve */
@@ -1328,7 +1163,7 @@ struct task_struct {
 
 	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	struct cputime prev_cputime;
 #endif
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
@@ -1577,6 +1412,10 @@ struct task_struct {
 #ifdef CONFIG_UPROBES
 	struct uprobe_task *utask;
 #endif
+#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
+	unsigned int	sequential_io;
+	unsigned int	sequential_io_avg;
+#endif
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -1794,7 +1633,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
 #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
 #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
-#define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
+#define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
 #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
@@ -1928,13 +1767,13 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
 }
 #endif
 
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 void calc_load_enter_idle(void);
 void calc_load_exit_idle(void);
 #else
 static inline void calc_load_enter_idle(void) { }
 static inline void calc_load_exit_idle(void) { }
-#endif /* CONFIG_NO_HZ */
+#endif /* CONFIG_NO_HZ_COMMON */
 
 #ifndef CONFIG_CPUMASK_OFFSTACK
 static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
@@ -2020,10 +1859,17 @@ extern void idle_task_exit(void);
 static inline void idle_task_exit(void) {}
 #endif
 
-#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
-extern void wake_up_idle_cpu(int cpu);
+#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
+extern void wake_up_nohz_cpu(int cpu);
 #else
-static inline void wake_up_idle_cpu(int cpu) { }
+static inline void wake_up_nohz_cpu(int cpu) { }
+#endif
+
+#ifdef CONFIG_NO_HZ_FULL
+extern bool sched_can_stop_tick(void);
+extern u64 scheduler_tick_max_deferment(void);
+#else
+static inline bool sched_can_stop_tick(void) { return false; }
 #endif
 
 #ifdef CONFIG_SCHED_AUTOGROUP
@@ -2413,27 +2259,18 @@ static inline void threadgroup_change_end(struct task_struct *tsk)
  *
  * Lock the threadgroup @tsk belongs to. No new task is allowed to enter
  * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
- * perform exec. This is useful for cases where the threadgroup needs to
- * stay stable across blockable operations.
+ * change ->group_leader/pid. This is useful for cases where the threadgroup
+ * needs to stay stable across blockable operations.
  *
  * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
  * synchronization. While held, no new task will be added to threadgroup
  * and no existing live task will have its PF_EXITING set.
  *
- * During exec, a task goes and puts its thread group through unusual
- * changes. After de-threading, exclusive access is assumed to resources
- * which are usually shared by tasks in the same group - e.g. sighand may
- * be replaced with a new one. Also, the exec'ing task takes over group
- * leader role including its pid. Exclude these changes while locked by
- * grabbing cred_guard_mutex which is used to synchronize exec path.
+ * de_thread() does threadgroup_change_{begin|end}() when a non-leader
+ * sub-thread becomes a new leader.
  */
 static inline void threadgroup_lock(struct task_struct *tsk)
 {
-	/*
-	 * exec uses exit for de-threading nesting group_rwsem inside
-	 * cred_guard_mutex. Grab cred_guard_mutex first.
-	 */
-	mutex_lock(&tsk->signal->cred_guard_mutex);
 	down_write(&tsk->signal->group_rwsem);
 }
 
@@ -2446,7 +2283,6 @@ static inline void threadgroup_lock(struct task_struct *tsk)
 static inline void threadgroup_unlock(struct task_struct *tsk)
 {
 	up_write(&tsk->signal->group_rwsem);
-	mutex_unlock(&tsk->signal->cred_guard_mutex);
 }
 #else
 static inline void threadgroup_change_begin(struct task_struct *tsk) {}
@@ -2623,6 +2459,47 @@ static inline int spin_needbreak(spinlock_t *lock)
 }
 
 /*
+ * Idle thread specific functions to determine the need_resched
+ * polling state. We have two versions, one based on TS_POLLING in
+ * thread_info.status and one based on TIF_POLLING_NRFLAG in
+ * thread_info.flags
+ */
+#ifdef TS_POLLING
+static inline int tsk_is_polling(struct task_struct *p)
+{
+	return task_thread_info(p)->status & TS_POLLING;
+}
+static inline void current_set_polling(void)
+{
+	current_thread_info()->status |= TS_POLLING;
+}
+
+static inline void current_clr_polling(void)
+{
+	current_thread_info()->status &= ~TS_POLLING;
+	smp_mb__after_clear_bit();
+}
+#elif defined(TIF_POLLING_NRFLAG)
+static inline int tsk_is_polling(struct task_struct *p)
+{
+	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
+}
+static inline void current_set_polling(void)
+{
+	set_thread_flag(TIF_POLLING_NRFLAG);
+}
+
+static inline void current_clr_polling(void)
+{
+	clear_thread_flag(TIF_POLLING_NRFLAG);
+}
+#else
+static inline int tsk_is_polling(struct task_struct *p) { return 0; }
+static inline void current_set_polling(void) { }
+static inline void current_clr_polling(void) { }
+#endif
+
+/*
  * Thread group CPU time accounting.
  */
 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
@@ -2682,28 +2559,7 @@ extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
 #ifdef CONFIG_CGROUP_SCHED
-
 extern struct task_group root_task_group;
-
-extern struct task_group *sched_create_group(struct task_group *parent);
-extern void sched_online_group(struct task_group *tg,
-			       struct task_group *parent);
-extern void sched_destroy_group(struct task_group *tg);
-extern void sched_offline_group(struct task_group *tg);
-extern void sched_move_task(struct task_struct *tsk);
-#ifdef CONFIG_FAIR_GROUP_SCHED
-extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
-extern unsigned long sched_group_shares(struct task_group *tg);
-#endif
-#ifdef CONFIG_RT_GROUP_SCHED
-extern int sched_group_set_rt_runtime(struct task_group *tg,
-				      long rt_runtime_us);
-extern long sched_group_rt_runtime(struct task_group *tg);
-extern int sched_group_set_rt_period(struct task_group *tg,
-				       long rt_period_us);
-extern long sched_group_rt_period(struct task_group *tg);
-extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
-#endif
 #endif /* CONFIG_CGROUP_SCHED */
 
 extern int task_can_switch_user(struct user_struct *up,