Diffstat (limited to 'include/linux/sched.h')
 -rw-r--r--  include/linux/sched.h | 160
 1 file changed, 112 insertions(+), 48 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 781abd137673..496770a96487 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -315,7 +315,6 @@ extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
 				    void __user *buffer,
 				    size_t *lenp, loff_t *ppos);
 extern unsigned int softlockup_panic;
-extern int softlockup_thresh;
 void lockup_detector_init(void);
 #else
 static inline void touch_softlockup_watchdog(void)
@@ -360,7 +359,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
 
 struct nsproxy;
 struct user_namespace;
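The owner argument switches from thread_info to task_struct, which pairs with the new task_struct::on_cpu field added further down in this patch. A minimal sketch of the spin this enables (illustrative only, assuming mutex::owner is likewise switched to a task_struct pointer elsewhere in this series):

static int mutex_spin_on_owner_sketch(struct mutex *lock,
				      struct task_struct *owner)
{
	rcu_read_lock();	/* keep the owner's task_struct from vanishing */
	while (lock->owner == owner) {
		if (!owner->on_cpu || need_resched())
			break;	/* owner went to sleep, or we must yield */
		cpu_relax();
	}
	rcu_read_unlock();

	return lock->owner == NULL;	/* worth another fastpath attempt? */
}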
@@ -514,6 +513,7 @@ struct thread_group_cputimer {
 	spinlock_t lock;
 };
 
+#include <linux/rwsem.h>
 struct autogroup;
 
 /*
@@ -633,6 +633,16 @@ struct signal_struct {
 	unsigned audit_tty;
 	struct tty_audit_buf *tty_audit_buf;
 #endif
+#ifdef CONFIG_CGROUPS
+	/*
+	 * The threadgroup_fork_lock prevents threads from forking with
+	 * CLONE_THREAD while held for writing. Use this for fork-sensitive
+	 * threadgroup-wide operations. It's taken for reading in fork.c in
+	 * copy_process().
+	 * Currently only needed write-side by cgroups.
+	 */
+	struct rw_semaphore threadgroup_fork_lock;
+#endif
 
 	int oom_adj;		/* OOM kill score adjustment (bit shift) */
 	int oom_score_adj;	/* OOM kill score adjustment */
@@ -653,9 +663,8 @@ struct signal_struct {
  * Bits in flags field of signal_struct.
  */
 #define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
-#define SIGNAL_STOP_DEQUEUED	0x00000002 /* stop signal dequeued */
-#define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
-#define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */
+#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
+#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
 /*
  * Pending notifications to parent.
  */
@@ -731,10 +740,6 @@ struct sched_info {
 	/* timestamps */
 	unsigned long long last_arrival,/* when we last ran on a cpu */
 			   last_queued;	/* when we were last queued to run */
-#ifdef CONFIG_SCHEDSTATS
-	/* BKL stats */
-	unsigned int bkl_count;
-#endif
 };
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 
@@ -792,17 +797,39 @@ enum cpu_idle_type {
 };
 
 /*
- * sched-domains (multiprocessor balancing) declarations:
+ * Increase resolution of nice-level calculations for 64-bit architectures.
+ * The extra resolution improves shares distribution and load balancing of
+ * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
+ * hierarchies, especially on larger systems. This is not a user-visible change
+ * and does not change the user-interface for setting shares/weights.
+ *
+ * We increase resolution only if we have enough bits to allow this increased
+ * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
+ * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
+ * increased costs.
  */
+#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
+# define SCHED_LOAD_RESOLUTION	10
+# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
+# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
+#else
+# define SCHED_LOAD_RESOLUTION	0
+# define scale_load(w)		(w)
+# define scale_load_down(w)	(w)
+#endif
 
-/*
- * Increase resolution of nice-level calculations:
- */
-#define SCHED_LOAD_SHIFT	10
+#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
 #define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
 
-#define SCHED_LOAD_SCALE_FUZZ	SCHED_LOAD_SCALE
+/*
+ * Increase resolution of cpu_power calculations
+ */
+#define SCHED_POWER_SHIFT	10
+#define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)
 
+/*
+ * sched-domains (multiprocessor balancing) declarations:
+ */
 #ifdef CONFIG_SMP
 #define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
 #define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
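A quick illustration of what the scale_load()/scale_load_down() pair above is for (the function below is ours, not part of the patch): weights enter the fixed-point load arithmetic scaled up by SCHED_LOAD_RESOLUTION bits and are scaled back down before anything user-visible is derived from them.

/*
 * Illustrative only: with SCHED_LOAD_RESOLUTION == 10, a nice-0 weight of
 * 1024 becomes 1048576 internally, giving low-weight groups 10 extra
 * fractional bits during shares/load-balance math on 64-bit builds.
 */
static unsigned long scaled_half_weight(unsigned long weight)
{
	unsigned long w = scale_load(weight);	/* up into fixed-point space */

	w /= 2;					/* intermediate math keeps precision */
	return scale_load_down(w);		/* back to user-visible units */
}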
@@ -868,6 +895,7 @@ static inline int sd_power_saving_flags(void)
 
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
+	atomic_t ref;
 
 	/*
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
@@ -882,9 +910,6 @@ struct sched_group {
 	 * NOTE: this field is variable length. (Allocated dynamically
 	 * by attaching extra space to the end of the structure,
 	 * depending on how many CPUs the kernel has booted up with)
-	 *
-	 * It is also be embedded into static data structures at build
-	 * time. (See 'struct static_sched_group' in kernel/sched.c)
 	 */
 	unsigned long cpumask[0];
 };
@@ -894,17 +919,6 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
 	return to_cpumask(sg->cpumask);
 }
 
-enum sched_domain_level {
-	SD_LV_NONE = 0,
-	SD_LV_SIBLING,
-	SD_LV_MC,
-	SD_LV_BOOK,
-	SD_LV_CPU,
-	SD_LV_NODE,
-	SD_LV_ALLNODES,
-	SD_LV_MAX
-};
-
 struct sched_domain_attr {
 	int relax_domain_level;
 };
@@ -913,6 +927,8 @@ struct sched_domain_attr {
 	.relax_domain_level = -1,			\
 }
 
+extern int sched_domain_level_max;
+
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent;	/* top domain must be null terminated */
@@ -930,7 +946,7 @@ struct sched_domain {
 	unsigned int forkexec_idx;
 	unsigned int smt_gain;
 	int flags;			/* See SD_* */
-	enum sched_domain_level level;
+	int level;
 
 	/* Runtime fields. */
 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
@@ -973,6 +989,10 @@ struct sched_domain {
 #ifdef CONFIG_SCHED_DEBUG
 	char *name;
 #endif
+	union {
+		void *private;		/* used during construction */
+		struct rcu_head rcu;	/* used during destruction */
+	};
 
 	unsigned int span_weight;
 	/*
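The union lets a domain carry a constructor-private pointer while it is being built and an rcu_head once it is being torn down. A sketch of the RCU-deferred free this enables (the helper name is an assumption, not taken from this hunk):

/*
 * Because struct sched_domain now embeds an rcu_head, a detached domain
 * can be freed only after all RCU readers walking the domain tree have
 * finished.
 */
static void free_sched_domain_sketch(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	kfree(sd);
}

/* at detach time: call_rcu(&sd->rcu, free_sched_domain_sketch); */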
@@ -981,9 +1001,6 @@ struct sched_domain {
 	 * NOTE: this field is variable length. (Allocated dynamically
 	 * by attaching extra space to the end of the structure,
 	 * depending on how many CPUs the kernel has booted up with)
-	 *
-	 * It is also be embedded into static data structures at build
-	 * time. (See 'struct static_sched_domain' in kernel/sched.c)
 	 */
 	unsigned long span[0];
 };
@@ -1046,10 +1063,15 @@ struct sched_domain;
  */
 #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
 #define WF_FORK		0x02		/* child wakeup after fork */
+#define WF_MIGRATED	0x04		/* internal use, task got migrated */
 
 #define ENQUEUE_WAKEUP		1
-#define ENQUEUE_WAKING		2
-#define ENQUEUE_HEAD		4
+#define ENQUEUE_HEAD		2
+#ifdef CONFIG_SMP
+#define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
+#else
+#define ENQUEUE_WAKING		0
+#endif
 
 #define DEQUEUE_SLEEP		1
 
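How the wakeup path is expected to combine these flags (a sketch under assumed names; the real try_to_wake_up() in kernel/sched.c carries considerably more state): ENQUEUE_WAKING is set only after the class's task_waking() hook has run, and compiles away to 0 on UP, where the hook is never called.

static void enqueue_woken_task_sketch(struct rq *rq, struct task_struct *p)
{
	int en_flags = ENQUEUE_WAKEUP;

#ifdef CONFIG_SMP
	if (p->sched_class->task_waking) {
		p->sched_class->task_waking(p);
		en_flags |= ENQUEUE_WAKING;	/* hook ran; tell the enqueue path */
	}
#endif
	activate_task(rq, p, en_flags);		/* assumed enqueue helper */
}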
@@ -1067,12 +1089,11 @@
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int  (*select_task_rq)(struct rq *rq, struct task_struct *p,
-			       int sd_flag, int flags);
+	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
-	void (*task_waking) (struct rq *this_rq, struct task_struct *task);
+	void (*task_waking) (struct task_struct *task);
 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
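Both select_task_rq() and task_waking() lose their rq argument: they are now called without the runqueue lock, so an implementation must work from the task alone. A sketch of a class hook under the new signature (illustrative, not a real scheduling class):

static int select_task_rq_sketch(struct task_struct *p, int sd_flag, int flags)
{
	int cpu = task_cpu(p);	/* no rq parameter; derive state from the task */

	if (sd_flag & SD_BALANCE_WAKE)
		cpu = smp_processor_id();	/* e.g. prefer the waking CPU */

	return cpu;
}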
@@ -1197,13 +1218,11 @@ struct task_struct {
 	unsigned int flags;	/* per process flags, defined below */
 	unsigned int ptrace;
 
-	int lock_depth;		/* BKL lock depth */
-
 #ifdef CONFIG_SMP
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
-	int oncpu;
-#endif
+	struct task_struct *wake_entry;
+	int on_cpu;
 #endif
+	int on_rq;
 
 	int prio, static_prio, normal_prio;
 	unsigned int rt_priority;
@@ -1264,6 +1283,7 @@ struct task_struct {
 	int exit_state;
 	int exit_code, exit_signal;
 	int pdeath_signal;	/* The signal sent when the parent dies */
+	unsigned int group_stop;	/* GROUP_STOP_*, siglock protected */
 	/* ??? */
 	unsigned int personality;
 	unsigned did_exec:1;
@@ -1274,6 +1294,7 @@ struct task_struct {
 
 	/* Revert to default priority/policy when forking */
 	unsigned sched_reset_on_fork:1;
+	unsigned sched_contributes_to_load:1;
 
 	pid_t pid;
 	pid_t tgid;
@@ -1526,7 +1547,7 @@ struct task_struct {
 #ifdef CONFIG_TRACING
 	/* state flags for use by tracers */
 	unsigned long trace;
-	/* bitmask of trace recursion */
+	/* bitmask and counter of trace recursion */
 	unsigned long trace_recursion;
 #endif /* CONFIG_TRACING */
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
@@ -1744,7 +1765,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
 #define PF_KSWAPD	0x00040000	/* I am kswapd */
-#define PF_OOM_ORIGIN	0x00080000	/* Allocating much memory to others */
 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
 #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
 #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
@@ -1783,6 +1803,17 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
 #define used_math() tsk_used_math(current)
 
+/*
+ * task->group_stop flags
+ */
+#define GROUP_STOP_SIGMASK	0xffff    /* signr of the last group stop */
+#define GROUP_STOP_PENDING	(1 << 16) /* task should stop for group stop */
+#define GROUP_STOP_CONSUME	(1 << 17) /* consume group stop count */
+#define GROUP_STOP_TRAPPING	(1 << 18) /* switching from STOPPED to TRACED */
+#define GROUP_STOP_DEQUEUED	(1 << 19) /* stop signal dequeued */
+
+extern void task_clear_group_stop_pending(struct task_struct *task);
+
 #ifdef CONFIG_PREEMPT_RCU
 
 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
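The layout packs the stop signal number into the low 16 bits and state into the high bits, so both can be read from one word. A sketch of a reader (ours, not from the patch; per the field comment it assumes siglock is held):

static bool task_should_group_stop_sketch(struct task_struct *t)
{
	int stop_signr = t->group_stop & GROUP_STOP_SIGMASK;

	return (t->group_stop & GROUP_STOP_PENDING) && valid_signal(stop_signr);
}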
@@ -1811,9 +1842,16 @@ static inline void rcu_copy_process(struct task_struct *p)
 #endif
 
 #ifdef CONFIG_SMP
+extern void do_set_cpus_allowed(struct task_struct *p,
+			       const struct cpumask *new_mask);
+
 extern int set_cpus_allowed_ptr(struct task_struct *p,
 				const struct cpumask *new_mask);
 #else
+static inline void do_set_cpus_allowed(struct task_struct *p,
+				      const struct cpumask *new_mask)
+{
+}
 static inline int set_cpus_allowed_ptr(struct task_struct *p,
 				       const struct cpumask *new_mask)
 {
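do_set_cpus_allowed() is the bare mask update, while set_cpus_allowed_ptr() remains the full-service variant that may migrate an already-running task. A usage sketch (assumed caller, not from the patch):

/*
 * Pin a freshly created kernel thread to one CPU before it first runs;
 * since the task is not running yet, the bare update is sufficient.
 */
static void pin_kthread_to_cpu_sketch(struct task_struct *t, int cpu)
{
	do_set_cpus_allowed(t, cpumask_of(cpu));
}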
@@ -2063,14 +2101,13 @@ extern void xtime_update(unsigned long ticks);
 
 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
 extern int wake_up_process(struct task_struct *tsk);
-extern void wake_up_new_task(struct task_struct *tsk,
-				unsigned long clone_flags);
+extern void wake_up_new_task(struct task_struct *tsk);
 #ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
 #else
 static inline void kick_process(struct task_struct *tsk) { }
 #endif
-extern void sched_fork(struct task_struct *p, int clone_flags);
+extern void sched_fork(struct task_struct *p);
 extern void sched_dead(struct task_struct *p);
 
 extern void proc_caches_init(void);
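Both fork-side hooks drop clone_flags. A call-order sketch (an assumed wrapper; the real copy_process()/do_fork() do much more) showing that the scheduler now decides placement on first wakeup rather than from the clone flags:

static void sched_fork_and_run_sketch(struct task_struct *child)
{
	sched_fork(child);		/* set up scheduler state for the child */
	/* ... remainder of copy_process() ... */
	wake_up_new_task(child);	/* first wakeup; selects the child's rq */
}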
@@ -2195,8 +2232,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
+void scheduler_ipi(void);
 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
+static inline void scheduler_ipi(void) { }
 static inline unsigned long wait_task_inactive(struct task_struct *p,
 					       long match_state)
 {
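scheduler_ipi() is the hook each architecture's reschedule interrupt is expected to call. A sketch of the arch-side caller (modelled on x86; details vary per architecture): the reschedule IPI now has real work to do, draining pending remote wakeups, instead of merely forcing a trip through schedule().

void smp_reschedule_interrupt_sketch(struct pt_regs *regs)
{
	ack_APIC_irq();		/* x86-specific acknowledgement */
	scheduler_ipi();	/* process queued remote wakeups */
}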
@@ -2302,6 +2341,31 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
 }
 
+/* See the declaration of threadgroup_fork_lock in signal_struct. */
+#ifdef CONFIG_CGROUPS
+static inline void threadgroup_fork_read_lock(struct task_struct *tsk)
+{
+	down_read(&tsk->signal->threadgroup_fork_lock);
+}
+static inline void threadgroup_fork_read_unlock(struct task_struct *tsk)
+{
+	up_read(&tsk->signal->threadgroup_fork_lock);
+}
+static inline void threadgroup_fork_write_lock(struct task_struct *tsk)
+{
+	down_write(&tsk->signal->threadgroup_fork_lock);
+}
+static inline void threadgroup_fork_write_unlock(struct task_struct *tsk)
+{
+	up_write(&tsk->signal->threadgroup_fork_lock);
+}
+#else
+static inline void threadgroup_fork_read_lock(struct task_struct *tsk) {}
+static inline void threadgroup_fork_read_unlock(struct task_struct *tsk) {}
+static inline void threadgroup_fork_write_lock(struct task_struct *tsk) {}
+static inline void threadgroup_fork_write_unlock(struct task_struct *tsk) {}
+#endif
+
 #ifndef __HAVE_THREAD_FUNCTIONS
 
 #define task_thread_info(task) ((struct thread_info *)(task)->stack)
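A usage sketch for the write side (an assumed caller; per the field comment, cgroup attach is the intended user). Holding the lock for writing guarantees no CLONE_THREAD fork can race the walk, so every thread of the group is seen exactly once; real callers would additionally hold tasklist_lock or rcu_read_lock for the thread-list traversal itself.

static void for_each_thread_stable_sketch(struct task_struct *leader)
{
	struct task_struct *t = leader;

	threadgroup_fork_write_lock(leader);
	do {
		/* operate on t; no new threads can appear meanwhile */
	} while_each_thread(leader, t);
	threadgroup_fork_write_unlock(leader);
}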