path: root/include/linux/sched.h
author    Andrea Bastoni <bastoni@cs.unc.edu>    2011-08-27 09:43:54 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>    2011-08-27 10:06:11 -0400
commit    7b1bb388bc879ffcc6c69b567816d5c354afe42b (patch)
tree      5a217fdfb0b5e5a327bdcd624506337c1ae1fe32 /include/linux/sched.h
parent    7d754596756240fa918b94cd0c3011c77a638987 (diff)
parent    02f8c6aee8df3cdc935e9bdd4f2d020306035dbe (diff)
Merge 'Linux v3.0' into Litmus
Some notes:

 * Litmus^RT scheduling class is the topmost scheduling class (above
   stop_sched_class).
 * scheduler_ipi() function (e.g., in smp_reschedule_interrupt()) may
   increase IPI latencies.
 * Added path into schedule() to quickly re-evaluate scheduling decision
   without becoming preemptive again. This used to be a standard path
   before the removal of BKL.

Conflicts:
	Makefile
	arch/arm/kernel/calls.S
	arch/arm/kernel/smp.c
	arch/x86/include/asm/unistd_32.h
	arch/x86/kernel/smp.c
	arch/x86/kernel/syscall_table_32.S
	include/linux/hrtimer.h
	kernel/printk.c
	kernel/sched.c
	kernel/sched_fair.c
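To illustrate the first note: in the v3.0 core scheduler, scheduling classes form a singly linked, priority-ordered chain that pick_next_task() walks until a class returns a runnable task. The sketch below paraphrases that loop from kernel/sched.c; the litmus_sched_class name and its placement ahead of stop_sched_class are assumptions taken from the notes above, not something visible in this header diff.

/*
 * Minimal sketch of the class walk in kernel/sched.c (Linux v3.0 era).
 * Per the merge notes, Litmus^RT links its class in ahead of
 * stop_sched_class, so it is consulted first on every pick.
 */
#define sched_class_highest	(&litmus_sched_class)	/* assumed; vanilla v3.0 uses &stop_sched_class */
#define for_each_class(class) \
	for (class = sched_class_highest; class; class = class->next)

static inline struct task_struct *pick_next_task(struct rq *rq)
{
	const struct sched_class *class;
	struct task_struct *p;

	/* chain: litmus -> stop -> rt -> fair -> idle (litmus name assumed) */
	for_each_class(class) {
		p = class->pick_next_task(rq);
		if (p)
			return p;
	}

	BUG();	/* the idle class always has a runnable task */
}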
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h | 353
1 file changed, 251 insertions(+), 102 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c9ac4fc837ba..9c990d13ae35 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -21,7 +21,8 @@
21#define CLONE_DETACHED 0x00400000 /* Unused, ignored */ 21#define CLONE_DETACHED 0x00400000 /* Unused, ignored */
22#define CLONE_UNTRACED 0x00800000 /* set if the tracing process can't force CLONE_PTRACE on this clone */ 22#define CLONE_UNTRACED 0x00800000 /* set if the tracing process can't force CLONE_PTRACE on this clone */
23#define CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */ 23#define CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */
24#define CLONE_STOPPED 0x02000000 /* Start in stopped state */ 24/* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state)
25 and is now available for re-use. */
25#define CLONE_NEWUTS 0x04000000 /* New utsname group? */ 26#define CLONE_NEWUTS 0x04000000 /* New utsname group? */
26#define CLONE_NEWIPC 0x08000000 /* New ipcs */ 27#define CLONE_NEWIPC 0x08000000 /* New ipcs */
27#define CLONE_NEWUSER 0x10000000 /* New user namespace */ 28#define CLONE_NEWUSER 0x10000000 /* New user namespace */
@@ -71,7 +72,6 @@ struct sched_param {
71#include <linux/smp.h> 72#include <linux/smp.h>
72#include <linux/sem.h> 73#include <linux/sem.h>
73#include <linux/signal.h> 74#include <linux/signal.h>
74#include <linux/path.h>
75#include <linux/compiler.h> 75#include <linux/compiler.h>
76#include <linux/completion.h> 76#include <linux/completion.h>
77#include <linux/pid.h> 77#include <linux/pid.h>
@@ -89,7 +89,6 @@ struct sched_param {
89#include <linux/timer.h> 89#include <linux/timer.h>
90#include <linux/hrtimer.h> 90#include <linux/hrtimer.h>
91#include <linux/task_io_accounting.h> 91#include <linux/task_io_accounting.h>
92#include <linux/kobject.h>
93#include <linux/latencytop.h> 92#include <linux/latencytop.h>
94#include <linux/cred.h> 93#include <linux/cred.h>
95 94
@@ -104,6 +103,7 @@ struct robust_list_head;
104struct bio_list; 103struct bio_list;
105struct fs_struct; 104struct fs_struct;
106struct perf_event_context; 105struct perf_event_context;
106struct blk_plug;
107 107
108/* 108/*
109 * List of flags we want to share for kernel threads, 109 * List of flags we want to share for kernel threads,
@@ -147,7 +147,7 @@ extern unsigned long nr_iowait_cpu(int cpu);
147extern unsigned long this_cpu_load(void); 147extern unsigned long this_cpu_load(void);
148 148
149 149
150extern void calc_global_load(void); 150extern void calc_global_load(unsigned long ticks);
151 151
152extern unsigned long get_parent_ip(unsigned long addr); 152extern unsigned long get_parent_ip(unsigned long addr);
153 153
@@ -319,7 +319,7 @@ extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
319 void __user *buffer, 319 void __user *buffer,
320 size_t *lenp, loff_t *ppos); 320 size_t *lenp, loff_t *ppos);
321extern unsigned int softlockup_panic; 321extern unsigned int softlockup_panic;
322extern int softlockup_thresh; 322void lockup_detector_init(void);
323#else 323#else
324static inline void touch_softlockup_watchdog(void) 324static inline void touch_softlockup_watchdog(void)
325{ 325{
@@ -330,6 +330,9 @@ static inline void touch_softlockup_watchdog_sync(void)
330static inline void touch_all_softlockup_watchdogs(void) 330static inline void touch_all_softlockup_watchdogs(void)
331{ 331{
332} 332}
333static inline void lockup_detector_init(void)
334{
335}
333#endif 336#endif
334 337
335#ifdef CONFIG_DETECT_HUNG_TASK 338#ifdef CONFIG_DETECT_HUNG_TASK
@@ -340,6 +343,9 @@ extern unsigned long sysctl_hung_task_warnings;
340extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, 343extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
341 void __user *buffer, 344 void __user *buffer,
342 size_t *lenp, loff_t *ppos); 345 size_t *lenp, loff_t *ppos);
346#else
347/* Avoid need for ifdefs elsewhere in the code */
348enum { sysctl_hung_task_timeout_secs = 0 };
343#endif 349#endif
344 350
345/* Attach to any functions which should be ignored in wchan output. */ 351/* Attach to any functions which should be ignored in wchan output. */
@@ -357,7 +363,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
357extern signed long schedule_timeout_killable(signed long timeout); 363extern signed long schedule_timeout_killable(signed long timeout);
358extern signed long schedule_timeout_uninterruptible(signed long timeout); 364extern signed long schedule_timeout_uninterruptible(signed long timeout);
359asmlinkage void schedule(void); 365asmlinkage void schedule(void);
360extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner); 366extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
361 367
362struct nsproxy; 368struct nsproxy;
363struct user_namespace; 369struct user_namespace;
@@ -432,6 +438,7 @@ extern int get_dumpable(struct mm_struct *mm);
432#endif 438#endif
433 /* leave room for more dump flags */ 439 /* leave room for more dump flags */
434#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ 440#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
441#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */
435 442
436#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) 443#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
437 444
@@ -510,8 +517,11 @@ struct thread_group_cputimer {
510 spinlock_t lock; 517 spinlock_t lock;
511}; 518};
512 519
520#include <linux/rwsem.h>
521struct autogroup;
522
513/* 523/*
514 * NOTE! "signal_struct" does not have it's own 524 * NOTE! "signal_struct" does not have its own
515 * locking, because a shared signal_struct always 525 * locking, because a shared signal_struct always
516 * implies a shared sighand_struct, so locking 526 * implies a shared sighand_struct, so locking
517 * sighand_struct is always a proper superset of 527 * sighand_struct is always a proper superset of
@@ -577,6 +587,9 @@ struct signal_struct {
577 587
578 struct tty_struct *tty; /* NULL if no tty */ 588 struct tty_struct *tty; /* NULL if no tty */
579 589
590#ifdef CONFIG_SCHED_AUTOGROUP
591 struct autogroup *autogroup;
592#endif
580 /* 593 /*
581 * Cumulative resource counters for dead threads in the group, 594 * Cumulative resource counters for dead threads in the group,
582 * and for reaped dead child processes forked by this group. 595 * and for reaped dead child processes forked by this group.
@@ -624,9 +637,25 @@ struct signal_struct {
624 unsigned audit_tty; 637 unsigned audit_tty;
625 struct tty_audit_buf *tty_audit_buf; 638 struct tty_audit_buf *tty_audit_buf;
626#endif 639#endif
640#ifdef CONFIG_CGROUPS
641 /*
642 * The threadgroup_fork_lock prevents threads from forking with
643 * CLONE_THREAD while held for writing. Use this for fork-sensitive
644 * threadgroup-wide operations. It's taken for reading in fork.c in
645 * copy_process().
646 * Currently only needed write-side by cgroups.
647 */
648 struct rw_semaphore threadgroup_fork_lock;
649#endif
627 650
628 int oom_adj; /* OOM kill score adjustment (bit shift) */ 651 int oom_adj; /* OOM kill score adjustment (bit shift) */
629 int oom_score_adj; /* OOM kill score adjustment */ 652 int oom_score_adj; /* OOM kill score adjustment */
653 int oom_score_adj_min; /* OOM kill score adjustment minimum value.
654 * Only settable by CAP_SYS_RESOURCE. */
655
656 struct mutex cred_guard_mutex; /* guard against foreign influences on
657 * credential calculations
658 * (notably. ptrace) */
630}; 659};
631 660
632/* Context switch must be unlocked if interrupts are to be enabled */ 661/* Context switch must be unlocked if interrupts are to be enabled */
@@ -638,9 +667,8 @@ struct signal_struct {
638 * Bits in flags field of signal_struct. 667 * Bits in flags field of signal_struct.
639 */ 668 */
640#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ 669#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */
641#define SIGNAL_STOP_DEQUEUED 0x00000002 /* stop signal dequeued */ 670#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */
642#define SIGNAL_STOP_CONTINUED 0x00000004 /* SIGCONT since WCONTINUED reap */ 671#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */
643#define SIGNAL_GROUP_EXIT 0x00000008 /* group exit in progress */
644/* 672/*
645 * Pending notifications to parent. 673 * Pending notifications to parent.
646 */ 674 */
@@ -669,8 +697,11 @@ struct user_struct {
669 atomic_t inotify_watches; /* How many inotify watches does this user have? */ 697 atomic_t inotify_watches; /* How many inotify watches does this user have? */
670 atomic_t inotify_devs; /* How many inotify devs does this user have opened? */ 698 atomic_t inotify_devs; /* How many inotify devs does this user have opened? */
671#endif 699#endif
700#ifdef CONFIG_FANOTIFY
701 atomic_t fanotify_listeners;
702#endif
672#ifdef CONFIG_EPOLL 703#ifdef CONFIG_EPOLL
673 atomic_t epoll_watches; /* The number of file descriptors currently watched */ 704 atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
674#endif 705#endif
675#ifdef CONFIG_POSIX_MQUEUE 706#ifdef CONFIG_POSIX_MQUEUE
676 /* protected by mq_lock */ 707 /* protected by mq_lock */
@@ -713,10 +744,6 @@ struct sched_info {
713 /* timestamps */ 744 /* timestamps */
714 unsigned long long last_arrival,/* when we last ran on a cpu */ 745 unsigned long long last_arrival,/* when we last ran on a cpu */
715 last_queued; /* when we were last queued to run */ 746 last_queued; /* when we were last queued to run */
716#ifdef CONFIG_SCHEDSTATS
717 /* BKL stats */
718 unsigned int bkl_count;
719#endif
720}; 747};
721#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */ 748#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
722 749
@@ -774,17 +801,39 @@ enum cpu_idle_type {
774}; 801};
775 802
776/* 803/*
777 * sched-domains (multiprocessor balancing) declarations: 804 * Increase resolution of nice-level calculations for 64-bit architectures.
805 * The extra resolution improves shares distribution and load balancing of
806 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
807 * hierarchies, especially on larger systems. This is not a user-visible change
808 * and does not change the user-interface for setting shares/weights.
809 *
810 * We increase resolution only if we have enough bits to allow this increased
811 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
812 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
813 * increased costs.
778 */ 814 */
815#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
816# define SCHED_LOAD_RESOLUTION 10
817# define scale_load(w) ((w) << SCHED_LOAD_RESOLUTION)
818# define scale_load_down(w) ((w) >> SCHED_LOAD_RESOLUTION)
819#else
820# define SCHED_LOAD_RESOLUTION 0
821# define scale_load(w) (w)
822# define scale_load_down(w) (w)
823#endif
779 824
780/* 825#define SCHED_LOAD_SHIFT (10 + SCHED_LOAD_RESOLUTION)
781 * Increase resolution of nice-level calculations:
782 */
783#define SCHED_LOAD_SHIFT 10
784#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT) 826#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
785 827
786#define SCHED_LOAD_SCALE_FUZZ SCHED_LOAD_SCALE 828/*
829 * Increase resolution of cpu_power calculations
830 */
831#define SCHED_POWER_SHIFT 10
832#define SCHED_POWER_SCALE (1L << SCHED_POWER_SHIFT)
787 833
834/*
835 * sched-domains (multiprocessor balancing) declarations:
836 */
788#ifdef CONFIG_SMP 837#ifdef CONFIG_SMP
789#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. */ 838#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. */
790#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */ 839#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */
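The scale_load()/scale_load_down() pair introduced above is easiest to see with concrete numbers. A minimal user-space sketch, assuming the currently disabled #if 0 branch were enabled (SCHED_LOAD_RESOLUTION == 10): the nice-0 weight of 1024 is carried internally with ten extra bits of fixed-point precision and scaled back down wherever a user-visible weight is needed.

#include <stdio.h>

/* Mirrors the macros above with the extra resolution enabled (assumption:
 * the #if 0 branch is turned on, i.e. SCHED_LOAD_RESOLUTION == 10). */
#define SCHED_LOAD_RESOLUTION	10
#define scale_load(w)		((unsigned long)(w) << SCHED_LOAD_RESOLUTION)
#define scale_load_down(w)	((unsigned long)(w) >> SCHED_LOAD_RESOLUTION)

int main(void)
{
	unsigned long nice_0_weight = 1024;	/* user-visible nice-0 weight */
	unsigned long internal = scale_load(nice_0_weight);

	/* 1024 << 10 = 1048576: more headroom for low-weight task groups. */
	printf("internal load weight:     %lu\n", internal);
	printf("user-visible load weight: %lu\n", scale_load_down(internal));
	return 0;
}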
@@ -799,6 +848,7 @@ enum cpu_idle_type {
799#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ 848#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
800#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ 849#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
801#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ 850#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
851#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
802 852
803enum powersavings_balance_level { 853enum powersavings_balance_level {
804 POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */ 854 POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */
@@ -836,7 +886,7 @@ extern int __weak arch_sd_sibiling_asym_packing(void);
836 886
837/* 887/*
838 * Optimise SD flags for power savings: 888 * Optimise SD flags for power savings:
839 * SD_BALANCE_NEWIDLE helps agressive task consolidation and power savings. 889 * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
840 * Keep default SD flags if sched_{smt,mc}_power_saving=0 890 * Keep default SD flags if sched_{smt,mc}_power_saving=0
841 */ 891 */
842 892
@@ -848,14 +898,21 @@ static inline int sd_power_saving_flags(void)
848 return 0; 898 return 0;
849} 899}
850 900
851struct sched_group { 901struct sched_group_power {
852 struct sched_group *next; /* Must be a circular list */ 902 atomic_t ref;
853
854 /* 903 /*
855 * CPU power of this group, SCHED_LOAD_SCALE being max power for a 904 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
856 * single CPU. 905 * single CPU.
857 */ 906 */
858 unsigned int cpu_power, cpu_power_orig; 907 unsigned int power, power_orig;
908};
909
910struct sched_group {
911 struct sched_group *next; /* Must be a circular list */
912 atomic_t ref;
913
914 unsigned int group_weight;
915 struct sched_group_power *sgp;
859 916
860 /* 917 /*
861 * The CPUs this group covers. 918 * The CPUs this group covers.
@@ -863,9 +920,6 @@ struct sched_group {
863 * NOTE: this field is variable length. (Allocated dynamically 920 * NOTE: this field is variable length. (Allocated dynamically
864 * by attaching extra space to the end of the structure, 921 * by attaching extra space to the end of the structure,
865 * depending on how many CPUs the kernel has booted up with) 922 * depending on how many CPUs the kernel has booted up with)
866 *
867 * It is also be embedded into static data structures at build
868 * time. (See 'struct static_sched_group' in kernel/sched.c)
869 */ 923 */
870 unsigned long cpumask[0]; 924 unsigned long cpumask[0];
871}; 925};
@@ -875,16 +929,6 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
875 return to_cpumask(sg->cpumask); 929 return to_cpumask(sg->cpumask);
876} 930}
877 931
878enum sched_domain_level {
879 SD_LV_NONE = 0,
880 SD_LV_SIBLING,
881 SD_LV_MC,
882 SD_LV_CPU,
883 SD_LV_NODE,
884 SD_LV_ALLNODES,
885 SD_LV_MAX
886};
887
888struct sched_domain_attr { 932struct sched_domain_attr {
889 int relax_domain_level; 933 int relax_domain_level;
890}; 934};
@@ -893,6 +937,8 @@ struct sched_domain_attr {
893 .relax_domain_level = -1, \ 937 .relax_domain_level = -1, \
894} 938}
895 939
940extern int sched_domain_level_max;
941
896struct sched_domain { 942struct sched_domain {
897 /* These fields must be setup */ 943 /* These fields must be setup */
898 struct sched_domain *parent; /* top domain must be null terminated */ 944 struct sched_domain *parent; /* top domain must be null terminated */
@@ -910,7 +956,7 @@ struct sched_domain {
910 unsigned int forkexec_idx; 956 unsigned int forkexec_idx;
911 unsigned int smt_gain; 957 unsigned int smt_gain;
912 int flags; /* See SD_* */ 958 int flags; /* See SD_* */
913 enum sched_domain_level level; 959 int level;
914 960
915 /* Runtime fields. */ 961 /* Runtime fields. */
916 unsigned long last_balance; /* init to jiffies. units in jiffies */ 962 unsigned long last_balance; /* init to jiffies. units in jiffies */
@@ -953,6 +999,10 @@ struct sched_domain {
953#ifdef CONFIG_SCHED_DEBUG 999#ifdef CONFIG_SCHED_DEBUG
954 char *name; 1000 char *name;
955#endif 1001#endif
1002 union {
1003 void *private; /* used during construction */
1004 struct rcu_head rcu; /* used during destruction */
1005 };
956 1006
957 unsigned int span_weight; 1007 unsigned int span_weight;
958 /* 1008 /*
@@ -961,9 +1011,6 @@ struct sched_domain {
961 * NOTE: this field is variable length. (Allocated dynamically 1011 * NOTE: this field is variable length. (Allocated dynamically
962 * by attaching extra space to the end of the structure, 1012 * by attaching extra space to the end of the structure,
963 * depending on how many CPUs the kernel has booted up with) 1013 * depending on how many CPUs the kernel has booted up with)
964 *
965 * It is also be embedded into static data structures at build
966 * time. (See 'struct static_sched_domain' in kernel/sched.c)
967 */ 1014 */
968 unsigned long span[0]; 1015 unsigned long span[0];
969}; 1016};
@@ -1026,10 +1073,15 @@ struct sched_domain;
1026 */ 1073 */
1027#define WF_SYNC 0x01 /* waker goes to sleep after wakup */ 1074#define WF_SYNC 0x01 /* waker goes to sleep after wakup */
1028#define WF_FORK 0x02 /* child wakeup after fork */ 1075#define WF_FORK 0x02 /* child wakeup after fork */
1076#define WF_MIGRATED 0x04 /* internal use, task got migrated */
1029 1077
1030#define ENQUEUE_WAKEUP 1 1078#define ENQUEUE_WAKEUP 1
1031#define ENQUEUE_WAKING 2 1079#define ENQUEUE_HEAD 2
1032#define ENQUEUE_HEAD 4 1080#ifdef CONFIG_SMP
1081#define ENQUEUE_WAKING 4 /* sched_class::task_waking was called */
1082#else
1083#define ENQUEUE_WAKING 0
1084#endif
1033 1085
1034#define DEQUEUE_SLEEP 1 1086#define DEQUEUE_SLEEP 1
1035 1087
@@ -1039,6 +1091,7 @@ struct sched_class {
1039 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); 1091 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1040 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); 1092 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1041 void (*yield_task) (struct rq *rq); 1093 void (*yield_task) (struct rq *rq);
1094 bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
1042 1095
1043 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); 1096 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
1044 1097
@@ -1046,12 +1099,11 @@ struct sched_class {
1046 void (*put_prev_task) (struct rq *rq, struct task_struct *p); 1099 void (*put_prev_task) (struct rq *rq, struct task_struct *p);
1047 1100
1048#ifdef CONFIG_SMP 1101#ifdef CONFIG_SMP
1049 int (*select_task_rq)(struct rq *rq, struct task_struct *p, 1102 int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
1050 int sd_flag, int flags);
1051 1103
1052 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); 1104 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
1053 void (*post_schedule) (struct rq *this_rq); 1105 void (*post_schedule) (struct rq *this_rq);
1054 void (*task_waking) (struct rq *this_rq, struct task_struct *task); 1106 void (*task_waking) (struct task_struct *task);
1055 void (*task_woken) (struct rq *this_rq, struct task_struct *task); 1107 void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1056 1108
1057 void (*set_cpus_allowed)(struct task_struct *p, 1109 void (*set_cpus_allowed)(struct task_struct *p,
@@ -1065,18 +1117,16 @@ struct sched_class {
1065 void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); 1117 void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1066 void (*task_fork) (struct task_struct *p); 1118 void (*task_fork) (struct task_struct *p);
1067 1119
1068 void (*switched_from) (struct rq *this_rq, struct task_struct *task, 1120 void (*switched_from) (struct rq *this_rq, struct task_struct *task);
1069 int running); 1121 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1070 void (*switched_to) (struct rq *this_rq, struct task_struct *task,
1071 int running);
1072 void (*prio_changed) (struct rq *this_rq, struct task_struct *task, 1122 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1073 int oldprio, int running); 1123 int oldprio);
1074 1124
1075 unsigned int (*get_rr_interval) (struct rq *rq, 1125 unsigned int (*get_rr_interval) (struct rq *rq,
1076 struct task_struct *task); 1126 struct task_struct *task);
1077 1127
1078#ifdef CONFIG_FAIR_GROUP_SCHED 1128#ifdef CONFIG_FAIR_GROUP_SCHED
1079 void (*moved_group) (struct task_struct *p, int on_rq); 1129 void (*task_move_group) (struct task_struct *p, int on_rq);
1080#endif 1130#endif
1081}; 1131};
1082 1132
@@ -1165,6 +1215,13 @@ struct sched_rt_entity {
1165struct rcu_node; 1215struct rcu_node;
1166struct od_table_entry; 1216struct od_table_entry;
1167 1217
1218enum perf_event_task_context {
1219 perf_invalid_context = -1,
1220 perf_hw_context = 0,
1221 perf_sw_context,
1222 perf_nr_task_contexts,
1223};
1224
1168struct task_struct { 1225struct task_struct {
1169 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ 1226 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
1170 void *stack; 1227 void *stack;
@@ -1172,13 +1229,11 @@ struct task_struct {
1172 unsigned int flags; /* per process flags, defined below */ 1229 unsigned int flags; /* per process flags, defined below */
1173 unsigned int ptrace; 1230 unsigned int ptrace;
1174 1231
1175 int lock_depth; /* BKL lock depth */
1176
1177#ifdef CONFIG_SMP 1232#ifdef CONFIG_SMP
1178#ifdef __ARCH_WANT_UNLOCKED_CTXSW 1233 struct task_struct *wake_entry;
1179 int oncpu; 1234 int on_cpu;
1180#endif
1181#endif 1235#endif
1236 int on_rq;
1182 1237
1183 int prio, static_prio, normal_prio; 1238 int prio, static_prio, normal_prio;
1184 unsigned int rt_priority; 1239 unsigned int rt_priority;
@@ -1207,21 +1262,34 @@ struct task_struct {
1207 unsigned int policy; 1262 unsigned int policy;
1208 cpumask_t cpus_allowed; 1263 cpumask_t cpus_allowed;
1209 1264
1210#ifdef CONFIG_TREE_PREEMPT_RCU 1265#ifdef CONFIG_PREEMPT_RCU
1211 int rcu_read_lock_nesting; 1266 int rcu_read_lock_nesting;
1212 char rcu_read_unlock_special; 1267 char rcu_read_unlock_special;
1213 struct rcu_node *rcu_blocked_node; 1268#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU)
1269 int rcu_boosted;
1270#endif /* #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) */
1214 struct list_head rcu_node_entry; 1271 struct list_head rcu_node_entry;
1272#endif /* #ifdef CONFIG_PREEMPT_RCU */
1273#ifdef CONFIG_TREE_PREEMPT_RCU
1274 struct rcu_node *rcu_blocked_node;
1215#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 1275#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1276#ifdef CONFIG_RCU_BOOST
1277 struct rt_mutex *rcu_boost_mutex;
1278#endif /* #ifdef CONFIG_RCU_BOOST */
1216 1279
1217#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 1280#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1218 struct sched_info sched_info; 1281 struct sched_info sched_info;
1219#endif 1282#endif
1220 1283
1221 struct list_head tasks; 1284 struct list_head tasks;
1285#ifdef CONFIG_SMP
1222 struct plist_node pushable_tasks; 1286 struct plist_node pushable_tasks;
1287#endif
1223 1288
1224 struct mm_struct *mm, *active_mm; 1289 struct mm_struct *mm, *active_mm;
1290#ifdef CONFIG_COMPAT_BRK
1291 unsigned brk_randomized:1;
1292#endif
1225#if defined(SPLIT_RSS_COUNTING) 1293#if defined(SPLIT_RSS_COUNTING)
1226 struct task_rss_stat rss_stat; 1294 struct task_rss_stat rss_stat;
1227#endif 1295#endif
@@ -1229,6 +1297,7 @@ struct task_struct {
1229 int exit_state; 1297 int exit_state;
1230 int exit_code, exit_signal; 1298 int exit_code, exit_signal;
1231 int pdeath_signal; /* The signal sent when the parent dies */ 1299 int pdeath_signal; /* The signal sent when the parent dies */
1300 unsigned int group_stop; /* GROUP_STOP_*, siglock protected */
1232 /* ??? */ 1301 /* ??? */
1233 unsigned int personality; 1302 unsigned int personality;
1234 unsigned did_exec:1; 1303 unsigned did_exec:1;
@@ -1239,6 +1308,7 @@ struct task_struct {
1239 1308
1240 /* Revert to default priority/policy when forking */ 1309 /* Revert to default priority/policy when forking */
1241 unsigned sched_reset_on_fork:1; 1310 unsigned sched_reset_on_fork:1;
1311 unsigned sched_contributes_to_load:1;
1242 1312
1243 pid_t pid; 1313 pid_t pid;
1244 pid_t tgid; 1314 pid_t tgid;
@@ -1293,13 +1363,10 @@ struct task_struct {
1293 struct list_head cpu_timers[3]; 1363 struct list_head cpu_timers[3];
1294 1364
1295/* process credentials */ 1365/* process credentials */
1296 const struct cred *real_cred; /* objective and real subjective task 1366 const struct cred __rcu *real_cred; /* objective and real subjective task
1297 * credentials (COW) */ 1367 * credentials (COW) */
1298 const struct cred *cred; /* effective (overridable) subjective task 1368 const struct cred __rcu *cred; /* effective (overridable) subjective task
1299 * credentials (COW) */ 1369 * credentials (COW) */
1300 struct mutex cred_guard_mutex; /* guard against foreign influences on
1301 * credential calculations
1302 * (notably. ptrace) */
1303 struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */ 1370 struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
1304 1371
1305 char comm[TASK_COMM_LEN]; /* executable name excluding path 1372 char comm[TASK_COMM_LEN]; /* executable name excluding path
@@ -1400,6 +1467,11 @@ struct task_struct {
1400/* stacked block device info */ 1467/* stacked block device info */
1401 struct bio_list *bio_list; 1468 struct bio_list *bio_list;
1402 1469
1470#ifdef CONFIG_BLOCK
1471/* stack plugging */
1472 struct blk_plug *plug;
1473#endif
1474
1403/* VM state */ 1475/* VM state */
1404 struct reclaim_state *reclaim_state; 1476 struct reclaim_state *reclaim_state;
1405 1477
@@ -1423,7 +1495,7 @@ struct task_struct {
1423#endif 1495#endif
1424#ifdef CONFIG_CGROUPS 1496#ifdef CONFIG_CGROUPS
1425 /* Control Group info protected by css_set_lock */ 1497 /* Control Group info protected by css_set_lock */
1426 struct css_set *cgroups; 1498 struct css_set __rcu *cgroups;
1427 /* cg_list protected by css_set_lock and tsk->alloc_lock */ 1499 /* cg_list protected by css_set_lock and tsk->alloc_lock */
1428 struct list_head cg_list; 1500 struct list_head cg_list;
1429#endif 1501#endif
@@ -1436,13 +1508,14 @@ struct task_struct {
1436 struct futex_pi_state *pi_state_cache; 1508 struct futex_pi_state *pi_state_cache;
1437#endif 1509#endif
1438#ifdef CONFIG_PERF_EVENTS 1510#ifdef CONFIG_PERF_EVENTS
1439 struct perf_event_context *perf_event_ctxp; 1511 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1440 struct mutex perf_event_mutex; 1512 struct mutex perf_event_mutex;
1441 struct list_head perf_event_list; 1513 struct list_head perf_event_list;
1442#endif 1514#endif
1443#ifdef CONFIG_NUMA 1515#ifdef CONFIG_NUMA
1444 struct mempolicy *mempolicy; /* Protected by alloc_lock */ 1516 struct mempolicy *mempolicy; /* Protected by alloc_lock */
1445 short il_next; 1517 short il_next;
1518 short pref_node_fork;
1446#endif 1519#endif
1447 atomic_t fs_excl; /* holding fs exclusive resources */ 1520 atomic_t fs_excl; /* holding fs exclusive resources */
1448 struct rcu_head rcu; 1521 struct rcu_head rcu;
@@ -1495,17 +1568,20 @@ struct task_struct {
1495#ifdef CONFIG_TRACING 1568#ifdef CONFIG_TRACING
1496 /* state flags for use by tracers */ 1569 /* state flags for use by tracers */
1497 unsigned long trace; 1570 unsigned long trace;
1498 /* bitmask of trace recursion */ 1571 /* bitmask and counter of trace recursion */
1499 unsigned long trace_recursion; 1572 unsigned long trace_recursion;
1500#endif /* CONFIG_TRACING */ 1573#endif /* CONFIG_TRACING */
1501#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */ 1574#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
1502 struct memcg_batch_info { 1575 struct memcg_batch_info {
1503 int do_batch; /* incremented when batch uncharge started */ 1576 int do_batch; /* incremented when batch uncharge started */
1504 struct mem_cgroup *memcg; /* target memcg of uncharge */ 1577 struct mem_cgroup *memcg; /* target memcg of uncharge */
1505 unsigned long bytes; /* uncharged usage */ 1578 unsigned long nr_pages; /* uncharged usage */
1506 unsigned long memsw_bytes; /* uncharged mem+swap usage */ 1579 unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
1507 } memcg_batch; 1580 } memcg_batch;
1508#endif 1581#endif
1582#ifdef CONFIG_HAVE_HW_BREAKPOINT
1583 atomic_t ptrace_bp_refcnt;
1584#endif
1509}; 1585};
1510 1586
1511/* Future-safe accessor for struct task_struct's cpus_allowed. */ 1587/* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -1693,8 +1769,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
1693/* 1769/*
1694 * Per process flags 1770 * Per process flags
1695 */ 1771 */
1696#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */
1697 /* Not implemented yet, only for 486*/
1698#define PF_STARTING 0x00000002 /* being created */ 1772#define PF_STARTING 0x00000002 /* being created */
1699#define PF_EXITING 0x00000004 /* getting shut down */ 1773#define PF_EXITING 0x00000004 /* getting shut down */
1700#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ 1774#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
@@ -1706,14 +1780,12 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
1706#define PF_DUMPCORE 0x00000200 /* dumped core */ 1780#define PF_DUMPCORE 0x00000200 /* dumped core */
1707#define PF_SIGNALED 0x00000400 /* killed by a signal */ 1781#define PF_SIGNALED 0x00000400 /* killed by a signal */
1708#define PF_MEMALLOC 0x00000800 /* Allocating memory */ 1782#define PF_MEMALLOC 0x00000800 /* Allocating memory */
1709#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
1710#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ 1783#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
1711#define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */ 1784#define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
1712#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ 1785#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
1713#define PF_FROZEN 0x00010000 /* frozen for system suspend */ 1786#define PF_FROZEN 0x00010000 /* frozen for system suspend */
1714#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ 1787#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
1715#define PF_KSWAPD 0x00040000 /* I am kswapd */ 1788#define PF_KSWAPD 0x00040000 /* I am kswapd */
1716#define PF_OOM_ORIGIN 0x00080000 /* Allocating much memory to others */
1717#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ 1789#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
1718#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ 1790#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
1719#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ 1791#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
@@ -1724,7 +1796,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
1724#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ 1796#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
1725#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ 1797#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
1726#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ 1798#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
1727#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ 1799#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
1728#define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */ 1800#define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */
1729 1801
1730/* 1802/*
@@ -1752,16 +1824,33 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
1752#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) 1824#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1753#define used_math() tsk_used_math(current) 1825#define used_math() tsk_used_math(current)
1754 1826
1755#ifdef CONFIG_TREE_PREEMPT_RCU 1827/*
1828 * task->group_stop flags
1829 */
1830#define GROUP_STOP_SIGMASK 0xffff /* signr of the last group stop */
1831#define GROUP_STOP_PENDING (1 << 16) /* task should stop for group stop */
1832#define GROUP_STOP_CONSUME (1 << 17) /* consume group stop count */
1833#define GROUP_STOP_TRAPPING (1 << 18) /* switching from STOPPED to TRACED */
1834#define GROUP_STOP_DEQUEUED (1 << 19) /* stop signal dequeued */
1835
1836extern void task_clear_group_stop_pending(struct task_struct *task);
1837
1838#ifdef CONFIG_PREEMPT_RCU
1756 1839
1757#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ 1840#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
1758#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */ 1841#define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */
1842#define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. */
1759 1843
1760static inline void rcu_copy_process(struct task_struct *p) 1844static inline void rcu_copy_process(struct task_struct *p)
1761{ 1845{
1762 p->rcu_read_lock_nesting = 0; 1846 p->rcu_read_lock_nesting = 0;
1763 p->rcu_read_unlock_special = 0; 1847 p->rcu_read_unlock_special = 0;
1848#ifdef CONFIG_TREE_PREEMPT_RCU
1764 p->rcu_blocked_node = NULL; 1849 p->rcu_blocked_node = NULL;
1850#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1851#ifdef CONFIG_RCU_BOOST
1852 p->rcu_boost_mutex = NULL;
1853#endif /* #ifdef CONFIG_RCU_BOOST */
1765 INIT_LIST_HEAD(&p->rcu_node_entry); 1854 INIT_LIST_HEAD(&p->rcu_node_entry);
1766} 1855}
1767 1856
@@ -1774,9 +1863,16 @@ static inline void rcu_copy_process(struct task_struct *p)
1774#endif 1863#endif
1775 1864
1776#ifdef CONFIG_SMP 1865#ifdef CONFIG_SMP
1866extern void do_set_cpus_allowed(struct task_struct *p,
1867 const struct cpumask *new_mask);
1868
1777extern int set_cpus_allowed_ptr(struct task_struct *p, 1869extern int set_cpus_allowed_ptr(struct task_struct *p,
1778 const struct cpumask *new_mask); 1870 const struct cpumask *new_mask);
1779#else 1871#else
1872static inline void do_set_cpus_allowed(struct task_struct *p,
1873 const struct cpumask *new_mask)
1874{
1875}
1780static inline int set_cpus_allowed_ptr(struct task_struct *p, 1876static inline int set_cpus_allowed_ptr(struct task_struct *p,
1781 const struct cpumask *new_mask) 1877 const struct cpumask *new_mask)
1782{ 1878{
@@ -1838,6 +1934,19 @@ extern void sched_clock_idle_sleep_event(void);
1838extern void sched_clock_idle_wakeup_event(u64 delta_ns); 1934extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1839#endif 1935#endif
1840 1936
1937#ifdef CONFIG_IRQ_TIME_ACCOUNTING
1938/*
1939 * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
1940 * The reason for this explicit opt-in is not to have perf penalty with
1941 * slow sched_clocks.
1942 */
1943extern void enable_sched_clock_irqtime(void);
1944extern void disable_sched_clock_irqtime(void);
1945#else
1946static inline void enable_sched_clock_irqtime(void) {}
1947static inline void disable_sched_clock_irqtime(void) {}
1948#endif
1949
1841extern unsigned long long 1950extern unsigned long long
1842task_sched_runtime(struct task_struct *task); 1951task_sched_runtime(struct task_struct *task);
1843extern unsigned long long thread_group_sched_runtime(struct task_struct *task); 1952extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
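The enable_sched_clock_irqtime()/disable_sched_clock_irqtime() pair declared above is an opt-in that architecture code makes once it knows its sched_clock() is cheap and reliable (on x86 this happens from the TSC init path). A minimal sketch of such an opt-in; the init-hook and predicate names below are placeholders, not taken from this diff.

/* Hypothetical arch init hook: opt in to per-IRQ time accounting only when
 * the clock behind sched_clock() is known to be fast and monotonic. */
static int __init my_arch_irqtime_init(void)
{
	if (my_arch_sched_clock_is_reliable())	/* placeholder predicate */
		enable_sched_clock_irqtime();
	return 0;
}
early_initcall(my_arch_irqtime_init);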
@@ -1853,14 +1962,11 @@ extern void sched_clock_idle_sleep_event(void);
1853extern void sched_clock_idle_wakeup_event(u64 delta_ns); 1962extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1854 1963
1855#ifdef CONFIG_HOTPLUG_CPU 1964#ifdef CONFIG_HOTPLUG_CPU
1856extern void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p);
1857extern void idle_task_exit(void); 1965extern void idle_task_exit(void);
1858#else 1966#else
1859static inline void idle_task_exit(void) {} 1967static inline void idle_task_exit(void) {}
1860#endif 1968#endif
1861 1969
1862extern void sched_idle_next(void);
1863
1864#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP) 1970#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
1865extern void wake_up_idle_cpu(int cpu); 1971extern void wake_up_idle_cpu(int cpu);
1866#else 1972#else
@@ -1870,8 +1976,6 @@ static inline void wake_up_idle_cpu(int cpu) { }
1870extern unsigned int sysctl_sched_latency; 1976extern unsigned int sysctl_sched_latency;
1871extern unsigned int sysctl_sched_min_granularity; 1977extern unsigned int sysctl_sched_min_granularity;
1872extern unsigned int sysctl_sched_wakeup_granularity; 1978extern unsigned int sysctl_sched_wakeup_granularity;
1873extern unsigned int sysctl_sched_shares_ratelimit;
1874extern unsigned int sysctl_sched_shares_thresh;
1875extern unsigned int sysctl_sched_child_runs_first; 1979extern unsigned int sysctl_sched_child_runs_first;
1876 1980
1877enum sched_tunable_scaling { 1981enum sched_tunable_scaling {
@@ -1887,6 +1991,7 @@ extern unsigned int sysctl_sched_migration_cost;
1887extern unsigned int sysctl_sched_nr_migrate; 1991extern unsigned int sysctl_sched_nr_migrate;
1888extern unsigned int sysctl_sched_time_avg; 1992extern unsigned int sysctl_sched_time_avg;
1889extern unsigned int sysctl_timer_migration; 1993extern unsigned int sysctl_timer_migration;
1994extern unsigned int sysctl_sched_shares_window;
1890 1995
1891int sched_proc_update_handler(struct ctl_table *table, int write, 1996int sched_proc_update_handler(struct ctl_table *table, int write,
1892 void __user *buffer, size_t *length, 1997 void __user *buffer, size_t *length,
@@ -1910,7 +2015,23 @@ int sched_rt_handler(struct ctl_table *table, int write,
1910 void __user *buffer, size_t *lenp, 2015 void __user *buffer, size_t *lenp,
1911 loff_t *ppos); 2016 loff_t *ppos);
1912 2017
1913extern unsigned int sysctl_sched_compat_yield; 2018#ifdef CONFIG_SCHED_AUTOGROUP
2019extern unsigned int sysctl_sched_autogroup_enabled;
2020
2021extern void sched_autogroup_create_attach(struct task_struct *p);
2022extern void sched_autogroup_detach(struct task_struct *p);
2023extern void sched_autogroup_fork(struct signal_struct *sig);
2024extern void sched_autogroup_exit(struct signal_struct *sig);
2025#ifdef CONFIG_PROC_FS
2026extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2027extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice);
2028#endif
2029#else
2030static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2031static inline void sched_autogroup_detach(struct task_struct *p) { }
2032static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2033static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2034#endif
1914 2035
1915#ifdef CONFIG_RT_MUTEXES 2036#ifdef CONFIG_RT_MUTEXES
1916extern int rt_mutex_getprio(struct task_struct *p); 2037extern int rt_mutex_getprio(struct task_struct *p);
@@ -1924,15 +2045,17 @@ static inline int rt_mutex_getprio(struct task_struct *p)
1924# define rt_mutex_adjust_pi(p) do { } while (0) 2045# define rt_mutex_adjust_pi(p) do { } while (0)
1925#endif 2046#endif
1926 2047
2048extern bool yield_to(struct task_struct *p, bool preempt);
1927extern void set_user_nice(struct task_struct *p, long nice); 2049extern void set_user_nice(struct task_struct *p, long nice);
1928extern int task_prio(const struct task_struct *p); 2050extern int task_prio(const struct task_struct *p);
1929extern int task_nice(const struct task_struct *p); 2051extern int task_nice(const struct task_struct *p);
1930extern int can_nice(const struct task_struct *p, const int nice); 2052extern int can_nice(const struct task_struct *p, const int nice);
1931extern int task_curr(const struct task_struct *p); 2053extern int task_curr(const struct task_struct *p);
1932extern int idle_cpu(int cpu); 2054extern int idle_cpu(int cpu);
1933extern int sched_setscheduler(struct task_struct *, int, struct sched_param *); 2055extern int sched_setscheduler(struct task_struct *, int,
2056 const struct sched_param *);
1934extern int sched_setscheduler_nocheck(struct task_struct *, int, 2057extern int sched_setscheduler_nocheck(struct task_struct *, int,
1935 struct sched_param *); 2058 const struct sched_param *);
1936extern struct task_struct *idle_task(int cpu); 2059extern struct task_struct *idle_task(int cpu);
1937extern struct task_struct *curr_task(int cpu); 2060extern struct task_struct *curr_task(int cpu);
1938extern void set_curr_task(int cpu, struct task_struct *p); 2061extern void set_curr_task(int cpu, struct task_struct *p);
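With the constified prototypes above, in-kernel callers can hand sched_setscheduler*() a pointer to a static const parameter block. A minimal sketch of a kernel-thread caller; the thread function and priority are illustrative only.

static int my_rt_worker(void *data)
{
	static const struct sched_param param = { .sched_priority = 50 };

	/* _nocheck skips the capability/RLIMIT_RTPRIO checks, which is the
	 * usual choice for in-kernel callers. */
	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);

	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}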
@@ -1995,18 +2118,17 @@ extern void release_uids(struct user_namespace *ns);
1995 2118
1996#include <asm/current.h> 2119#include <asm/current.h>
1997 2120
1998extern void do_timer(unsigned long ticks); 2121extern void xtime_update(unsigned long ticks);
1999 2122
2000extern int wake_up_state(struct task_struct *tsk, unsigned int state); 2123extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2001extern int wake_up_process(struct task_struct *tsk); 2124extern int wake_up_process(struct task_struct *tsk);
2002extern void wake_up_new_task(struct task_struct *tsk, 2125extern void wake_up_new_task(struct task_struct *tsk);
2003 unsigned long clone_flags);
2004#ifdef CONFIG_SMP 2126#ifdef CONFIG_SMP
2005 extern void kick_process(struct task_struct *tsk); 2127 extern void kick_process(struct task_struct *tsk);
2006#else 2128#else
2007 static inline void kick_process(struct task_struct *tsk) { } 2129 static inline void kick_process(struct task_struct *tsk) { }
2008#endif 2130#endif
2009extern void sched_fork(struct task_struct *p, int clone_flags); 2131extern void sched_fork(struct task_struct *p);
2010extern void sched_dead(struct task_struct *p); 2132extern void sched_dead(struct task_struct *p);
2011 2133
2012extern void proc_caches_init(void); 2134extern void proc_caches_init(void);
@@ -2131,8 +2253,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
2131extern char *get_task_comm(char *to, struct task_struct *tsk); 2253extern char *get_task_comm(char *to, struct task_struct *tsk);
2132 2254
2133#ifdef CONFIG_SMP 2255#ifdef CONFIG_SMP
2256void scheduler_ipi(void);
2134extern unsigned long wait_task_inactive(struct task_struct *, long match_state); 2257extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2135#else 2258#else
2259static inline void scheduler_ipi(void) { }
2136static inline unsigned long wait_task_inactive(struct task_struct *p, 2260static inline unsigned long wait_task_inactive(struct task_struct *p,
2137 long match_state) 2261 long match_state)
2138{ 2262{
@@ -2222,15 +2346,47 @@ static inline void task_unlock(struct task_struct *p)
2222 spin_unlock(&p->alloc_lock); 2346 spin_unlock(&p->alloc_lock);
2223} 2347}
2224 2348
2225extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk, 2349extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2226 unsigned long *flags); 2350 unsigned long *flags);
2227 2351
2352#define lock_task_sighand(tsk, flags) \
2353({ struct sighand_struct *__ss; \
2354 __cond_lock(&(tsk)->sighand->siglock, \
2355 (__ss = __lock_task_sighand(tsk, flags))); \
2356 __ss; \
2357}) \
2358
2228static inline void unlock_task_sighand(struct task_struct *tsk, 2359static inline void unlock_task_sighand(struct task_struct *tsk,
2229 unsigned long *flags) 2360 unsigned long *flags)
2230{ 2361{
2231 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); 2362 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2232} 2363}
2233 2364
2365/* See the declaration of threadgroup_fork_lock in signal_struct. */
2366#ifdef CONFIG_CGROUPS
2367static inline void threadgroup_fork_read_lock(struct task_struct *tsk)
2368{
2369 down_read(&tsk->signal->threadgroup_fork_lock);
2370}
2371static inline void threadgroup_fork_read_unlock(struct task_struct *tsk)
2372{
2373 up_read(&tsk->signal->threadgroup_fork_lock);
2374}
2375static inline void threadgroup_fork_write_lock(struct task_struct *tsk)
2376{
2377 down_write(&tsk->signal->threadgroup_fork_lock);
2378}
2379static inline void threadgroup_fork_write_unlock(struct task_struct *tsk)
2380{
2381 up_write(&tsk->signal->threadgroup_fork_lock);
2382}
2383#else
2384static inline void threadgroup_fork_read_lock(struct task_struct *tsk) {}
2385static inline void threadgroup_fork_read_unlock(struct task_struct *tsk) {}
2386static inline void threadgroup_fork_write_lock(struct task_struct *tsk) {}
2387static inline void threadgroup_fork_write_unlock(struct task_struct *tsk) {}
2388#endif
2389
2234#ifndef __HAVE_THREAD_FUNCTIONS 2390#ifndef __HAVE_THREAD_FUNCTIONS
2235 2391
2236#define task_thread_info(task) ((struct thread_info *)(task)->stack) 2392#define task_thread_info(task) ((struct thread_info *)(task)->stack)
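The lock_task_sighand() wrapper added above exists so that sparse's __cond_lock() annotation can model the conditionally taken siglock; callers must check for a NULL return because the target task may already have lost its sighand on exit. A minimal usage sketch (the helper and the state it reads are illustrative only):

static int task_is_group_stopped(struct task_struct *task)
{
	struct sighand_struct *sighand;
	unsigned long flags;
	int stopped;

	sighand = lock_task_sighand(task, &flags);
	if (!sighand)
		return -ESRCH;	/* task is exiting; no sighand left to lock */

	/* task->signal and task->sighand are stable while siglock is held. */
	stopped = !!(task->signal->flags & SIGNAL_STOP_STOPPED);

	unlock_task_sighand(task, &flags);
	return stopped;
}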
@@ -2380,9 +2536,9 @@ extern int __cond_resched_lock(spinlock_t *lock);
2380 2536
2381extern int __cond_resched_softirq(void); 2537extern int __cond_resched_softirq(void);
2382 2538
2383#define cond_resched_softirq() ({ \ 2539#define cond_resched_softirq() ({ \
2384 __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET); \ 2540 __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
2385 __cond_resched_softirq(); \ 2541 __cond_resched_softirq(); \
2386}) 2542})
2387 2543
2388/* 2544/*
@@ -2453,7 +2609,7 @@ extern void normalize_rt_tasks(void);
2453 2609
2454#ifdef CONFIG_CGROUP_SCHED 2610#ifdef CONFIG_CGROUP_SCHED
2455 2611
2456extern struct task_group init_task_group; 2612extern struct task_group root_task_group;
2457 2613
2458extern struct task_group *sched_create_group(struct task_group *parent); 2614extern struct task_group *sched_create_group(struct task_group *parent);
2459extern void sched_destroy_group(struct task_group *tg); 2615extern void sched_destroy_group(struct task_group *tg);
@@ -2518,13 +2674,6 @@ static inline void inc_syscw(struct task_struct *tsk)
2518#define TASK_SIZE_OF(tsk) TASK_SIZE 2674#define TASK_SIZE_OF(tsk) TASK_SIZE
2519#endif 2675#endif
2520 2676
2521/*
2522 * Call the function if the target task is executing on a CPU right now:
2523 */
2524extern void task_oncpu_function_call(struct task_struct *p,
2525 void (*func) (void *info), void *info);
2526
2527
2528#ifdef CONFIG_MM_OWNER 2677#ifdef CONFIG_MM_OWNER
2529extern void mm_update_next_owner(struct mm_struct *mm); 2678extern void mm_update_next_owner(struct mm_struct *mm);
2530extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); 2679extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);