Diffstat (limited to 'include/linux/sched.h')
 -rw-r--r--  include/linux/sched.h | 112
 1 file changed, 70 insertions(+), 42 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c5d3f847ca8d..dc7e592c473a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -134,7 +134,6 @@ extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
-extern unsigned long weighted_cpuload(const int cpu);
 
 struct seq_file;
 struct cfs_rq;
@@ -246,6 +245,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
 extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
+extern int runqueue_is_locked(void);
+
 extern cpumask_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
@@ -294,10 +295,11 @@ extern void softlockup_tick(void);
 extern void spawn_softlockup_task(void);
 extern void touch_softlockup_watchdog(void);
 extern void touch_all_softlockup_watchdogs(void);
-extern unsigned long softlockup_thresh;
+extern unsigned int softlockup_panic;
 extern unsigned long sysctl_hung_task_check_count;
 extern unsigned long sysctl_hung_task_timeout_secs;
 extern unsigned long sysctl_hung_task_warnings;
+extern int softlockup_thresh;
 #else
 static inline void softlockup_tick(void)
 {
@@ -784,6 +786,8 @@ struct sched_domain {
 	unsigned int balance_interval;	/* initialise to 1. units in ms. */
 	unsigned int nr_balance_failed; /* initialise to 0 */
 
+	u64 last_update;
+
 #ifdef CONFIG_SCHEDSTATS
 	/* load_balance() stats */
 	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
@@ -821,24 +825,16 @@ extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
 				    struct sched_domain_attr *dattr_new);
 extern int arch_reinit_sched_domains(void);
 
-#endif	/* CONFIG_SMP */
+#else /* CONFIG_SMP */
 
-/*
- * A runqueue laden with a single nice 0 task scores a weighted_cpuload of
- * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a
- * task of nice 0 or enough lower priority tasks to bring up the
- * weighted_cpuload
- */
-static inline int above_background_load(void)
-{
-	unsigned long cpu;
+struct sched_domain_attr;
 
-	for_each_online_cpu(cpu) {
-		if (weighted_cpuload(cpu) >= SCHED_LOAD_SCALE)
-			return 1;
-	}
-	return 0;
+static inline void
+partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+			struct sched_domain_attr *dattr_new)
+{
 }
+#endif	/* !CONFIG_SMP */
 
 struct io_context;	/* See blkdev.h */
 #define NGROUPS_SMALL		32
@@ -921,8 +917,8 @@ struct sched_class {
 	void (*set_cpus_allowed)(struct task_struct *p,
 				 const cpumask_t *newmask);
 
-	void (*join_domain)(struct rq *rq);
-	void (*leave_domain)(struct rq *rq);
+	void (*rq_online)(struct rq *rq);
+	void (*rq_offline)(struct rq *rq);
 
 	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
 			       int running);
@@ -1039,6 +1035,7 @@ struct task_struct {
 #endif
 
 	int prio, static_prio, normal_prio;
+	unsigned int rt_priority;
 	const struct sched_class *sched_class;
 	struct sched_entity se;
 	struct sched_rt_entity rt;
@@ -1075,12 +1072,6 @@ struct task_struct {
 #endif
 
 	struct list_head tasks;
-	/*
-	 * ptrace_list/ptrace_children forms the list of my children
-	 * that were stolen by a ptracer.
-	 */
-	struct list_head ptrace_children;
-	struct list_head ptrace_list;
 
 	struct mm_struct *mm, *active_mm;
 
@@ -1102,18 +1093,25 @@ struct task_struct {
 	/*
 	 * pointers to (original) parent process, youngest child, younger sibling,
 	 * older sibling, respectively. (p->father can be replaced with
-	 * p->parent->pid)
+	 * p->real_parent->pid)
 	 */
-	struct task_struct *real_parent; /* real parent process (when being debugged) */
-	struct task_struct *parent;	/* parent process */
+	struct task_struct *real_parent; /* real parent process */
+	struct task_struct *parent;	/* recipient of SIGCHLD, wait4() reports */
 	/*
-	 * children/sibling forms the list of my children plus the
-	 * tasks I'm ptracing.
+	 * children/sibling forms the list of my natural children
 	 */
 	struct list_head children;	/* list of my children */
 	struct list_head sibling;	/* linkage in my parent's children list */
 	struct task_struct *group_leader;	/* threadgroup leader */
 
+	/*
+	 * ptraced is the list of tasks this task is using ptrace on.
+	 * This includes both natural children and PTRACE_ATTACH targets.
+	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
+	 */
+	struct list_head ptraced;
+	struct list_head ptrace_entry;
+
 	/* PID/PID hash table linkage. */
 	struct pid_link pids[PIDTYPE_MAX];
 	struct list_head thread_group;
@@ -1122,7 +1120,6 @@ struct task_struct {
 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
 
-	unsigned int rt_priority;
 	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
 	cputime_t prev_utime, prev_stime;
@@ -1141,12 +1138,12 @@ struct task_struct {
 	gid_t gid,egid,sgid,fsgid;
 	struct group_info *group_info;
 	kernel_cap_t   cap_effective, cap_inheritable, cap_permitted, cap_bset;
-	unsigned securebits;
 	struct user_struct *user;
+	unsigned securebits;
 #ifdef CONFIG_KEYS
+	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
 	struct key *request_key_auth;	/* assumed request_key authority */
 	struct key *thread_keyring;	/* keyring private to this thread */
-	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
 #endif
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
@@ -1233,8 +1230,8 @@ struct task_struct {
 # define MAX_LOCK_DEPTH 48UL
 	u64 curr_chain_key;
 	int lockdep_depth;
-	struct held_lock held_locks[MAX_LOCK_DEPTH];
 	unsigned int lockdep_recursion;
+	struct held_lock held_locks[MAX_LOCK_DEPTH];
 #endif
 
 	/* journalling filesystem info */
@@ -1262,10 +1259,6 @@ struct task_struct {
 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
 	cputime_t acct_stimexpd;/* stime since last update */
 #endif
-#ifdef CONFIG_NUMA
-	struct mempolicy *mempolicy;
-	short il_next;
-#endif
 #ifdef CONFIG_CPUSETS
 	nodemask_t mems_allowed;
 	int cpuset_mems_generation;
@@ -1285,6 +1278,10 @@ struct task_struct {
 	struct list_head pi_state_list;
 	struct futex_pi_state *pi_state_cache;
 #endif
+#ifdef CONFIG_NUMA
+	struct mempolicy *mempolicy;
+	short il_next;
+#endif
 	atomic_t fs_excl;	/* holding fs exclusive resources */
 	struct rcu_head rcu;
 
@@ -1504,9 +1501,11 @@ static inline void put_task_struct(struct task_struct *t)
 #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
 #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
 #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
+#define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezeable */
+#define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
@@ -1573,13 +1572,28 @@ static inline void sched_clock_idle_sleep_event(void)
 static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 }
-#else
+
+#ifdef CONFIG_NO_HZ
+static inline void sched_clock_tick_stop(int cpu)
+{
+}
+
+static inline void sched_clock_tick_start(int cpu)
+{
+}
+#endif
+
+#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 extern void sched_clock_init(void);
 extern u64 sched_clock_cpu(int cpu);
 extern void sched_clock_tick(void);
 extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+#ifdef CONFIG_NO_HZ
+extern void sched_clock_tick_stop(int cpu);
+extern void sched_clock_tick_start(int cpu);
 #endif
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 /*
  * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
@@ -1622,6 +1636,7 @@ extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
+extern unsigned int sysctl_sched_shares_ratelimit;
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
 		struct file *file, void __user *buffer, size_t *length,
@@ -1655,6 +1670,8 @@ extern int can_nice(const struct task_struct *p, const int nice);
 extern int task_curr(const struct task_struct *p);
 extern int idle_cpu(int cpu);
 extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
+extern int sched_setscheduler_nocheck(struct task_struct *, int,
+				      struct sched_param *);
 extern struct task_struct *idle_task(int cpu);
 extern struct task_struct *curr_task(int cpu);
 extern void set_curr_task(int cpu, struct task_struct *p);
@@ -1870,9 +1887,6 @@ extern void wait_task_inactive(struct task_struct * p);
 #define wait_task_inactive(p)	do { } while (0)
 #endif
 
-#define remove_parent(p)	list_del_init(&(p)->sibling)
-#define add_parent(p)		list_add_tail(&(p)->sibling,&(p)->parent->children)
-
 #define next_task(p)	list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
 
 #define for_each_process(p) \
@@ -2131,6 +2145,18 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
 }
 #endif
 
+#ifdef CONFIG_TRACING
+extern void
+__trace_special(void *__tr, void *__data,
+		unsigned long arg1, unsigned long arg2, unsigned long arg3);
+#else
+static inline void
+__trace_special(void *__tr, void *__data,
+		unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+}
+#endif
+
 extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
 extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
 
@@ -2225,6 +2251,8 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 }
 #endif /* CONFIG_MM_OWNER */
 
+#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
+
 #endif /* __KERNEL__ */
 
 #endif
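
Of the interfaces this diff declares, sched_setscheduler_nocheck() is the one new code is most likely to call directly. The following is a minimal, illustrative sketch — not part of the patch — of how a kernel-internal caller could use it; the helper name and priority value are hypothetical, while the signature (non-const struct sched_param pointer) matches the hunk at new lines 1673-1674 above.

/*
 * Illustrative sketch (not from this patch): kernel-internal use of the
 * newly declared sched_setscheduler_nocheck().  Unlike sched_setscheduler(),
 * it skips the capability/security check against current, which is what a
 * kernel caller adjusting one of its own kthreads wants.  The helper name
 * and priority value below are hypothetical.
 */
#include <linux/sched.h>

static int make_kthread_fifo(struct task_struct *tsk)
{
	struct sched_param param = { .sched_priority = 1 };	/* hypothetical */

	/* No CAP_SYS_NICE check: usable from boot-time or kthread setup code. */
	return sched_setscheduler_nocheck(tsk, SCHED_FIFO, &param);
}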