Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 224 +++++++++++++++++++++++++++++-------------
 1 file changed, 145 insertions(+), 79 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 03c238088ae..5270d449ff9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -134,7 +134,6 @@ extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
-extern unsigned long weighted_cpuload(const int cpu);
 
 struct seq_file;
 struct cfs_rq;
@@ -158,6 +157,8 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 }
 #endif
 
+extern unsigned long long time_sync_thresh;
+
 /*
  * Task state bitmask. NOTE! These bits are also
  * encoded in fs/proc/array.c: get_task_state().
@@ -244,6 +245,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
 extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
+extern int runqueue_is_locked(void);
+
 extern cpumask_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
@@ -289,13 +292,13 @@ extern void sched_show_task(struct task_struct *p);
 
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 extern void softlockup_tick(void);
-extern void spawn_softlockup_task(void);
 extern void touch_softlockup_watchdog(void);
 extern void touch_all_softlockup_watchdogs(void);
-extern unsigned long softlockup_thresh;
+extern unsigned int softlockup_panic;
 extern unsigned long sysctl_hung_task_check_count;
 extern unsigned long sysctl_hung_task_timeout_secs;
 extern unsigned long sysctl_hung_task_warnings;
+extern int softlockup_thresh;
 #else
 static inline void softlockup_tick(void)
 {
@@ -502,6 +505,7 @@ struct signal_struct {
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
 	unsigned long inblock, oublock, cinblock, coublock;
+	struct task_io_accounting ioac;
 
 	/*
 	 * Cumulative ns of scheduled CPU time for dead threads in the
@@ -664,6 +668,10 @@ struct task_delay_info {
 				/* io operations performed */
 	u32 swapin_count;	/* total count of the number of swapin block */
 				/* io operations performed */
+
+	struct timespec freepages_start, freepages_end;
+	u64 freepages_delay;	/* wait for memory reclaim */
+	u32 freepages_count;	/* total count of memory reclaim */
 };
 #endif	/* CONFIG_TASK_DELAY_ACCT */
 
@@ -764,7 +772,6 @@ struct sched_domain {
 	struct sched_domain *child;	/* bottom domain must be null terminated */
 	struct sched_group *groups;	/* the balancing groups of the domain */
 	cpumask_t span;			/* span of all CPUs in this domain */
-	int first_cpu;			/* cache of the first cpu in this domain */
 	unsigned long min_interval;	/* Minimum balance interval ms */
 	unsigned long max_interval;	/* Maximum balance interval ms */
 	unsigned int busy_factor;	/* less balancing by factor if busy */
@@ -783,6 +790,8 @@ struct sched_domain {
 	unsigned int balance_interval;	/* initialise to 1. units in ms. */
 	unsigned int nr_balance_failed; /* initialise to 0 */
 
+	u64 last_update;
+
 #ifdef CONFIG_SCHEDSTATS
 	/* load_balance() stats */
 	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
@@ -820,24 +829,16 @@ extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
 				    struct sched_domain_attr *dattr_new);
 extern int arch_reinit_sched_domains(void);
 
-#endif	/* CONFIG_SMP */
+#else /* CONFIG_SMP */
 
-/*
- * A runqueue laden with a single nice 0 task scores a weighted_cpuload of
- * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a
- * task of nice 0 or enough lower priority tasks to bring up the
- * weighted_cpuload
- */
-static inline int above_background_load(void)
-{
-	unsigned long cpu;
+struct sched_domain_attr;
 
-	for_each_online_cpu(cpu) {
-		if (weighted_cpuload(cpu) >= SCHED_LOAD_SCALE)
-			return 1;
-	}
-	return 0;
+static inline void
+partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+			struct sched_domain_attr *dattr_new)
+{
 }
+#endif	/* !CONFIG_SMP */
 
 struct io_context;			/* See blkdev.h */
 #define NGROUPS_SMALL		32
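
For context, the new #else branch gives !CONFIG_SMP builds an empty partition_sched_domains() stub, so generic code can call it unconditionally. A minimal sketch of such a caller, compiling on both SMP and UP kernels (rebuild_domains() is an illustrative name, not part of this header):

	/* Sketch: repartition scheduler domains; a no-op inline on UP. */
	static void rebuild_domains(int ndoms, cpumask_t *doms)
	{
		partition_sched_domains(ndoms, doms, NULL);	/* NULL = default attrs */
	}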
@@ -920,8 +921,8 @@ struct sched_class {
 	void (*set_cpus_allowed)(struct task_struct *p,
 				 const cpumask_t *newmask);
 
-	void (*join_domain)(struct rq *rq);
-	void (*leave_domain)(struct rq *rq);
+	void (*rq_online)(struct rq *rq);
+	void (*rq_offline)(struct rq *rq);
 
 	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
 			       int running);
@@ -1038,6 +1039,7 @@ struct task_struct {
 #endif
 
 	int prio, static_prio, normal_prio;
+	unsigned int rt_priority;
 	const struct sched_class *sched_class;
 	struct sched_entity se;
 	struct sched_rt_entity rt;
@@ -1074,12 +1076,6 @@ struct task_struct {
 #endif
 
 	struct list_head tasks;
-	/*
-	 * ptrace_list/ptrace_children forms the list of my children
-	 * that were stolen by a ptracer.
-	 */
-	struct list_head ptrace_children;
-	struct list_head ptrace_list;
 
 	struct mm_struct *mm, *active_mm;
 
@@ -1101,18 +1097,25 @@ struct task_struct {
 	/*
 	 * pointers to (original) parent process, youngest child, younger sibling,
 	 * older sibling, respectively.  (p->father can be replaced with
-	 * p->parent->pid)
+	 * p->real_parent->pid)
 	 */
-	struct task_struct *real_parent; /* real parent process (when being debugged) */
-	struct task_struct *parent;	/* parent process */
+	struct task_struct *real_parent; /* real parent process */
+	struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
 	/*
-	 * children/sibling forms the list of my children plus the
-	 * tasks I'm ptracing.
+	 * children/sibling forms the list of my natural children
 	 */
 	struct list_head children;	/* list of my children */
 	struct list_head sibling;	/* linkage in my parent's children list */
 	struct task_struct *group_leader;	/* threadgroup leader */
 
+	/*
+	 * ptraced is the list of tasks this task is using ptrace on.
+	 * This includes both natural children and PTRACE_ATTACH targets.
+	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
+	 */
+	struct list_head ptraced;
+	struct list_head ptrace_entry;
+
 	/* PID/PID hash table linkage. */
 	struct pid_link pids[PIDTYPE_MAX];
 	struct list_head thread_group;
@@ -1121,7 +1124,6 @@ struct task_struct {
 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
 
-	unsigned int rt_priority;
 	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
 	cputime_t prev_utime, prev_stime;
@@ -1140,12 +1142,12 @@ struct task_struct {
 	gid_t gid,egid,sgid,fsgid;
 	struct group_info *group_info;
 	kernel_cap_t   cap_effective, cap_inheritable, cap_permitted, cap_bset;
-	unsigned securebits;
 	struct user_struct *user;
+	unsigned securebits;
 #ifdef CONFIG_KEYS
+	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
 	struct key *request_key_auth;	/* assumed request_key authority */
 	struct key *thread_keyring;	/* keyring private to this thread */
-	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
 #endif
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
@@ -1232,8 +1234,8 @@ struct task_struct {
 # define MAX_LOCK_DEPTH 48UL
 	u64 curr_chain_key;
 	int lockdep_depth;
-	struct held_lock held_locks[MAX_LOCK_DEPTH];
 	unsigned int lockdep_recursion;
+	struct held_lock held_locks[MAX_LOCK_DEPTH];
 #endif
 
 /* journalling filesystem info */
@@ -1251,19 +1253,11 @@ struct task_struct {
 
 	unsigned long ptrace_message;
 	siginfo_t *last_siginfo; /* For ptrace use.  */
-#ifdef CONFIG_TASK_XACCT
-/* i/o counters(bytes read/written, #syscalls */
-	u64 rchar, wchar, syscr, syscw;
-#endif
 	struct task_io_accounting ioac;
 #if defined(CONFIG_TASK_XACCT)
 	u64 acct_rss_mem1;	/* accumulated rss usage */
 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
-	cputime_t acct_stimexpd;/* stime since last update */
-#endif
-#ifdef CONFIG_NUMA
-	struct mempolicy *mempolicy;
-	short il_next;
+	cputime_t acct_timexpd;	/* stime + utime since last update */
 #endif
 #ifdef CONFIG_CPUSETS
 	nodemask_t mems_allowed;
@@ -1284,6 +1278,10 @@ struct task_struct {
 	struct list_head pi_state_list;
 	struct futex_pi_state *pi_state_cache;
 #endif
+#ifdef CONFIG_NUMA
+	struct mempolicy *mempolicy;
+	short il_next;
+#endif
 	atomic_t fs_excl;	/* holding fs exclusive resources */
 	struct rcu_head rcu;
 
@@ -1498,14 +1496,16 @@ static inline void put_task_struct(struct task_struct *t)
 #define PF_KSWAPD	0x00040000	/* I am kswapd */
 #define PF_SWAPOFF	0x00080000	/* I am in swapoff */
 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
-#define PF_BORROWED_MM	0x00200000	/* I am a kthread doing use_mm */
+#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
 #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
 #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
 #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
 #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
+#define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezeable */
+#define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
@@ -1551,6 +1551,50 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 
 extern unsigned long long sched_clock(void);
 
+#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static inline void sched_clock_init(void)
+{
+}
+
+static inline u64 sched_clock_cpu(int cpu)
+{
+	return sched_clock();
+}
+
+static inline void sched_clock_tick(void)
+{
+}
+
+static inline void sched_clock_idle_sleep_event(void)
+{
+}
+
+static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
+{
+}
+
+#ifdef CONFIG_NO_HZ
+static inline void sched_clock_tick_stop(int cpu)
+{
+}
+
+static inline void sched_clock_tick_start(int cpu)
+{
+}
+#endif
+
+#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
+extern void sched_clock_init(void);
+extern u64 sched_clock_cpu(int cpu);
+extern void sched_clock_tick(void);
+extern void sched_clock_idle_sleep_event(void);
+extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+#ifdef CONFIG_NO_HZ
+extern void sched_clock_tick_stop(int cpu);
+extern void sched_clock_tick_start(int cpu);
+#endif
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
+
 /*
  * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
  * clock constructed from sched_clock():
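
For context, when an architecture does not select CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, sched_clock_cpu() falls straight back to sched_clock() and the tick/idle hooks compile to empty inlines. A minimal timing sketch, assuming a placeholder do_work() and nanosecond readings that are only safely compared on one cpu:

	u64 t0, t1;
	int cpu = get_cpu();		/* disable preemption while sampling */

	t0 = sched_clock_cpu(cpu);
	do_work();			/* hypothetical workload */
	t1 = sched_clock_cpu(cpu);
	put_cpu();
	printk(KERN_DEBUG "section took %llu ns\n",
	       (unsigned long long)(t1 - t0));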
@@ -1592,6 +1636,7 @@ extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
+extern unsigned int sysctl_sched_shares_ratelimit;
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
 		struct file *file, void __user *buffer, size_t *length,
@@ -1625,6 +1670,8 @@ extern int can_nice(const struct task_struct *p, const int nice);
 extern int task_curr(const struct task_struct *p);
 extern int idle_cpu(int cpu);
 extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
+extern int sched_setscheduler_nocheck(struct task_struct *, int,
+				      struct sched_param *);
 extern struct task_struct *idle_task(int cpu);
 extern struct task_struct *curr_task(int cpu);
 extern void set_curr_task(int cpu, struct task_struct *p);
@@ -1668,19 +1715,13 @@ extern struct pid_namespace init_pid_ns;
  *	finds a task by its pid in the specified namespace
  * find_task_by_vpid():
  *	finds a task by its virtual pid
- * find_task_by_pid():
- *	finds a task by its global pid
  *
- * see also find_pid() etc in include/linux/pid.h
+ * see also find_vpid() etc in include/linux/pid.h
  */
 
 extern struct task_struct *find_task_by_pid_type_ns(int type, int pid,
 		struct pid_namespace *ns);
 
-static inline struct task_struct *__deprecated find_task_by_pid(pid_t nr)
-{
-	return find_task_by_pid_type_ns(PIDTYPE_PID, nr, &init_pid_ns);
-}
 extern struct task_struct *find_task_by_vpid(pid_t nr);
 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
 		struct pid_namespace *ns);
@@ -1748,12 +1789,11 @@ extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_
 extern int kill_pgrp(struct pid *pid, int sig, int priv);
 extern int kill_pid(struct pid *pid, int sig, int priv);
 extern int kill_proc_info(int, struct siginfo *, pid_t);
-extern void do_notify_parent(struct task_struct *, int);
+extern int do_notify_parent(struct task_struct *, int);
 extern void force_sig(int, struct task_struct *);
 extern void force_sig_specific(int, struct task_struct *);
 extern int send_sig(int, struct task_struct *, int);
 extern void zap_other_threads(struct task_struct *p);
-extern int kill_proc(pid_t, int, int);
 extern struct sigqueue *sigqueue_alloc(void);
 extern void sigqueue_free(struct sigqueue *);
 extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
@@ -1817,7 +1857,9 @@ extern void exit_thread(void);
 extern void exit_files(struct task_struct *);
 extern void __cleanup_signal(struct signal_struct *);
 extern void __cleanup_sighand(struct sighand_struct *);
+
 extern void exit_itimers(struct signal_struct *);
+extern void flush_itimer_signals(void);
 
 extern NORET_TYPE void do_group_exit(int);
 
@@ -1833,14 +1875,15 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
-extern void wait_task_inactive(struct task_struct * p);
+extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
-#define wait_task_inactive(p)	do { } while (0)
+static inline unsigned long wait_task_inactive(struct task_struct *p,
+					       long match_state)
+{
+	return 1;
+}
 #endif
 
-#define remove_parent(p)	list_del_init(&(p)->sibling)
-#define add_parent(p)		list_add_tail(&(p)->sibling,&(p)->parent->children)
-
 #define next_task(p)	list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
 
 #define for_each_process(p) \
@@ -1937,6 +1980,13 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
 
 #endif
 
+static inline int object_is_on_stack(void *obj)
+{
+	void *stack = task_stack_page(current);
+
+	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
+}
+
 extern void thread_info_cache_init(void);
 
 /* set thread flags in other task's structures
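
object_is_on_stack() reports whether a pointer lands inside the current task's stack, using task_stack_page() and THREAD_SIZE. A minimal sketch, assuming one stack object and one heap object (check_objects() is an illustrative name, not part of this header):

	/* Sketch: a local passes the check, a kmalloc()ed object does not. */
	static void check_objects(void)
	{
		int local;
		int *heap = kmalloc(sizeof(*heap), GFP_KERNEL);

		WARN_ON(!object_is_on_stack(&local));
		WARN_ON(object_is_on_stack(heap));	/* heap (or NULL): false */
		kfree(heap);
	}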
@@ -1977,6 +2027,11 @@ static inline void clear_tsk_need_resched(struct task_struct *tsk)
 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
 }
 
+static inline int test_tsk_need_resched(struct task_struct *tsk)
+{
+	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
+}
+
 static inline int signal_pending(struct task_struct *p)
 {
 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
@@ -1989,6 +2044,16 @@ static inline int fatal_signal_pending(struct task_struct *p)
 	return signal_pending(p) && __fatal_signal_pending(p);
 }
 
+static inline int signal_pending_state(long state, struct task_struct *p)
+{
+	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
+		return 0;
+	if (!signal_pending(p))
+		return 0;
+
+	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
+}
+
 static inline int need_resched(void)
 {
 	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
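
signal_pending_state() folds the sleep-abort policy into one test: a TASK_INTERRUPTIBLE sleep aborts on any pending signal, a TASK_WAKEKILL (TASK_KILLABLE) sleep only on a fatal one, and a plain uninterruptible sleep never does. A minimal wait-loop sketch, with the wake-up mechanism and the cond flag left as assumptions:

	long state = TASK_INTERRUPTIBLE;
	int err = 0;

	while (!cond) {
		set_current_state(state);
		if (signal_pending_state(state, current)) {
			err = -ERESTARTSYS;	/* let the signal win */
			break;
		}
		schedule();
	}
	__set_current_state(TASK_RUNNING);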
@@ -2001,13 +2066,13 @@ static inline int need_resched(void)
  * cond_resched_lock() will drop the spinlock before scheduling,
  * cond_resched_softirq() will enable bhs before scheduling.
  */
-#ifdef CONFIG_PREEMPT
+extern int _cond_resched(void);
+#ifdef CONFIG_PREEMPT_BKL
 static inline int cond_resched(void)
 {
 	return 0;
 }
 #else
-extern int _cond_resched(void);
 static inline int cond_resched(void)
 {
 	return _cond_resched();
@@ -2015,6 +2080,10 @@ static inline int cond_resched(void)
 #endif
 extern int cond_resched_lock(spinlock_t * lock);
 extern int cond_resched_softirq(void);
+static inline int cond_resched_bkl(void)
+{
+	return _cond_resched();
+}
 
 /*
  * Does a critical section need to be broken due to another
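
cond_resched() and its _lock/_softirq/_bkl variants are voluntary preemption points for long-running kernel code. A minimal sketch, assuming a placeholder process_item() inside a long loop:

	/* Sketch: give the scheduler a chance between items. */
	for (i = 0; i < n; i++) {
		process_item(i);	/* hypothetical per-item work */
		cond_resched();		/* reschedules only if needed */
	}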
@@ -2066,14 +2135,17 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 #endif /* CONFIG_SMP */
 
-#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
 extern void arch_pick_mmap_layout(struct mm_struct *mm);
+
+#ifdef CONFIG_TRACING
+extern void
+__trace_special(void *__tr, void *__data,
+		unsigned long arg1, unsigned long arg2, unsigned long arg3);
 #else
-static inline void arch_pick_mmap_layout(struct mm_struct *mm)
+static inline void
+__trace_special(void *__tr, void *__data,
+		unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
-	mm->mmap_base = TASK_UNMAPPED_BASE;
-	mm->get_unmapped_area = arch_get_unmapped_area;
-	mm->unmap_area = arch_unmap_area;
 }
 #endif
 
@@ -2111,22 +2183,22 @@ extern long sched_group_rt_period(struct task_group *tg);
 #ifdef CONFIG_TASK_XACCT
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
 {
-	tsk->rchar += amt;
+	tsk->ioac.rchar += amt;
 }
 
 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
 {
-	tsk->wchar += amt;
+	tsk->ioac.wchar += amt;
 }
 
 static inline void inc_syscr(struct task_struct *tsk)
 {
-	tsk->syscr++;
+	tsk->ioac.syscr++;
 }
 
 static inline void inc_syscw(struct task_struct *tsk)
 {
-	tsk->syscw++;
+	tsk->ioac.syscw++;
 }
 #else
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
@@ -2146,14 +2218,6 @@ static inline void inc_syscw(struct task_struct *tsk)
 }
 #endif
 
-#ifdef CONFIG_SMP
-void migration_init(void);
-#else
-static inline void migration_init(void)
-{
-}
-#endif
-
 #ifndef TASK_SIZE_OF
 #define TASK_SIZE_OF(tsk)	TASK_SIZE
 #endif
@@ -2171,6 +2235,8 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 }
 #endif /* CONFIG_MM_OWNER */
 
+#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
+
 #endif /* __KERNEL__ */
 
 #endif