path: root/include/linux/sched.h
author    Ingo Molnar <mingo@elte.hu>    2008-10-15 07:46:29 -0400
committer Ingo Molnar <mingo@elte.hu>    2008-10-15 07:46:29 -0400
commit    b2aaf8f74cdc84a9182f6cabf198b7763bcb9d40 (patch)
tree      53ccb1c2c14751fe69cf93102e76e97021f6df07 /include/linux/sched.h
parent    4f962d4d65923d7b722192e729840cfb79af0a5a (diff)
parent    278429cff8809958d25415ba0ed32b59866ab1a8 (diff)
Merge branch 'linus' into stackprotector
Conflicts:
	arch/x86/kernel/Makefile
	include/asm-x86/pda.h
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	189
1 file changed, 93 insertions(+), 96 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f0132f9ef4dd..1a7e8461db5a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -87,6 +87,7 @@ struct sched_param {
 #include <linux/task_io_accounting.h>
 #include <linux/kobject.h>
 #include <linux/latencytop.h>
+#include <linux/cred.h>
 
 #include <asm/processor.h>
 
@@ -134,7 +135,6 @@ extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
-extern unsigned long weighted_cpuload(const int cpu);
 
 struct seq_file;
 struct cfs_rq;
@@ -246,6 +246,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
 extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
+extern int runqueue_is_locked(void);
+
 extern cpumask_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
@@ -291,13 +293,13 @@ extern void sched_show_task(struct task_struct *p);
 
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 extern void softlockup_tick(void);
-extern void spawn_softlockup_task(void);
 extern void touch_softlockup_watchdog(void);
 extern void touch_all_softlockup_watchdogs(void);
-extern unsigned long softlockup_thresh;
+extern unsigned int softlockup_panic;
 extern unsigned long sysctl_hung_task_check_count;
 extern unsigned long sysctl_hung_task_timeout_secs;
 extern unsigned long sysctl_hung_task_warnings;
+extern int softlockup_thresh;
 #else
 static inline void softlockup_tick(void)
 {
@@ -350,7 +352,7 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 extern void arch_unmap_area(struct mm_struct *, unsigned long);
 extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
 /*
  * The mm counters are not protected by its page_table_lock,
  * so must be incremented atomically.
@@ -361,7 +363,7 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 #define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
 #define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
 
-#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#else  /* !USE_SPLIT_PTLOCKS */
 /*
  * The mm counters are protected by its page_table_lock,
  * so can be incremented directly.
@@ -372,7 +374,7 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 #define inc_mm_counter(mm, member) (mm)->_##member++
 #define dec_mm_counter(mm, member) (mm)->_##member--
 
-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#endif /* !USE_SPLIT_PTLOCKS */
 
 #define get_mm_rss(mm)					\
 	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
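Note: the split-ptlock guard is now the USE_SPLIT_PTLOCKS symbol rather than an open-coded NR_CPUS comparison; the counter macros themselves are unchanged. A minimal sketch of how the token-pasting macros above are driven (the wrapper function is illustrative, not part of this header):

	/* Illustrative caller of the mm counter API; not part of this diff. */
	static void sketch_account_anon_page(struct mm_struct *mm)
	{
		/*
		 * Expands to atomic_long_inc(&mm->_anon_rss) under
		 * USE_SPLIT_PTLOCKS, or to mm->_anon_rss++ when
		 * page_table_lock protects the counters.
		 */
		inc_mm_counter(mm, anon_rss);
	}

get_mm_rss() then folds the file_rss and anon_rss counters back together for RSS reporting.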
@@ -449,8 +451,8 @@ struct signal_struct {
 	 * - everyone except group_exit_task is stopped during signal delivery
 	 *   of fatal signals, group_exit_task processes the signal.
 	 */
-	struct task_struct	*group_exit_task;
 	int			notify_count;
+	struct task_struct	*group_exit_task;
 
 	/* thread group stop support, overloads group_exit_code too */
 	int			group_stop_count;
@@ -504,6 +506,7 @@ struct signal_struct {
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
 	unsigned long inblock, oublock, cinblock, coublock;
+	struct task_io_accounting ioac;
 
 	/*
 	 * Cumulative ns of scheduled CPU time for dead threads in the
@@ -666,6 +669,10 @@ struct task_delay_info {
 				/* io operations performed */
 	u32 swapin_count;	/* total count of the number of swapin block */
 				/* io operations performed */
+
+	struct timespec freepages_start, freepages_end;
+	u64 freepages_delay;	/* wait for memory reclaim */
+	u32 freepages_count;	/* total count of memory reclaim */
 };
 #endif	/* CONFIG_TASK_DELAY_ACCT */
 
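The new freepages fields extend delay accounting to memory reclaim, following the existing stamp-start/stamp-end/accumulate pattern of the other delay counters. A minimal sketch of that pattern (the function and the choice of getnstimeofday() are illustrative assumptions, not taken from this diff):

	/* Illustrative accumulation pattern for the new freepages fields. */
	static void sketch_freepages_delay(struct task_delay_info *d)
	{
		struct timespec delta;

		getnstimeofday(&d->freepages_start);
		/* ... direct reclaim would run here ... */
		getnstimeofday(&d->freepages_end);

		delta = timespec_sub(d->freepages_end, d->freepages_start);
		d->freepages_delay += timespec_to_ns(&delta);
		d->freepages_count++;
	}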
@@ -784,6 +791,8 @@ struct sched_domain {
 	unsigned int balance_interval;	/* initialise to 1. units in ms. */
 	unsigned int nr_balance_failed; /* initialise to 0 */
 
+	u64 last_update;
+
 #ifdef CONFIG_SCHEDSTATS
 	/* load_balance() stats */
 	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
@@ -815,30 +824,25 @@ struct sched_domain {
 	unsigned int ttwu_move_affine;
 	unsigned int ttwu_move_balance;
 #endif
+#ifdef CONFIG_SCHED_DEBUG
+	char *name;
+#endif
 };
 
 extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
 				    struct sched_domain_attr *dattr_new);
 extern int arch_reinit_sched_domains(void);
 
-#endif	/* CONFIG_SMP */
+#else /* CONFIG_SMP */
 
-/*
- * A runqueue laden with a single nice 0 task scores a weighted_cpuload of
- * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a
- * task of nice 0 or enough lower priority tasks to bring up the
- * weighted_cpuload
- */
-static inline int above_background_load(void)
-{
-	unsigned long cpu;
+struct sched_domain_attr;
 
-	for_each_online_cpu(cpu) {
-		if (weighted_cpuload(cpu) >= SCHED_LOAD_SCALE)
-			return 1;
-	}
-	return 0;
-}
+static inline void
+partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+			struct sched_domain_attr *dattr_new)
+{
+}
+#endif	/* !CONFIG_SMP */
 
 struct io_context;			/* See blkdev.h */
 #define NGROUPS_SMALL		32
@@ -896,7 +900,7 @@ struct sched_class {
 	void (*yield_task) (struct rq *rq);
 	int  (*select_task_rq)(struct task_struct *p, int sync);
 
-	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
+	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
 
 	struct task_struct * (*pick_next_task) (struct rq *rq);
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
@@ -921,8 +925,8 @@ struct sched_class {
 	void (*set_cpus_allowed)(struct task_struct *p,
 				 const cpumask_t *newmask);
 
-	void (*join_domain)(struct rq *rq);
-	void (*leave_domain)(struct rq *rq);
+	void (*rq_online)(struct rq *rq);
+	void (*rq_offline)(struct rq *rq);
 
 	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
 			       int running);
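join_domain/leave_domain become rq_online/rq_offline, reflecting that the callbacks track a runqueue entering or leaving the active set. A hedged sketch of a scheduling class wiring up the renamed hooks (the names and bodies below are illustrative, not from this diff):

	static void sketch_rq_online(struct rq *rq)
	{
		/* rq's cpu has come online for scheduling; set up class state */
	}

	static void sketch_rq_offline(struct rq *rq)
	{
		/* rq's cpu is going away; retire per-class state */
	}

	static const struct sched_class sketch_sched_class = {
		.rq_online	= sketch_rq_online,
		.rq_offline	= sketch_rq_offline,
		/* the remaining callbacks are omitted from this sketch */
	};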
@@ -1009,8 +1013,8 @@ struct sched_entity {
 
 struct sched_rt_entity {
 	struct list_head run_list;
-	unsigned int time_slice;
 	unsigned long timeout;
+	unsigned int time_slice;
 	int nr_cpus_allowed;
 
 	struct sched_rt_entity *back;
@@ -1039,6 +1043,7 @@ struct task_struct {
 #endif
 
 	int prio, static_prio, normal_prio;
+	unsigned int rt_priority;
 	const struct sched_class *sched_class;
 	struct sched_entity se;
 	struct sched_rt_entity rt;
@@ -1075,12 +1080,6 @@ struct task_struct {
 #endif
 
 	struct list_head tasks;
-	/*
-	 * ptrace_list/ptrace_children forms the list of my children
-	 * that were stolen by a ptracer.
-	 */
-	struct list_head ptrace_children;
-	struct list_head ptrace_list;
 
 	struct mm_struct *mm, *active_mm;
 
@@ -1101,18 +1100,25 @@ struct task_struct {
 	/*
 	 * pointers to (original) parent process, youngest child, younger sibling,
 	 * older sibling, respectively.  (p->father can be replaced with
-	 * p->parent->pid)
+	 * p->real_parent->pid)
 	 */
-	struct task_struct *real_parent; /* real parent process (when being debugged) */
-	struct task_struct *parent;	/* parent process */
+	struct task_struct *real_parent; /* real parent process */
+	struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
 	/*
-	 * children/sibling forms the list of my children plus the
-	 * tasks I'm ptracing.
+	 * children/sibling forms the list of my natural children
 	 */
 	struct list_head children;	/* list of my children */
 	struct list_head sibling;	/* linkage in my parent's children list */
 	struct task_struct *group_leader;	/* threadgroup leader */
 
+	/*
+	 * ptraced is the list of tasks this task is using ptrace on.
+	 * This includes both natural children and PTRACE_ATTACH targets.
+	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
+	 */
+	struct list_head ptraced;
+	struct list_head ptrace_entry;
+
 	/* PID/PID hash table linkage. */
 	struct pid_link pids[PIDTYPE_MAX];
 	struct list_head thread_group;
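The per-task ptrace bookkeeping thus moves from ptrace_children/ptrace_list to a ptraced list linked through ptrace_entry. A sketch of walking it, per the comment above (the helper is illustrative; tasklist_lock is the usual guard for these lists):

	/* Illustrative walk of a tracer's ptraced list; hold tasklist_lock. */
	static void sketch_walk_ptraced(struct task_struct *tracer)
	{
		struct task_struct *child;

		list_for_each_entry(child, &tracer->ptraced, ptrace_entry) {
			/* 'child' is a natural child or a PTRACE_ATTACH target */
		}
	}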
@@ -1121,7 +1127,6 @@ struct task_struct {
 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
 
-	unsigned int rt_priority;
 	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
 	cputime_t prev_utime, prev_stime;
@@ -1140,12 +1145,12 @@ struct task_struct {
 	gid_t gid,egid,sgid,fsgid;
 	struct group_info *group_info;
 	kernel_cap_t   cap_effective, cap_inheritable, cap_permitted, cap_bset;
-	unsigned securebits;
 	struct user_struct *user;
+	unsigned securebits;
 #ifdef CONFIG_KEYS
+	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
 	struct key *request_key_auth;	/* assumed request_key authority */
 	struct key *thread_keyring;	/* keyring private to this thread */
-	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
 #endif
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
@@ -1232,8 +1237,8 @@ struct task_struct {
 # define MAX_LOCK_DEPTH 48UL
 	u64 curr_chain_key;
 	int lockdep_depth;
-	struct held_lock held_locks[MAX_LOCK_DEPTH];
 	unsigned int lockdep_recursion;
+	struct held_lock held_locks[MAX_LOCK_DEPTH];
 #endif
 
 /* journalling filesystem info */
@@ -1251,19 +1256,11 @@ struct task_struct {
 
 	unsigned long ptrace_message;
 	siginfo_t *last_siginfo; /* For ptrace use.  */
-#ifdef CONFIG_TASK_XACCT
-/* i/o counters(bytes read/written, #syscalls */
-	u64 rchar, wchar, syscr, syscw;
-#endif
 	struct task_io_accounting ioac;
 #if defined(CONFIG_TASK_XACCT)
 	u64 acct_rss_mem1;	/* accumulated rss usage */
 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
-	cputime_t acct_stimexpd;/* stime since last update */
-#endif
-#ifdef CONFIG_NUMA
-	struct mempolicy *mempolicy;
-	short il_next;
+	cputime_t acct_timexpd;	/* stime + utime since last update */
 #endif
 #ifdef CONFIG_CPUSETS
 	nodemask_t mems_allowed;
@@ -1284,6 +1281,10 @@ struct task_struct {
 	struct list_head pi_state_list;
 	struct futex_pi_state *pi_state_cache;
 #endif
+#ifdef CONFIG_NUMA
+	struct mempolicy *mempolicy;
+	short il_next;
+#endif
 	atomic_t fs_excl;	/* holding fs exclusive resources */
 	struct rcu_head rcu;
 
@@ -1476,6 +1477,10 @@ static inline void put_task_struct(struct task_struct *t)
 		__put_task_struct(t);
 }
 
+extern cputime_t task_utime(struct task_struct *p);
+extern cputime_t task_stime(struct task_struct *p);
+extern cputime_t task_gtime(struct task_struct *p);
+
 /*
  * Per process flags
  */
@@ -1498,14 +1503,16 @@ static inline void put_task_struct(struct task_struct *t)
 #define PF_KSWAPD	0x00040000	/* I am kswapd */
 #define PF_SWAPOFF	0x00080000	/* I am in swapoff */
 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
-#define PF_BORROWED_MM	0x00200000	/* I am a kthread doing use_mm */
+#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
 #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
 #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
 #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
 #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
+#define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezeable */
+#define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
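PF_BORROWED_MM gives way to PF_KTHREAD, a direct kernel-thread marker, and two new bits appear (PF_THREAD_BOUND, PF_FREEZER_NOSIG). A sketch of the kind of test the renamed bit enables (the helper name is illustrative):

	/* Illustrative: with PF_KTHREAD the check is a plain flag test. */
	static inline int sketch_is_kernel_thread(struct task_struct *p)
	{
		return !!(p->flags & PF_KTHREAD);
	}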
@@ -1551,16 +1558,10 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 
 extern unsigned long long sched_clock(void);
 
-#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-static inline void sched_clock_init(void)
-{
-}
-
-static inline u64 sched_clock_cpu(int cpu)
-{
-	return sched_clock();
-}
+extern void sched_clock_init(void);
+extern u64 sched_clock_cpu(int cpu);
 
+#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 static inline void sched_clock_tick(void)
 {
 }
@@ -1573,8 +1574,6 @@ static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 }
 #else
-extern void sched_clock_init(void);
-extern u64 sched_clock_cpu(int cpu);
 extern void sched_clock_tick(void);
 extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
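sched_clock_init() and sched_clock_cpu() are now out-of-line in all configurations, so callers no longer need to care whether CONFIG_HAVE_UNSTABLE_SCHED_CLOCK is set. A hedged usage sketch (the measurement helper is illustrative):

	/* Illustrative interval measurement against one cpu's clock. */
	static u64 sketch_measure_ns(void)
	{
		u64 t0, t1;
		int cpu;

		cpu = get_cpu();	/* keep both reads on the same cpu */
		t0 = sched_clock_cpu(cpu);
		/* ... work being timed ... */
		t1 = sched_clock_cpu(cpu);
		put_cpu();

		return t1 - t0;		/* ns as seen by that cpu's clock */
	}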
@@ -1621,6 +1620,7 @@ extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
+extern unsigned int sysctl_sched_shares_ratelimit;
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
 		struct file *file, void __user *buffer, size_t *length,
@@ -1654,6 +1654,8 @@ extern int can_nice(const struct task_struct *p, const int nice);
 extern int task_curr(const struct task_struct *p);
 extern int idle_cpu(int cpu);
 extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
+extern int sched_setscheduler_nocheck(struct task_struct *, int,
+				      struct sched_param *);
 extern struct task_struct *idle_task(int cpu);
 extern struct task_struct *curr_task(int cpu);
 extern void set_curr_task(int cpu, struct task_struct *p);
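sched_setscheduler_nocheck() is a new in-kernel variant: same signature, but it skips the permission checks that guard the user-visible path. A hedged sketch of the intended kind of caller (the function and priority value are illustrative):

	/* Illustrative: a kernel subsystem boosting its own kthread to RT. */
	static void sketch_boost_kthread(struct task_struct *tsk)
	{
		struct sched_param param = { .sched_priority = 1 };

		sched_setscheduler_nocheck(tsk, SCHED_FIFO, &param);
	}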
@@ -1697,19 +1699,13 @@ extern struct pid_namespace init_pid_ns;
  *      finds a task by its pid in the specified namespace
  * find_task_by_vpid():
  *      finds a task by its virtual pid
- * find_task_by_pid():
- *      finds a task by its global pid
  *
- * see also find_pid() etc in include/linux/pid.h
+ * see also find_vpid() etc in include/linux/pid.h
  */
 
 extern struct task_struct *find_task_by_pid_type_ns(int type, int pid,
 		struct pid_namespace *ns);
 
-static inline struct task_struct *__deprecated find_task_by_pid(pid_t nr)
-{
-	return find_task_by_pid_type_ns(PIDTYPE_PID, nr, &init_pid_ns);
-}
 extern struct task_struct *find_task_by_vpid(pid_t nr);
 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
 		struct pid_namespace *ns);
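The deprecated global-pid lookup find_task_by_pid() is gone; only the namespace-aware lookups remain. A sketch of the usual lookup discipline (the wrapper is illustrative):

	/* Illustrative: look up and pin a task by its virtual pid. */
	static struct task_struct *sketch_get_task(pid_t vnr)
	{
		struct task_struct *p;

		rcu_read_lock();
		p = find_task_by_vpid(vnr);
		if (p)
			get_task_struct(p);	/* keep it valid past RCU */
		rcu_read_unlock();

		return p;	/* caller drops with put_task_struct() */
	}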
@@ -1777,12 +1773,11 @@ extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_
 extern int kill_pgrp(struct pid *pid, int sig, int priv);
 extern int kill_pid(struct pid *pid, int sig, int priv);
 extern int kill_proc_info(int, struct siginfo *, pid_t);
-extern void do_notify_parent(struct task_struct *, int);
+extern int do_notify_parent(struct task_struct *, int);
 extern void force_sig(int, struct task_struct *);
 extern void force_sig_specific(int, struct task_struct *);
 extern int send_sig(int, struct task_struct *, int);
 extern void zap_other_threads(struct task_struct *p);
-extern int kill_proc(pid_t, int, int);
 extern struct sigqueue *sigqueue_alloc(void);
 extern void sigqueue_free(struct sigqueue *);
 extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
@@ -1864,14 +1859,15 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
-extern void wait_task_inactive(struct task_struct * p);
+extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
-#define wait_task_inactive(p)	do { } while (0)
+static inline unsigned long wait_task_inactive(struct task_struct *p,
+					       long match_state)
+{
+	return 1;
+}
 #endif
 
-#define remove_parent(p)	list_del_init(&(p)->sibling)
-#define add_parent(p)		list_add_tail(&(p)->sibling,&(p)->parent->children)
-
 #define next_task(p)	list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
 
 #define for_each_process(p) \
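wait_task_inactive() now takes a match_state and can fail: it returns 0 if the task is no longer in the requested state, and nonzero once the task is verifiably off the runqueue (the UP stub trivially succeeds). A hedged sketch of a caller honoring the new return value:

	/* Illustrative: wait for a traced child to be genuinely stopped. */
	static int sketch_wait_traced(struct task_struct *child)
	{
		if (!wait_task_inactive(child, TASK_TRACED))
			return -ESRCH;	/* state changed; caller re-checks */

		return 0;	/* child is off the runqueue in TASK_TRACED */
	}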
@@ -1968,6 +1964,13 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
 
 #endif
 
+static inline int object_is_on_stack(void *obj)
+{
+	void *stack = task_stack_page(current);
+
+	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
+}
+
 extern void thread_info_cache_init(void);
 
 #ifdef CONFIG_DEBUG_STACK_USAGE
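object_is_on_stack() is new infrastructure relevant to the stackprotector branch: it bounds-checks a pointer against the current task's stack page. A small sanity sketch:

	/* Illustrative: an on-stack local passes, static data does not. */
	static void sketch_stack_check(void)
	{
		int local;
		static int global;

		WARN_ON(!object_is_on_stack(&local));
		WARN_ON(object_is_on_stack(&global));
	}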
@@ -2045,9 +2048,6 @@ static inline int signal_pending_state(long state, struct task_struct *p)
 	if (!signal_pending(p))
 		return 0;
 
-	if (state & (__TASK_STOPPED | __TASK_TRACED))
-		return 0;
-
 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
 }
 
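With the stopped/traced special case dropped, signal_pending_state() answers purely from the sleep state: interruptible sleeps wake for any pending signal, killable ones only for fatal signals. A sketch of the caller pattern (the helper is illustrative):

	/* Illustrative: decide whether committing to a sleep state is safe. */
	static int sketch_prepare_sleep(long state)
	{
		if (signal_pending_state(state, current))
			return -ERESTARTSYS;	/* signal must abort this sleep */

		return 0;	/* ok to set 'state' and schedule() */
	}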
@@ -2132,14 +2132,17 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 #endif /* CONFIG_SMP */
 
-#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
 extern void arch_pick_mmap_layout(struct mm_struct *mm);
+
+#ifdef CONFIG_TRACING
+extern void
+__trace_special(void *__tr, void *__data,
+		unsigned long arg1, unsigned long arg2, unsigned long arg3);
 #else
-static inline void arch_pick_mmap_layout(struct mm_struct *mm)
+static inline void
+__trace_special(void *__tr, void *__data,
+		unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
-	mm->mmap_base = TASK_UNMAPPED_BASE;
-	mm->get_unmapped_area = arch_get_unmapped_area;
-	mm->unmap_area = arch_unmap_area;
 }
 #endif
 
@@ -2177,22 +2180,22 @@ extern long sched_group_rt_period(struct task_group *tg);
 #ifdef CONFIG_TASK_XACCT
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
 {
-	tsk->rchar += amt;
+	tsk->ioac.rchar += amt;
 }
 
 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
 {
-	tsk->wchar += amt;
+	tsk->ioac.wchar += amt;
 }
 
 static inline void inc_syscr(struct task_struct *tsk)
 {
-	tsk->syscr++;
+	tsk->ioac.syscr++;
 }
 
 static inline void inc_syscw(struct task_struct *tsk)
 {
-	tsk->syscw++;
+	tsk->ioac.syscw++;
 }
 #else
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
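With the loose per-task rchar/wchar/syscr/syscw fields folded into task_io_accounting, these helpers now update tsk->ioac, which the new signal_struct ioac field can aggregate process-wide. A sketch of the call pattern from a read path (illustrative, not from this diff):

	/* Illustrative: accounting hooks as a read path would drive them. */
	static void sketch_account_read(struct task_struct *tsk, ssize_t ret)
	{
		inc_syscr(tsk);			/* count the read attempt */
		if (ret > 0)
			add_rchar(tsk, ret);	/* bytes actually read */
	}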
@@ -2212,14 +2215,6 @@ static inline void inc_syscw(struct task_struct *tsk)
 }
 #endif
 
-#ifdef CONFIG_SMP
-void migration_init(void);
-#else
-static inline void migration_init(void)
-{
-}
-#endif
-
 #ifndef TASK_SIZE_OF
 #define TASK_SIZE_OF(tsk)	TASK_SIZE
 #endif
@@ -2237,6 +2232,8 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 }
 #endif /* CONFIG_MM_OWNER */
 
+#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
+
 #endif /* __KERNEL__ */
 
 #endif