path: root/include/linux/sched.h
author		Ingo Molnar <mingo@elte.hu>	2008-10-28 11:26:12 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-28 11:26:12 -0400
commit		7a9787e1eba95a166265e6a260cf30af04ef0a99 (patch)
tree		e730a4565e0318140d2fbd2f0415d18a339d7336 /include/linux/sched.h
parent		41b9eb264c8407655db57b60b4457fe1b2ec9977 (diff)
parent		0173a3265b228da319ceb9c1ec6a5682fd1b2d92 (diff)
Merge commit 'v2.6.28-rc2' into x86/pci-ioapic-boot-irq-quirks
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	242
1 file changed, 145 insertions(+), 97 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1941d8b5cf11..8478f334d732 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -87,6 +87,7 @@ struct sched_param {
 #include <linux/task_io_accounting.h>
 #include <linux/kobject.h>
 #include <linux/latencytop.h>
+#include <linux/cred.h>
 
 #include <asm/processor.h>
 
@@ -286,19 +287,18 @@ extern void trap_init(void);
 extern void account_process_tick(struct task_struct *task, int user);
 extern void update_process_times(int user);
 extern void scheduler_tick(void);
-extern void hrtick_resched(void);
 
 extern void sched_show_task(struct task_struct *p);
 
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 extern void softlockup_tick(void);
-extern void spawn_softlockup_task(void);
 extern void touch_softlockup_watchdog(void);
 extern void touch_all_softlockup_watchdogs(void);
-extern unsigned long softlockup_thresh;
+extern unsigned int softlockup_panic;
 extern unsigned long sysctl_hung_task_check_count;
 extern unsigned long sysctl_hung_task_timeout_secs;
 extern unsigned long sysctl_hung_task_warnings;
+extern int softlockup_thresh;
 #else
 static inline void softlockup_tick(void)
 {
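
The watchdog knobs declared above are backed by sysctls under /proc/sys/kernel/. A standalone sketch (not part of the patch), assuming a kernel built with CONFIG_DETECT_SOFTLOCKUP, that reads them from procfs:

    /* Reads the softlockup/hung-task knobs; a missing file just means
     * the running kernel was not built with this config. */
    #include <stdio.h>

    static void show(const char *path)
    {
        char buf[64];
        FILE *f = fopen(path, "r");

        if (!f)
            return;        /* knob absent on this kernel/config */
        if (fgets(buf, sizeof(buf), f))
            printf("%s = %s", path, buf);
        fclose(f);
    }

    int main(void)
    {
        show("/proc/sys/kernel/softlockup_thresh");
        show("/proc/sys/kernel/softlockup_panic");
        show("/proc/sys/kernel/hung_task_timeout_secs");
        return 0;
    }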
@@ -351,7 +351,7 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 extern void arch_unmap_area(struct mm_struct *, unsigned long);
 extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
 /*
  * The mm counters are not protected by its page_table_lock,
  * so must be incremented atomically.
@@ -362,7 +362,7 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 #define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
 #define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
 
-#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#else  /* !USE_SPLIT_PTLOCKS */
 /*
  * The mm counters are protected by its page_table_lock,
  * so can be incremented directly.
@@ -373,7 +373,7 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 #define inc_mm_counter(mm, member) (mm)->_##member++
 #define dec_mm_counter(mm, member) (mm)->_##member--
 
-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#endif /* !USE_SPLIT_PTLOCKS */
 
 #define get_mm_rss(mm) \
 	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
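
The two counter flavors above differ only in whether increments are atomic: with split page-table locks no single lock covers the counters, without them page_table_lock does. A standalone userspace sketch (not part of the patch) mirroring the same token-pasting pattern with C11 atomics standing in for atomic_long_t:

    #include <stdatomic.h>
    #include <stdio.h>

    #define USE_SPLIT_PTLOCKS 1    /* flip to 0 for the plain variant */

    #if USE_SPLIT_PTLOCKS
    typedef atomic_long mm_counter_t;
    #define get_mm_counter(mm, member)  atomic_load(&(mm)->_##member)
    #define inc_mm_counter(mm, member)  atomic_fetch_add(&(mm)->_##member, 1)
    #else
    typedef long mm_counter_t;
    #define get_mm_counter(mm, member)  ((mm)->_##member)
    #define inc_mm_counter(mm, member)  ((mm)->_##member++)
    #endif

    struct mm_mock { mm_counter_t _file_rss, _anon_rss; };

    #define get_mm_rss(mm) \
        (get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))

    int main(void)
    {
        struct mm_mock mm = {0};

        inc_mm_counter(&mm, file_rss);
        inc_mm_counter(&mm, anon_rss);
        printf("rss pages: %ld\n", (long)get_mm_rss(&mm));
        return 0;
    }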
@@ -402,12 +402,21 @@ extern int get_dumpable(struct mm_struct *mm);
 #define MMF_DUMP_MAPPED_PRIVATE	4
 #define MMF_DUMP_MAPPED_SHARED	5
 #define MMF_DUMP_ELF_HEADERS	6
+#define MMF_DUMP_HUGETLB_PRIVATE 7
+#define MMF_DUMP_HUGETLB_SHARED  8
 #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
-#define MMF_DUMP_FILTER_BITS	5
+#define MMF_DUMP_FILTER_BITS	7
 #define MMF_DUMP_FILTER_MASK \
 	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
 #define MMF_DUMP_FILTER_DEFAULT \
-	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED))
+	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
+	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
+
+#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
+# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
+#else
+# define MMF_DUMP_MASK_DEFAULT_ELF	0
+#endif
 
 struct sighand_struct {
 	atomic_t		count;
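
The hugetlb bits widen the coredump filter from 5 to 7 bits, so the mask now covers flag bits 2 through 8. A standalone sketch (not part of the patch) of the mask arithmetic, with the constants copied from the definitions above (MMF_DUMPABLE_BITS is 2 in this kernel):

    #include <stdio.h>

    #define MMF_DUMPABLE_BITS        2
    #define MMF_DUMP_ANON_PRIVATE    2
    #define MMF_DUMP_ANON_SHARED     3
    #define MMF_DUMP_HUGETLB_PRIVATE 7
    #define MMF_DUMP_FILTER_SHIFT    MMF_DUMPABLE_BITS
    #define MMF_DUMP_FILTER_BITS     7
    #define MMF_DUMP_FILTER_MASK \
        (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)

    int main(void)
    {
        /* seven bits shifted past the two dumpable bits: 0x1fc */
        printf("filter mask:    %#x\n", MMF_DUMP_FILTER_MASK);
        /* default, ignoring the config-dependent ELF-headers bit */
        printf("default filter: %#x\n",
               (1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |
               (1 << MMF_DUMP_HUGETLB_PRIVATE));
        return 0;
    }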
@@ -424,6 +433,39 @@ struct pacct_struct {
 	unsigned long		ac_minflt, ac_majflt;
 };
 
+/**
+ * struct task_cputime - collected CPU time counts
+ * @utime:		time spent in user mode, in &cputime_t units
+ * @stime:		time spent in kernel mode, in &cputime_t units
+ * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
+ *
+ * This structure groups together three kinds of CPU time that are
+ * tracked for threads and thread groups.  Most things considering
+ * CPU time want to group these counts together and treat all three
+ * of them in parallel.
+ */
+struct task_cputime {
+	cputime_t utime;
+	cputime_t stime;
+	unsigned long long sum_exec_runtime;
+};
+/* Alternate field names when used to cache expirations. */
+#define prof_exp	stime
+#define virt_exp	utime
+#define sched_exp	sum_exec_runtime
+
+/**
+ * struct thread_group_cputime - thread group interval timer counts
+ * @totals:		thread group interval timers; substructure for
+ *			uniprocessor kernel, per-cpu for SMP kernel.
+ *
+ * This structure contains the version of task_cputime, above, that is
+ * used for thread group CPU clock calculations.
+ */
+struct thread_group_cputime {
+	struct task_cputime *totals;
+};
+
 /*
  * NOTE! "signal_struct" does not have it's own
  * locking, because a shared signal_struct always
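
The alternate field names let the same struct serve both as a set of accumulated counts and as an earliest-expiration cache, one alias per POSIX CPU clock. A standalone sketch (not part of the patch) showing the aliasing, with cputime_t mocked as unsigned long:

    #include <stdio.h>

    typedef unsigned long cputime_t;

    struct task_cputime {
        cputime_t utime;
        cputime_t stime;
        unsigned long long sum_exec_runtime;
    };
    /* Alternate field names when used to cache expirations. */
    #define prof_exp   stime
    #define virt_exp   utime
    #define sched_exp  sum_exec_runtime

    int main(void)
    {
        struct task_cputime expires = {0};

        expires.prof_exp = 100;          /* writes .stime */
        expires.sched_exp = 5000000ULL;  /* writes .sum_exec_runtime */
        printf("stime=%lu runtime=%llu\n",
               expires.stime, expires.sum_exec_runtime);
        return 0;
    }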
@@ -450,8 +492,8 @@ struct signal_struct {
 	 * - everyone except group_exit_task is stopped during signal delivery
 	 *   of fatal signals, group_exit_task processes the signal.
 	 */
-	struct task_struct	*group_exit_task;
 	int			notify_count;
+	struct task_struct	*group_exit_task;
 
 	/* thread group stop support, overloads group_exit_code too */
 	int			group_stop_count;
@@ -469,6 +511,17 @@ struct signal_struct {
 	cputime_t it_prof_expires, it_virt_expires;
 	cputime_t it_prof_incr, it_virt_incr;
 
+	/*
+	 * Thread group totals for process CPU clocks.
+	 * See thread_group_cputime(), et al, for details.
+	 */
+	struct thread_group_cputime cputime;
+
+	/* Earliest-expiration cache. */
+	struct task_cputime cputime_expires;
+
+	struct list_head cpu_timers[3];
+
 	/* job control IDs */
 
 	/*
@@ -499,20 +552,13 @@ struct signal_struct {
 	 * Live threads maintain their own counters and add to these
 	 * in __exit_signal, except for the group leader.
 	 */
-	cputime_t utime, stime, cutime, cstime;
+	cputime_t cutime, cstime;
 	cputime_t gtime;
 	cputime_t cgtime;
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
 	unsigned long inblock, oublock, cinblock, coublock;
-
-	/*
-	 * Cumulative ns of scheduled CPU time for dead threads in the
-	 * group, not including a zombie group leader.  (This only differs
-	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
-	 * other than jiffies.)
-	 */
-	unsigned long long sum_sched_runtime;
+	struct task_io_accounting ioac;
 
 	/*
 	 * We don't bother to synchronize most readers of this at all,
@@ -525,8 +571,6 @@ struct signal_struct {
 	 */
 	struct rlimit rlim[RLIM_NLIMITS];
 
-	struct list_head cpu_timers[3];
-
 	/* keep the process-shared keyrings here so that they do the right
 	 * thing in threads created with CLONE_THREAD */
 #ifdef CONFIG_KEYS
@@ -636,10 +680,6 @@ struct sched_info {
 };
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 
-#ifdef CONFIG_SCHEDSTATS
-extern const struct file_operations proc_schedstat_operations;
-#endif /* CONFIG_SCHEDSTATS */
-
 #ifdef CONFIG_TASK_DELAY_ACCT
 struct task_delay_info {
 	spinlock_t	lock;
@@ -667,6 +707,10 @@ struct task_delay_info {
 				/* io operations performed */
 	u32 swapin_count;	/* total count of the number of swapin block */
 				/* io operations performed */
+
+	struct timespec freepages_start, freepages_end;
+	u64 freepages_delay;	/* wait for memory reclaim */
+	u32 freepages_count;	/* total count of memory reclaim */
 };
 #endif	/* CONFIG_TASK_DELAY_ACCT */
 
@@ -818,13 +862,25 @@ struct sched_domain {
 	unsigned int ttwu_move_affine;
 	unsigned int ttwu_move_balance;
 #endif
+#ifdef CONFIG_SCHED_DEBUG
+	char *name;
+#endif
 };
 
 extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
 				    struct sched_domain_attr *dattr_new);
 extern int arch_reinit_sched_domains(void);
 
-#endif	/* CONFIG_SMP */
+#else /* CONFIG_SMP */
+
+struct sched_domain_attr;
+
+static inline void
+partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+			struct sched_domain_attr *dattr_new)
+{
+}
+#endif	/* !CONFIG_SMP */
 
 struct io_context;  /* See blkdev.h */
 #define NGROUPS_SMALL		32
@@ -882,7 +938,7 @@ struct sched_class {
 	void (*yield_task) (struct rq *rq);
 	int  (*select_task_rq)(struct task_struct *p, int sync);
 
-	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
+	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
 
 	struct task_struct * (*pick_next_task) (struct rq *rq);
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
@@ -995,8 +1051,8 @@ struct sched_entity {
 
 struct sched_rt_entity {
 	struct list_head run_list;
-	unsigned int time_slice;
 	unsigned long timeout;
+	unsigned int time_slice;
 	int nr_cpus_allowed;
 
 	struct sched_rt_entity *back;
@@ -1119,8 +1175,7 @@ struct task_struct {
 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
 	unsigned long min_flt, maj_flt;
 
-	cputime_t it_prof_expires, it_virt_expires;
-	unsigned long long it_sched_expires;
+	struct task_cputime cputime_expires;
 	struct list_head cpu_timers[3];
 
 /* process credentials */
@@ -1239,15 +1294,11 @@ struct task_struct {
 
 	unsigned long ptrace_message;
 	siginfo_t *last_siginfo; /* For ptrace use.  */
-#ifdef CONFIG_TASK_XACCT
-/* i/o counters(bytes read/written, #syscalls */
-	u64 rchar, wchar, syscr, syscw;
-#endif
 	struct task_io_accounting ioac;
 #if defined(CONFIG_TASK_XACCT)
 	u64 acct_rss_mem1;	/* accumulated rss usage */
 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
-	cputime_t acct_stimexpd;/* stime since last update */
+	cputime_t acct_timexpd;	/* stime + utime since last update */
 #endif
 #ifdef CONFIG_CPUSETS
 	nodemask_t mems_allowed;
@@ -1290,6 +1341,12 @@ struct task_struct {
 	int latency_record_count;
 	struct latency_record latency_record[LT_SAVECOUNT];
 #endif
+	/*
+	 * time slack values; these are used to round up poll() and
+	 * select() etc timeout values. These are in nanoseconds.
+	 */
+	unsigned long timer_slack_ns;
+	unsigned long default_timer_slack_ns;
 };
 
 /*
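
The new per-task slack lets the kernel defer poll()/select() wakeups to batch timer expirations. A standalone sketch (not part of the patch) adjusting it from userspace via the companion prctl from the same 2.6.28 timer-slack series; the fallback constant values below are taken from linux/prctl.h of that era and are an assumption if your headers predate it:

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_TIMERSLACK
    #define PR_SET_TIMERSLACK 29    /* values from the 2.6.28 series */
    #define PR_GET_TIMERSLACK 30
    #endif

    int main(void)
    {
        /* allow poll()/select() wakeups to be deferred by up to 1 ms */
        if (prctl(PR_SET_TIMERSLACK, 1000000UL, 0, 0, 0))
            perror("PR_SET_TIMERSLACK");
        printf("timer slack: %d ns\n", prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0));
        return 0;
    }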
@@ -1464,6 +1521,10 @@ static inline void put_task_struct(struct task_struct *t)
 	__put_task_struct(t);
 }
 
+extern cputime_t task_utime(struct task_struct *p);
+extern cputime_t task_stime(struct task_struct *p);
+extern cputime_t task_gtime(struct task_struct *p);
+
 /*
  * Per process flags
  */
@@ -1486,7 +1547,7 @@ static inline void put_task_struct(struct task_struct *t)
 #define PF_KSWAPD	0x00040000	/* I am kswapd */
 #define PF_SWAPOFF	0x00080000	/* I am in swapoff */
 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
-#define PF_BORROWED_MM	0x00200000	/* I am a kthread doing use_mm */
+#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
 #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
 #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
 #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
@@ -1541,16 +1602,10 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 
 extern unsigned long long sched_clock(void);
 
-#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-static inline void sched_clock_init(void)
-{
-}
-
-static inline u64 sched_clock_cpu(int cpu)
-{
-	return sched_clock();
-}
+extern void sched_clock_init(void);
+extern u64 sched_clock_cpu(int cpu);
 
+#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 static inline void sched_clock_tick(void)
 {
 }
@@ -1562,28 +1617,11 @@ static inline void sched_clock_idle_sleep_event(void)
 static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 }
-
-#ifdef CONFIG_NO_HZ
-static inline void sched_clock_tick_stop(int cpu)
-{
-}
-
-static inline void sched_clock_tick_start(int cpu)
-{
-}
-#endif
-
-#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
-extern void sched_clock_init(void);
-extern u64 sched_clock_cpu(int cpu);
+#else
 extern void sched_clock_tick(void);
 extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
-#ifdef CONFIG_NO_HZ
-extern void sched_clock_tick_stop(int cpu);
-extern void sched_clock_tick_start(int cpu);
-#endif
 #endif
-#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 /*
  * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
@@ -1593,6 +1631,7 @@ extern unsigned long long cpu_clock(int cpu);
 
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
+extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
@@ -1627,6 +1666,7 @@ extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
 extern unsigned int sysctl_sched_shares_ratelimit;
+extern unsigned int sysctl_sched_shares_thresh;
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
 		struct file *file, void __user *buffer, size_t *length,
@@ -1705,19 +1745,13 @@ extern struct pid_namespace init_pid_ns;
  *      finds a task by its pid in the specified namespace
  * find_task_by_vpid():
  *      finds a task by its virtual pid
- * find_task_by_pid():
- *      finds a task by its global pid
  *
- * see also find_pid() etc in include/linux/pid.h
+ * see also find_vpid() etc in include/linux/pid.h
  */
 
 extern struct task_struct *find_task_by_pid_type_ns(int type, int pid,
 		struct pid_namespace *ns);
 
-static inline struct task_struct *__deprecated find_task_by_pid(pid_t nr)
-{
-	return find_task_by_pid_type_ns(PIDTYPE_PID, nr, &init_pid_ns);
-}
 extern struct task_struct *find_task_by_vpid(pid_t nr);
 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
 		struct pid_namespace *ns);
@@ -1785,12 +1819,11 @@ extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_
 extern int kill_pgrp(struct pid *pid, int sig, int priv);
 extern int kill_pid(struct pid *pid, int sig, int priv);
 extern int kill_proc_info(int, struct siginfo *, pid_t);
-extern void do_notify_parent(struct task_struct *, int);
+extern int do_notify_parent(struct task_struct *, int);
 extern void force_sig(int, struct task_struct *);
 extern void force_sig_specific(int, struct task_struct *);
 extern int send_sig(int, struct task_struct *, int);
 extern void zap_other_threads(struct task_struct *p);
-extern int kill_proc(pid_t, int, int);
 extern struct sigqueue *sigqueue_alloc(void);
 extern void sigqueue_free(struct sigqueue *);
 extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
@@ -1872,9 +1905,13 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
-extern void wait_task_inactive(struct task_struct * p);
+extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
-#define wait_task_inactive(p)	do { } while (0)
+static inline unsigned long wait_task_inactive(struct task_struct *p,
+					       long match_state)
+{
+	return 1;
+}
 #endif
 
 #define next_task(p)	list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
@@ -1973,6 +2010,13 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
 
 #endif
 
+static inline int object_is_on_stack(void *obj)
+{
+	void *stack = task_stack_page(current);
+
+	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
+}
+
 extern void thread_info_cache_init(void);
 
 /* set thread flags in other task's structures
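
object_is_on_stack() is a simple bounds check against the task's kernel stack page. A standalone userspace analogue (not part of the patch), using the glibc-specific pthread_getattr_np() in place of task_stack_page()/THREAD_SIZE:

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int object_is_on_stack(void *obj)
    {
        pthread_attr_t attr;
        void *stack;
        size_t size;

        pthread_getattr_np(pthread_self(), &attr);
        pthread_attr_getstack(&attr, &stack, &size);
        pthread_attr_destroy(&attr);
        return (char *)obj >= (char *)stack &&
               (char *)obj < (char *)stack + size;
    }

    int main(void)
    {
        int local;
        int *heap = malloc(sizeof(*heap));

        printf("local: %d, heap: %d\n",
               object_is_on_stack(&local), object_is_on_stack(heap));
        free(heap);
        return 0;
    }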
@@ -2037,9 +2081,6 @@ static inline int signal_pending_state(long state, struct task_struct *p)
 	if (!signal_pending(p))
 		return 0;
 
-	if (state & (__TASK_STOPPED | __TASK_TRACED))
-		return 0;
-
 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
 }
 
@@ -2089,6 +2130,30 @@ static inline int spin_needbreak(spinlock_t *lock)
 }
 
 /*
+ * Thread group CPU time accounting.
+ */
+
+extern int thread_group_cputime_alloc(struct task_struct *);
+extern void thread_group_cputime(struct task_struct *, struct task_cputime *);
+
+static inline void thread_group_cputime_init(struct signal_struct *sig)
+{
+	sig->cputime.totals = NULL;
+}
+
+static inline int thread_group_cputime_clone_thread(struct task_struct *curr)
+{
+	if (curr->signal->cputime.totals)
+		return 0;
+	return thread_group_cputime_alloc(curr);
+}
+
+static inline void thread_group_cputime_free(struct signal_struct *sig)
+{
+	free_percpu(sig->cputime.totals);
+}
+
+/*
  * Reevaluate whether the task has signals pending delivery.
  * Wake the task if so.
  * This is required every time the blocked sigset_t changes.
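
The helpers above implement a lazy-allocation protocol: totals stays NULL while a process is single-threaded, and the first clone of a thread allocates it. A standalone sketch (not part of the patch) of the same protocol, with calloc standing in for the kernel's per-cpu allocator:

    #include <stdlib.h>

    typedef unsigned long cputime_t;

    struct task_cputime {
        cputime_t utime, stime;
        unsigned long long sum_exec_runtime;
    };

    struct thread_group_cputime {
        struct task_cputime *totals;
    };

    static void thread_group_cputime_init(struct thread_group_cputime *c)
    {
        c->totals = NULL;    /* no group accounting until a thread is cloned */
    }

    static int thread_group_cputime_clone_thread(struct thread_group_cputime *c)
    {
        if (c->totals)
            return 0;        /* already allocated by an earlier clone */
        c->totals = calloc(1, sizeof(*c->totals));
        return c->totals ? 0 : -1;
    }

    static void thread_group_cputime_free(struct thread_group_cputime *c)
    {
        free(c->totals);     /* free(NULL) is a harmless no-op */
    }

    int main(void)
    {
        struct thread_group_cputime c;

        thread_group_cputime_init(&c);
        if (thread_group_cputime_clone_thread(&c) == 0)
            c.totals->utime += 1;
        thread_group_cputime_free(&c);
        return 0;
    }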
@@ -2124,16 +2189,7 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 #endif /* CONFIG_SMP */
 
-#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
 extern void arch_pick_mmap_layout(struct mm_struct *mm);
-#else
-static inline void arch_pick_mmap_layout(struct mm_struct *mm)
-{
-	mm->mmap_base = TASK_UNMAPPED_BASE;
-	mm->get_unmapped_area = arch_get_unmapped_area;
-	mm->unmap_area = arch_unmap_area;
-}
-#endif
 
 #ifdef CONFIG_TRACING
 extern void
@@ -2181,22 +2237,22 @@ extern long sched_group_rt_period(struct task_group *tg);
 #ifdef CONFIG_TASK_XACCT
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
 {
-	tsk->rchar += amt;
+	tsk->ioac.rchar += amt;
 }
 
 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
 {
-	tsk->wchar += amt;
+	tsk->ioac.wchar += amt;
 }
 
 static inline void inc_syscr(struct task_struct *tsk)
 {
-	tsk->syscr++;
+	tsk->ioac.syscr++;
 }
 
 static inline void inc_syscw(struct task_struct *tsk)
 {
-	tsk->syscw++;
+	tsk->ioac.syscw++;
 }
 #else
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
@@ -2216,14 +2272,6 @@ static inline void inc_syscw(struct task_struct *tsk)
 }
 #endif
 
-#ifdef CONFIG_SMP
-void migration_init(void);
-#else
-static inline void migration_init(void)
-{
-}
-#endif
-
 #ifndef TASK_SIZE_OF
 #define TASK_SIZE_OF(tsk)	TASK_SIZE
 #endif