Diffstat (limited to 'include/linux/sched.h')
 -rw-r--r--  include/linux/sched.h  241
 1 file changed, 202 insertions(+), 39 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 38b4791e6a5d..a06fc89cf6e5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -73,6 +73,7 @@ struct sched_param {
 #include <linux/seccomp.h>
 #include <linux/rcupdate.h>
 #include <linux/futex.h>
+#include <linux/rtmutex.h>
 
 #include <linux/time.h>
 #include <linux/param.h>
@@ -83,6 +84,7 @@ struct sched_param {
 #include <asm/processor.h>
 
 struct exec_domain;
+struct futex_pi_state;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -123,6 +125,7 @@ extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
+extern unsigned long weighted_cpuload(const int cpu);
 
 
 /*
@@ -145,6 +148,7 @@ extern unsigned long nr_iowait(void);
 #define EXIT_DEAD		32
 /* in tsk->state again */
 #define TASK_NONINTERACTIVE	64
+#define TASK_DEAD		128
 
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
@@ -181,11 +185,11 @@ extern unsigned long nr_iowait(void);
 extern rwlock_t tasklist_lock;
 extern spinlock_t mmlist_lock;
 
-typedef struct task_struct task_t;
+struct task_struct;
 
 extern void sched_init(void);
 extern void sched_init_smp(void);
-extern void init_idle(task_t *idle, int cpu);
+extern void init_idle(struct task_struct *idle, int cpu);
 
 extern cpumask_t nohz_cpu_mask;
 
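This hunk begins the patch-wide removal of the old task_t typedef: a forward declaration of struct task_struct is all the header needs for the pointer-only prototypes that follow. A minimal sketch of the pattern, with example_wake_one() as a hypothetical function, not part of the patch:

    struct task_struct;                     /* forward declaration only */

    /* Pointer parameters compile against the forward declaration; the
     * full struct definition is needed only where members are accessed. */
    extern int example_wake_one(struct task_struct *p);    /* hypothetical */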
@@ -358,6 +362,14 @@ struct sighand_struct {
 	spinlock_t		siglock;
 };
 
+struct pacct_struct {
+	int			ac_flag;
+	long			ac_exitcode;
+	unsigned long		ac_mem;
+	cputime_t		ac_utime, ac_stime;
+	unsigned long		ac_minflt, ac_majflt;
+};
+
 /*
  * NOTE! "signal_struct" does not have it's own
  * locking, because a shared signal_struct always
@@ -372,7 +384,7 @@ struct signal_struct {
 	wait_queue_head_t	wait_chldexit;	/* for wait4() */
 
 	/* current thread group signal load-balancing target: */
-	task_t			*curr_target;
+	struct task_struct	*curr_target;
 
 	/* shared signal handling: */
 	struct sigpending	shared_pending;
@@ -449,6 +461,13 @@ struct signal_struct {
 	struct key *session_keyring;	/* keyring inherited over fork */
 	struct key *process_keyring;	/* keyring private to this process */
 #endif
+#ifdef CONFIG_BSD_PROCESS_ACCT
+	struct pacct_struct pacct;	/* per-process accounting information */
+#endif
+#ifdef CONFIG_TASKSTATS
+	spinlock_t stats_lock;
+	struct taskstats *stats;
+#endif
 };
 
 /* Context switch must be unlocked if interrupts are to be enabled */
@@ -483,8 +502,11 @@ struct signal_struct {
 
 #define MAX_PRIO		(MAX_RT_PRIO + 40)
 
-#define rt_task(p)		(unlikely((p)->prio < MAX_RT_PRIO))
+#define rt_prio(prio)		unlikely((prio) < MAX_RT_PRIO)
+#define rt_task(p)		rt_prio((p)->prio)
 #define batch_task(p)		(unlikely((p)->policy == SCHED_BATCH))
+#define is_rt_policy(p)		((p) != SCHED_NORMAL && (p) != SCHED_BATCH)
+#define has_rt_policy(p)	unlikely(is_rt_policy((p)->policy))
 
 /*
  * Some day this will be a full-fledged user tracking system..
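Splitting rt_task() into rt_prio() matters once rt_mutex priority inheritance (added later in this patch) can boost ->prio independently of the scheduling policy, which is what is_rt_policy()/has_rt_policy() test instead. A hedged illustration of the distinction; the helper function is hypothetical:

    /* Hypothetical: a SCHED_NORMAL task that is PI-boosted into the
     * RT priority range satisfies rt_task() but not has_rt_policy(). */
    static int example_is_boosted_normal_task(struct task_struct *p)
    {
            return rt_task(p) && !has_rt_policy(p);
    }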
@@ -517,11 +539,10 @@ extern struct user_struct *find_user(uid_t);
 extern struct user_struct root_user;
 #define INIT_USER (&root_user)
 
-typedef struct prio_array prio_array_t;
 struct backing_dev_info;
 struct reclaim_state;
 
-#ifdef CONFIG_SCHEDSTATS
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 struct sched_info {
 	/* cumulative counters */
 	unsigned long	cpu_time,	/* time spent on the cpu */
@@ -532,9 +553,53 @@ struct sched_info {
 	unsigned long	last_arrival,	/* when we last ran on a cpu */
 			last_queued;	/* when we were last queued to run */
 };
+#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 
+#ifdef CONFIG_SCHEDSTATS
 extern struct file_operations proc_schedstat_operations;
+#endif /* CONFIG_SCHEDSTATS */
+
+#ifdef CONFIG_TASK_DELAY_ACCT
+struct task_delay_info {
+	spinlock_t	lock;
+	unsigned int	flags;	/* Private per-task flags */
+
+	/* For each stat XXX, add following, aligned appropriately
+	 *
+	 * struct timespec XXX_start, XXX_end;
+	 * u64 XXX_delay;
+	 * u32 XXX_count;
+	 *
+	 * Atomicity of updates to XXX_delay, XXX_count protected by
+	 * single lock above (split into XXX_lock if contention is an issue).
+	 */
+
+	/*
+	 * XXX_count is incremented on every XXX operation, the delay
+	 * associated with the operation is added to XXX_delay.
+	 * XXX_delay contains the accumulated delay time in nanoseconds.
+	 */
+	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
+	u64 blkio_delay;	/* wait for sync block io completion */
+	u64 swapin_delay;	/* wait for swapin block io completion */
+	u32 blkio_count;	/* total count of the number of sync block */
+				/* io operations performed */
+	u32 swapin_count;	/* total count of the number of swapin block */
+				/* io operations performed */
+};
+#endif	/* CONFIG_TASK_DELAY_ACCT */
+
+static inline int sched_info_on(void)
+{
+#ifdef CONFIG_SCHEDSTATS
+	return 1;
+#elif defined(CONFIG_TASK_DELAY_ACCT)
+	extern int delayacct_on;
+	return delayacct_on;
+#else
+	return 0;
 #endif
+}
 
 enum idle_type
 {
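The comment block in task_delay_info prescribes a per-stat pattern: timestamp the start and end of the blocking operation, then fold the interval into XXX_delay and bump XXX_count under the lock. A minimal sketch of that update step, assuming the kernel's timespec_sub()/timespec_to_ns() helpers; the function name is illustrative, not the actual delayacct code:

    /* Illustrative only: fold one completed blkio wait into the totals. */
    static void example_delayacct_blkio_end(struct task_delay_info *d)
    {
            struct timespec ts;
            s64 ns;

            ts = timespec_sub(d->blkio_end, d->blkio_start);
            ns = timespec_to_ns(&ts);       /* interval in nanoseconds */

            spin_lock(&d->lock);            /* one lock guards both fields */
            d->blkio_delay += ns;
            d->blkio_count++;
            spin_unlock(&d->lock);
    }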
@@ -547,9 +612,9 @@ enum idle_type
 /*
  * sched-domains (multiprocessor balancing) declarations:
  */
-#ifdef CONFIG_SMP
 #define SCHED_LOAD_SCALE	128UL	/* increase resolution of load */
 
+#ifdef CONFIG_SMP
 #define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
 #define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
 #define SD_BALANCE_EXEC		4	/* Balance on exec */
@@ -558,6 +623,11 @@ enum idle_type
 #define SD_WAKE_AFFINE		32	/* Wake task to waking CPU */
 #define SD_WAKE_BALANCE		64	/* Perform balancing at task wakeup */
 #define SD_SHARE_CPUPOWER	128	/* Domain members share cpu power */
+#define SD_POWERSAVINGS_BALANCE	256	/* Balance for power savings */
+
+#define BALANCE_FOR_POWER	((sched_mc_power_savings || sched_smt_power_savings) \
+				 ? SD_POWERSAVINGS_BALANCE : 0)
+
 
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
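BALANCE_FOR_POWER evaluates to SD_POWERSAVINGS_BALANCE only while one of the power-savings tunables (declared near the end of this header) is enabled, so domain setup code can OR it in unconditionally. A hedged sketch of that usage; the function is hypothetical:

    /* Illustrative: the power-savings bit appears in a domain's flags
     * only while sched_mc/smt_power_savings is set. */
    static int example_domain_flags(void)
    {
            return SD_LOAD_BALANCE | SD_BALANCE_EXEC | BALANCE_FOR_POWER;
    }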
@@ -627,7 +697,7 @@ struct sched_domain {
 #endif
 };
 
-extern void partition_sched_domains(cpumask_t *partition1,
+extern int partition_sched_domains(cpumask_t *partition1,
 				    cpumask_t *partition2);
 
 /*
@@ -677,7 +747,7 @@ extern int groups_search(struct group_info *group_info, gid_t grp);
 	((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
 
 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
-extern void prefetch_stack(struct task_struct*);
+extern void prefetch_stack(struct task_struct *t);
 #else
 static inline void prefetch_stack(struct task_struct *t) { }
 #endif
@@ -693,6 +763,8 @@ enum sleep_type {
 	SLEEP_INTERRUPTED,
 };
 
+struct prio_array;
+
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
 	struct thread_info *thread_info;
@@ -702,16 +774,20 @@ struct task_struct {
 
 	int lock_depth;		/* BKL lock depth */
 
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#ifdef CONFIG_SMP
+#ifdef __ARCH_WANT_UNLOCKED_CTXSW
 	int oncpu;
 #endif
-	int prio, static_prio;
+#endif
+	int load_weight;	/* for niceness load balancing purposes */
+	int prio, static_prio, normal_prio;
 	struct list_head run_list;
-	prio_array_t *array;
+	struct prio_array *array;
 
 	unsigned short ioprio;
+#ifdef CONFIG_BLK_DEV_IO_TRACE
 	unsigned int btrace_seq;
-
+#endif
 	unsigned long sleep_avg;
 	unsigned long long timestamp, last_ran;
 	unsigned long long sched_time;	/* sched_clock time spent running */
@@ -721,7 +797,7 @@ struct task_struct {
 	cpumask_t cpus_allowed;
 	unsigned int time_slice, first_time_slice;
 
-#ifdef CONFIG_SCHEDSTATS
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
 #endif
 
@@ -745,6 +821,11 @@ struct task_struct {
 	unsigned did_exec:1;
 	pid_t pid;
 	pid_t tgid;
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+	/* Canary value for the -fstack-protector gcc feature */
+	unsigned long stack_canary;
+#endif
 	/*
 	 * pointers to (original) parent process, youngest child, younger sibling,
 	 * older sibling, respectively.  (p->father can be replaced with
@@ -791,6 +872,15 @@ struct task_struct {
 	struct key *thread_keyring;	/* keyring private to this thread */
 	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
 #endif
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * that the FPU is used. If this is over a threshold, the lazy fpu
+	 * saving becomes unlazy to save the trap. This is an unsigned char
+	 * so that after 256 times the counter wraps and the behavior turns
+	 * lazy again; this to deal with bursty apps that only use FPU for
+	 * a short time
+	 */
+	unsigned char fpu_counter;
 	int oomkilladj;		/* OOM kill score adjustment (bit shift). */
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
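The fpu_counter comment describes a wraparound heuristic: after enough consecutive context switches that used the FPU, an eager state restore is cheaper than taking a device-not-available trap, and the deliberate unsigned char overflow returns bursty tasks to lazy mode on its own. A sketch of the decision; the threshold is an assumption (the real value lives in the arch context-switch code):

    #define EXAMPLE_FPU_EAGER_THRESHOLD 5   /* assumed, not the arch's value */

    /* Illustrative: restore FPU state eagerly for FPU-heavy tasks. */
    static int example_restore_fpu_eagerly(unsigned char fpu_counter)
    {
            return fpu_counter > EXAMPLE_FPU_EAGER_THRESHOLD;
    }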
@@ -798,8 +888,10 @@ struct task_struct {
 				     - initialized normally by flush_old_exec */
 /* file system info */
 	int link_count, total_link_count;
+#ifdef CONFIG_SYSVIPC
 /* ipc stuff */
 	struct sysv_sem sysvsem;
+#endif
 /* CPU-specific state of this task */
 	struct thread_struct thread;
 /* filesystem information */
@@ -831,13 +923,43 @@ struct task_struct {
 	u32 self_exec_id;
 /* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
 	spinlock_t alloc_lock;
-/* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
-	spinlock_t proc_lock;
+
+	/* Protection of the PI data structures: */
+	spinlock_t pi_lock;
+
+#ifdef CONFIG_RT_MUTEXES
+	/* PI waiters blocked on a rt_mutex held by this task */
+	struct plist_head pi_waiters;
+	/* Deadlock detection and priority inheritance handling */
+	struct rt_mutex_waiter *pi_blocked_on;
+#endif
 
 #ifdef CONFIG_DEBUG_MUTEXES
 	/* mutex deadlock detection */
 	struct mutex_waiter *blocked_on;
 #endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+	unsigned int irq_events;
+	int hardirqs_enabled;
+	unsigned long hardirq_enable_ip;
+	unsigned int hardirq_enable_event;
+	unsigned long hardirq_disable_ip;
+	unsigned int hardirq_disable_event;
+	int softirqs_enabled;
+	unsigned long softirq_disable_ip;
+	unsigned int softirq_disable_event;
+	unsigned long softirq_enable_ip;
+	unsigned int softirq_enable_event;
+	int hardirq_context;
+	int softirq_context;
+#endif
+#ifdef CONFIG_LOCKDEP
+# define MAX_LOCK_DEPTH 30UL
+	u64 curr_chain_key;
+	int lockdep_depth;
+	struct held_lock held_locks[MAX_LOCK_DEPTH];
+	unsigned int lockdep_recursion;
+#endif
 
 /* journalling filesystem info */
 	void *journal_info;
@@ -845,7 +967,6 @@ struct task_struct {
 /* VM state */
 	struct reclaim_state *reclaim_state;
 
-	struct dentry *proc_dentry;
 	struct backing_dev_info *backing_dev_info;
 
 	struct io_context *io_context;
@@ -880,6 +1001,8 @@ struct task_struct {
 #ifdef CONFIG_COMPAT
 	struct compat_robust_list_head __user *compat_robust_list;
 #endif
+	struct list_head pi_state_list;
+	struct futex_pi_state *pi_state_cache;
 
 	atomic_t fs_excl;	/* holding fs exclusive resources */
 	struct rcu_head rcu;
@@ -888,6 +1011,9 @@ struct task_struct {
 	 * cache last used pipe for splice
 	 */
 	struct pipe_inode_info *splice_pipe;
+#ifdef CONFIG_TASK_DELAY_ACCT
+	struct task_delay_info *delays;
+#endif
 };
 
 static inline pid_t process_group(struct task_struct *tsk)
@@ -908,6 +1034,16 @@ static inline int pid_alive(struct task_struct *p)
 	return p->pids[PIDTYPE_PID].pid != NULL;
 }
 
+/**
+ * is_init - check if a task structure is the first user space
+ *           task the kernel created.
+ * @p: Task structure to be checked.
+ */
+static inline int is_init(struct task_struct *tsk)
+{
+	return tsk->pid == 1;
+}
+
 extern void free_task(struct task_struct *tsk);
 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
 
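is_init() replaces open-coded pid comparisons with a named predicate. A hedged usage sketch, e.g. for a scan that must never pick the initial userspace task; the surrounding function is illustrative:

    /* Illustrative: skip init when selecting a victim task. */
    static int example_may_target(struct task_struct *tsk)
    {
            if (is_init(tsk))
                    return 0;       /* never target pid 1 */
            return 1;
    }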
@@ -926,7 +1062,6 @@ static inline void put_task_struct(struct task_struct *t)
 					/* Not implemented yet, only for 486*/
 #define PF_STARTING	0x00000002	/* being created */
 #define PF_EXITING	0x00000004	/* getting shut down */
-#define PF_DEAD		0x00000008	/* Dead */
 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
 #define PF_DUMPCORE	0x00000200	/* dumped core */
@@ -947,6 +1082,7 @@ static inline void put_task_struct(struct task_struct *t)
 #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
 #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
+#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
@@ -974,9 +1110,9 @@ static inline void put_task_struct(struct task_struct *t)
 #define used_math() tsk_used_math(current)
 
 #ifdef CONFIG_SMP
-extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
+extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
 #else
-static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
+static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 {
 	if (!cpu_isset(0, new_mask))
 		return -EINVAL;
@@ -985,7 +1121,8 @@ static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
 #endif
 
 extern unsigned long long sched_clock(void);
-extern unsigned long long current_sched_time(const task_t *current_task);
+extern unsigned long long
+current_sched_time(const struct task_struct *current_task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
@@ -1001,16 +1138,29 @@ static inline void idle_task_exit(void) {}
 #endif
 
 extern void sched_idle_next(void);
-extern void set_user_nice(task_t *p, long nice);
-extern int task_prio(const task_t *p);
-extern int task_nice(const task_t *p);
-extern int can_nice(const task_t *p, const int nice);
-extern int task_curr(const task_t *p);
+
+#ifdef CONFIG_RT_MUTEXES
+extern int rt_mutex_getprio(struct task_struct *p);
+extern void rt_mutex_setprio(struct task_struct *p, int prio);
+extern void rt_mutex_adjust_pi(struct task_struct *p);
+#else
+static inline int rt_mutex_getprio(struct task_struct *p)
+{
+	return p->normal_prio;
+}
+# define rt_mutex_adjust_pi(p)		do { } while (0)
+#endif
+
+extern void set_user_nice(struct task_struct *p, long nice);
+extern int task_prio(const struct task_struct *p);
+extern int task_nice(const struct task_struct *p);
+extern int can_nice(const struct task_struct *p, const int nice);
+extern int task_curr(const struct task_struct *p);
 extern int idle_cpu(int cpu);
 extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
-extern task_t *idle_task(int cpu);
-extern task_t *curr_task(int cpu);
-extern void set_curr_task(int cpu, task_t *p);
+extern struct task_struct *idle_task(int cpu);
+extern struct task_struct *curr_task(int cpu);
+extern void set_curr_task(int cpu, struct task_struct *p);
 
 void yield(void);
 
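With CONFIG_RT_MUTEXES, a task's effective ->prio can sit above its ->normal_prio while higher-priority waiters block on an rt_mutex it holds; the !CONFIG_RT_MUTEXES stub collapses rt_mutex_getprio() back to normal_prio. A sketch built on the fields this patch adds; the helper name is hypothetical:

    /* Illustrative: prio diverges from normal_prio only under PI boosting. */
    static int example_task_is_pi_boosted(struct task_struct *p)
    {
            return p->prio != p->normal_prio;
    }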
@@ -1056,7 +1206,7 @@ extern void switch_uid(struct user_struct *);
 
 #include <asm/current.h>
 
-extern void do_timer(struct pt_regs *);
+extern void do_timer(unsigned long ticks);
 
 extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state));
 extern int FASTCALL(wake_up_process(struct task_struct * tsk));
@@ -1067,8 +1217,8 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
 #else
 static inline void kick_process(struct task_struct *tsk) { }
 #endif
-extern void FASTCALL(sched_fork(task_t * p, int clone_flags));
-extern void FASTCALL(sched_exit(task_t * p));
+extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
+extern void FASTCALL(sched_exit(struct task_struct * p));
 
 extern int in_group_p(gid_t);
 extern int in_egroup_p(gid_t);
@@ -1101,7 +1251,7 @@ extern int force_sig_info(int, struct siginfo *, struct task_struct *);
 extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
 extern int kill_pg_info(int, struct siginfo *, pid_t);
 extern int kill_proc_info(int, struct siginfo *, pid_t);
-extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t);
+extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t, u32);
 extern void do_notify_parent(struct task_struct *, int);
 extern void force_sig(int, struct task_struct *);
 extern void force_sig_specific(int, struct task_struct *);
@@ -1173,17 +1323,17 @@ extern NORET_TYPE void do_group_exit(int);
 extern void daemonize(const char *, ...);
 extern int allow_signal(int);
 extern int disallow_signal(int);
-extern task_t *child_reaper;
+extern struct task_struct *child_reaper;
 
 extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
-task_t *fork_idle(int);
+struct task_struct *fork_idle(int);
 
 extern void set_task_comm(struct task_struct *tsk, char *from);
 extern void get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
-extern void wait_task_inactive(task_t * p);
+extern void wait_task_inactive(struct task_struct * p);
 #else
 #define wait_task_inactive(p)	do { } while (0)
 #endif
@@ -1209,13 +1359,13 @@ extern void wait_task_inactive(task_t * p);
 /* de_thread depends on thread_group_leader not being a pid based check */
 #define thread_group_leader(p)	(p == p->group_leader)
 
-static inline task_t *next_thread(const task_t *p)
+static inline struct task_struct *next_thread(const struct task_struct *p)
 {
 	return list_entry(rcu_dereference(p->thread_group.next),
-			  task_t, thread_group);
+			  struct task_struct, thread_group);
 }
 
-static inline int thread_group_empty(task_t *p)
+static inline int thread_group_empty(struct task_struct *p)
 {
 	return list_empty(&p->thread_group);
 }
@@ -1400,6 +1550,11 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
 extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
 extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
 
+#include <linux/sysdev.h>
+extern int sched_mc_power_savings, sched_smt_power_savings;
+extern struct sysdev_attribute attr_sched_mc_power_savings, attr_sched_smt_power_savings;
+extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
+
 extern void normalize_rt_tasks(void);
 
 #ifdef CONFIG_PM
@@ -1429,6 +1584,14 @@ static inline void freeze(struct task_struct *p)
 }
 
 /*
+ * Sometimes we may need to cancel the previous 'freeze' request
+ */
+static inline void do_not_freeze(struct task_struct *p)
+{
+	p->flags &= ~PF_FREEZE;
+}
+
+/*
  * Wake up a frozen process
  */
 static inline int thaw_process(struct task_struct *p)
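do_not_freeze() is the inverse of freeze() just above it: it clears PF_FREEZE so a task that was asked to freeze but has not yet parked itself is left alone. A hedged sketch of the intended sequence; the function is illustrative, not the actual kernel/power refrigerator code:

    /* Illustrative: request a freeze, then withdraw the request. */
    static void example_cancel_freeze(struct task_struct *p)
    {
            freeze(p);              /* set PF_FREEZE: please park yourself */
            /* ... decide p cannot be frozen after all ... */
            do_not_freeze(p);       /* clear PF_FREEZE before it acts on it */
    }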
