Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	| 95
1 file changed, 80 insertions(+), 15 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6a1e7afb099b..03c238088aee 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -61,7 +61,6 @@ struct sched_param {
 #include <linux/mm_types.h>
 
 #include <asm/system.h>
-#include <asm/semaphore.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
 #include <asm/cputime.h>
@@ -69,7 +68,6 @@ struct sched_param {
 #include <linux/smp.h>
 #include <linux/sem.h>
 #include <linux/signal.h>
-#include <linux/securebits.h>
 #include <linux/fs_struct.h>
 #include <linux/compiler.h>
 #include <linux/completion.h>
@@ -556,6 +554,14 @@ struct signal_struct {
 #define SIGNAL_STOP_DEQUEUED	0x00000002 /* stop signal dequeued */
 #define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
 #define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */
+/*
+ * Pending notifications to parent.
+ */
+#define SIGNAL_CLD_STOPPED	0x00000010
+#define SIGNAL_CLD_CONTINUED	0x00000020
+#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
+
+#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
 
 /* If true, all threads except ->group_exit_task have pending SIGKILL */
 static inline int signal_group_exit(const struct signal_struct *sig)
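The SIGNAL_CLD_* bits park a stopped/continued notification in the signal_struct until it can be delivered to the parent, and SIGNAL_CLD_MASK tests both at once. A minimal consumer sketch, assuming these bits live in signal_struct::flags like the other SIGNAL_* values (the helper itself is hypothetical, not part of this patch):

    /* Hypothetical helper: report and clear a pending child notification.
     * Caller is assumed to hold ->siglock. */
    static int take_cld_notification(struct signal_struct *sig)
    {
    	int why = sig->flags & SIGNAL_CLD_MASK;

    	sig->flags &= ~SIGNAL_CLD_MASK;
    	return why;	/* 0, SIGNAL_CLD_STOPPED or SIGNAL_CLD_CONTINUED */
    }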
@@ -704,6 +710,7 @@ enum cpu_idle_type {
 #define SD_POWERSAVINGS_BALANCE	256	/* Balance for power savings */
 #define SD_SHARE_PKG_RESOURCES	512	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		1024	/* Only a single load balancing instance */
+#define SD_WAKE_IDLE_FAR	2048	/* Gain latency sacrificing cache hit */
 
 #define BALANCE_FOR_MC_POWER	\
 	(sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
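SD_WAKE_IDLE_FAR is an ordinary bit in sched_domain::flags, so wakeup balancing can opt into the latency-over-cache-affinity trade per domain. The usual test pattern, sketched with a hypothetical helper:

    /* Hypothetical check: may this domain wake tasks on a far idle CPU? */
    static inline int wake_idle_far_allowed(struct sched_domain *sd)
    {
    	return sd->flags & SD_WAKE_IDLE_FAR;
    }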
@@ -733,12 +740,31 @@ struct sched_group {
 	u32 reciprocal_cpu_power;
 };
 
+enum sched_domain_level {
+	SD_LV_NONE = 0,
+	SD_LV_SIBLING,
+	SD_LV_MC,
+	SD_LV_CPU,
+	SD_LV_NODE,
+	SD_LV_ALLNODES,
+	SD_LV_MAX
+};
+
+struct sched_domain_attr {
+	int relax_domain_level;
+};
+
+#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
+	.relax_domain_level = -1,			\
+}
+
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent;	/* top domain must be null terminated */
 	struct sched_domain *child;	/* bottom domain must be null terminated */
 	struct sched_group *groups;	/* the balancing groups of the domain */
 	cpumask_t span;			/* span of all CPUs in this domain */
+	int first_cpu;			/* cache of the first cpu in this domain */
 	unsigned long min_interval;	/* Minimum balance interval ms */
 	unsigned long max_interval;	/* Maximum balance interval ms */
 	unsigned int busy_factor;	/* less balancing by factor if busy */
@@ -750,6 +776,7 @@ struct sched_domain {
 	unsigned int wake_idx;
 	unsigned int forkexec_idx;
 	int flags;			/* See SD_* */
+	enum sched_domain_level level;
 
 	/* Runtime fields. */
 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
@@ -789,7 +816,8 @@ struct sched_domain {
 #endif
 };
 
-extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new);
+extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+				    struct sched_domain_attr *dattr_new);
 extern int arch_reinit_sched_domains(void);
 
 #endif /* CONFIG_SMP */
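partition_sched_domains() now takes a third argument so each new domain can carry a sched_domain_attr, and SD_ATTR_INIT supplies the "no restriction" default of relax_domain_level == -1. A minimal caller sketch, assuming a sleepable context where kmalloc() is allowed (function and variable names are hypothetical):

    /* Hypothetical caller: one defaulted attr per domain, then repartition. */
    static void repartition_with_defaults(int ndoms, cpumask_t *doms)
    {
    	struct sched_domain_attr *dattr;
    	int i;

    	dattr = kmalloc(ndoms * sizeof(*dattr), GFP_KERNEL);
    	if (!dattr)
    		return;
    	for (i = 0; i < ndoms; i++)
    		dattr[i] = SD_ATTR_INIT;
    	partition_sched_domains(ndoms, doms, dattr);
    }

A NULL dattr_new presumably means "no attributes", keeping the old behaviour for unconverted callers.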
@@ -889,7 +917,8 @@ struct sched_class {
 	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
 	void (*task_new) (struct rq *rq, struct task_struct *p);
-	void (*set_cpus_allowed)(struct task_struct *p, cpumask_t *newmask);
+	void (*set_cpus_allowed)(struct task_struct *p,
+				 const cpumask_t *newmask);
 
 	void (*join_domain)(struct rq *rq);
 	void (*leave_domain)(struct rq *rq);
@@ -923,6 +952,7 @@ struct load_weight {
 struct sched_entity {
 	struct load_weight	load;		/* for load-balancing */
 	struct rb_node		run_node;
+	struct list_head	group_node;
 	unsigned int		on_rq;
 
 	u64			exec_start;
@@ -982,6 +1012,7 @@ struct sched_rt_entity {
 	unsigned long timeout;
 	int nr_cpus_allowed;
 
+	struct sched_rt_entity *back;
 #ifdef CONFIG_RT_GROUP_SCHED
 	struct sched_rt_entity	*parent;
 	/* rq on which this entity is (to be) queued: */
@@ -1109,7 +1140,7 @@ struct task_struct {
 	gid_t gid,egid,sgid,fsgid;
 	struct group_info *group_info;
 	kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset;
-	unsigned keep_capabilities:1;
+	unsigned securebits;
 	struct user_struct *user;
 #ifdef CONFIG_KEYS
 	struct key *request_key_auth;	/* assumed request_key authority */
@@ -1144,7 +1175,7 @@ struct task_struct {
 	struct sighand_struct *sighand;
 
 	sigset_t blocked, real_blocked;
-	sigset_t saved_sigmask;	/* To be restored with TIF_RESTORE_SIGMASK */
+	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
 	struct sigpending pending;
 
 	unsigned long sas_ss_sp;
@@ -1502,15 +1533,21 @@ static inline void put_task_struct(struct task_struct *t)
 #define used_math() tsk_used_math(current)
 
 #ifdef CONFIG_SMP
-extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
+extern int set_cpus_allowed_ptr(struct task_struct *p,
+				const cpumask_t *new_mask);
 #else
-static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+static inline int set_cpus_allowed_ptr(struct task_struct *p,
+				       const cpumask_t *new_mask)
 {
-	if (!cpu_isset(0, new_mask))
+	if (!cpu_isset(0, *new_mask))
 		return -EINVAL;
 	return 0;
 }
 #endif
+static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+{
+	return set_cpus_allowed_ptr(p, &new_mask);
+}
 
 extern unsigned long long sched_clock(void);
 
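The by-value set_cpus_allowed() survives as an inline wrapper, so existing callers keep compiling, while the new _ptr variant avoids pushing an entire cpumask_t (which grows with NR_CPUS) onto the stack. The two call styles side by side, with a hypothetical task pointer p:

    set_cpus_allowed(p, cpu_online_map);	/* copies the whole mask */
    set_cpus_allowed_ptr(p, &cpu_online_map);	/* passes only a pointer */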
@@ -1551,7 +1588,6 @@ static inline void wake_up_idle_cpu(int cpu) { }
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
-extern unsigned int sysctl_sched_batch_wakeup_granularity;
 extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
@@ -1564,6 +1600,10 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
 extern unsigned int sysctl_sched_rt_period;
 extern int sysctl_sched_rt_runtime;
 
+int sched_rt_handler(struct ctl_table *table, int write,
+		struct file *filp, void __user *buffer, size_t *lenp,
+		loff_t *ppos);
+
 extern unsigned int sysctl_sched_compat_yield;
 
 #ifdef CONFIG_RT_MUTEXES
@@ -1637,7 +1677,10 @@ extern struct pid_namespace init_pid_ns;
 extern struct task_struct *find_task_by_pid_type_ns(int type, int pid,
 		struct pid_namespace *ns);
 
-extern struct task_struct *find_task_by_pid(pid_t nr);
+static inline struct task_struct *__deprecated find_task_by_pid(pid_t nr)
+{
+	return find_task_by_pid_type_ns(PIDTYPE_PID, nr, &init_pid_ns);
+}
 extern struct task_struct *find_task_by_vpid(pid_t nr);
 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
 		struct pid_namespace *ns);
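Turning find_task_by_pid() into a __deprecated inline documents exactly what the old call meant (a PIDTYPE_PID lookup in init_pid_ns) and makes every remaining user warn at build time. The intended migration, sketched for code already under RCU/tasklist protection with a hypothetical pid value nr:

    p = find_task_by_pid(nr);	/* old: init pid namespace only, now warns */
    p = find_task_by_vpid(nr);	/* preferred: caller's pid namespace */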
@@ -1713,8 +1756,7 @@ extern void zap_other_threads(struct task_struct *p);
 extern int kill_proc(pid_t, int, int);
 extern struct sigqueue *sigqueue_alloc(void);
 extern void sigqueue_free(struct sigqueue *);
-extern int send_sigqueue(int, struct sigqueue *, struct task_struct *);
-extern int send_group_sigqueue(int, struct sigqueue *, struct task_struct *);
+extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
 extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
 
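The per-thread and per-group senders collapse into one send_sigqueue() with a group flag; since the separate signal-number argument is gone, the number presumably travels in the queued siginfo itself. A caller-update sketch with hypothetical q and p:

    /* was: send_sigqueue(sig, q, p) or send_group_sigqueue(sig, q, p) */
    ret = send_sigqueue(q, p, 0);	/* deliver to a single thread */
    ret = send_sigqueue(q, p, 1);	/* deliver to the whole thread group */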
@@ -1765,6 +1807,8 @@ extern void mmput(struct mm_struct *);
 extern struct mm_struct *get_task_mm(struct task_struct *task);
 /* Remove the current tasks stale references to the old mm_struct */
 extern void mm_release(struct task_struct *, struct mm_struct *);
+/* Allocate a new mm structure and copy contents from tsk->mm */
+extern struct mm_struct *dup_mm(struct task_struct *tsk);
 
 extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
 extern void flush_thread(void);
@@ -1893,6 +1937,8 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
 
 #endif
 
+extern void thread_info_cache_init(void);
+
 /* set thread flags in other task's structures
  * - see asm/thread_info.h for TIF_xxxx flags available
  */
@@ -2031,7 +2077,7 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
 }
 #endif
 
-extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
+extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
 extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
 
 extern int sched_mc_power_savings, sched_smt_power_savings;
@@ -2041,8 +2087,11 @@ extern void normalize_rt_tasks(void);
 #ifdef CONFIG_GROUP_SCHED
 
 extern struct task_group init_task_group;
+#ifdef CONFIG_USER_SCHED
+extern struct task_group root_task_group;
+#endif
 
-extern struct task_group *sched_create_group(void);
+extern struct task_group *sched_create_group(struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);
 extern void sched_move_task(struct task_struct *tsk);
 #ifdef CONFIG_FAIR_GROUP_SCHED
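sched_create_group() now takes the parent group, turning task groups into a tree, and CONFIG_USER_SCHED exports root_task_group as that tree's explicit root. A minimal creation sketch, assuming the function still reports failure via ERR_PTR (the surrounding error handling is hypothetical):

    struct task_group *tg;

    tg = sched_create_group(&init_task_group);	/* child of the default group */
    if (IS_ERR(tg))
    	return PTR_ERR(tg);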
@@ -2053,6 +2102,9 @@ extern unsigned long sched_group_shares(struct task_group *tg);
 extern int sched_group_set_rt_runtime(struct task_group *tg,
 				long rt_runtime_us);
 extern long sched_group_rt_runtime(struct task_group *tg);
+extern int sched_group_set_rt_period(struct task_group *tg,
+				long rt_period_us);
+extern long sched_group_rt_period(struct task_group *tg);
 #endif
 #endif
 
@@ -2106,6 +2158,19 @@ static inline void migration_init(void)
 #define TASK_SIZE_OF(tsk)	TASK_SIZE
 #endif
 
+#ifdef CONFIG_MM_OWNER
+extern void mm_update_next_owner(struct mm_struct *mm);
+extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
+#else
+static inline void mm_update_next_owner(struct mm_struct *mm)
+{
+}
+
+static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
+{
+}
+#endif /* CONFIG_MM_OWNER */
+
 #endif /* __KERNEL__ */
 
 #endif