diff options
author    Ingo Molnar <mingo@kernel.org>    2017-02-03 09:24:12 -0500
committer Ingo Molnar <mingo@kernel.org>    2017-03-02 19:43:45 -0500
commit    901b14bd946a8b7ea211105b6207e082ddd36846 (patch)
tree      4bd68f1a6e17fa80b057f6debae28ff599d91d23 /include/linux
parent    0ca0156973a47e689f3bc817e26e15fff3f84eec (diff)
sched/headers: Move task lifetime APIs from <linux/sched.h> to <linux/sched/task.h>
There's a fair amount of task lifetime management (a.k.a fork()/exit())
related APIs in <linux/sched.h>, but only a small fraction of
the users of the generic sched.h header make use of them.
Move these functions to the <linux/sched/task.h> header.
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux')
-rw-r--r--    include/linux/sched.h         91
-rw-r--r--    include/linux/sched/task.h    97
2 files changed, 97 insertions, 91 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 25cc0adb3e08..b1677c8db03f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -165,28 +165,10 @@ struct task_group;
165 | /* Task command name length */ | 165 | /* Task command name length */ |
166 | #define TASK_COMM_LEN 16 | 166 | #define TASK_COMM_LEN 16 |
167 | 167 | ||
168 | #include <linux/spinlock.h> | ||
169 | |||
170 | /* | ||
171 | * This serializes "schedule()" and also protects | ||
172 | * the run-queue from deletions/modifications (but | ||
173 | * _adding_ to the beginning of the run-queue has | ||
174 | * a separate lock). | ||
175 | */ | ||
176 | extern rwlock_t tasklist_lock; | ||
177 | extern spinlock_t mmlist_lock; | ||
178 | |||
179 | struct task_struct; | 168 | struct task_struct; |
180 | 169 | ||
181 | #ifdef CONFIG_PROVE_RCU | ||
182 | extern int lockdep_tasklist_lock_is_held(void); | ||
183 | #endif /* #ifdef CONFIG_PROVE_RCU */ | ||
184 | |||
185 | extern void sched_init(void); | 170 | extern void sched_init(void); |
186 | extern void sched_init_smp(void); | 171 | extern void sched_init_smp(void); |
187 | extern asmlinkage void schedule_tail(struct task_struct *prev); | ||
188 | extern void init_idle(struct task_struct *idle, int cpu); | ||
189 | extern void init_idle_bootup_task(struct task_struct *idle); | ||
190 | 172 | ||
191 | extern cpumask_var_t cpu_isolated_map; | 173 | extern cpumask_var_t cpu_isolated_map; |
192 | 174 | ||
@@ -211,8 +193,6 @@ extern void io_schedule_finish(int token);
211 | extern long io_schedule_timeout(long timeout); | 193 | extern long io_schedule_timeout(long timeout); |
212 | extern void io_schedule(void); | 194 | extern void io_schedule(void); |
213 | 195 | ||
214 | void __noreturn do_task_dead(void); | ||
215 | |||
216 | struct nsproxy; | 196 | struct nsproxy; |
217 | 197 | ||
218 | /** | 198 | /** |
@@ -1120,24 +1100,6 @@ struct task_struct {
1120 | */ | 1100 | */ |
1121 | }; | 1101 | }; |
1122 | 1102 | ||
1123 | #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT | ||
1124 | extern int arch_task_struct_size __read_mostly; | ||
1125 | #else | ||
1126 | # define arch_task_struct_size (sizeof(struct task_struct)) | ||
1127 | #endif | ||
1128 | |||
1129 | #ifdef CONFIG_VMAP_STACK | ||
1130 | static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) | ||
1131 | { | ||
1132 | return t->stack_vm_area; | ||
1133 | } | ||
1134 | #else | ||
1135 | static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) | ||
1136 | { | ||
1137 | return NULL; | ||
1138 | } | ||
1139 | #endif | ||
1140 | |||
1141 | static inline struct pid *task_pid(struct task_struct *task) | 1103 | static inline struct pid *task_pid(struct task_struct *task) |
1142 | { | 1104 | { |
1143 | return task->pids[PIDTYPE_PID].pid; | 1105 | return task->pids[PIDTYPE_PID].pid; |
@@ -1429,21 +1391,6 @@ TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1429 | TASK_PFA_TEST(LMK_WAITING, lmk_waiting) | 1391 | TASK_PFA_TEST(LMK_WAITING, lmk_waiting) |
1430 | TASK_PFA_SET(LMK_WAITING, lmk_waiting) | 1392 | TASK_PFA_SET(LMK_WAITING, lmk_waiting) |
1431 | 1393 | ||
1432 | static inline void rcu_copy_process(struct task_struct *p) | ||
1433 | { | ||
1434 | #ifdef CONFIG_PREEMPT_RCU | ||
1435 | p->rcu_read_lock_nesting = 0; | ||
1436 | p->rcu_read_unlock_special.s = 0; | ||
1437 | p->rcu_blocked_node = NULL; | ||
1438 | INIT_LIST_HEAD(&p->rcu_node_entry); | ||
1439 | #endif /* #ifdef CONFIG_PREEMPT_RCU */ | ||
1440 | #ifdef CONFIG_TASKS_RCU | ||
1441 | p->rcu_tasks_holdout = false; | ||
1442 | INIT_LIST_HEAD(&p->rcu_tasks_holdout_list); | ||
1443 | p->rcu_tasks_idle_cpu = -1; | ||
1444 | #endif /* #ifdef CONFIG_TASKS_RCU */ | ||
1445 | } | ||
1446 | |||
1447 | static inline void tsk_restore_flags(struct task_struct *task, | 1394 | static inline void tsk_restore_flags(struct task_struct *task, |
1448 | unsigned long orig_flags, unsigned long flags) | 1395 | unsigned long orig_flags, unsigned long flags) |
1449 | { | 1396 | { |
@@ -1572,45 +1519,11 @@ extern void wake_up_new_task(struct task_struct *tsk);
1572 | #else | 1519 | #else |
1573 | static inline void kick_process(struct task_struct *tsk) { } | 1520 | static inline void kick_process(struct task_struct *tsk) { } |
1574 | #endif | 1521 | #endif |
1575 | extern int sched_fork(unsigned long clone_flags, struct task_struct *p); | ||
1576 | extern void sched_dead(struct task_struct *p); | ||
1577 | |||
1578 | extern void proc_caches_init(void); | ||
1579 | |||
1580 | extern void release_task(struct task_struct * p); | ||
1581 | |||
1582 | #ifdef CONFIG_HAVE_COPY_THREAD_TLS | ||
1583 | extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, | ||
1584 | struct task_struct *, unsigned long); | ||
1585 | #else | ||
1586 | extern int copy_thread(unsigned long, unsigned long, unsigned long, | ||
1587 | struct task_struct *); | ||
1588 | |||
1589 | /* Architectures that haven't opted into copy_thread_tls get the tls argument | ||
1590 | * via pt_regs, so ignore the tls argument passed via C. */ | ||
1591 | static inline int copy_thread_tls( | ||
1592 | unsigned long clone_flags, unsigned long sp, unsigned long arg, | ||
1593 | struct task_struct *p, unsigned long tls) | ||
1594 | { | ||
1595 | return copy_thread(clone_flags, sp, arg, p); | ||
1596 | } | ||
1597 | #endif | ||
1598 | extern void flush_thread(void); | ||
1599 | |||
1600 | #ifdef CONFIG_HAVE_EXIT_THREAD | ||
1601 | extern void exit_thread(struct task_struct *tsk); | ||
1602 | #else | ||
1603 | static inline void exit_thread(struct task_struct *tsk) | ||
1604 | { | ||
1605 | } | ||
1606 | #endif | ||
1607 | 1522 | ||
1608 | extern void exit_files(struct task_struct *); | 1523 | extern void exit_files(struct task_struct *); |
1609 | 1524 | ||
1610 | extern void exit_itimers(struct signal_struct *); | 1525 | extern void exit_itimers(struct signal_struct *); |
1611 | 1526 | ||
1612 | extern void do_group_exit(int); | ||
1613 | |||
1614 | extern int do_execve(struct filename *, | 1527 | extern int do_execve(struct filename *, |
1615 | const char __user * const __user *, | 1528 | const char __user * const __user *, |
1616 | const char __user * const __user *); | 1529 | const char __user * const __user *); |
@@ -1618,10 +1531,6 @@ extern int do_execveat(int, struct filename *,
1618 | const char __user * const __user *, | 1531 | const char __user * const __user *, |
1619 | const char __user * const __user *, | 1532 | const char __user * const __user *, |
1620 | int); | 1533 | int); |
1621 | extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long); | ||
1622 | extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); | ||
1623 | struct task_struct *fork_idle(int); | ||
1624 | extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | ||
1625 | 1534 | ||
1626 | extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); | 1535 | extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); |
1627 | static inline void set_task_comm(struct task_struct *tsk, const char *from) | 1536 | static inline void set_task_comm(struct task_struct *tsk, const char *from) |
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 0023c91ff821..e93638a03515 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -1,6 +1,103 @@
1 | #ifndef _LINUX_SCHED_TASK_H | 1 | #ifndef _LINUX_SCHED_TASK_H |
2 | #define _LINUX_SCHED_TASK_H | 2 | #define _LINUX_SCHED_TASK_H |
3 | 3 | ||
4 | /* | ||
5 | * Interface between the scheduler and various task lifetime (fork()/exit()) | ||
6 | * functionality: | ||
7 | */ | ||
8 | |||
4 | #include <linux/sched.h> | 9 | #include <linux/sched.h> |
5 | 10 | ||
11 | /* | ||
12 | * This serializes "schedule()" and also protects | ||
13 | * the run-queue from deletions/modifications (but | ||
14 | * _adding_ to the beginning of the run-queue has | ||
15 | * a separate lock). | ||
16 | */ | ||
17 | extern rwlock_t tasklist_lock; | ||
18 | extern spinlock_t mmlist_lock; | ||
19 | |||
20 | #ifdef CONFIG_PROVE_RCU | ||
21 | extern int lockdep_tasklist_lock_is_held(void); | ||
22 | #endif /* #ifdef CONFIG_PROVE_RCU */ | ||
23 | |||
24 | extern asmlinkage void schedule_tail(struct task_struct *prev); | ||
25 | extern void init_idle(struct task_struct *idle, int cpu); | ||
26 | extern void init_idle_bootup_task(struct task_struct *idle); | ||
27 | |||
28 | static inline void rcu_copy_process(struct task_struct *p) | ||
29 | { | ||
30 | #ifdef CONFIG_PREEMPT_RCU | ||
31 | p->rcu_read_lock_nesting = 0; | ||
32 | p->rcu_read_unlock_special.s = 0; | ||
33 | p->rcu_blocked_node = NULL; | ||
34 | INIT_LIST_HEAD(&p->rcu_node_entry); | ||
35 | #endif /* #ifdef CONFIG_PREEMPT_RCU */ | ||
36 | #ifdef CONFIG_TASKS_RCU | ||
37 | p->rcu_tasks_holdout = false; | ||
38 | INIT_LIST_HEAD(&p->rcu_tasks_holdout_list); | ||
39 | p->rcu_tasks_idle_cpu = -1; | ||
40 | #endif /* #ifdef CONFIG_TASKS_RCU */ | ||
41 | } | ||
42 | |||
43 | extern int sched_fork(unsigned long clone_flags, struct task_struct *p); | ||
44 | extern void sched_dead(struct task_struct *p); | ||
45 | |||
46 | void __noreturn do_task_dead(void); | ||
47 | |||
48 | extern void proc_caches_init(void); | ||
49 | |||
50 | extern void release_task(struct task_struct * p); | ||
51 | |||
52 | #ifdef CONFIG_HAVE_COPY_THREAD_TLS | ||
53 | extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, | ||
54 | struct task_struct *, unsigned long); | ||
55 | #else | ||
56 | extern int copy_thread(unsigned long, unsigned long, unsigned long, | ||
57 | struct task_struct *); | ||
58 | |||
59 | /* Architectures that haven't opted into copy_thread_tls get the tls argument | ||
60 | * via pt_regs, so ignore the tls argument passed via C. */ | ||
61 | static inline int copy_thread_tls( | ||
62 | unsigned long clone_flags, unsigned long sp, unsigned long arg, | ||
63 | struct task_struct *p, unsigned long tls) | ||
64 | { | ||
65 | return copy_thread(clone_flags, sp, arg, p); | ||
66 | } | ||
67 | #endif | ||
68 | extern void flush_thread(void); | ||
69 | |||
70 | #ifdef CONFIG_HAVE_EXIT_THREAD | ||
71 | extern void exit_thread(struct task_struct *tsk); | ||
72 | #else | ||
73 | static inline void exit_thread(struct task_struct *tsk) | ||
74 | { | ||
75 | } | ||
76 | #endif | ||
77 | extern void do_group_exit(int); | ||
78 | |||
79 | extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long); | ||
80 | extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); | ||
81 | struct task_struct *fork_idle(int); | ||
82 | extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | ||
83 | |||
84 | |||
85 | #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT | ||
86 | extern int arch_task_struct_size __read_mostly; | ||
87 | #else | ||
88 | # define arch_task_struct_size (sizeof(struct task_struct)) | ||
89 | #endif | ||
90 | |||
91 | #ifdef CONFIG_VMAP_STACK | ||
92 | static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) | ||
93 | { | ||
94 | return t->stack_vm_area; | ||
95 | } | ||
96 | #else | ||
97 | static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) | ||
98 | { | ||
99 | return NULL; | ||
100 | } | ||
101 | #endif | ||
102 | |||
6 | #endif /* _LINUX_SCHED_TASK_H */ | 103 | #endif /* _LINUX_SCHED_TASK_H */ |