diff options
author:    Ingo Molnar <mingo@kernel.org>  2017-02-03 16:59:33 -0500
committer: Ingo Molnar <mingo@kernel.org>  2017-03-02 19:43:47 -0500
commit:    f3ac60671954c8d413532627b1be13a76f394c49 (patch)
tree:      31357b110d793102098d3950c61d1ae86807fee7
parent:    d04b0ad37e4b6ac39a56c823ae76ab37cd044dc7 (diff)
sched/headers: Move task-stack related APIs from <linux/sched.h> to <linux/sched/task_stack.h>
Split out the task->stack related functionality, which is not really
part of the core scheduler APIs.
Only keep task_thread_info() because it's used by sched.h.
Update the code that uses those facilities.
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 arch/mips/kernel/smp-cps.c       |   2 +-
 arch/mips/sibyte/sb1250/smp.c    |   2 +-
 include/linux/sched.h            | 115 ++++-----------
 include/linux/sched/task_stack.h | 104 ++++++++++++++
 4 files changed, 115 insertions(+), 108 deletions(-)
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 1d3188c23bb8..6d45f05538c8 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -11,7 +11,7 @@ | |||
11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
12 | #include <linux/io.h> | 12 | #include <linux/io.h> |
13 | #include <linux/irqchip/mips-gic.h> | 13 | #include <linux/irqchip/mips-gic.h> |
14 | #include <linux/sched.h> | 14 | #include <linux/sched/task_stack.h> |
15 | #include <linux/sched/hotplug.h> | 15 | #include <linux/sched/hotplug.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/smp.h> | 17 | #include <linux/smp.h> |
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index 1cf66f5ff23d..0a4a2c3982d8 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -21,7 +21,7 @@ | |||
21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
22 | #include <linux/smp.h> | 22 | #include <linux/smp.h> |
23 | #include <linux/kernel_stat.h> | 23 | #include <linux/kernel_stat.h> |
24 | #include <linux/sched.h> | 24 | #include <linux/sched/task_stack.h> |
25 | 25 | ||
26 | #include <asm/mmu_context.h> | 26 | #include <asm/mmu_context.h> |
27 | #include <asm/io.h> | 27 | #include <asm/io.h> |
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3b3e31da416e..af9590c8bfb0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1450,6 +1450,15 @@ union thread_union { | |||
1450 | unsigned long stack[THREAD_SIZE/sizeof(long)]; | 1450 | unsigned long stack[THREAD_SIZE/sizeof(long)]; |
1451 | }; | 1451 | }; |
1452 | 1452 | ||
1453 | #ifdef CONFIG_THREAD_INFO_IN_TASK | ||
1454 | static inline struct thread_info *task_thread_info(struct task_struct *task) | ||
1455 | { | ||
1456 | return &task->thread_info; | ||
1457 | } | ||
1458 | #elif !defined(__HAVE_THREAD_FUNCTIONS) | ||
1459 | # define task_thread_info(task) ((struct thread_info *)(task)->stack) | ||
1460 | #endif | ||
1461 | |||
1453 | #ifndef __HAVE_ARCH_KSTACK_END | 1462 | #ifndef __HAVE_ARCH_KSTACK_END |
1454 | static inline int kstack_end(void *addr) | 1463 | static inline int kstack_end(void *addr) |
1455 | { | 1464 | { |
@@ -1540,112 +1549,6 @@ static inline void task_unlock(struct task_struct *p) | |||
1540 | spin_unlock(&p->alloc_lock); | 1549 | spin_unlock(&p->alloc_lock); |
1541 | } | 1550 | } |
1542 | 1551 | ||
1543 | #ifdef CONFIG_THREAD_INFO_IN_TASK | ||
1544 | |||
1545 | static inline struct thread_info *task_thread_info(struct task_struct *task) | ||
1546 | { | ||
1547 | return &task->thread_info; | ||
1548 | } | ||
1549 | |||
1550 | /* | ||
1551 | * When accessing the stack of a non-current task that might exit, use | ||
1552 | * try_get_task_stack() instead. task_stack_page will return a pointer | ||
1553 | * that could get freed out from under you. | ||
1554 | */ | ||
1555 | static inline void *task_stack_page(const struct task_struct *task) | ||
1556 | { | ||
1557 | return task->stack; | ||
1558 | } | ||
1559 | |||
1560 | #define setup_thread_stack(new,old) do { } while(0) | ||
1561 | |||
1562 | static inline unsigned long *end_of_stack(const struct task_struct *task) | ||
1563 | { | ||
1564 | return task->stack; | ||
1565 | } | ||
1566 | |||
1567 | #elif !defined(__HAVE_THREAD_FUNCTIONS) | ||
1568 | |||
1569 | #define task_thread_info(task) ((struct thread_info *)(task)->stack) | ||
1570 | #define task_stack_page(task) ((void *)(task)->stack) | ||
1571 | |||
1572 | static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) | ||
1573 | { | ||
1574 | *task_thread_info(p) = *task_thread_info(org); | ||
1575 | task_thread_info(p)->task = p; | ||
1576 | } | ||
1577 | |||
1578 | /* | ||
1579 | * Return the address of the last usable long on the stack. | ||
1580 | * | ||
1581 | * When the stack grows down, this is just above the thread | ||
1582 | * info struct. Going any lower will corrupt the threadinfo. | ||
1583 | * | ||
1584 | * When the stack grows up, this is the highest address. | ||
1585 | * Beyond that position, we corrupt data on the next page. | ||
1586 | */ | ||
1587 | static inline unsigned long *end_of_stack(struct task_struct *p) | ||
1588 | { | ||
1589 | #ifdef CONFIG_STACK_GROWSUP | ||
1590 | return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1; | ||
1591 | #else | ||
1592 | return (unsigned long *)(task_thread_info(p) + 1); | ||
1593 | #endif | ||
1594 | } | ||
1595 | |||
1596 | #endif | ||
1597 | |||
1598 | #ifdef CONFIG_THREAD_INFO_IN_TASK | ||
1599 | static inline void *try_get_task_stack(struct task_struct *tsk) | ||
1600 | { | ||
1601 | return atomic_inc_not_zero(&tsk->stack_refcount) ? | ||
1602 | task_stack_page(tsk) : NULL; | ||
1603 | } | ||
1604 | |||
1605 | extern void put_task_stack(struct task_struct *tsk); | ||
1606 | #else | ||
1607 | static inline void *try_get_task_stack(struct task_struct *tsk) | ||
1608 | { | ||
1609 | return task_stack_page(tsk); | ||
1610 | } | ||
1611 | |||
1612 | static inline void put_task_stack(struct task_struct *tsk) {} | ||
1613 | #endif | ||
1614 | |||
1615 | #define task_stack_end_corrupted(task) \ | ||
1616 | (*(end_of_stack(task)) != STACK_END_MAGIC) | ||
1617 | |||
1618 | static inline int object_is_on_stack(void *obj) | ||
1619 | { | ||
1620 | void *stack = task_stack_page(current); | ||
1621 | |||
1622 | return (obj >= stack) && (obj < (stack + THREAD_SIZE)); | ||
1623 | } | ||
1624 | |||
1625 | extern void thread_stack_cache_init(void); | ||
1626 | |||
1627 | #ifdef CONFIG_DEBUG_STACK_USAGE | ||
1628 | static inline unsigned long stack_not_used(struct task_struct *p) | ||
1629 | { | ||
1630 | unsigned long *n = end_of_stack(p); | ||
1631 | |||
1632 | do { /* Skip over canary */ | ||
1633 | # ifdef CONFIG_STACK_GROWSUP | ||
1634 | n--; | ||
1635 | # else | ||
1636 | n++; | ||
1637 | # endif | ||
1638 | } while (!*n); | ||
1639 | |||
1640 | # ifdef CONFIG_STACK_GROWSUP | ||
1641 | return (unsigned long)end_of_stack(p) - (unsigned long)n; | ||
1642 | # else | ||
1643 | return (unsigned long)n - (unsigned long)end_of_stack(p); | ||
1644 | # endif | ||
1645 | } | ||
1646 | #endif | ||
1647 | extern void set_task_stack_end_magic(struct task_struct *tsk); | ||
1648 | |||
1649 | /* set thread flags in other task's structures | 1552 | /* set thread flags in other task's structures |
1650 | * - see asm/thread_info.h for TIF_xxxx flags available | 1553 | * - see asm/thread_info.h for TIF_xxxx flags available |
1651 | */ | 1554 | */ |
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index d2d578e85f7d..aaa5c2a6a0e9 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -1,7 +1,111 @@ | |||
1 | #ifndef _LINUX_SCHED_TASK_STACK_H | 1 | #ifndef _LINUX_SCHED_TASK_STACK_H |
2 | #define _LINUX_SCHED_TASK_STACK_H | 2 | #define _LINUX_SCHED_TASK_STACK_H |
3 | 3 | ||
4 | /* | ||
5 | * task->stack (kernel stack) handling interfaces: | ||
6 | */ | ||
7 | |||
4 | #include <linux/sched.h> | 8 | #include <linux/sched.h> |
5 | #include <linux/magic.h> | 9 | #include <linux/magic.h> |
6 | 10 | ||
11 | #ifdef CONFIG_THREAD_INFO_IN_TASK | ||
12 | |||
13 | /* | ||
14 | * When accessing the stack of a non-current task that might exit, use | ||
15 | * try_get_task_stack() instead. task_stack_page will return a pointer | ||
16 | * that could get freed out from under you. | ||
17 | */ | ||
18 | static inline void *task_stack_page(const struct task_struct *task) | ||
19 | { | ||
20 | return task->stack; | ||
21 | } | ||
22 | |||
23 | #define setup_thread_stack(new,old) do { } while(0) | ||
24 | |||
25 | static inline unsigned long *end_of_stack(const struct task_struct *task) | ||
26 | { | ||
27 | return task->stack; | ||
28 | } | ||
29 | |||
30 | #elif !defined(__HAVE_THREAD_FUNCTIONS) | ||
31 | |||
32 | #define task_stack_page(task) ((void *)(task)->stack) | ||
33 | |||
34 | static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) | ||
35 | { | ||
36 | *task_thread_info(p) = *task_thread_info(org); | ||
37 | task_thread_info(p)->task = p; | ||
38 | } | ||
39 | |||
40 | /* | ||
41 | * Return the address of the last usable long on the stack. | ||
42 | * | ||
43 | * When the stack grows down, this is just above the thread | ||
44 | * info struct. Going any lower will corrupt the threadinfo. | ||
45 | * | ||
46 | * When the stack grows up, this is the highest address. | ||
47 | * Beyond that position, we corrupt data on the next page. | ||
48 | */ | ||
49 | static inline unsigned long *end_of_stack(struct task_struct *p) | ||
50 | { | ||
51 | #ifdef CONFIG_STACK_GROWSUP | ||
52 | return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1; | ||
53 | #else | ||
54 | return (unsigned long *)(task_thread_info(p) + 1); | ||
55 | #endif | ||
56 | } | ||
57 | |||
58 | #endif | ||
59 | |||
60 | #ifdef CONFIG_THREAD_INFO_IN_TASK | ||
61 | static inline void *try_get_task_stack(struct task_struct *tsk) | ||
62 | { | ||
63 | return atomic_inc_not_zero(&tsk->stack_refcount) ? | ||
64 | task_stack_page(tsk) : NULL; | ||
65 | } | ||
66 | |||
67 | extern void put_task_stack(struct task_struct *tsk); | ||
68 | #else | ||
69 | static inline void *try_get_task_stack(struct task_struct *tsk) | ||
70 | { | ||
71 | return task_stack_page(tsk); | ||
72 | } | ||
73 | |||
74 | static inline void put_task_stack(struct task_struct *tsk) {} | ||
75 | #endif | ||
76 | |||
77 | #define task_stack_end_corrupted(task) \ | ||
78 | (*(end_of_stack(task)) != STACK_END_MAGIC) | ||
79 | |||
80 | static inline int object_is_on_stack(void *obj) | ||
81 | { | ||
82 | void *stack = task_stack_page(current); | ||
83 | |||
84 | return (obj >= stack) && (obj < (stack + THREAD_SIZE)); | ||
85 | } | ||
86 | |||
87 | extern void thread_stack_cache_init(void); | ||
88 | |||
89 | #ifdef CONFIG_DEBUG_STACK_USAGE | ||
90 | static inline unsigned long stack_not_used(struct task_struct *p) | ||
91 | { | ||
92 | unsigned long *n = end_of_stack(p); | ||
93 | |||
94 | do { /* Skip over canary */ | ||
95 | # ifdef CONFIG_STACK_GROWSUP | ||
96 | n--; | ||
97 | # else | ||
98 | n++; | ||
99 | # endif | ||
100 | } while (!*n); | ||
101 | |||
102 | # ifdef CONFIG_STACK_GROWSUP | ||
103 | return (unsigned long)end_of_stack(p) - (unsigned long)n; | ||
104 | # else | ||
105 | return (unsigned long)n - (unsigned long)end_of_stack(p); | ||
106 | # endif | ||
107 | } | ||
108 | #endif | ||
109 | extern void set_task_stack_end_magic(struct task_struct *tsk); | ||
110 | |||
7 | #endif /* _LINUX_SCHED_TASK_STACK_H */ | 111 | #endif /* _LINUX_SCHED_TASK_STACK_H */ |