Diffstat (limited to 'include/linux/sched.h')

 -rw-r--r--  include/linux/sched.h | 115
 1 file changed, 9 insertions, 106 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3b3e31da416e..af9590c8bfb0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1450,6 +1450,15 @@ union thread_union {
 	unsigned long stack[THREAD_SIZE/sizeof(long)];
 };
 
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+static inline struct thread_info *task_thread_info(struct task_struct *task)
+{
+	return &task->thread_info;
+}
+#elif !defined(__HAVE_THREAD_FUNCTIONS)
+# define task_thread_info(task)	((struct thread_info *)(task)->stack)
+#endif
+
 #ifndef __HAVE_ARCH_KSTACK_END
 static inline int kstack_end(void *addr)
 {
@@ -1540,112 +1549,6 @@ static inline void task_unlock(struct task_struct *p)
 	spin_unlock(&p->alloc_lock);
 }
 
-#ifdef CONFIG_THREAD_INFO_IN_TASK
-
-static inline struct thread_info *task_thread_info(struct task_struct *task)
-{
-	return &task->thread_info;
-}
-
-/*
- * When accessing the stack of a non-current task that might exit, use
- * try_get_task_stack() instead.  task_stack_page will return a pointer
- * that could get freed out from under you.
- */
-static inline void *task_stack_page(const struct task_struct *task)
-{
-	return task->stack;
-}
-
-#define setup_thread_stack(new,old)	do { } while(0)
-
-static inline unsigned long *end_of_stack(const struct task_struct *task)
-{
-	return task->stack;
-}
-
-#elif !defined(__HAVE_THREAD_FUNCTIONS)
-
-#define task_thread_info(task)	((struct thread_info *)(task)->stack)
-#define task_stack_page(task)	((void *)(task)->stack)
-
-static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
-{
-	*task_thread_info(p) = *task_thread_info(org);
-	task_thread_info(p)->task = p;
-}
-
-/*
- * Return the address of the last usable long on the stack.
- *
- * When the stack grows down, this is just above the thread
- * info struct. Going any lower will corrupt the threadinfo.
- *
- * When the stack grows up, this is the highest address.
- * Beyond that position, we corrupt data on the next page.
- */
-static inline unsigned long *end_of_stack(struct task_struct *p)
-{
-#ifdef CONFIG_STACK_GROWSUP
-	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
-#else
-	return (unsigned long *)(task_thread_info(p) + 1);
-#endif
-}
-
-#endif
-
-#ifdef CONFIG_THREAD_INFO_IN_TASK
-static inline void *try_get_task_stack(struct task_struct *tsk)
-{
-	return atomic_inc_not_zero(&tsk->stack_refcount) ?
-		task_stack_page(tsk) : NULL;
-}
-
-extern void put_task_stack(struct task_struct *tsk);
-#else
-static inline void *try_get_task_stack(struct task_struct *tsk)
-{
-	return task_stack_page(tsk);
-}
-
-static inline void put_task_stack(struct task_struct *tsk) {}
-#endif
-
-#define task_stack_end_corrupted(task) \
-		(*(end_of_stack(task)) != STACK_END_MAGIC)
-
-static inline int object_is_on_stack(void *obj)
-{
-	void *stack = task_stack_page(current);
-
-	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
-}
-
-extern void thread_stack_cache_init(void);
-
-#ifdef CONFIG_DEBUG_STACK_USAGE
-static inline unsigned long stack_not_used(struct task_struct *p)
-{
-	unsigned long *n = end_of_stack(p);
-
-	do { 	/* Skip over canary */
-# ifdef CONFIG_STACK_GROWSUP
-		n--;
-# else
-		n++;
-# endif
-	} while (!*n);
-
-# ifdef CONFIG_STACK_GROWSUP
-	return (unsigned long)end_of_stack(p) - (unsigned long)n;
-# else
-	return (unsigned long)n - (unsigned long)end_of_stack(p);
-# endif
-}
-#endif
-extern void set_task_stack_end_magic(struct task_struct *tsk);
-
 /* set thread flags in other task's structures
  * - see asm/thread_info.h for TIF_xxxx flags available
  */
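For reference, a minimal sketch (not part of the patch above) of how the try_get_task_stack()/put_task_stack() pair appearing in the removed hunk is meant to be used when reading the stack of a task other than current; the helper name inspect_remote_stack() is hypothetical:

/*
 * Sketch only: pairs try_get_task_stack() with put_task_stack() so the
 * stack page cannot be freed while it is being read.  Assumes "tsk" is
 * a valid task reference (e.g. pinned with get_task_struct()).
 */
static void inspect_remote_stack(struct task_struct *tsk)
{
	void *stack = try_get_task_stack(tsk);

	if (!stack)
		return;		/* task already exited; its stack is gone */

	/* ... safely read between stack and stack + THREAD_SIZE ... */

	put_task_stack(tsk);	/* drop the reference taken above */
}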