Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  59
1 file changed, 28 insertions, 31 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0376b054a0d0..857ba40426ba 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -33,6 +33,7 @@ struct sched_param {
 
 #include <linux/smp.h>
 #include <linux/sem.h>
+#include <linux/shm.h>
 #include <linux/signal.h>
 #include <linux/compiler.h>
 #include <linux/completion.h>
@@ -813,7 +814,7 @@ struct task_delay_info {
 	 * associated with the operation is added to XXX_delay.
 	 * XXX_delay contains the accumulated delay time in nanoseconds.
 	 */
-	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
+	u64 blkio_start;	/* Shared by blkio, swapin */
 	u64 blkio_delay;	/* wait for sync block io completion */
 	u64 swapin_delay;	/* wait for swapin block io completion */
 	u32 blkio_count;	/* total count of the number of sync block */
@@ -821,7 +822,7 @@ struct task_delay_info {
 	u32 swapin_count;	/* total count of the number of swapin block */
 	/* io operations performed */
 
-	struct timespec freepages_start, freepages_end;
+	u64 freepages_start;
 	u64 freepages_delay;	/* wait for memory reclaim */
 	u32 freepages_count;	/* total count of memory reclaim */
 };
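
Both timespec pairs in task_delay_info collapse to a single u64 start stamp, so end-of-wait bookkeeping becomes plain integer arithmetic in nanoseconds. A minimal sketch of that pattern, assuming ktime_get_ns() as the clock source; the helper name is illustrative and locking is elided, so this is not the patch's actual delayacct.c code:

#include <linux/ktime.h>
#include <linux/sched.h>	/* struct task_delay_info */

/* hedged sketch: fold one completed sync block io wait into the counters */
static void delayacct_blkio_end_sketch(struct task_delay_info *d)
{
	u64 ns = ktime_get_ns() - d->blkio_start;	/* delta in nanoseconds */

	d->blkio_delay += ns;	/* accumulated wait time, in ns */
	d->blkio_count++;	/* one more completed wait */
}
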
@@ -1270,9 +1271,6 @@ struct task_struct {
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	struct rcu_node *rcu_blocked_node;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-#ifdef CONFIG_RCU_BOOST
-	struct rt_mutex *rcu_boost_mutex;
-#endif /* #ifdef CONFIG_RCU_BOOST */
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
@@ -1307,13 +1305,12 @@ struct task_struct {
 				 * execve */
 	unsigned in_iowait:1;
 
-	/* task may not gain privileges */
-	unsigned no_new_privs:1;
-
 	/* Revert to default priority/policy when forking */
 	unsigned sched_reset_on_fork:1;
 	unsigned sched_contributes_to_load:1;
 
+	unsigned long atomic_flags; /* Flags needing atomic access. */
+
 	pid_t pid;
 	pid_t tgid;
 
@@ -1367,8 +1364,8 @@ struct task_struct {
 	} vtime_snap_whence;
 #endif
 	unsigned long nvcsw, nivcsw; /* context switch counts */
-	struct timespec start_time;		/* monotonic time */
-	struct timespec real_start_time;	/* boot based time */
+	u64 start_time;		/* monotonic time in nsec */
+	u64 real_start_time;	/* boot based time in nsec */
 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
 	unsigned long min_flt, maj_flt;
 
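
start_time and real_start_time switch from struct timespec to nanosecond u64 stamps; any consumer that still wants a timespec (e.g. /proc formatting) converts on the way out. A hedged sketch, assuming ns_to_timespec() from linux/time.h; the helper name here is invented, not part of the patch:

#include <linux/sched.h>
#include <linux/time.h>

/* illustrative helper: boot-based nanosecond stamp -> struct timespec */
static inline struct timespec task_boot_start_ts(const struct task_struct *p)
{
	return ns_to_timespec(p->real_start_time);
}
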
@@ -1389,6 +1386,7 @@ struct task_struct {
 #ifdef CONFIG_SYSVIPC
 /* ipc stuff */
 	struct sysv_sem sysvsem;
+	struct sysv_shm sysvshm;
 #endif
 #ifdef CONFIG_DETECT_HUNG_TASK
 /* hung task detection */
@@ -1440,8 +1438,6 @@ struct task_struct {
 	struct rb_node *pi_waiters_leftmost;
 	/* Deadlock detection and priority inheritance handling */
 	struct rt_mutex_waiter *pi_blocked_on;
-	/* Top pi_waiters task */
-	struct task_struct *pi_top_task;
 #endif
 
 #ifdef CONFIG_DEBUG_MUTEXES
@@ -1634,12 +1630,6 @@ struct task_struct {
 	unsigned long trace_recursion;
 #endif /* CONFIG_TRACING */
 #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
-	struct memcg_batch_info {
-		int do_batch;	/* incremented when batch uncharge started */
-		struct mem_cgroup *memcg; /* target memcg of uncharge */
-		unsigned long nr_pages;	/* uncharged usage */
-		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
-	} memcg_batch;
 	unsigned int memcg_kmem_skip_account;
 	struct memcg_oom_info {
 		struct mem_cgroup *memcg;
@@ -1967,6 +1957,19 @@ static inline void memalloc_noio_restore(unsigned int flags)
 	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
 }
 
+/* Per-process atomic flags. */
+#define PFA_NO_NEW_PRIVS 0x00000001	/* May not gain new privileges. */
+
+static inline bool task_no_new_privs(struct task_struct *p)
+{
+	return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
+}
+
+static inline void task_set_no_new_privs(struct task_struct *p)
+{
+	set_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
+}
+
 /*
  * task->jobctl flags
  */
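
no_new_privs moves out of the bitfield next to in_iowait and into atomic_flags, so setting it cannot race with non-atomic read-modify-write updates of neighbouring bits; note the header adds set and test helpers but no clear helper. A hedged caller-side sketch; check_exec_privs() is an invented name, and only the accessor usage reflects the header above:

#include <linux/errno.h>
#include <linux/sched.h>

static int check_exec_privs(struct task_struct *tsk)
{
	/* a task marked no_new_privs must never gain privileges */
	if (task_no_new_privs(tsk))
		return -EPERM;
	return 0;
}
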
@@ -2009,9 +2012,6 @@ static inline void rcu_copy_process(struct task_struct *p)
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	p->rcu_blocked_node = NULL;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-#ifdef CONFIG_RCU_BOOST
-	p->rcu_boost_mutex = NULL;
-#endif /* #ifdef CONFIG_RCU_BOOST */
 	INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
@@ -2360,8 +2360,10 @@ static inline int on_sig_stack(unsigned long sp)
 
 static inline int sas_ss_flags(unsigned long sp)
 {
-	return (current->sas_ss_size == 0 ? SS_DISABLE
-		: on_sig_stack(sp) ? SS_ONSTACK : 0);
+	if (!current->sas_ss_size)
+		return SS_DISABLE;
+
+	return on_sig_stack(sp) ? SS_ONSTACK : 0;
 }
 
 static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
@@ -2788,7 +2790,7 @@ static inline bool __must_check current_set_polling_and_test(void)
 
 	/*
 	 * Polling state must be visible before we test NEED_RESCHED,
-	 * paired by resched_task()
+	 * paired by resched_curr()
 	 */
 	smp_mb__after_atomic();
 
@@ -2806,7 +2808,7 @@ static inline bool __must_check current_clr_polling_and_test(void)
 
 	/*
 	 * Polling state must be visible before we test NEED_RESCHED,
-	 * paired by resched_task()
+	 * paired by resched_curr()
 	 */
 	smp_mb__after_atomic();
 
@@ -2838,7 +2840,7 @@ static inline void current_clr_polling(void)
 	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
 	 * fold.
 	 */
-	smp_mb(); /* paired with resched_curr() */
+	smp_mb(); /* paired with resched_curr() */
 
 	preempt_fold_need_resched();
 }
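
All three comment updates reflect the rename of resched_task() to resched_curr(); the barrier pairing itself is unchanged. It is the classic store/load (Dekker-style) pattern: each side publishes its flag, issues a full barrier, then reads the other side's flag, so at least one side always observes the other's store. A hedged userspace model in C11 atomics, with seq_cst fences standing in for smp_mb(); this is an illustration of the pairing, not kernel code:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool polling, need_resched;

/* idle side: loosely mirrors current_set_polling_and_test() */
static bool may_enter_idle(void)
{
	atomic_store_explicit(&polling, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb__after_atomic() */
	/* if the waker's store is already visible, abort the sleep */
	return !atomic_load_explicit(&need_resched, memory_order_relaxed);
}

/* waker side: loosely mirrors resched_curr() */
static bool need_ipi(void)
{
	atomic_store_explicit(&need_resched, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	/* a polling idle CPU will notice need_resched itself; otherwise kick it */
	return !atomic_load_explicit(&polling, memory_order_relaxed);
}
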
@@ -2963,15 +2965,10 @@ static inline void inc_syscw(struct task_struct *tsk)
 
 #ifdef CONFIG_MEMCG
 extern void mm_update_next_owner(struct mm_struct *mm);
-extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
 #else
 static inline void mm_update_next_owner(struct mm_struct *mm)
 {
 }
-
-static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
-{
-}
 #endif /* CONFIG_MEMCG */
 
 static inline unsigned long task_rlimit(const struct task_struct *tsk,