Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  118
1 file changed, 63 insertions, 55 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b7b9501b41af..edad7a43edea 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -384,6 +384,7 @@ extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
 				    void __user *buffer,
 				    size_t *lenp, loff_t *ppos);
 extern unsigned int softlockup_panic;
+extern unsigned int hardlockup_panic;
 void lockup_detector_init(void);
 #else
 static inline void touch_softlockup_watchdog(void)
@@ -483,9 +484,11 @@ static inline int get_dumpable(struct mm_struct *mm)
 #define MMF_DUMP_ELF_HEADERS	6
 #define MMF_DUMP_HUGETLB_PRIVATE 7
 #define MMF_DUMP_HUGETLB_SHARED  8
+#define MMF_DUMP_DAX_PRIVATE	9
+#define MMF_DUMP_DAX_SHARED	10
 
 #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
-#define MMF_DUMP_FILTER_BITS	7
+#define MMF_DUMP_FILTER_BITS	9
 #define MMF_DUMP_FILTER_MASK \
 	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
 #define MMF_DUMP_FILTER_DEFAULT \
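
The two new DAX dump bits are why MMF_DUMP_FILTER_BITS grows from 7 to 9 above, so the filter mask keeps covering every dump flag. A small userspace sketch of the mask arithmetic follows; it is an illustration only, not part of the patch, and it assumes MMF_DUMPABLE_BITS and MMF_DUMP_ANON_PRIVATE are both 2, as elsewhere in this header.

#include <stdio.h>

/* Simplified stand-ins mirroring the header, for illustration only. */
#define MMF_DUMPABLE_BITS      2
#define MMF_DUMP_FILTER_SHIFT  MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS   9
#define MMF_DUMP_FILTER_MASK \
        (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)

int main(void)
{
        /*
         * (1 << 9) - 1 == 0x1ff, shifted left by 2 == 0x7fc: bits 2..10
         * of mm->flags, i.e. MMF_DUMP_ANON_PRIVATE (2) through the new
         * MMF_DUMP_DAX_SHARED (10).
         */
        printf("filter mask = %#x\n", MMF_DUMP_FILTER_MASK);
        return 0;
}
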
@@ -599,33 +602,42 @@ struct task_cputime_atomic {
 	.sum_exec_runtime = ATOMIC64_INIT(0),		\
 	}
 
-#ifdef CONFIG_PREEMPT_COUNT
-#define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
-#else
-#define PREEMPT_DISABLED	PREEMPT_ENABLED
-#endif
+#define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
 
 /*
- * Disable preemption until the scheduler is running.
- * Reset by start_kernel()->sched_init()->init_idle().
+ * Disable preemption until the scheduler is running -- use an unconditional
+ * value so that it also works on !PREEMPT_COUNT kernels.
  *
- * We include PREEMPT_ACTIVE to avoid cond_resched() from working
- * before the scheduler is active -- see should_resched().
+ * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
  */
-#define INIT_PREEMPT_COUNT	(PREEMPT_DISABLED + PREEMPT_ACTIVE)
+#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET
+
+/*
+ * Initial preempt_count value; reflects the preempt_count schedule invariant
+ * which states that during context switches:
+ *
+ *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
+ *
+ * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
+ * Note: See finish_task_switch().
+ */
+#define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
 
 /**
  * struct thread_group_cputimer - thread group interval timer counts
  * @cputime_atomic:	atomic thread group interval timers.
- * @running:		non-zero when there are timers running and
- *			@cputime receives updates.
+ * @running:		true when there are timers running and
+ *			@cputime_atomic receives updates.
+ * @checking_timer:	true when a thread in the group is in the
+ *			process of checking for thread group timers.
  *
  * This structure contains the version of task_cputime, above, that is
  * used for thread group CPU timer calculations.
  */
 struct thread_group_cputimer {
 	struct task_cputime_atomic cputime_atomic;
-	int running;
+	bool running;
+	bool checking_timer;
 };
 
 #include <linux/rwsem.h>
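
For reference, a toy userspace program showing what the new INIT_PREEMPT_COUNT and FORK_PREEMPT_COUNT definitions above evaluate to under both configurations. Illustration only: the PREEMPT_OFFSET, PREEMPT_ENABLED and PREEMPT_DISABLE_OFFSET stand-ins are simplified from <linux/preempt.h>, and the arch-specific PREEMPT_NEED_RESCHED folding some architectures merge into PREEMPT_ENABLED is ignored.

#include <stdio.h>

/* Simplified stand-ins for the real <linux/preempt.h> definitions. */
#define PREEMPT_OFFSET          1       /* one level of preempt_disable() */
#define PREEMPT_ENABLED         0

#ifdef CONFIG_PREEMPT_COUNT
# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET 0
#endif

/* The two macros from the hunk above. */
#define INIT_PREEMPT_COUNT      PREEMPT_OFFSET
#define FORK_PREEMPT_COUNT      (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

int main(void)
{
        /*
         * With -DCONFIG_PREEMPT_COUNT: INIT == 1, FORK == 2 (roughly,
         * schedule()'s own preempt_disable() plus the runqueue lock held
         * across finish_task_switch()).  Without it: INIT is still 1,
         * FORK collapses to 0.
         */
        printf("INIT_PREEMPT_COUNT=%d FORK_PREEMPT_COUNT=%d\n",
               INIT_PREEMPT_COUNT, FORK_PREEMPT_COUNT);
        return 0;
}
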
@@ -762,18 +774,6 @@ struct signal_struct {
 	unsigned audit_tty_log_passwd;
 	struct tty_audit_buf *tty_audit_buf;
 #endif
-#ifdef CONFIG_CGROUPS
-	/*
-	 * group_rwsem prevents new tasks from entering the threadgroup and
-	 * member tasks from exiting,a more specifically, setting of
-	 * PF_EXITING.  fork and exit paths are protected with this rwsem
-	 * using threadgroup_change_begin/end().  Users which require
-	 * threadgroup to remain stable should use threadgroup_[un]lock()
-	 * which also takes care of exec path.  Currently, cgroup is the
-	 * only user.
-	 */
-	struct rw_semaphore group_rwsem;
-#endif
 
 	oom_flags_t oom_flags;
 	short oom_score_adj;		/* OOM kill score adjustment */
@@ -840,7 +840,7 @@ struct user_struct {
 	struct hlist_node uidhash_node;
 	kuid_t uid;
 
-#ifdef CONFIG_PERF_EVENTS
+#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
 	atomic_long_t locked_vm;
 #endif
 };
@@ -1139,8 +1139,6 @@ struct sched_domain_topology_level {
 #endif
 };
 
-extern struct sched_domain_topology_level *sched_domain_topology;
-
 extern void set_sched_topology(struct sched_domain_topology_level *tl);
 extern void wake_up_if_idle(int cpu);
 
@@ -1189,10 +1187,10 @@ struct load_weight {
 
 /*
  * The load_avg/util_avg accumulates an infinite geometric series.
- * 1) load_avg factors the amount of time that a sched_entity is
- * runnable on a rq into its weight. For cfs_rq, it is the aggregated
- * such weights of all runnable and blocked sched_entities.
- * 2) util_avg factors frequency scaling into the amount of time
+ * 1) load_avg factors frequency scaling into the amount of time that a
+ * sched_entity is runnable on a rq into its weight. For cfs_rq, it is the
+ * aggregated such weights of all runnable and blocked sched_entities.
+ * 2) util_avg factors frequency and cpu scaling into the amount of time
  * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE].
  * For cfs_rq, it is the aggregated such times of all runnable and
  * blocked sched_entities.
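
The comment above refers to the per-entity load tracking (PELT) geometric series. Below is a toy model of that accumulation, not part of the patch: each period's contribution is decayed by a factor y per elapsed period, with y chosen as in the scheduler so that y^32 == 1/2, meaning history older than about 32 periods has lost half its weight.

#include <math.h>
#include <stdio.h>

#define SCHED_LOAD_SCALE 1024

int main(void)
{
        double y = pow(0.5, 1.0 / 32.0);        /* decay factor, y^32 == 0.5 */
        double util = 0.0;
        int period;

        /*
         * An entity that has been running flat out for 100 periods: the raw
         * geometric sum converges toward SCHED_LOAD_SCALE * 1/(1 - y).
         */
        for (period = 0; period < 100; period++)
                util = util * y + SCHED_LOAD_SCALE;

        printf("raw geometric sum after 100 busy periods: %.0f\n", util);
        /* Scaled back into [0..SCHED_LOAD_SCALE], as util_avg is. */
        printf("scaled utilisation: %.0f\n", util * (1.0 - y));
        return 0;
}
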
@@ -1342,10 +1340,12 @@ struct sched_dl_entity {
 
 union rcu_special {
 	struct {
-		bool blocked;
-		bool need_qs;
-	} b;
-	short s;
+		u8 blocked;
+		u8 need_qs;
+		u8 exp_need_qs;
+		u8 pad;	/* Otherwise the compiler can store garbage here. */
+	} b; /* Bits. */
+	u32 s; /* Set of bits. */
 };
 struct rcu_node;
 
@@ -1463,7 +1463,9 @@ struct task_struct {
 	unsigned sched_reset_on_fork:1;
 	unsigned sched_contributes_to_load:1;
 	unsigned sched_migrated:1;
-
+#ifdef CONFIG_MEMCG
+	unsigned memcg_may_oom:1;
+#endif
 #ifdef CONFIG_MEMCG_KMEM
 	unsigned memcg_kmem_skip_account:1;
 #endif
@@ -1570,9 +1572,7 @@ struct task_struct {
 
 	unsigned long sas_ss_sp;
 	size_t sas_ss_size;
-	int (*notifier)(void *priv);
-	void *notifier_data;
-	sigset_t *notifier_mask;
+
 	struct callback_head *task_works;
 
 	struct audit_context *audit_context;
@@ -1794,12 +1794,12 @@ struct task_struct {
 	unsigned long trace_recursion;
 #endif /* CONFIG_TRACING */
 #ifdef CONFIG_MEMCG
-	struct memcg_oom_info {
-		struct mem_cgroup *memcg;
-		gfp_t gfp_mask;
-		int order;
-		unsigned int may_oom:1;
-	} memcg_oom;
+	struct mem_cgroup *memcg_in_oom;
+	gfp_t memcg_oom_gfp_mask;
+	int memcg_oom_order;
+
+	/* number of pages to reclaim on returning to userland */
+	unsigned int memcg_nr_pages_over_high;
 #endif
 #ifdef CONFIG_UPROBES
 	struct uprobe_task *utask;
@@ -2464,21 +2464,29 @@ extern void ignore_signals(struct task_struct *);
 extern void flush_signal_handlers(struct task_struct *, int force_default);
 extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
 
-static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
+static inline int kernel_dequeue_signal(siginfo_t *info)
 {
-	unsigned long flags;
+	struct task_struct *tsk = current;
+	siginfo_t __info;
 	int ret;
 
-	spin_lock_irqsave(&tsk->sighand->siglock, flags);
-	ret = dequeue_signal(tsk, mask, info);
-	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
+	spin_lock_irq(&tsk->sighand->siglock);
+	ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
+	spin_unlock_irq(&tsk->sighand->siglock);
 
 	return ret;
 }
 
-extern void block_all_signals(int (*notifier)(void *priv), void *priv,
-			      sigset_t *mask);
-extern void unblock_all_signals(void);
+static inline void kernel_signal_stop(void)
+{
+	spin_lock_irq(&current->sighand->siglock);
+	if (current->jobctl & JOBCTL_STOP_DEQUEUED)
+		__set_current_state(TASK_STOPPED);
+	spin_unlock_irq(&current->sighand->siglock);
+
+	schedule();
+}
+
 extern void release_task(struct task_struct * p);
 extern int send_sig_info(int, struct siginfo *, struct task_struct *);
 extern int force_sigsegv(int, struct task_struct *);
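
The removed dequeue_signal_lock() and block_all_signals()/unblock_all_signals() interfaces give way to kernel_dequeue_signal() and kernel_signal_stop(), which always act on current. The sketch below shows how a kernel thread that has opted into a few signals might use them; it is modelled loosely on existing callers such as the jffs2 GC thread, and the thread name, its work loop and error handling are illustrative, not taken from this patch.

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/signal.h>

static int example_kthread(void *data)
{
        /* Opt in to the signals this thread is willing to handle. */
        allow_signal(SIGSTOP);
        allow_signal(SIGKILL);

        while (!kthread_should_stop()) {
                /* ... do one unit of work ... */

                if (signal_pending(current)) {
                        switch (kernel_dequeue_signal(NULL)) {
                        case SIGSTOP:
                                /* Park in TASK_STOPPED until continued. */
                                kernel_signal_stop();
                                break;
                        case SIGKILL:
                                goto out;
                        }
                }
        }
out:
        return 0;
}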