author	Ingo Molnar <mingo@kernel.org>	2017-02-02 02:35:14 -0500
committer	Ingo Molnar <mingo@kernel.org>	2017-03-02 19:43:37 -0500
commit	c3edc4010e9d102eb7b8f17d15c2ebc425fed63c (patch)
tree	f49be469e5001e63d7b038bd9bac71ed7b63bd02 /include/linux/sched.h
parent	11701c6768367294c5086738d49196192aaf3d60 (diff)
sched/headers: Move task_struct::signal and task_struct::sighand types and accessors into <linux/sched/signal.h>
task_struct::signal and task_struct::sighand are pointers, which would normally
make it straightforward to not define those types in sched.h.

That is not so, because the types are accompanied by a myriad of APIs (macros
and inline functions) that dereference them.

Split the types and the APIs out of sched.h and move them into a new header,
<linux/sched/signal.h>.

With this change sched.h does not know about 'struct signal' and 'struct
sighand' anymore; trying to put accessors into sched.h as a test fails the
following way:

  ./include/linux/sched.h: In function ‘test_signal_types’:
  ./include/linux/sched.h:2461:18: error: dereferencing pointer to incomplete type ‘struct signal_struct’
                    ^

This reduces the size and complexity of sched.h significantly.

Update all headers and .c code that relied on getting the signal handling
functionality from <linux/sched.h> to include <linux/sched/signal.h>.

The list of affected files in the preparatory patch was partly generated by
grepping for the APIs, and partly by doing coverage build testing, both
all[yes|mod|def|no]config builds on 64-bit and 32-bit x86, and an array of
cross-architecture builds.

Nevertheless some (trivial) build breakage is still expected related to rare
Kconfig combinations and in-flight patches to various kernel code, but most
of it should be handled by this patch.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
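[Editor's sketch] To see the mechanism behind the quoted error in miniature, here is a self-contained C sketch (the type task_struct_mini and the nr_threads field are invented for illustration, not kernel code): a forward declaration is enough for a header to declare and pass pointers around, but any dereference needs the full struct definition, which is exactly what this patch confines to <linux/sched/signal.h>.

struct signal_struct;			/* forward declaration: type is incomplete here */

struct task_struct_mini {
	struct signal_struct *signal;	/* a pointer to an incomplete type is fine */
};

#if 0	/* enabling this reproduces the error quoted above */
static inline int test_signal_types(struct task_struct_mini *t)
{
	return t->signal->nr_threads;	/* error: dereferencing pointer to
					 * incomplete type 'struct signal_struct' */
}
#endif

/*
 * Once the full definition is in scope -- i.e. after including the
 * header that defines it -- the same accessor compiles:
 */
struct signal_struct {
	int nr_threads;
};

static inline int test_signal_types_ok(struct task_struct_mini *t)
{
	return t->signal->nr_threads;
}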
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	499
1 file changed, 3 insertions, 496 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7934cd0acbc7..c1586104d4c0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -71,6 +71,9 @@ struct blk_plug;
 struct filename;
 struct nameidata;
 
+struct signal_struct;
+struct sighand_struct;
+
 extern unsigned long total_forks;
 extern int nr_threads;
 DECLARE_PER_CPU(unsigned long, process_counts);
@@ -361,13 +364,6 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
 #endif
 
-struct sighand_struct {
-	atomic_t		count;
-	struct k_sigaction	action[_NSIG];
-	spinlock_t		siglock;
-	wait_queue_head_t	signalfd_wqh;
-};
-
 struct pacct_struct {
 	int			ac_flag;
 	long			ac_exitcode;
@@ -486,195 +482,6 @@ struct thread_group_cputimer {
 struct autogroup;
 
 /*
- * NOTE! "signal_struct" does not have its own
- * locking, because a shared signal_struct always
- * implies a shared sighand_struct, so locking
- * sighand_struct is always a proper superset of
- * the locking of signal_struct.
- */
-struct signal_struct {
-	atomic_t		sigcnt;
-	atomic_t		live;
-	int			nr_threads;
-	struct list_head	thread_head;
-
-	wait_queue_head_t	wait_chldexit;	/* for wait4() */
-
-	/* current thread group signal load-balancing target: */
-	struct task_struct	*curr_target;
-
-	/* shared signal handling: */
-	struct sigpending	shared_pending;
-
-	/* thread group exit support */
-	int			group_exit_code;
-	/* overloaded:
-	 * - notify group_exit_task when ->count is equal to notify_count
-	 * - everyone except group_exit_task is stopped during signal delivery
-	 *   of fatal signals, group_exit_task processes the signal.
-	 */
-	int			notify_count;
-	struct task_struct	*group_exit_task;
-
-	/* thread group stop support, overloads group_exit_code too */
-	int			group_stop_count;
-	unsigned int		flags; /* see SIGNAL_* flags below */
-
-	/*
-	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
-	 * manager, to re-parent orphan (double-forking) child processes
-	 * to this process instead of 'init'. The service manager is
-	 * able to receive SIGCHLD signals and is able to investigate
-	 * the process until it calls wait(). All children of this
-	 * process will inherit a flag if they should look for a
-	 * child_subreaper process at exit.
-	 */
-	unsigned int		is_child_subreaper:1;
-	unsigned int		has_child_subreaper:1;
-
-#ifdef CONFIG_POSIX_TIMERS
-
-	/* POSIX.1b Interval Timers */
-	int			posix_timer_id;
-	struct list_head	posix_timers;
-
-	/* ITIMER_REAL timer for the process */
-	struct hrtimer		real_timer;
-	ktime_t			it_real_incr;
-
-	/*
-	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
-	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
-	 * values are defined to 0 and 1 respectively
-	 */
-	struct cpu_itimer	it[2];
-
-	/*
-	 * Thread group totals for process CPU timers.
-	 * See thread_group_cputimer(), et al, for details.
-	 */
-	struct thread_group_cputimer cputimer;
-
-	/* Earliest-expiration cache. */
-	struct task_cputime	cputime_expires;
-
-	struct list_head	cpu_timers[3];
-
-#endif
-
-	struct pid		*leader_pid;
-
-#ifdef CONFIG_NO_HZ_FULL
-	atomic_t		tick_dep_mask;
-#endif
-
-	struct pid		*tty_old_pgrp;
-
-	/* boolean value for session group leader */
-	int			leader;
-
-	struct tty_struct	*tty; /* NULL if no tty */
-
-#ifdef CONFIG_SCHED_AUTOGROUP
-	struct autogroup	*autogroup;
-#endif
-	/*
-	 * Cumulative resource counters for dead threads in the group,
-	 * and for reaped dead child processes forked by this group.
-	 * Live threads maintain their own counters and add to these
-	 * in __exit_signal, except for the group leader.
-	 */
-	seqlock_t		stats_lock;
-	u64			utime, stime, cutime, cstime;
-	u64			gtime;
-	u64			cgtime;
-	struct prev_cputime	prev_cputime;
-	unsigned long		nvcsw, nivcsw, cnvcsw, cnivcsw;
-	unsigned long		min_flt, maj_flt, cmin_flt, cmaj_flt;
-	unsigned long		inblock, oublock, cinblock, coublock;
-	unsigned long		maxrss, cmaxrss;
-	struct task_io_accounting ioac;
-
-	/*
-	 * Cumulative ns of scheduled CPU time of dead threads in the
-	 * group, not including a zombie group leader. (This only differs
-	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
-	 * other than jiffies.)
-	 */
-	unsigned long long	sum_sched_runtime;
-
-	/*
-	 * We don't bother to synchronize most readers of this at all,
-	 * because there is no reader checking a limit that actually needs
-	 * to get both rlim_cur and rlim_max atomically, and either one
-	 * alone is a single word that can safely be read normally.
-	 * getrlimit/setrlimit use task_lock(current->group_leader) to
-	 * protect this instead of the siglock, because they really
-	 * have no need to disable irqs.
-	 */
-	struct rlimit		rlim[RLIM_NLIMITS];
-
-#ifdef CONFIG_BSD_PROCESS_ACCT
-	struct pacct_struct	pacct;	/* per-process accounting information */
-#endif
-#ifdef CONFIG_TASKSTATS
-	struct taskstats	*stats;
-#endif
-#ifdef CONFIG_AUDIT
-	unsigned		audit_tty;
-	struct tty_audit_buf	*tty_audit_buf;
-#endif
-
-	/*
-	 * Thread is the potential origin of an oom condition; kill first on
-	 * oom
-	 */
-	bool			oom_flag_origin;
-	short			oom_score_adj;		/* OOM kill score adjustment */
-	short			oom_score_adj_min;	/* OOM kill score adjustment min value.
-							 * Only settable by CAP_SYS_RESOURCE. */
-	struct mm_struct	*oom_mm;	/* recorded mm when the thread group got
-						 * killed by the oom killer */
-
-	struct mutex		cred_guard_mutex; /* guard against foreign influences on
-						   * credential calculations
-						   * (notably ptrace) */
-};
-
-/*
- * Bits in flags field of signal_struct.
- */
-#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
-#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
-#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
-#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
-/*
- * Pending notifications to parent.
- */
-#define SIGNAL_CLD_STOPPED	0x00000010
-#define SIGNAL_CLD_CONTINUED	0x00000020
-#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
-
-#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
-
-#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
-			  SIGNAL_STOP_CONTINUED)
-
-static inline void signal_set_stop_flags(struct signal_struct *sig,
-					 unsigned int flags)
-{
-	WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
-	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
-}
-
-/* If true, all threads except ->group_exit_task have pending SIGKILL */
-static inline int signal_group_exit(const struct signal_struct *sig)
-{
-	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
-		(sig->group_exit_task != NULL);
-}
-
-/*
  * Some day this will be a full-fledged user tracking system..
  */
 struct user_struct {
@@ -2126,190 +1933,8 @@ extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
 extern void sched_dead(struct task_struct *p);
 
 extern void proc_caches_init(void);
-extern void flush_signals(struct task_struct *);
-extern void ignore_signals(struct task_struct *);
-extern void flush_signal_handlers(struct task_struct *, int force_default);
-extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
-
-static inline int kernel_dequeue_signal(siginfo_t *info)
-{
-	struct task_struct *tsk = current;
-	siginfo_t __info;
-	int ret;
-
-	spin_lock_irq(&tsk->sighand->siglock);
-	ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
-	spin_unlock_irq(&tsk->sighand->siglock);
-
-	return ret;
-}
-
-static inline void kernel_signal_stop(void)
-{
-	spin_lock_irq(&current->sighand->siglock);
-	if (current->jobctl & JOBCTL_STOP_DEQUEUED)
-		__set_current_state(TASK_STOPPED);
-	spin_unlock_irq(&current->sighand->siglock);
-
-	schedule();
-}
 
 extern void release_task(struct task_struct * p);
-extern int send_sig_info(int, struct siginfo *, struct task_struct *);
-extern int force_sigsegv(int, struct task_struct *);
-extern int force_sig_info(int, struct siginfo *, struct task_struct *);
-extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
-extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
-extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
-				 const struct cred *, u32);
-extern int kill_pgrp(struct pid *pid, int sig, int priv);
-extern int kill_pid(struct pid *pid, int sig, int priv);
-extern int kill_proc_info(int, struct siginfo *, pid_t);
-extern __must_check bool do_notify_parent(struct task_struct *, int);
-extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
-extern void force_sig(int, struct task_struct *);
-extern int send_sig(int, struct task_struct *, int);
-extern int zap_other_threads(struct task_struct *p);
-extern struct sigqueue *sigqueue_alloc(void);
-extern void sigqueue_free(struct sigqueue *);
-extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
-extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
-
-#ifdef TIF_RESTORE_SIGMASK
-/*
- * Legacy restore_sigmask accessors. These are inefficient on
- * SMP architectures because they require atomic operations.
- */
-
-/**
- * set_restore_sigmask() - make sure saved_sigmask processing gets done
- *
- * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
- * will run before returning to user mode, to process the flag. For
- * all callers, TIF_SIGPENDING is already set or it's no harm to set
- * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
- * arch code will notice on return to user mode, in case those bits
- * are scarce. We set TIF_SIGPENDING here to ensure that the arch
- * signal code always gets run when TIF_RESTORE_SIGMASK is set.
- */
-static inline void set_restore_sigmask(void)
-{
-	set_thread_flag(TIF_RESTORE_SIGMASK);
-	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
-}
-static inline void clear_restore_sigmask(void)
-{
-	clear_thread_flag(TIF_RESTORE_SIGMASK);
-}
-static inline bool test_restore_sigmask(void)
-{
-	return test_thread_flag(TIF_RESTORE_SIGMASK);
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
-	return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
-}
-
-#else	/* TIF_RESTORE_SIGMASK */
-
-/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
-static inline void set_restore_sigmask(void)
-{
-	current->restore_sigmask = true;
-	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
-}
-static inline void clear_restore_sigmask(void)
-{
-	current->restore_sigmask = false;
-}
-static inline bool test_restore_sigmask(void)
-{
-	return current->restore_sigmask;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
-	if (!current->restore_sigmask)
-		return false;
-	current->restore_sigmask = false;
-	return true;
-}
-#endif
-
-static inline void restore_saved_sigmask(void)
-{
-	if (test_and_clear_restore_sigmask())
-		__set_current_blocked(&current->saved_sigmask);
-}
-
-static inline sigset_t *sigmask_to_save(void)
-{
-	sigset_t *res = &current->blocked;
-	if (unlikely(test_restore_sigmask()))
-		res = &current->saved_sigmask;
-	return res;
-}
-
-static inline int kill_cad_pid(int sig, int priv)
-{
-	return kill_pid(cad_pid, sig, priv);
-}
-
-/* These can be the second arg to send_sig_info/send_group_sig_info. */
-#define SEND_SIG_NOINFO	((struct siginfo *) 0)
-#define SEND_SIG_PRIV	((struct siginfo *) 1)
-#define SEND_SIG_FORCED	((struct siginfo *) 2)
-
-/*
- * True if we are on the alternate signal stack.
- */
-static inline int on_sig_stack(unsigned long sp)
-{
-	/*
-	 * If the signal stack is SS_AUTODISARM then, by construction, we
-	 * can't be on the signal stack unless user code deliberately set
-	 * SS_AUTODISARM when we were already on it.
-	 *
-	 * This improves reliability: if user state gets corrupted such that
-	 * the stack pointer points very close to the end of the signal stack,
-	 * then this check will enable the signal to be handled anyway.
-	 */
-	if (current->sas_ss_flags & SS_AUTODISARM)
-		return 0;
-
-#ifdef CONFIG_STACK_GROWSUP
-	return sp >= current->sas_ss_sp &&
-		sp - current->sas_ss_sp < current->sas_ss_size;
-#else
-	return sp > current->sas_ss_sp &&
-		sp - current->sas_ss_sp <= current->sas_ss_size;
-#endif
-}
-
-static inline int sas_ss_flags(unsigned long sp)
-{
-	if (!current->sas_ss_size)
-		return SS_DISABLE;
-
-	return on_sig_stack(sp) ? SS_ONSTACK : 0;
-}
-
-static inline void sas_ss_reset(struct task_struct *p)
-{
-	p->sas_ss_sp = 0;
-	p->sas_ss_size = 0;
-	p->sas_ss_flags = SS_DISABLE;
-}
-
-static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
-{
-	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
-#ifdef CONFIG_STACK_GROWSUP
-		return current->sas_ss_sp;
-#else
-		return current->sas_ss_sp + current->sas_ss_size;
-#endif
-	return sp;
-}
 
 #ifdef CONFIG_HAVE_COPY_THREAD_TLS
 extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
@@ -2338,10 +1963,8 @@ static inline void exit_thread(struct task_struct *tsk)
 #endif
 
 extern void exit_files(struct task_struct *);
-extern void __cleanup_sighand(struct sighand_struct *);
 
 extern void exit_itimers(struct signal_struct *);
-extern void flush_itimer_signals(void);
 
 extern void do_group_exit(int);
 
@@ -2376,81 +1999,6 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
 }
 #endif
 
-#define tasklist_empty() \
-	list_empty(&init_task.tasks)
-
-#define next_task(p) \
-	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
-
-#define for_each_process(p) \
-	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
-
-extern bool current_is_single_threaded(void);
-
-/*
- * Careful: do_each_thread/while_each_thread is a double loop so
- * 'break' will not work as expected - use goto instead.
- */
-#define do_each_thread(g, t) \
-	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
-
-#define while_each_thread(g, t) \
-	while ((t = next_thread(t)) != g)
-
-#define __for_each_thread(signal, t)	\
-	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
-
-#define for_each_thread(p, t)		\
-	__for_each_thread((p)->signal, t)
-
-/* Careful: this is a double loop, 'break' won't work as expected. */
-#define for_each_process_thread(p, t)	\
-	for_each_process(p) for_each_thread(p, t)
-
-typedef int (*proc_visitor)(struct task_struct *p, void *data);
-void walk_process_tree(struct task_struct *top, proc_visitor, void *);
-
-static inline int get_nr_threads(struct task_struct *tsk)
-{
-	return tsk->signal->nr_threads;
-}
-
-static inline bool thread_group_leader(struct task_struct *p)
-{
-	return p->exit_signal >= 0;
-}
-
-/* Due to the insanities of de_thread it is possible for a process
- * to have the pid of the thread group leader without actually being
- * the thread group leader. For iteration through the pids in proc
- * all we care about is that we have a task with the appropriate
- * pid, we don't actually care if we have the right task.
- */
-static inline bool has_group_leader_pid(struct task_struct *p)
-{
-	return task_pid(p) == p->signal->leader_pid;
-}
-
-static inline
-bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
-{
-	return p1->signal == p2->signal;
-}
-
-static inline struct task_struct *next_thread(const struct task_struct *p)
-{
-	return list_entry_rcu(p->thread_group.next,
-			      struct task_struct, thread_group);
-}
-
-static inline int thread_group_empty(struct task_struct *p)
-{
-	return list_empty(&p->thread_group);
-}
-
-#define delay_group_leader(p) \
-	(thread_group_leader(p) && !thread_group_empty(p))
-
 /*
  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
  * subscriptions and synchronises with wait4(). Also used in procfs. Also
@@ -2471,25 +2019,6 @@ static inline void task_unlock(struct task_struct *p)
 	spin_unlock(&p->alloc_lock);
 }
 
-extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
-						  unsigned long *flags);
-
-static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
-						       unsigned long *flags)
-{
-	struct sighand_struct *ret;
-
-	ret = __lock_task_sighand(tsk, flags);
-	(void)__cond_lock(&tsk->sighand->siglock, ret);
-	return ret;
-}
-
-static inline void unlock_task_sighand(struct task_struct *tsk,
-				       unsigned long *flags)
-{
-	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
-}
-
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 
 static inline struct thread_info *task_thread_info(struct task_struct *task)
@@ -2862,28 +2391,6 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
 }
 #endif /* CONFIG_MEMCG */
 
-static inline unsigned long task_rlimit(const struct task_struct *tsk,
-					unsigned int limit)
-{
-	return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
-}
-
-static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
-					    unsigned int limit)
-{
-	return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
-}
-
-static inline unsigned long rlimit(unsigned int limit)
-{
-	return task_rlimit(current, limit);
-}
-
-static inline unsigned long rlimit_max(unsigned int limit)
-{
-	return task_rlimit_max(current, limit);
-}
-
 #define SCHED_CPUFREQ_RT	(1U << 0)
 #define SCHED_CPUFREQ_DL	(1U << 1)
 #define SCHED_CPUFREQ_IOWAIT	(1U << 2)
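
[Editor's sketch] As an illustration of the call-site migration the commit message describes, a hypothetical file using the moved APIs would be updated along these lines (the path below is invented for the example; this assumes, per this series, that <linux/sched/signal.h> itself pulls in <linux/sched.h>, so no declarations are lost):

--- a/drivers/example/foo.c
+++ b/drivers/example/foo.c
@@ -1,5 +1,5 @@
 #include <linux/kernel.h>
-#include <linux/sched.h>
+#include <linux/sched/signal.h>
 
 /* foo_notify() calls send_sig(), one of the APIs moved by this patch */
 static void foo_notify(struct task_struct *t)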