path: root/include/linux/sched.h
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h | 160
1 files changed, 118 insertions, 42 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 701b8cbceb05..1c876e27ff93 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -73,6 +73,7 @@ struct sched_param {
 #include <linux/seccomp.h>
 #include <linux/rcupdate.h>
 #include <linux/futex.h>
+#include <linux/rtmutex.h>
 
 #include <linux/time.h>
 #include <linux/param.h>
@@ -83,6 +84,7 @@ struct sched_param {
 #include <asm/processor.h>
 
 struct exec_domain;
+struct futex_pi_state;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -123,6 +125,7 @@ extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
+extern unsigned long weighted_cpuload(const int cpu);
 
 
 /*
@@ -181,11 +184,11 @@ extern unsigned long nr_iowait(void);
 extern rwlock_t tasklist_lock;
 extern spinlock_t mmlist_lock;
 
-typedef struct task_struct task_t;
+struct task_struct;
 
 extern void sched_init(void);
 extern void sched_init_smp(void);
-extern void init_idle(task_t *idle, int cpu);
+extern void init_idle(struct task_struct *idle, int cpu);
 
 extern cpumask_t nohz_cpu_mask;
 
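This hunk starts the retirement of the task_t typedef: a bare forward declaration is all the header needs here, because every declaration passes struct task_struct by pointer. A compile-only sketch of why an incomplete type suffices (park_idle_demo is a hypothetical caller, not kernel code):

        struct task_struct;                     /* incomplete type is enough */

        extern void init_idle(struct task_struct *idle, int cpu);

        static void park_idle_demo(struct task_struct *idle)
        {
                init_idle(idle, 0);             /* pointer-only use: compiles */
                /* sizeof(*idle) or idle->prio would be a compile error here,
                 * because the struct layout is still unknown. */
        }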
@@ -358,6 +361,14 @@ struct sighand_struct {
 	spinlock_t		siglock;
 };
 
+struct pacct_struct {
+	int			ac_flag;
+	long			ac_exitcode;
+	unsigned long		ac_mem;
+	cputime_t		ac_utime, ac_stime;
+	unsigned long		ac_minflt, ac_majflt;
+};
+
 /*
  * NOTE! "signal_struct" does not have it's own
  * locking, because a shared signal_struct always
@@ -372,7 +383,7 @@ struct signal_struct {
 	wait_queue_head_t	wait_chldexit;	/* for wait4() */
 
 	/* current thread group signal load-balancing target: */
-	task_t			*curr_target;
+	struct task_struct	*curr_target;
 
 	/* shared signal handling: */
 	struct sigpending	shared_pending;
@@ -449,6 +460,9 @@ struct signal_struct {
 	struct key *session_keyring;	/* keyring inherited over fork */
 	struct key *process_keyring;	/* keyring private to this process */
 #endif
+#ifdef CONFIG_BSD_PROCESS_ACCT
+	struct pacct_struct pacct;	/* per-process accounting information */
+#endif
 };
 
 /* Context switch must be unlocked if interrupts are to be enabled */
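The new pacct_struct gives BSD process accounting a process-wide accumulator inside signal_struct. A standalone sketch of the intended usage pattern, with cputime_t stubbed as unsigned long and acct_collect_demo standing in for the real exit-path hook (an assumption; the real code lives in kernel/acct.c and runs under siglock):

        typedef unsigned long cputime_t;        /* stubbed for the sketch */

        struct pacct_struct {
                int           ac_flag;
                long          ac_exitcode;
                unsigned long ac_mem;
                cputime_t     ac_utime, ac_stime;
                unsigned long ac_minflt, ac_majflt;
        };

        /* Fold one exiting thread's counters into the shared accumulator. */
        static void acct_collect_demo(struct pacct_struct *pacct,
                                      cputime_t utime, cputime_t stime,
                                      unsigned long minflt, unsigned long majflt)
        {
                pacct->ac_utime  += utime;
                pacct->ac_stime  += stime;
                pacct->ac_minflt += minflt;
                pacct->ac_majflt += majflt;
        }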
@@ -483,8 +497,11 @@ struct signal_struct {
 
 #define MAX_PRIO		(MAX_RT_PRIO + 40)
 
-#define rt_task(p)		(unlikely((p)->prio < MAX_RT_PRIO))
+#define rt_prio(prio)		unlikely((prio) < MAX_RT_PRIO)
+#define rt_task(p)		rt_prio((p)->prio)
 #define batch_task(p)		(unlikely((p)->policy == SCHED_BATCH))
+#define has_rt_policy(p) \
+	unlikely((p)->policy != SCHED_NORMAL && (p)->policy != SCHED_BATCH)
 
 /*
  * Some day this will be a full-fledged user tracking system..
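rt_prio() splits the old rt_task() test so a raw priority value can be classified without a task pointer, and has_rt_policy() tests the policy rather than the (possibly PI-boosted) priority; under priority inheritance the two can disagree. A userspace sketch with the kernel dependencies stubbed in (the constant values are assumptions taken from contemporaneous headers):

        #include <stdio.h>

        /* Stubs so the sketch builds outside the kernel. */
        #define unlikely(x)     (x)
        #define MAX_RT_PRIO     100
        #define SCHED_NORMAL    0
        #define SCHED_FIFO      1
        #define SCHED_BATCH     3

        #define rt_prio(prio)   unlikely((prio) < MAX_RT_PRIO)
        #define has_rt_policy(p) \
                unlikely((p)->policy != SCHED_NORMAL && (p)->policy != SCHED_BATCH)

        struct demo_task { int prio; unsigned long policy; };

        int main(void)
        {
                struct demo_task t = { .prio = 50, .policy = SCHED_FIFO };

                /* prio 50 lies inside the RT range [0, MAX_RT_PRIO) */
                printf("rt_prio: %d, has_rt_policy: %d\n",
                       rt_prio(t.prio), !!has_rt_policy(&t));
                return 0;
        }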
@@ -494,7 +511,7 @@ struct user_struct {
 	atomic_t processes;	/* How many processes does this user have? */
 	atomic_t files;		/* How many open files does this user have? */
 	atomic_t sigpending;	/* How many pending signals does this user have? */
-#ifdef CONFIG_INOTIFY
+#ifdef CONFIG_INOTIFY_USER
 	atomic_t inotify_watches; /* How many inotify watches does this user have? */
 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
 #endif
@@ -517,7 +534,6 @@ extern struct user_struct *find_user(uid_t);
 extern struct user_struct root_user;
 #define INIT_USER (&root_user)
 
-typedef struct prio_array prio_array_t;
 struct backing_dev_info;
 struct reclaim_state;
 
@@ -547,9 +563,9 @@ enum idle_type
 /*
  * sched-domains (multiprocessor balancing) declarations:
  */
-#ifdef CONFIG_SMP
 #define SCHED_LOAD_SCALE	128UL	/* increase resolution of load */
 
+#ifdef CONFIG_SMP
 #define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
 #define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
 #define SD_BALANCE_EXEC		4	/* Balance on exec */
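SCHED_LOAD_SCALE moves outside the CONFIG_SMP block because the new per-task load_weight is expressed in the same fixed-point units even on uniprocessor builds. A small demonstration of the fixed-point idea (the figures are illustrative, not the kernel's weight table):

        #include <stdio.h>

        #define SCHED_LOAD_SCALE 128UL  /* fixed-point "1.0" for load */

        int main(void)
        {
                unsigned long load = 3 * SCHED_LOAD_SCALE;      /* ~3 tasks */
                unsigned long cpus = 2;
                unsigned long per_cpu = load / cpus;            /* 192 = 1.5 */

                /* without the scale factor, 3 / 2 would truncate to 1 */
                printf("%lu.%02lu tasks per cpu\n",
                       per_cpu / SCHED_LOAD_SCALE,
                       (per_cpu % SCHED_LOAD_SCALE) * 100 / SCHED_LOAD_SCALE);
                return 0;
        }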
@@ -558,6 +574,11 @@ enum idle_type
 #define SD_WAKE_AFFINE		32	/* Wake task to waking CPU */
 #define SD_WAKE_BALANCE		64	/* Perform balancing at task wakeup */
 #define SD_SHARE_CPUPOWER	128	/* Domain members share cpu power */
+#define SD_POWERSAVINGS_BALANCE	256	/* Balance for power savings */
+
+#define BALANCE_FOR_POWER	((sched_mc_power_savings || sched_smt_power_savings) \
+				 ? SD_POWERSAVINGS_BALANCE : 0)
+
 
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
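BALANCE_FOR_POWER evaluates to SD_POWERSAVINGS_BALANCE only while one of the two new tunables is set, so domain-setup code can OR it into a domain's flags unconditionally. A sketch of that pattern (make_domain_flags_demo is hypothetical, not a kernel function):

        int sched_mc_power_savings, sched_smt_power_savings;

        #define SD_POWERSAVINGS_BALANCE 256
        #define BALANCE_FOR_POWER \
                ((sched_mc_power_savings || sched_smt_power_savings) \
                 ? SD_POWERSAVINGS_BALANCE : 0)

        static unsigned int make_domain_flags_demo(unsigned int base)
        {
                /* contributes nothing while both tunables are zero */
                return base | BALANCE_FOR_POWER;
        }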
@@ -627,7 +648,7 @@ struct sched_domain {
 #endif
 };
 
-extern void partition_sched_domains(cpumask_t *partition1,
+extern int partition_sched_domains(cpumask_t *partition1,
 				    cpumask_t *partition2);
 
 /*
@@ -677,7 +698,7 @@ extern int groups_search(struct group_info *group_info, gid_t grp);
 		((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
 
 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
-extern void prefetch_stack(struct task_struct*);
+extern void prefetch_stack(struct task_struct *t);
 #else
 static inline void prefetch_stack(struct task_struct *t) { }
 #endif
@@ -693,6 +714,8 @@ enum sleep_type {
 	SLEEP_INTERRUPTED,
 };
 
+struct prio_array;
+
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
 	struct thread_info *thread_info;
@@ -702,12 +725,15 @@ struct task_struct {
 
 	int lock_depth;		/* BKL lock depth */
 
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#ifdef CONFIG_SMP
+#ifdef __ARCH_WANT_UNLOCKED_CTXSW
 	int oncpu;
 #endif
-	int prio, static_prio;
+#endif
+	int load_weight;	/* for niceness load balancing purposes */
+	int prio, static_prio, normal_prio;
 	struct list_head run_list;
-	prio_array_t *array;
+	struct prio_array *array;
 
 	unsigned short ioprio;
 	unsigned int btrace_seq;
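load_weight is the per-task quantity that the newly exported weighted_cpuload() sums per runqueue: balancing compares total weight rather than raw task counts, so one high-priority task can legitimately outweigh several nice'd-down ones. A sketch of the summation (the real nice-to-weight mapping lives in kernel/sched.c; the weights here are made up):

        /* Sum a runqueue's task weights, as weighted_cpuload(cpu) would. */
        static unsigned long weighted_load_demo(const int *weights, int nr)
        {
                unsigned long sum = 0;
                int i;

                for (i = 0; i < nr; i++)
                        sum += weights[i];
                return sum;     /* e.g. {128, 128, 15} sums to 271 */
        }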
@@ -831,13 +857,43 @@ struct task_struct {
 	u32 self_exec_id;
 /* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
 	spinlock_t alloc_lock;
-/* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
-	spinlock_t proc_lock;
+
+	/* Protection of the PI data structures: */
+	spinlock_t pi_lock;
+
+#ifdef CONFIG_RT_MUTEXES
+	/* PI waiters blocked on a rt_mutex held by this task */
+	struct plist_head pi_waiters;
+	/* Deadlock detection and priority inheritance handling */
+	struct rt_mutex_waiter *pi_blocked_on;
+#endif
 
 #ifdef CONFIG_DEBUG_MUTEXES
 	/* mutex deadlock detection */
 	struct mutex_waiter *blocked_on;
 #endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+	unsigned int irq_events;
+	int hardirqs_enabled;
+	unsigned long hardirq_enable_ip;
+	unsigned int hardirq_enable_event;
+	unsigned long hardirq_disable_ip;
+	unsigned int hardirq_disable_event;
+	int softirqs_enabled;
+	unsigned long softirq_disable_ip;
+	unsigned int softirq_disable_event;
+	unsigned long softirq_enable_ip;
+	unsigned int softirq_enable_event;
+	int hardirq_context;
+	int softirq_context;
+#endif
+#ifdef CONFIG_LOCKDEP
+# define MAX_LOCK_DEPTH 30UL
+	u64 curr_chain_key;
+	int lockdep_depth;
+	struct held_lock held_locks[MAX_LOCK_DEPTH];
+	unsigned int lockdep_recursion;
+#endif
 
 /* journalling filesystem info */
 	void *journal_info;
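pi_waiters keeps the RT waiters blocked on this task's rt-mutexes sorted by priority, so the priority to inherit is just the head of the list. A hypothetical, self-contained rendering of that invariant (the real code uses struct plist_head and struct rt_mutex_waiter under pi_lock; this singly-linked stand-in only illustrates the idea):

        struct waiter_demo {
                int prio;                       /* lower value = higher priority */
                struct waiter_demo *next;       /* kept sorted on insert */
        };

        /* The boosting decision is an O(1) peek at the sorted head. */
        static int prio_to_inherit(const struct waiter_demo *top, int normal_prio)
        {
                if (top && top->prio < normal_prio)
                        return top->prio;       /* boost to the top waiter */
                return normal_prio;             /* no waiter outranks us */
        }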
@@ -845,7 +901,6 @@ struct task_struct {
 /* VM state */
 	struct reclaim_state *reclaim_state;
 
-	struct dentry *proc_dentry;
 	struct backing_dev_info *backing_dev_info;
 
 	struct io_context *io_context;
@@ -880,6 +935,8 @@ struct task_struct {
 #ifdef CONFIG_COMPAT
 	struct compat_robust_list_head __user *compat_robust_list;
 #endif
+	struct list_head pi_state_list;
+	struct futex_pi_state *pi_state_cache;
 
 	atomic_t fs_excl;	/* holding fs exclusive resources */
 	struct rcu_head rcu;
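pi_state_cache follows the familiar pre-allocation pattern: reserve the object before taking the futex hash lock so the locked path never calls the allocator. A userspace sketch of that pattern (only the field name comes from the hunk; the helper names and logic are assumptions about how the futex code consumes it):

        #include <stdlib.h>

        struct pi_state_demo { int owner_pid; };

        static struct pi_state_demo *pi_state_cache;    /* one-slot cache */

        /* Runs in a context that may sleep, before any spinlock is taken. */
        static int refill_pi_state_cache_demo(void)
        {
                if (!pi_state_cache)
                        pi_state_cache = malloc(sizeof(*pi_state_cache));
                return pi_state_cache ? 0 : -1;
        }

        /* Runs under the lock: consumes the reservation, never allocates. */
        static struct pi_state_demo *alloc_pi_state_demo(void)
        {
                struct pi_state_demo *s = pi_state_cache;

                pi_state_cache = NULL;
                return s;
        }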
@@ -941,13 +998,13 @@ static inline void put_task_struct(struct task_struct *t)
 #define PF_KSWAPD	0x00040000	/* I am kswapd */
 #define PF_SWAPOFF	0x00080000	/* I am in swapoff */
 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
-#define PF_SYNCWRITE	0x00200000	/* I am doing a sync write */
-#define PF_BORROWED_MM	0x00400000	/* I am a kthread doing use_mm */
-#define PF_RANDOMIZE	0x00800000	/* randomize virtual address space */
-#define PF_SWAPWRITE	0x01000000	/* Allowed to write to swap */
-#define PF_SPREAD_PAGE	0x04000000	/* Spread page cache over cpuset */
-#define PF_SPREAD_SLAB	0x08000000	/* Spread some slab caches over cpuset */
+#define PF_BORROWED_MM	0x00200000	/* I am a kthread doing use_mm */
+#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
+#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
+#define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
+#define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
+#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
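Dropping PF_SYNCWRITE compacts the bit values that follow it, which is safe because PF_* flags live only in kernel memory and are not ABI. As the comment above notes, only the owning task writes tsk->flags, so plain non-atomic read-modify-write suffices; a minimal sketch (the helper names are hypothetical):

        #define PF_BORROWED_MM  0x00200000      /* I am a kthread doing use_mm */
        #define PF_RANDOMIZE    0x00400000      /* randomize virtual address space */

        /* Current-task-only accessors: no atomics needed by design. */
        static void enter_borrowed_mm_demo(unsigned long *flags)
        {
                *flags |= PF_BORROWED_MM;
        }

        static int wants_randomized_vas_demo(unsigned long flags)
        {
                return (flags & PF_RANDOMIZE) != 0;
        }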
@@ -975,9 +1032,9 @@ static inline void put_task_struct(struct task_struct *t)
 #define used_math() tsk_used_math(current)
 
 #ifdef CONFIG_SMP
-extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
+extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
 #else
-static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
+static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 {
 	if (!cpu_isset(0, new_mask))
 		return -EINVAL;
@@ -986,7 +1043,8 @@ static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
 #endif
 
 extern unsigned long long sched_clock(void);
-extern unsigned long long current_sched_time(const task_t *current_task);
+extern unsigned long long
+current_sched_time(const struct task_struct *current_task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
@@ -1002,16 +1060,29 @@ static inline void idle_task_exit(void) {}
 #endif
 
 extern void sched_idle_next(void);
-extern void set_user_nice(task_t *p, long nice);
-extern int task_prio(const task_t *p);
-extern int task_nice(const task_t *p);
-extern int can_nice(const task_t *p, const int nice);
-extern int task_curr(const task_t *p);
+
+#ifdef CONFIG_RT_MUTEXES
+extern int rt_mutex_getprio(struct task_struct *p);
+extern void rt_mutex_setprio(struct task_struct *p, int prio);
+extern void rt_mutex_adjust_pi(struct task_struct *p);
+#else
+static inline int rt_mutex_getprio(struct task_struct *p)
+{
+	return p->normal_prio;
+}
+# define rt_mutex_adjust_pi(p)		do { } while (0)
+#endif
+
+extern void set_user_nice(struct task_struct *p, long nice);
+extern int task_prio(const struct task_struct *p);
+extern int task_nice(const struct task_struct *p);
+extern int can_nice(const struct task_struct *p, const int nice);
+extern int task_curr(const struct task_struct *p);
 extern int idle_cpu(int cpu);
 extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
-extern task_t *idle_task(int cpu);
-extern task_t *curr_task(int cpu);
-extern void set_curr_task(int cpu, task_t *p);
+extern struct task_struct *idle_task(int cpu);
+extern struct task_struct *curr_task(int cpu);
+extern void set_curr_task(int cpu, struct task_struct *p);
 
 void yield(void);
 
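With CONFIG_RT_MUTEXES disabled, rt_mutex_getprio() collapses to the unboosted normal_prio, so generic code can call it unconditionally. A sketch of a caller recomputing the effective priority under that contract (task_demo and recompute_prio_demo are hypothetical stand-ins, not kernel code):

        struct task_demo { int prio, static_prio, normal_prio; };

        static int rt_mutex_getprio_demo(struct task_demo *p)
        {
                return p->normal_prio;  /* the !CONFIG_RT_MUTEXES stub */
        }

        static void recompute_prio_demo(struct task_demo *p)
        {
                int boosted = rt_mutex_getprio_demo(p);

                /* smaller value wins: keep any stronger inherited priority */
                p->prio = boosted < p->normal_prio ? boosted : p->normal_prio;
        }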
@@ -1068,8 +1139,8 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
 #else
  static inline void kick_process(struct task_struct *tsk) { }
 #endif
-extern void FASTCALL(sched_fork(task_t * p, int clone_flags));
-extern void FASTCALL(sched_exit(task_t * p));
+extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
+extern void FASTCALL(sched_exit(struct task_struct * p));
 
 extern int in_group_p(gid_t);
 extern int in_egroup_p(gid_t);
@@ -1102,7 +1173,7 @@ extern int force_sig_info(int, struct siginfo *, struct task_struct *);
 extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
 extern int kill_pg_info(int, struct siginfo *, pid_t);
 extern int kill_proc_info(int, struct siginfo *, pid_t);
-extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t);
+extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t, u32);
 extern void do_notify_parent(struct task_struct *, int);
 extern void force_sig(int, struct task_struct *);
 extern void force_sig_specific(int, struct task_struct *);
@@ -1174,17 +1245,17 @@ extern NORET_TYPE void do_group_exit(int);
 extern void daemonize(const char *, ...);
 extern int allow_signal(int);
 extern int disallow_signal(int);
-extern task_t *child_reaper;
+extern struct task_struct *child_reaper;
 
 extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
-task_t *fork_idle(int);
+struct task_struct *fork_idle(int);
 
 extern void set_task_comm(struct task_struct *tsk, char *from);
 extern void get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
-extern void wait_task_inactive(task_t * p);
+extern void wait_task_inactive(struct task_struct * p);
 #else
 #define wait_task_inactive(p)	do { } while (0)
 #endif
@@ -1210,13 +1281,13 @@ extern void wait_task_inactive(task_t * p);
 /* de_thread depends on thread_group_leader not being a pid based check */
 #define thread_group_leader(p)	(p == p->group_leader)
 
-static inline task_t *next_thread(const task_t *p)
+static inline struct task_struct *next_thread(const struct task_struct *p)
 {
 	return list_entry(rcu_dereference(p->thread_group.next),
-			  task_t, thread_group);
+			  struct task_struct, thread_group);
 }
 
-static inline int thread_group_empty(task_t *p)
+static inline int thread_group_empty(struct task_struct *p)
 {
 	return list_empty(&p->thread_group);
 }
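next_thread() dereferences the thread-group list via rcu_dereference(), so a lockless walk of a thread group must sit inside an RCU read-side critical section (or hold tasklist_lock). A usage sketch; inspect_thread() is a hypothetical per-thread hook, not a kernel API:

        extern void inspect_thread(struct task_struct *t);      /* hypothetical */

        static void for_each_thread_demo(struct task_struct *group_leader)
        {
                struct task_struct *t = group_leader;

                rcu_read_lock();
                do {
                        inspect_thread(t);
                        t = next_thread(t);
                } while (t != group_leader);
                rcu_read_unlock();
        }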
@@ -1225,7 +1296,7 @@ static inline int thread_group_empty(task_t *p)
 	(thread_group_leader(p) && !thread_group_empty(p))
 
 /*
- * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
+ * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
  * pins the final release of task.io_context.  Also protects ->cpuset.
  *
@@ -1401,6 +1472,11 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
 extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
 extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
 
+#include <linux/sysdev.h>
+extern int sched_mc_power_savings, sched_smt_power_savings;
+extern struct sysdev_attribute attr_sched_mc_power_savings, attr_sched_smt_power_savings;
+extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
+
 extern void normalize_rt_tasks(void);
 
 #ifdef CONFIG_PM
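The power-savings tunables are exposed through sysdev attributes. A sketch of the expected call site, an assumption consistent with how CPU sysdev attributes were registered in this era (cpu_sysdev_class is the class exported from drivers/base/cpu.c):

        /* Assumed boot-time registration against the CPU sysdev class. */
        static int __init sched_power_savings_init_demo(void)
        {
                return sched_create_sysfs_power_savings_entries(&cpu_sysdev_class);
        }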