Diffstat (limited to 'include/linux/sched.h')

 -rw-r--r--   include/linux/sched.h | 256
 1 file changed, 56 insertions, 200 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d35d2b6ddbfb..54ddcb82cddf 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -127,18 +127,6 @@ extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
 extern void proc_sched_set_task(struct task_struct *p);
 extern void
 print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
-#else
-static inline void
-proc_sched_show_task(struct task_struct *p, struct seq_file *m)
-{
-}
-static inline void proc_sched_set_task(struct task_struct *p)
-{
-}
-static inline void
-print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
-{
-}
 #endif
 
 /*
@@ -163,9 +151,10 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #define TASK_DEAD 64
 #define TASK_WAKEKILL 128
 #define TASK_WAKING 256
-#define TASK_STATE_MAX 512
+#define TASK_PARKED 512
+#define TASK_STATE_MAX 1024
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
+#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
 
 extern char ___assert_task_state[1 - 2*!!(
 		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
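The hunk above adds TASK_PARKED (512), bumps TASK_STATE_MAX to 1024, and appends 'P' to TASK_STATE_TO_CHAR_STR; the ___assert_task_state declaration is a compile-time check that the character string and the state mask stay in sync. A minimal stand-alone sketch of that negative-array-size trick (ILOG2_TASK_STATE_MAX is a hypothetical stand-in for the kernel's ilog2(), hard-coded here only to keep the example self-contained):

/* Sketch only: mirrors the kernel's assertion, not kernel code itself. */
#define TASK_STATE_MAX		1024
#define TASK_STATE_TO_CHAR_STR	"RSDTtZXxKWP"
#define ILOG2_TASK_STATE_MAX	10	/* stand-in for ilog2(TASK_STATE_MAX) */

/*
 * "RSDTtZXxKWP" has 11 characters and ilog2(1024) + 1 == 11, so the
 * comparison below is false and the array gets the legal size 1.  If the
 * string and TASK_STATE_MAX ever drift apart, the size becomes -1 and the
 * build fails.
 */
extern char assert_task_state_sketch[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ILOG2_TASK_STATE_MAX+1)];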
@@ -320,7 +309,6 @@ extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
 extern void schedule_preempt_disabled(void);
-extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
 
 struct nsproxy;
 struct user_namespace;
@@ -526,7 +514,8 @@ struct signal_struct {
 	unsigned int has_child_subreaper:1;
 
 	/* POSIX.1b Interval Timers */
+	int posix_timer_id;
 	struct list_head posix_timers;
 
 	/* ITIMER_REAL timer for the process */
 	struct hrtimer real_timer;
@@ -570,7 +559,7 @@ struct signal_struct {
 	cputime_t utime, stime, cutime, cstime;
 	cputime_t gtime;
 	cputime_t cgtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	struct cputime prev_cputime;
 #endif
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -768,31 +757,6 @@ enum cpu_idle_type {
 };
 
 /*
- * Increase resolution of nice-level calculations for 64-bit architectures.
- * The extra resolution improves shares distribution and load balancing of
- * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
- * hierarchies, especially on larger systems. This is not a user-visible change
- * and does not change the user-interface for setting shares/weights.
- *
- * We increase resolution only if we have enough bits to allow this increased
- * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
- * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
- * increased costs.
- */
-#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
-# define SCHED_LOAD_RESOLUTION 10
-# define scale_load(w) ((w) << SCHED_LOAD_RESOLUTION)
-# define scale_load_down(w) ((w) >> SCHED_LOAD_RESOLUTION)
-#else
-# define SCHED_LOAD_RESOLUTION 0
-# define scale_load(w) (w)
-# define scale_load_down(w) (w)
-#endif
-
-#define SCHED_LOAD_SHIFT (10 + SCHED_LOAD_RESOLUTION)
-#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
-
-/*
  * Increase resolution of cpu_power calculations
  */
 #define SCHED_POWER_SHIFT 10
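For context on the block removed above: scale_load() and scale_load_down() translated user-visible load weights to and from a higher-resolution internal representation. A small stand-alone sketch of the arithmetic, assuming the 64-bit resolution had been enabled; the nice-0 weight of 1024 is quoted only for illustration:

#include <stdio.h>

/* Illustrative copies of the removed macros, with the extra resolution on. */
#define SCHED_LOAD_RESOLUTION	10
#define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
#define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)

int main(void)
{
	unsigned long nice0 = 1024;		/* assumed nice-0 load weight */
	unsigned long internal = scale_load(nice0);

	/* The extra 10 bits exist only internally; scaling back down
	 * recovers the user-visible weight. */
	printf("user %lu -> internal %lu -> user %lu\n",
	       nice0, internal, scale_load_down(internal));
	return 0;
}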
@@ -817,62 +781,6 @@ enum cpu_idle_type {
 
 extern int __weak arch_sd_sibiling_asym_packing(void);
 
-struct sched_group_power {
-	atomic_t ref;
-	/*
-	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
-	 * single CPU.
-	 */
-	unsigned int power, power_orig;
-	unsigned long next_update;
-	/*
-	 * Number of busy cpus in this group.
-	 */
-	atomic_t nr_busy_cpus;
-
-	unsigned long cpumask[0]; /* iteration mask */
-};
-
-struct sched_group {
-	struct sched_group *next;	/* Must be a circular list */
-	atomic_t ref;
-
-	unsigned int group_weight;
-	struct sched_group_power *sgp;
-
-	/*
-	 * The CPUs this group covers.
-	 *
-	 * NOTE: this field is variable length. (Allocated dynamically
-	 * by attaching extra space to the end of the structure,
-	 * depending on how many CPUs the kernel has booted up with)
-	 */
-	unsigned long cpumask[0];
-};
-
-static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
-{
-	return to_cpumask(sg->cpumask);
-}
-
-/*
- * cpumask masking which cpus in the group are allowed to iterate up the domain
- * tree.
- */
-static inline struct cpumask *sched_group_mask(struct sched_group *sg)
-{
-	return to_cpumask(sg->sgp->cpumask);
-}
-
-/**
- * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
- * @group: The group whose first cpu is to be returned.
- */
-static inline unsigned int group_first_cpu(struct sched_group *group)
-{
-	return cpumask_first(sched_group_cpus(group));
-}
-
 struct sched_domain_attr {
 	int relax_domain_level;
 };
@@ -883,6 +791,8 @@ struct sched_domain_attr {
 
 extern int sched_domain_level_max;
 
+struct sched_group;
+
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent;	/* top domain must be null terminated */
@@ -899,6 +809,8 @@ struct sched_domain {
 	unsigned int wake_idx;
 	unsigned int forkexec_idx;
 	unsigned int smt_gain;
+
+	int nohz_idle;			/* NOHZ IDLE status */
 	int flags;			/* See SD_* */
 	int level;
 
@@ -971,18 +883,6 @@ extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
 
-/* Test a flag in parent sched domain */
-static inline int test_sd_parent(struct sched_domain *sd, int flag)
-{
-	if (sd->parent && (sd->parent->flags & flag))
-		return 1;
-
-	return 0;
-}
-
-unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
-unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
-
 bool cpus_share_cache(int this_cpu, int that_cpu);
 
 #else /* CONFIG_SMP */
@@ -1017,72 +917,6 @@ struct mempolicy;
 struct pipe_inode_info;
 struct uts_namespace;
 
-struct rq;
-struct sched_domain;
-
-/*
- * wake flags
- */
-#define WF_SYNC 0x01		/* waker goes to sleep after wakup */
-#define WF_FORK 0x02		/* child wakeup after fork */
-#define WF_MIGRATED 0x04	/* internal use, task got migrated */
-
-#define ENQUEUE_WAKEUP 1
-#define ENQUEUE_HEAD 2
-#ifdef CONFIG_SMP
-#define ENQUEUE_WAKING 4	/* sched_class::task_waking was called */
-#else
-#define ENQUEUE_WAKING 0
-#endif
-
-#define DEQUEUE_SLEEP 1
-
-struct sched_class {
-	const struct sched_class *next;
-
-	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
-	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
-	void (*yield_task) (struct rq *rq);
-	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
-
-	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
-
-	struct task_struct * (*pick_next_task) (struct rq *rq);
-	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
-
-#ifdef CONFIG_SMP
-	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
-	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
-
-	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
-	void (*post_schedule) (struct rq *this_rq);
-	void (*task_waking) (struct task_struct *task);
-	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
-
-	void (*set_cpus_allowed)(struct task_struct *p,
-				 const struct cpumask *newmask);
-
-	void (*rq_online)(struct rq *rq);
-	void (*rq_offline)(struct rq *rq);
-#endif
-
-	void (*set_curr_task) (struct rq *rq);
-	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
-	void (*task_fork) (struct task_struct *p);
-
-	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
-	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
-	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
-			      int oldprio);
-
-	unsigned int (*get_rr_interval) (struct rq *rq,
-					 struct task_struct *task);
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	void (*task_move_group) (struct task_struct *p, int on_rq);
-#endif
-};
-
 struct load_weight {
 	unsigned long weight, inv_weight;
 };
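The struct sched_class removed above is a function-pointer ops table: each scheduling class supplies its own enqueue/pick/put hooks, and classes are chained through ->next in priority order. A toy, self-contained illustration of the pattern follows; the names and behaviour are invented and do not correspond to any real kernel scheduling class:

#include <stdio.h>

/* Toy ops table in the spirit of the removed struct sched_class. */
struct toy_sched_class {
	const struct toy_sched_class *next;	/* classes form a priority-ordered list */
	void (*enqueue_task)(int task);
	int  (*pick_next_task)(void);
};

static void fifo_enqueue(int task)	{ printf("fifo: enqueue task %d\n", task); }
static int  fifo_pick(void)		{ return 7; }

static const struct toy_sched_class fifo_class = {
	.next		= NULL,
	.enqueue_task	= fifo_enqueue,
	.pick_next_task	= fifo_pick,
};

int main(void)
{
	fifo_class.enqueue_task(7);
	printf("fifo: next task %d\n", fifo_class.pick_next_task());
	return 0;
}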
@@ -1274,8 +1108,10 @@ struct task_struct {
 	int exit_code, exit_signal;
 	int pdeath_signal;	/* The signal sent when the parent dies */
 	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
-	/* ??? */
+
+	/* Used for emulating ABI behavior of previous Linux versions */
 	unsigned int personality;
+
 	unsigned did_exec:1;
 	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
 				 * execve */
@@ -1327,7 +1163,7 @@ struct task_struct {
 
 	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	struct cputime prev_cputime;
 #endif
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
@@ -1793,7 +1629,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 #define PF_SWAPWRITE 0x00800000		/* Allowed to write to swap */
 #define PF_SPREAD_PAGE 0x01000000	/* Spread page cache over cpuset */
 #define PF_SPREAD_SLAB 0x02000000	/* Spread some slab caches over cpuset */
-#define PF_THREAD_BOUND 0x04000000	/* Thread bound to specific cpu */
+#define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
 #define PF_MCE_EARLY 0x08000000		/* Early kill for mce process policy */
 #define PF_MEMPOLICY 0x10000000		/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER 0x20000000	/* Thread belongs to the rt mutex tester */
@@ -2622,6 +2458,47 @@ static inline int spin_needbreak(spinlock_t *lock)
 }
 
 /*
+ * Idle thread specific functions to determine the need_resched
+ * polling state. We have two versions, one based on TS_POLLING in
+ * thread_info.status and one based on TIF_POLLING_NRFLAG in
+ * thread_info.flags
+ */
+#ifdef TS_POLLING
+static inline int tsk_is_polling(struct task_struct *p)
+{
+	return task_thread_info(p)->status & TS_POLLING;
+}
+static inline void current_set_polling(void)
+{
+	current_thread_info()->status |= TS_POLLING;
+}
+
+static inline void current_clr_polling(void)
+{
+	current_thread_info()->status &= ~TS_POLLING;
+	smp_mb__after_clear_bit();
+}
+#elif defined(TIF_POLLING_NRFLAG)
+static inline int tsk_is_polling(struct task_struct *p)
+{
+	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
+}
+static inline void current_set_polling(void)
+{
+	set_thread_flag(TIF_POLLING_NRFLAG);
+}
+
+static inline void current_clr_polling(void)
+{
+	clear_thread_flag(TIF_POLLING_NRFLAG);
+}
+#else
+static inline int tsk_is_polling(struct task_struct *p) { return 0; }
+static inline void current_set_polling(void) { }
+static inline void current_clr_polling(void) { }
+#endif
+
+/*
  * Thread group CPU time accounting.
  */
 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
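The helpers added above let the idle task advertise that it is polling need_resched (via TS_POLLING or TIF_POLLING_NRFLAG), so a CPU performing a remote wakeup can skip the reschedule IPI when tsk_is_polling() reports true. A hedged sketch of the intended call pattern — not the kernel's actual idle loop, merely an illustration built from declarations in this header:

/* Illustrative idle loop (kernel-style sketch, not real kernel code). */
static void example_idle_loop(void)
{
	current_set_polling();		/* we will notice need_resched ourselves */

	while (!need_resched())
		cpu_relax();		/* poll instead of waiting for an IPI */

	current_clr_polling();		/* wakeups must send an IPI again */
	schedule_preempt_disabled();	/* hand the CPU to the woken task */
}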
@@ -2681,28 +2558,7 @@ extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
 #ifdef CONFIG_CGROUP_SCHED
-
 extern struct task_group root_task_group;
-
-extern struct task_group *sched_create_group(struct task_group *parent);
-extern void sched_online_group(struct task_group *tg,
-			       struct task_group *parent);
-extern void sched_destroy_group(struct task_group *tg);
-extern void sched_offline_group(struct task_group *tg);
-extern void sched_move_task(struct task_struct *tsk);
-#ifdef CONFIG_FAIR_GROUP_SCHED
-extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
-extern unsigned long sched_group_shares(struct task_group *tg);
-#endif
-#ifdef CONFIG_RT_GROUP_SCHED
-extern int sched_group_set_rt_runtime(struct task_group *tg,
-				      long rt_runtime_us);
-extern long sched_group_rt_runtime(struct task_group *tg);
-extern int sched_group_set_rt_period(struct task_group *tg,
-				     long rt_period_us);
-extern long sched_group_rt_period(struct task_group *tg);
-extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
-#endif
 #endif /* CONFIG_CGROUP_SCHED */
 
 extern int task_can_switch_user(struct user_struct *up,
