Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--   include/linux/sched.h | 195
1 file changed, 115 insertions(+), 80 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 55e30d114477..38a3f4b15394 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -96,6 +96,7 @@ struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
 struct bio;
+struct bts_tracer;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -249,7 +250,7 @@ extern void init_idle_bootup_task(struct task_struct *idle);
 extern int runqueue_is_locked(void);
 extern void task_rq_unlock_wait(struct task_struct *p);
 
-extern cpumask_t nohz_cpu_mask;
+extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
 #else
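
Note: nohz_cpu_mask changes from a fixed-size cpumask_t to cpumask_var_t, which is a pointer to struct cpumask when CONFIG_CPUMASK_OFFSTACK=y and therefore has to be allocated before use. A minimal sketch of the usual consumer pattern (the function below and its use of cpu_online_mask are illustrative assumptions, not part of this diff):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/sched.h>

static int example_count_tickless_cpus(void)
{
        cpumask_var_t tmp;

        /* cpumask_var_t may be off-stack storage; allocate it first */
        if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                return -ENOMEM;

        /* intersect the tickless mask with the currently online CPUs */
        cpumask_and(tmp, nohz_cpu_mask, cpu_online_mask);
        pr_info("%d online CPUs sit in nohz_cpu_mask\n", cpumask_weight(tmp));

        free_cpumask_var(tmp);
        return 0;
}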
@@ -259,8 +260,6 @@ static inline int select_nohz_load_balancer(int cpu)
 }
 #endif
 
-extern unsigned long rt_needs_cpu(int cpu);
-
 /*
  * Only dump TASK_* tasks. (0 for all tasks)
  */
@@ -285,7 +284,6 @@ long io_schedule_timeout(long timeout);
 
 extern void cpu_init (void);
 extern void trap_init(void);
-extern void account_process_tick(struct task_struct *task, int user);
 extern void update_process_times(int user);
 extern void scheduler_tick(void);
 
@@ -572,12 +570,6 @@ struct signal_struct {
         */
        struct rlimit rlim[RLIM_NLIMITS];
 
-       /* keep the process-shared keyrings here so that they do the right
-        * thing in threads created with CLONE_THREAD */
-#ifdef CONFIG_KEYS
-       struct key *session_keyring;    /* keyring inherited over fork */
-       struct key *process_keyring;    /* keyring private to this process */
-#endif
 #ifdef CONFIG_BSD_PROCESS_ACCT
        struct pacct_struct pacct;      /* per-process accounting information */
 #endif
@@ -648,6 +640,7 @@ struct user_struct {
        /* Hash table maintenance information */
        struct hlist_node uidhash_node;
        uid_t uid;
+       struct user_namespace *user_ns;
 
 #ifdef CONFIG_USER_SCHED
        struct task_group *tg;
@@ -665,6 +658,7 @@ extern struct user_struct *find_user(uid_t);
 extern struct user_struct root_user;
 #define INIT_USER (&root_user)
 
+
 struct backing_dev_info;
 struct reclaim_state;
 
@@ -672,8 +666,7 @@ struct reclaim_state;
 struct sched_info {
        /* cumulative counters */
        unsigned long pcount;           /* # of times run on this cpu */
-       unsigned long long cpu_time,    /* time spent on the cpu */
-                          run_delay;   /* time spent waiting on a runqueue */
+       unsigned long long run_delay;   /* time spent waiting on a runqueue */
 
        /* timestamps */
        unsigned long long last_arrival,/* when we last ran on a cpu */
@@ -764,20 +757,51 @@ enum cpu_idle_type {
 #define SD_SERIALIZE           1024    /* Only a single load balancing instance */
 #define SD_WAKE_IDLE_FAR       2048    /* Gain latency sacrificing cache hit */
 
-#define BALANCE_FOR_MC_POWER   \
-       (sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
+enum powersavings_balance_level {
+       POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
+       POWERSAVINGS_BALANCE_BASIC,     /* Fill one thread/core/package
+                                        * first for long running threads
+                                        */
+       POWERSAVINGS_BALANCE_WAKEUP,    /* Also bias task wakeups to semi-idle
+                                        * cpu package for power savings
+                                        */
+       MAX_POWERSAVINGS_BALANCE_LEVELS
+};
+
+extern int sched_mc_power_savings, sched_smt_power_savings;
+
+static inline int sd_balance_for_mc_power(void)
+{
+       if (sched_smt_power_savings)
+               return SD_POWERSAVINGS_BALANCE;
 
-#define BALANCE_FOR_PKG_POWER  \
-       ((sched_mc_power_savings || sched_smt_power_savings) ? \
-        SD_POWERSAVINGS_BALANCE : 0)
+       return 0;
+}
 
-#define test_sd_parent(sd, flag) ((sd->parent &&       \
-                                        (sd->parent->flags & flag)) ? 1 : 0)
+static inline int sd_balance_for_package_power(void)
+{
+       if (sched_mc_power_savings | sched_smt_power_savings)
+               return SD_POWERSAVINGS_BALANCE;
 
+       return 0;
+}
+
+/*
+ * Optimise SD flags for power savings:
+ * SD_BALANCE_NEWIDLE helps agressive task consolidation and power savings.
+ * Keep default SD flags if sched_{smt,mc}_power_saving=0
+ */
+
+static inline int sd_power_saving_flags(void)
+{
+       if (sched_mc_power_savings | sched_smt_power_savings)
+               return SD_BALANCE_NEWIDLE;
+
+       return 0;
+}
 
 struct sched_group {
        struct sched_group *next;       /* Must be a circular list */
-       cpumask_t cpumask;
 
        /*
         * CPU power of this group, SCHED_LOAD_SCALE being max power for a
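
Note: the BALANCE_FOR_MC_POWER and BALANCE_FOR_PKG_POWER macros become typed inline helpers, and the power-savings policy gains an explicit enum. A rough sketch of how a topology-level domain initializer could fold the helpers into its flag word (the function name and the zero base value are assumptions for illustration, not taken from this diff):

/* Illustrative only: compose SD_* flags for a multi-core level domain. */
static inline unsigned long example_mc_domain_flags(void)
{
        unsigned long flags = 0;

        flags |= sd_balance_for_mc_power();     /* SD_POWERSAVINGS_BALANCE or 0 */
        flags |= sd_power_saving_flags();       /* SD_BALANCE_NEWIDLE or 0 */

        return flags;
}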
@@ -790,8 +814,15 @@ struct sched_group {
         * (see include/linux/reciprocal_div.h)
         */
        u32 reciprocal_cpu_power;
+
+       unsigned long cpumask[];
 };
 
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+       return to_cpumask(sg->cpumask);
+}
+
 enum sched_domain_level {
        SD_LV_NONE = 0,
        SD_LV_SIBLING,
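
Note: with the fixed cpumask_t member removed, a group's CPUs live in a trailing flexible array and are reached through sched_group_cpus(). A short usage sketch (the counting function is hypothetical; for_each_cpu() and cpu_online() are the standard cpumask helpers):

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Illustrative only: count how many CPUs of a sched_group are online. */
static int example_group_online_cpus(struct sched_group *sg)
{
        int cpu, n = 0;

        for_each_cpu(cpu, sched_group_cpus(sg))
                if (cpu_online(cpu))
                        n++;

        return n;
}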
@@ -815,7 +846,6 @@ struct sched_domain {
        struct sched_domain *parent;    /* top domain must be null terminated */
        struct sched_domain *child;     /* bottom domain must be null terminated */
        struct sched_group *groups;     /* the balancing groups of the domain */
-       cpumask_t span;                 /* span of all CPUs in this domain */
        unsigned long min_interval;     /* Minimum balance interval ms */
        unsigned long max_interval;     /* Maximum balance interval ms */
        unsigned int busy_factor;       /* less balancing by factor if busy */
@@ -870,56 +900,42 @@
 #ifdef CONFIG_SCHED_DEBUG
        char *name;
 #endif
+
+       /* span of all CPUs in this domain */
+       unsigned long span[];
 };
 
-extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
+{
+       return to_cpumask(sd->span);
+}
+
+extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
                                    struct sched_domain_attr *dattr_new);
 extern int arch_reinit_sched_domains(void);
 
+/* Test a flag in parent sched domain */
+static inline int test_sd_parent(struct sched_domain *sd, int flag)
+{
+       if (sd->parent && (sd->parent->flags & flag))
+               return 1;
+
+       return 0;
+}
+
 #else /* CONFIG_SMP */
 
 struct sched_domain_attr;
 
 static inline void
-partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
                        struct sched_domain_attr *dattr_new)
 {
 }
 #endif /* !CONFIG_SMP */
 
 struct io_context;                     /* See blkdev.h */
-#define NGROUPS_SMALL          32
-#define NGROUPS_PER_BLOCK      ((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
-struct group_info {
-       int ngroups;
-       atomic_t usage;
-       gid_t small_block[NGROUPS_SMALL];
-       int nblocks;
-       gid_t *blocks[0];
-};
-
-/*
- * get_group_info() must be called with the owning task locked (via task_lock())
- * when task != current. The reason being that the vast majority of callers are
- * looking at current->group_info, which can not be changed except by the
- * current task. Changing current->group_info requires the task lock, too.
- */
-#define get_group_info(group_info) do { \
-       atomic_inc(&(group_info)->usage); \
-} while (0)
-
-#define put_group_info(group_info) do { \
-       if (atomic_dec_and_test(&(group_info)->usage)) \
-               groups_free(group_info); \
-} while (0)
 
-extern struct group_info *groups_alloc(int gidsetsize);
-extern void groups_free(struct group_info *group_info);
-extern int set_current_groups(struct group_info *group_info);
-extern int groups_search(struct group_info *group_info, gid_t grp);
-/* access the groups "array" with this macro */
-#define GROUP_AT(gi, i) \
-       ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
 
 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
 extern void prefetch_stack(struct task_struct *t);
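
Note: sched_domain likewise stores its span as a trailing array behind sched_domain_span(), and test_sd_parent() turns from a macro into an inline. A sketch of walking the domain hierarchy with the new accessors (the walker is hypothetical and locking/RCU details are omitted):

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/sched.h>

/* Illustrative only: report the span width at each level above a base domain. */
static void example_walk_domains(struct sched_domain *sd)
{
        for (; sd; sd = sd->parent)
                pr_info("level %d spans %d CPUs (parent powersave: %d)\n",
                        sd->level, cpumask_weight(sched_domain_span(sd)),
                        test_sd_parent(sd, SD_POWERSAVINGS_BALANCE));
}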
@@ -963,7 +979,7 @@ struct sched_class {
        void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 
        void (*set_cpus_allowed)(struct task_struct *p,
-                                const cpumask_t *newmask);
+                                const struct cpumask *newmask);
 
        void (*rq_online)(struct rq *rq);
        void (*rq_offline)(struct rq *rq);
@@ -1165,6 +1181,19 @@ struct task_struct {
        struct list_head ptraced;
        struct list_head ptrace_entry;
 
+#ifdef CONFIG_X86_PTRACE_BTS
+       /*
+        * This is the tracer handle for the ptrace BTS extension.
+        * This field actually belongs to the ptracer task.
+        */
+       struct bts_tracer *bts;
+       /*
+        * The buffer to hold the BTS data.
+        */
+       void *bts_buffer;
+       size_t bts_size;
+#endif /* CONFIG_X86_PTRACE_BTS */
+
        /* PID/PID hash table linkage. */
        struct pid_link pids[PIDTYPE_MAX];
        struct list_head thread_group;
@@ -1186,17 +1215,12 @@ struct task_struct {
        struct list_head cpu_timers[3];
 
 /* process credentials */
-       uid_t uid,euid,suid,fsuid;
-       gid_t gid,egid,sgid,fsgid;
-       struct group_info *group_info;
-       kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset;
-       struct user_struct *user;
-       unsigned securebits;
-#ifdef CONFIG_KEYS
-       unsigned char jit_keyring;      /* default keyring to attach requested keys to */
-       struct key *request_key_auth;   /* assumed request_key authority */
-       struct key *thread_keyring;     /* keyring private to this thread */
-#endif
+       const struct cred *real_cred;   /* objective and real subjective task
+                                        * credentials (COW) */
+       const struct cred *cred;        /* effective (overridable) subjective task
+                                        * credentials (COW) */
+       struct mutex cred_exec_mutex;   /* execve vs ptrace cred calculation mutex */
+
        char comm[TASK_COMM_LEN];       /* executable name excluding path
                                           - access with [gs]et_task_comm (which lock
                                             it with task_lock())
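
Note: the per-task uid/gid/keyring/capability fields are replaced by two copy-on-write struct cred pointers. A sketch of how other code would read a task's IDs after this change, assuming the __task_cred() accessor and the struct cred layout from the linux/cred.h introduced in the same series:

#include <linux/cred.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Illustrative only: fetch another task's real uid via its objective creds. */
static uid_t example_task_uid(struct task_struct *task)
{
        const struct cred *cred;
        uid_t uid;

        rcu_read_lock();
        cred = __task_cred(task);       /* objective credentials, RCU-protected */
        uid = cred->uid;
        rcu_read_unlock();

        return uid;
}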
@@ -1233,9 +1257,6 @@ struct task_struct {
        int (*notifier)(void *priv);
        void *notifier_data;
        sigset_t *notifier_mask;
-#ifdef CONFIG_SECURITY
-       void *security;
-#endif
        struct audit_context *audit_context;
 #ifdef CONFIG_AUDITSYSCALL
        uid_t loginuid;
@@ -1356,6 +1377,23 @@ struct task_struct {
        unsigned long default_timer_slack_ns;
 
        struct list_head *scm_work_list;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       /* Index of current stored adress in ret_stack */
+       int curr_ret_stack;
+       /* Stack of return addresses for return function tracing */
+       struct ftrace_ret_stack *ret_stack;
+       /*
+        * Number of functions that haven't been traced
+        * because of depth overrun.
+        */
+       atomic_t trace_overrun;
+       /* Pause for the tracing */
+       atomic_t tracing_graph_pause;
+#endif
+#ifdef CONFIG_TRACING
+       /* state flags for use by tracers */
+       unsigned long trace;
+#endif
 };
 
 /*
@@ -1594,12 +1632,12 @@ extern cputime_t task_gtime(struct task_struct *p);
 
 #ifdef CONFIG_SMP
 extern int set_cpus_allowed_ptr(struct task_struct *p,
-                               const cpumask_t *new_mask);
+                               const struct cpumask *new_mask);
 #else
 static inline int set_cpus_allowed_ptr(struct task_struct *p,
-                                      const cpumask_t *new_mask)
+                                      const struct cpumask *new_mask)
 {
-       if (!cpu_isset(0, *new_mask))
+       if (!cpumask_test_cpu(0, new_mask))
                return -EINVAL;
        return 0;
 }
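
Note: set_cpus_allowed_ptr() now takes const struct cpumask * and the UP stub tests bits with cpumask_test_cpu(). A short sketch of pinning a task to one CPU through this interface (the wrapper is hypothetical; cpumask_of() yields the constant one-CPU mask):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/sched.h>

/* Illustrative only: restrict a task to a single online CPU. */
static int example_pin_task_to_cpu(struct task_struct *p, int cpu)
{
        if (!cpu_online(cpu))
                return -EINVAL;

        return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}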
@@ -1775,7 +1813,6 @@ static inline struct user_struct *get_uid(struct user_struct *u)
        return u;
 }
 extern void free_uid(struct user_struct *);
-extern void switch_uid(struct user_struct *);
 extern void release_uids(struct user_namespace *ns);
 
 #include <asm/current.h>
@@ -1794,9 +1831,6 @@ extern void wake_up_new_task(struct task_struct *tsk,
 extern void sched_fork(struct task_struct *p, int clone_flags);
 extern void sched_dead(struct task_struct *p);
 
-extern int in_group_p(gid_t);
-extern int in_egroup_p(gid_t);
-
 extern void proc_caches_init(void);
 extern void flush_signals(struct task_struct *);
 extern void ignore_signals(struct task_struct *);
@@ -1928,6 +1962,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
 #define for_each_process(p) \
        for (p = &init_task ; (p = next_task(p)) != &init_task ; )
 
+extern bool is_single_threaded(struct task_struct *);
+
 /*
  * Careful: do_each_thread/while_each_thread is a double loop so
  * 'break' will not work as expected - use goto instead.
@@ -2212,10 +2248,8 @@ __trace_special(void *__tr, void *__data,
 }
 #endif
 
-extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
-extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
-
-extern int sched_mc_power_savings, sched_smt_power_savings;
+extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
 extern void normalize_rt_tasks(void);
 
@@ -2224,6 +2258,7 @@ extern void normalize_rt_tasks(void);
 extern struct task_group init_task_group;
 #ifdef CONFIG_USER_SCHED
 extern struct task_group root_task_group;
+extern void set_tg_uid(struct user_struct *user);
 #endif
 
 extern struct task_group *sched_create_group(struct task_group *parent);