Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  233
1 file changed, 110 insertions(+), 123 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 724814191fe9..225347d97d47 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -100,7 +100,7 @@ struct sched_param {
 struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
-struct bio;
+struct bio_list;
 struct fs_struct;
 struct bts_context;
 struct perf_event_context;
@@ -148,7 +148,6 @@ extern unsigned long this_cpu_load(void);
 
 
 extern void calc_global_load(void);
-extern u64 cpu_nr_migrations(int cpu);
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
@@ -174,8 +173,6 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 }
 #endif
 
-extern unsigned long long time_sync_thresh;
-
 /*
  * Task state bitmask. NOTE! These bits are also
  * encoded in fs/proc/array.c: get_task_state().
@@ -198,6 +195,12 @@ extern unsigned long long time_sync_thresh;
 #define TASK_DEAD 64
 #define TASK_WAKEKILL 128
 #define TASK_WAKING 256
+#define TASK_STATE_MAX 512
+
+#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
+
+extern char ___assert_task_state[1 - 2*!!(
+                sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
 
 /* Convenience macros for the sake of set_task_state */
 #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
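The ___assert_task_state declaration added above is a build-time assertion: the array size evaluates to 1 when the character count of TASK_STATE_TO_CHAR_STR matches the number of state bits, and to -1 (an illegal size) when they disagree. A minimal sketch of the trick with the concrete values, illustrative only:

        /* sizeof("RSDTtZXxKW") - 1 == 10 and ilog2(512) + 1 == 10, so the
         * array size is 1 - 2*!!(10 != 10) == 1 and the build succeeds.
         * Add a state bit without extending the string and the size
         * becomes 1 - 2*!!(10 != 11) == -1, a hard compile error. */
        extern char state_str_matches[1 - 2*!!(10 != 10)];      /* ok */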
@@ -258,6 +261,10 @@ extern spinlock_t mmlist_lock;
 
 struct task_struct;
 
+#ifdef CONFIG_PROVE_RCU
+extern int lockdep_tasklist_lock_is_held(void);
+#endif /* #ifdef CONFIG_PROVE_RCU */
+
 extern void sched_init(void);
 extern void sched_init_smp(void);
 extern asmlinkage void schedule_tail(struct task_struct *prev);
@@ -310,6 +317,7 @@ extern void sched_show_task(struct task_struct *p);
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 extern void softlockup_tick(void);
 extern void touch_softlockup_watchdog(void);
+extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
 extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
                                     void __user *buffer,
@@ -323,6 +331,9 @@ static inline void softlockup_tick(void)
 static inline void touch_softlockup_watchdog(void)
 {
 }
+static inline void touch_softlockup_watchdog_sync(void)
+{
+}
 static inline void touch_all_softlockup_watchdogs(void)
 {
 }
@@ -352,7 +363,6 @@ extern signed long schedule_timeout(signed long timeout);
 extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
-asmlinkage void __schedule(void);
 asmlinkage void schedule(void);
 extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
 
@@ -378,6 +388,8 @@ extern int sysctl_max_map_count;
 
 #include <linux/aio.h>
 
+#ifdef CONFIG_MMU
+extern void arch_pick_mmap_layout(struct mm_struct *mm);
 extern unsigned long
 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
                        unsigned long, unsigned long);
@@ -387,61 +399,10 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
                                unsigned long flags);
 extern void arch_unmap_area(struct mm_struct *, unsigned long);
 extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
+#else
+static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
+#endif
 
-#if USE_SPLIT_PTLOCKS
-/*
- * The mm counters are not protected by its page_table_lock,
- * so must be incremented atomically.
- */
-#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
-#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
-#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
-#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
-#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
-
-#else /* !USE_SPLIT_PTLOCKS */
-/*
- * The mm counters are protected by its page_table_lock,
- * so can be incremented directly.
- */
-#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
-#define get_mm_counter(mm, member) ((mm)->_##member)
-#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
-#define inc_mm_counter(mm, member) (mm)->_##member++
-#define dec_mm_counter(mm, member) (mm)->_##member--
-
-#endif /* !USE_SPLIT_PTLOCKS */
-
-#define get_mm_rss(mm) \
-        (get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
-#define update_hiwater_rss(mm) do { \
-        unsigned long _rss = get_mm_rss(mm); \
-        if ((mm)->hiwater_rss < _rss) \
-                (mm)->hiwater_rss = _rss; \
-} while (0)
-#define update_hiwater_vm(mm) do { \
-        if ((mm)->hiwater_vm < (mm)->total_vm) \
-                (mm)->hiwater_vm = (mm)->total_vm; \
-} while (0)
-
-static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
-{
-        return max(mm->hiwater_rss, get_mm_rss(mm));
-}
-
-static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
-                                         struct mm_struct *mm)
-{
-        unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
-
-        if (*maxrss < hiwater_rss)
-                *maxrss = hiwater_rss;
-}
-
-static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
-{
-        return max(mm->hiwater_vm, mm->total_vm);
-}
-
 
 extern void set_dumpable(struct mm_struct *mm, int value);
 extern int get_dumpable(struct mm_struct *mm);
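The large deleted block does not retire the RSS accounting interface; in this series the mm counters appear to move out of sched.h into the mm headers, alongside the SPLIT_RSS_COUNTING plumbing visible further down in this diff. For reference, the call pattern the removed helpers supported, sketched with the same names:

        /* Sketch of a typical caller of the removed helpers: read the
         * resident set size (file-backed plus anonymous pages) and pull
         * the high-water mark up to match.  Same interface, now declared
         * outside this header. */
        static unsigned long sample_rss(struct mm_struct *mm)
        {
                unsigned long rss = get_mm_rss(mm); /* file_rss + anon_rss */

                update_hiwater_rss(mm);
                return rss;
        }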
@@ -631,6 +592,9 @@ struct signal_struct {
         cputime_t utime, stime, cutime, cstime;
         cputime_t gtime;
         cputime_t cgtime;
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+        cputime_t prev_utime, prev_stime;
+#endif
         unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
         unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
         unsigned long inblock, oublock, cinblock, coublock;
@@ -729,14 +693,6 @@ struct user_struct {
         uid_t uid;
         struct user_namespace *user_ns;
 
-#ifdef CONFIG_USER_SCHED
-        struct task_group *tg;
-#ifdef CONFIG_SYSFS
-        struct kobject kobj;
-        struct delayed_work work;
-#endif
-#endif
-
 #ifdef CONFIG_PERF_EVENTS
         atomic_long_t locked_vm;
 #endif
@@ -867,7 +823,10 @@ static inline int sd_balance_for_mc_power(void)
         if (sched_smt_power_savings)
                 return SD_POWERSAVINGS_BALANCE;
 
-        return SD_PREFER_SIBLING;
+        if (!sched_mc_power_savings)
+                return SD_PREFER_SIBLING;
+
+        return 0;
 }
 
 static inline int sd_balance_for_package_power(void)
@@ -1016,9 +975,13 @@ static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
         return to_cpumask(sd->span);
 }
 
-extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
+extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
                                     struct sched_domain_attr *dattr_new);
 
+/* Allocate an array of sched domains, for partition_sched_domains(). */
+cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
+void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
+
 /* Test a flag in parent sched domain */
 static inline int test_sd_parent(struct sched_domain *sd, int flag)
 {
@@ -1036,7 +999,7 @@ unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
 struct sched_domain_attr;
 
 static inline void
-partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
+partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
                         struct sched_domain_attr *dattr_new)
 {
 }
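Both declarations of partition_sched_domains() now take a cpumask_var_t array, matching the allocator pair introduced above. A hedged sketch of the intended call sequence; error handling and the exact ownership rules are not visible in this header:

        /* Illustrative only: build a single domain spanning the active
         * CPUs and hand it to the scheduler.  free_sched_domains() is
         * the counterpart for masks the scheduler does not keep. */
        cpumask_var_t *doms = alloc_sched_domains(1);

        if (doms) {
                cpumask_copy(doms[0], cpu_active_mask);
                partition_sched_domains(1, doms, NULL);
        }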
@@ -1069,7 +1032,8 @@ struct sched_domain;
 struct sched_class {
         const struct sched_class *next;
 
-        void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
+        void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
+                              bool head);
         void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
         void (*yield_task) (struct rq *rq);
 
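The new bool head argument lets a caller place the task at the head of its run list instead of the tail. An illustrative call site; rq, p, and wakeup here are placeholders:

        /* Requeue at the head rather than the tail (illustrative). */
        p->sched_class->enqueue_task(rq, p, wakeup, true);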
@@ -1081,17 +1045,10 @@ struct sched_class {
 #ifdef CONFIG_SMP
         int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 
-        unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
-                        struct rq *busiest, unsigned long max_load_move,
-                        struct sched_domain *sd, enum cpu_idle_type idle,
-                        int *all_pinned, int *this_best_prio);
-
-        int (*move_one_task) (struct rq *this_rq, int this_cpu,
-                              struct rq *busiest, struct sched_domain *sd,
-                              enum cpu_idle_type idle);
         void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
         void (*post_schedule) (struct rq *this_rq);
-        void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
+        void (*task_waking) (struct rq *this_rq, struct task_struct *task);
+        void (*task_woken) (struct rq *this_rq, struct task_struct *task);
 
         void (*set_cpus_allowed)(struct task_struct *p,
                                  const struct cpumask *newmask);
@@ -1102,7 +1059,7 @@
 
         void (*set_curr_task) (struct rq *rq);
         void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
-        void (*task_new) (struct rq *rq, struct task_struct *p);
+        void (*task_fork) (struct task_struct *p);
 
         void (*switched_from) (struct rq *this_rq, struct task_struct *task,
                                int running);
@@ -1111,10 +1068,11 @@
         void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
                               int oldprio, int running);
 
-        unsigned int (*get_rr_interval) (struct task_struct *task);
+        unsigned int (*get_rr_interval) (struct rq *rq,
+                                         struct task_struct *task);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-        void (*moved_group) (struct task_struct *p);
+        void (*moved_group) (struct task_struct *p, int on_rq);
 #endif
 };
 
@@ -1151,8 +1109,6 @@ struct sched_entity {
         u64 start_runtime;
         u64 avg_wakeup;
 
-        u64 avg_running;
-
 #ifdef CONFIG_SCHEDSTATS
         u64 wait_start;
         u64 wait_max;
@@ -1175,7 +1131,6 @@
         u64 nr_failed_migrations_running;
         u64 nr_failed_migrations_hot;
         u64 nr_forced_migrations;
-        u64 nr_forced2_migrations;
 
         u64 nr_wakeups;
         u64 nr_wakeups_sync;
@@ -1273,7 +1228,9 @@
         struct plist_node pushable_tasks;
 
         struct mm_struct *mm, *active_mm;
-
+#if defined(SPLIT_RSS_COUNTING)
+        struct task_rss_stat rss_stat;
+#endif
 /* task state */
         int exit_state;
         int exit_code, exit_signal;
@@ -1335,7 +1292,9 @@
 
         cputime_t utime, stime, utimescaled, stimescaled;
         cputime_t gtime;
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
         cputime_t prev_utime, prev_stime;
+#endif
         unsigned long nvcsw, nivcsw; /* context switch counts */
         struct timespec start_time; /* monotonic time */
         struct timespec real_start_time; /* boot based time */
@@ -1358,7 +1317,7 @@
         char comm[TASK_COMM_LEN]; /* executable name excluding path
                                      - access with [gs]et_task_comm (which lock
                                        it with task_lock())
-                                     - initialized normally by flush_old_exec */
+                                     - initialized normally by setup_new_exec */
 /* file system info */
         int link_count, total_link_count;
 #ifdef CONFIG_SYSVIPC
@@ -1410,7 +1369,7 @@
 #endif
 
         /* Protection of the PI data structures: */
-        spinlock_t pi_lock;
+        raw_spinlock_t pi_lock;
 
 #ifdef CONFIG_RT_MUTEXES
         /* PI waiters blocked on a rt_mutex held by this task */
@@ -1425,17 +1384,17 @@
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
         unsigned int irq_events;
-        int hardirqs_enabled;
         unsigned long hardirq_enable_ip;
-        unsigned int hardirq_enable_event;
         unsigned long hardirq_disable_ip;
+        unsigned int hardirq_enable_event;
         unsigned int hardirq_disable_event;
-        int softirqs_enabled;
+        int hardirqs_enabled;
+        int hardirq_context;
         unsigned long softirq_disable_ip;
-        unsigned int softirq_disable_event;
         unsigned long softirq_enable_ip;
+        unsigned int softirq_disable_event;
         unsigned int softirq_enable_event;
-        int hardirq_context;
+        int softirqs_enabled;
         int softirq_context;
 #endif
 #ifdef CONFIG_LOCKDEP
@@ -1451,7 +1410,7 @@
         void *journal_info;
 
 /* stacked block device info */
-        struct bio *bio_list, **bio_tail;
+        struct bio_list *bio_list;
 
 /* VM state */
         struct reclaim_state *reclaim_state;
@@ -1529,7 +1488,7 @@
 
         struct list_head *scm_work_list;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-        /* Index of current stored adress in ret_stack */
+        /* Index of current stored address in ret_stack */
         int curr_ret_stack;
         /* Stack of return addresses for return function tracing */
         struct ftrace_ret_stack *ret_stack;
@@ -1549,11 +1508,18 @@
         /* bitmask of trace recursion */
         unsigned long trace_recursion;
 #endif /* CONFIG_TRACING */
-        unsigned long stack_start;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
+        struct memcg_batch_info {
+                int do_batch;   /* incremented when batch uncharge started */
+                struct mem_cgroup *memcg; /* target memcg of uncharge */
+                unsigned long bytes;      /* uncharged usage */
+                unsigned long memsw_bytes; /* uncharged mem+swap usage */
+        } memcg_batch;
+#endif
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
-#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
+#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
 
 /*
  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
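tsk_cpumask() is renamed to tsk_cpus_allowed() with unchanged semantics, so callers only need the new spelling:

        /* Copy a task's affinity mask via the renamed accessor. */
        cpumask_copy(mask, tsk_cpus_allowed(p));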
@@ -1731,9 +1697,8 @@ static inline void put_task_struct(struct task_struct *t)
                 __put_task_struct(t);
 }
 
-extern cputime_t task_utime(struct task_struct *p);
-extern cputime_t task_stime(struct task_struct *p);
-extern cputime_t task_gtime(struct task_struct *p);
+extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
+extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
 
 /*
  * Per process flags
@@ -1847,7 +1812,8 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 extern int sched_clock_stable;
 #endif
 
-extern unsigned long long sched_clock(void);
+/* ftrace calls sched_clock() directly */
+extern unsigned long long notrace sched_clock(void);
 
 extern void sched_clock_init(void);
 extern u64 sched_clock_cpu(int cpu);
@@ -1910,14 +1876,22 @@ extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_shares_ratelimit;
 extern unsigned int sysctl_sched_shares_thresh;
 extern unsigned int sysctl_sched_child_runs_first;
+
+enum sched_tunable_scaling {
+        SCHED_TUNABLESCALING_NONE,
+        SCHED_TUNABLESCALING_LOG,
+        SCHED_TUNABLESCALING_LINEAR,
+        SCHED_TUNABLESCALING_END,
+};
+extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
+
 #ifdef CONFIG_SCHED_DEBUG
-extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
 extern unsigned int sysctl_sched_time_avg;
 extern unsigned int sysctl_timer_migration;
 
-int sched_nr_latency_handler(struct ctl_table *table, int write,
+int sched_proc_update_handler(struct ctl_table *table, int write,
                 void __user *buffer, size_t *length,
                 loff_t *ppos);
 #endif
@@ -2073,7 +2047,6 @@ extern int kill_proc_info(int, struct siginfo *, pid_t);
 extern int do_notify_parent(struct task_struct *, int);
 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
 extern void force_sig(int, struct task_struct *);
-extern void force_sig_specific(int, struct task_struct *);
 extern int send_sig(int, struct task_struct *, int);
 extern void zap_other_threads(struct task_struct *p);
 extern struct sigqueue *sigqueue_alloc(void);
@@ -2092,16 +2065,18 @@ static inline int kill_cad_pid(int sig, int priv)
 #define SEND_SIG_PRIV ((struct siginfo *) 1)
 #define SEND_SIG_FORCED ((struct siginfo *) 2)
 
-static inline int is_si_special(const struct siginfo *info)
-{
-        return info <= SEND_SIG_FORCED;
-}
-
-/* True if we are on the alternate signal stack. */
-
+/*
+ * True if we are on the alternate signal stack.
+ */
 static inline int on_sig_stack(unsigned long sp)
 {
-        return (sp - current->sas_ss_sp < current->sas_ss_size);
+#ifdef CONFIG_STACK_GROWSUP
+        return sp >= current->sas_ss_sp &&
+                sp - current->sas_ss_sp < current->sas_ss_size;
+#else
+        return sp > current->sas_ss_sp &&
+                sp - current->sas_ss_sp <= current->sas_ss_size;
+#endif
 }
 
 static inline int sas_ss_flags(unsigned long sp)
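The rewritten on_sig_stack() distinguishes stack growth direction and tightens the boundary cases. Worked values for the grows-down branch, illustrative only:

        /* Alternate stack at 0x1000, size 0x1000 (illustrative values).
         * sp == 0x2000, the initial altstack pointer, must count as on
         * the stack; sp == 0x1000, one below the usable range, must not. */
        unsigned long ss_sp = 0x1000, ss_size = 0x1000;
        int on_top  = (0x2000 > ss_sp) && (0x2000 - ss_sp <= ss_size); /* 1 */
        int on_base = (0x1000 > ss_sp) && (0x1000 - ss_sp <= ss_size); /* 0 */
        /* The old single comparison (sp - ss_sp < ss_size) judged both
         * of these values the other way around. */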
@@ -2430,9 +2405,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
 
 static inline void thread_group_cputime_init(struct signal_struct *sig)
 {
-        sig->cputimer.cputime = INIT_CPUTIME;
         spin_lock_init(&sig->cputimer.lock);
-        sig->cputimer.running = 0;
 }
 
 static inline void thread_group_cputime_free(struct signal_struct *sig)
@@ -2475,8 +2448,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 #endif /* CONFIG_SMP */
 
-extern void arch_pick_mmap_layout(struct mm_struct *mm);
-
 #ifdef CONFIG_TRACING
 extern void
 __trace_special(void *__tr, void *__data,
@@ -2494,13 +2465,9 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
 extern void normalize_rt_tasks(void);
 
-#ifdef CONFIG_GROUP_SCHED
+#ifdef CONFIG_CGROUP_SCHED
 
 extern struct task_group init_task_group;
-#ifdef CONFIG_USER_SCHED
-extern struct task_group root_task_group;
-extern void set_tg_uid(struct user_struct *user);
-#endif
 
 extern struct task_group *sched_create_group(struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);
@@ -2585,7 +2552,27 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 }
 #endif /* CONFIG_MM_OWNER */
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
+static inline unsigned long task_rlimit(const struct task_struct *tsk,
+                unsigned int limit)
+{
+        return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
+}
+
+static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
+                unsigned int limit)
+{
+        return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
+}
+
+static inline unsigned long rlimit(unsigned int limit)
+{
+        return task_rlimit(current, limit);
+}
+
+static inline unsigned long rlimit_max(unsigned int limit)
+{
+        return task_rlimit_max(current, limit);
+}
 
 #endif /* __KERNEL__ */
 
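The new rlimit helpers replace open-coded current->signal->rlim[...] dereferences, and ACCESS_ONCE() ensures the limit is read exactly once even if another thread updates it concurrently. Typical usage, sketched:

        /* Read the current task's file-descriptor limits (sketch). */
        unsigned long soft = rlimit(RLIMIT_NOFILE);     /* rlim_cur */
        unsigned long hard = rlimit_max(RLIMIT_NOFILE); /* rlim_max */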
