path: root/include/linux/sched.h
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	100
1 file changed, 17 insertions(+), 83 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 78efe7c485ac..dad7f668ebf7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -97,7 +97,7 @@ struct sched_param {
 struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
-struct bio;
+struct bio_list;
 struct fs_struct;
 struct bts_context;
 struct perf_event_context;
@@ -258,6 +258,10 @@ extern spinlock_t mmlist_lock;
 
 struct task_struct;
 
+#ifdef CONFIG_PROVE_RCU
+extern int lockdep_tasklist_lock_is_held(void);
+#endif /* #ifdef CONFIG_PROVE_RCU */
+
 extern void sched_init(void);
 extern void sched_init_smp(void);
 extern asmlinkage void schedule_tail(struct task_struct *prev);
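The helper declared above lets RCU-protected task-list traversals teach lockdep that holding tasklist_lock is as good as being in an RCU read-side critical section. A minimal sketch of the consuming pattern under CONFIG_PROVE_RCU (the wrapper function below is illustrative, not part of this patch):

	/* Illustrative only: rcu_dereference_check() warns via lockdep
	 * unless one of the given conditions holds. */
	static inline struct task_struct *sketch_parent(struct task_struct *p)
	{
		return rcu_dereference_check(p->real_parent,
					     rcu_read_lock_held() ||
					     lockdep_tasklist_lock_is_held());
	}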
@@ -396,60 +400,6 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
 #endif
 
-#if USE_SPLIT_PTLOCKS
-/*
- * The mm counters are not protected by its page_table_lock,
- * so must be incremented atomically.
- */
-#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
-#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
-#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
-#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
-#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
-
-#else /* !USE_SPLIT_PTLOCKS */
-/*
- * The mm counters are protected by its page_table_lock,
- * so can be incremented directly.
- */
-#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
-#define get_mm_counter(mm, member) ((mm)->_##member)
-#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
-#define inc_mm_counter(mm, member) (mm)->_##member++
-#define dec_mm_counter(mm, member) (mm)->_##member--
-
-#endif /* !USE_SPLIT_PTLOCKS */
-
-#define get_mm_rss(mm)					\
-	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
-#define update_hiwater_rss(mm)	do {			\
-	unsigned long _rss = get_mm_rss(mm);		\
-	if ((mm)->hiwater_rss < _rss)			\
-		(mm)->hiwater_rss = _rss;		\
-} while (0)
-#define update_hiwater_vm(mm)	do {			\
-	if ((mm)->hiwater_vm < (mm)->total_vm)		\
-		(mm)->hiwater_vm = (mm)->total_vm;	\
-} while (0)
-
-static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
-{
-	return max(mm->hiwater_rss, get_mm_rss(mm));
-}
-
-static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
-					 struct mm_struct *mm)
-{
-	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
-
-	if (*maxrss < hiwater_rss)
-		*maxrss = hiwater_rss;
-}
-
-static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
-{
-	return max(mm->hiwater_vm, mm->total_vm);
-}
 
 extern void set_dumpable(struct mm_struct *mm, int value);
 extern int get_dumpable(struct mm_struct *mm);
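Note that the mm counter accessors and hiwater helpers removed here are not dropped from the kernel; in this series they appear to move next to mm_struct (include/linux/mm.h) as part of the per-task RSS-accounting rework that also adds rss_stat to task_struct further down. The idea, roughly (names mirror the macros above; this is a sketch of the relocated fast path, not the verbatim code):

	/* Batch RSS updates in the current task and fold them into the
	 * shared atomic per-mm counters only periodically. */
	static void sketch_add_mm_counter_fast(struct mm_struct *mm,
					       int member, int val)
	{
		struct task_struct *task = current;

		if (likely(task->mm == mm))
			task->rss_stat.count[member] += val;	/* task-local */
		else
			add_mm_counter(mm, member, val);	/* atomic */
	}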
@@ -740,14 +690,6 @@ struct user_struct {
 	uid_t uid;
 	struct user_namespace *user_ns;
 
-#ifdef CONFIG_USER_SCHED
-	struct task_group *tg;
-#ifdef CONFIG_SYSFS
-	struct kobject kobj;
-	struct delayed_work work;
-#endif
-#endif
-
 #ifdef CONFIG_PERF_EVENTS
 	atomic_long_t locked_vm;
 #endif
@@ -878,7 +820,10 @@ static inline int sd_balance_for_mc_power(void)
 	if (sched_smt_power_savings)
 		return SD_POWERSAVINGS_BALANCE;
 
-	return SD_PREFER_SIBLING;
+	if (!sched_mc_power_savings)
+		return SD_PREFER_SIBLING;
+
+	return 0;
 }
 
 static inline int sd_balance_for_package_power(void)
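The rewritten sd_balance_for_mc_power() now separates three cases instead of folding the last two together; informally:

	/* Summary of the new decision, derived from the hunk above:
	 *   sched_smt_power_savings set           -> SD_POWERSAVINGS_BALANCE
	 *   neither SMT nor MC power savings set  -> SD_PREFER_SIBLING
	 *   only sched_mc_power_savings set       -> 0 (no extra flag)
	 */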
@@ -1084,7 +1029,8 @@ struct sched_domain;
 struct sched_class {
 	const struct sched_class *next;
 
-	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
+	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
+			      bool head);
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
 	void (*yield_task) (struct rq *rq);
 
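The extra bool head argument lets callers request queueing at the head of a priority list instead of the tail, e.g. when a requeued task should run next. A hypothetical sketch of how an implementation might honor the flag (the real sched_rt enqueue path is more involved):

	static void sketch_enqueue_rt(struct rq *rq, struct task_struct *p,
				      int wakeup, bool head)
	{
		struct list_head *queue = &rq->rt.active.queue[p->prio];

		if (head)
			list_add(&p->rt.run_list, queue);	/* run soonest */
		else
			list_add_tail(&p->rt.run_list, queue);	/* normal FIFO */
	}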
@@ -1096,14 +1042,6 @@ struct sched_class {
 #ifdef CONFIG_SMP
 	int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 
-	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
-			struct rq *busiest, unsigned long max_load_move,
-			struct sched_domain *sd, enum cpu_idle_type idle,
-			int *all_pinned, int *this_best_prio);
-
-	int (*move_one_task) (struct rq *this_rq, int this_cpu,
-			      struct rq *busiest, struct sched_domain *sd,
-			      enum cpu_idle_type idle);
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
 	void (*task_waking) (struct rq *this_rq, struct task_struct *task);
@@ -1286,7 +1224,9 @@ struct task_struct {
 	struct plist_node pushable_tasks;
 
 	struct mm_struct *mm, *active_mm;
-
+#if defined(SPLIT_RSS_COUNTING)
+	struct task_rss_stat	rss_stat;
+#endif
 /* task state */
 	int exit_state;
 	int exit_code, exit_signal;
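struct task_rss_stat is not defined in this file; it is expected to carry the per-task counter cache used when SPLIT_RSS_COUNTING is enabled. Its assumed shape, per the matching mm-side patch in this series:

	struct task_rss_stat {
		int events;	/* events since the last fold-back */
		int count[NR_MM_COUNTERS];
	};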
@@ -1466,7 +1406,7 @@ struct task_struct {
 	void *journal_info;
 
 /* stacked block device info */
-	struct bio *bio_list, **bio_tail;
+	struct bio_list *bio_list;
 
 /* VM state */
 	struct reclaim_state *reclaim_state;
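The open-coded chain (a head pointer plus a **bio_tail cursor) becomes a struct bio_list, which packages the same pair behind helpers such as bio_list_add() and bio_list_pop(). For orientation, the type in include/linux/bio.h of this era is simply:

	struct bio_list {
		struct bio *head;
		struct bio *tail;
	};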
@@ -1537,7 +1477,7 @@ struct task_struct {
 
 	struct list_head *scm_work_list;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	/* Index of current stored adress in ret_stack */
+	/* Index of current stored address in ret_stack */
 	int curr_ret_stack;
 	/* Stack of return addresses for return function tracing */
 	struct ftrace_ret_stack *ret_stack;
@@ -2455,9 +2395,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
 
 static inline void thread_group_cputime_init(struct signal_struct *sig)
 {
-	sig->cputimer.cputime = INIT_CPUTIME;
 	spin_lock_init(&sig->cputimer.lock);
-	sig->cputimer.running = 0;
 }
 
 static inline void thread_group_cputime_free(struct signal_struct *sig)
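thread_group_cputime_init() now only initializes the lock. Dropping the other two statements is safe only if the cputimer starts out zeroed: INIT_CPUTIME is all-zero (utime, stime and sum_exec_runtime are 0), so a zero-allocated signal_struct would make both assignments redundant, assuming the allocation path zeroes the structure. For reference, the layout the accesses above imply:

	struct thread_group_cputimer {
		struct task_cputime cputime;	/* INIT_CPUTIME == all zero */
		int running;
		spinlock_t lock;		/* still needs explicit init */
	};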
@@ -2517,13 +2455,9 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
 extern void normalize_rt_tasks(void);
 
-#ifdef CONFIG_GROUP_SCHED
+#ifdef CONFIG_CGROUP_SCHED
 
 extern struct task_group init_task_group;
-#ifdef CONFIG_USER_SCHED
-extern struct task_group root_task_group;
-extern void set_tg_uid(struct user_struct *user);
-#endif
 
 extern struct task_group *sched_create_group(struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);
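With user-based group scheduling gone, the API that remains under CONFIG_CGROUP_SCHED is the cgroup-driven one; a hypothetical caller (error handling trimmed):

	struct task_group *tg;

	tg = sched_create_group(&init_task_group);	/* parent = root group */
	if (IS_ERR(tg))
		return PTR_ERR(tg);
	/* ... attach tasks via the cgroup filesystem ... */
	sched_destroy_group(tg);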