author	Frederic Weisbecker <fweisbec@gmail.com>	2010-05-12 17:19:01 -0400
committer	Frederic Weisbecker <fweisbec@gmail.com>	2010-05-12 17:20:33 -0400
commit	a9aa1d02de36b450990b0e25a88fc2ff1c3e6b94 (patch)
tree	1f9d19f1642d263e65906a916a48be9339accc73 /include/linux/sched.h
parent	5671a10e2bc7f99d9157c6044faf8be2ef302361 (diff)
parent	b57f95a38233a2e73b679bea4a5453a1cc2a1cc9 (diff)
Merge commit 'v2.6.34-rc7' into perf/nmi
Merge reason: catch up with latest softlockup detector changes.
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	| 106
1 file changed, 22 insertions(+), 84 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6f7bba93929b..dad7f668ebf7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -97,7 +97,7 @@ struct sched_param {
 struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
-struct bio;
+struct bio_list;
 struct fs_struct;
 struct bts_context;
 struct perf_event_context;
@@ -258,6 +258,10 @@ extern spinlock_t mmlist_lock;
 
 struct task_struct;
 
+#ifdef CONFIG_PROVE_RCU
+extern int lockdep_tasklist_lock_is_held(void);
+#endif /* #ifdef CONFIG_PROVE_RCU */
+
 extern void sched_init(void);
 extern void sched_init_smp(void);
 extern asmlinkage void schedule_tail(struct task_struct *prev);
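The lockdep_tasklist_lock_is_held() hook added above gives CONFIG_PROVE_RCU builds a way to assert that a task-list traversal is protected either by the RCU read side or by tasklist_lock. A minimal sketch of the intended pattern, assuming the rcu_dereference_check() API from this same kernel series; p is a hypothetical struct task_struct pointer:

	/* sketch: complain unless RCU read side or tasklist_lock is held */
	struct task_struct *parent;

	parent = rcu_dereference_check(p->real_parent,
				       rcu_read_lock_held() ||
				       lockdep_tasklist_lock_is_held());
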
@@ -310,6 +314,7 @@ extern void sched_show_task(struct task_struct *p);
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 extern void softlockup_tick(void);
 extern void touch_softlockup_watchdog(void);
+extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
 extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
 				    void __user *buffer,
@@ -323,6 +328,9 @@ static inline void softlockup_tick(void)
 static inline void touch_softlockup_watchdog(void)
 {
 }
+static inline void touch_softlockup_watchdog_sync(void)
+{
+}
 static inline void touch_all_softlockup_watchdogs(void)
 {
 }
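Unlike plain touch_softlockup_watchdog(), the new _sync() variant is for callers that have kept CPUs stopped long enough that the watchdog's saved timestamp itself has gone stale (a debugger holding the machine, for instance), so the timestamp must be resynchronized before the watchdog re-arms. A hedged sketch of a call site; the resume hook is hypothetical:

	/* sketch: after releasing CPUs that were halted for a long time */
	static void debugger_resume(void)
	{
		/* ...let the other CPUs run again... */
		touch_softlockup_watchdog_sync();
	}
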
@@ -392,60 +400,6 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
 #endif
 
-#if USE_SPLIT_PTLOCKS
-/*
- * The mm counters are not protected by its page_table_lock,
- * so must be incremented atomically.
- */
-#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
-#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
-#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
-#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
-#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
-
-#else  /* !USE_SPLIT_PTLOCKS */
-/*
- * The mm counters are protected by its page_table_lock,
- * so can be incremented directly.
- */
-#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
-#define get_mm_counter(mm, member) ((mm)->_##member)
-#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
-#define inc_mm_counter(mm, member) (mm)->_##member++
-#define dec_mm_counter(mm, member) (mm)->_##member--
-
-#endif /* !USE_SPLIT_PTLOCKS */
-
-#define get_mm_rss(mm)					\
-	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
-#define update_hiwater_rss(mm)	do {			\
-	unsigned long _rss = get_mm_rss(mm);		\
-	if ((mm)->hiwater_rss < _rss)			\
-		(mm)->hiwater_rss = _rss;		\
-} while (0)
-#define update_hiwater_vm(mm)	do {			\
-	if ((mm)->hiwater_vm < (mm)->total_vm)		\
-		(mm)->hiwater_vm = (mm)->total_vm;	\
-} while (0)
-
-static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
-{
-	return max(mm->hiwater_rss, get_mm_rss(mm));
-}
-
-static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
-					 struct mm_struct *mm)
-{
-	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
-
-	if (*maxrss < hiwater_rss)
-		*maxrss = hiwater_rss;
-}
-
-static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
-{
-	return max(mm->hiwater_vm, mm->total_vm);
-}
-
 
 extern void set_dumpable(struct mm_struct *mm, int value);
 extern int get_dumpable(struct mm_struct *mm);
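Nothing is lost by the block above: in this release the mm counters move out of sched.h into include/linux/mm.h, rewritten as static inline functions over an array of atomic counters so they can also back the new per-task RSS caching (see the SPLIT_RSS_COUNTING hunk further down). A rough sketch of the shape they take after the move; treat the field and enum layout (mm->rss_stat.count[]) as an assumption to verify against include/linux/mm_types.h:

	/* sketch: accessor over mm->rss_stat.count[], post-move */
	static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
	{
		long val = atomic_long_read(&mm->rss_stat.count[member]);

		/* per-task caching may leave a counter transiently negative */
		return val < 0 ? 0 : (unsigned long)val;
	}
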
@@ -736,14 +690,6 @@ struct user_struct {
 	uid_t uid;
 	struct user_namespace *user_ns;
 
-#ifdef CONFIG_USER_SCHED
-	struct task_group *tg;
-#ifdef CONFIG_SYSFS
-	struct kobject kobj;
-	struct delayed_work work;
-#endif
-#endif
-
 #ifdef CONFIG_PERF_EVENTS
 	atomic_long_t locked_vm;
 #endif
@@ -874,7 +820,10 @@ static inline int sd_balance_for_mc_power(void)
 	if (sched_smt_power_savings)
 		return SD_POWERSAVINGS_BALANCE;
 
-	return SD_PREFER_SIBLING;
+	if (!sched_mc_power_savings)
+		return SD_PREFER_SIBLING;
+
+	return 0;
 }
 
 static inline int sd_balance_for_package_power(void)
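The rewritten helper now separates three cases where the old code had two; summarizing the hunk above as a decision table:

	/*
	 * sched_smt_power_savings set         -> SD_POWERSAVINGS_BALANCE
	 * both SMT and MC power savings clear -> SD_PREFER_SIBLING
	 * only sched_mc_power_savings set     -> 0 (no balance hint)
	 */
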
@@ -1080,7 +1029,8 @@ struct sched_domain;
 struct sched_class {
 	const struct sched_class *next;
 
-	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
+	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
+			      bool head);
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
 	void (*yield_task) (struct rq *rq);
 
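The extra head flag lets a caller enqueue at the head of the priority list instead of the tail, useful when requeueing a task that should not lose its place. A minimal sketch of a call site, with rq and p assumed to be a valid runqueue/task pair:

	/* sketch: requeue p at the head of its list, not as a wakeup */
	p->sched_class->enqueue_task(rq, p, 0 /* wakeup */, true /* head */);
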
@@ -1092,14 +1042,6 @@ struct sched_class {
 #ifdef CONFIG_SMP
 	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 
-	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
-			struct rq *busiest, unsigned long max_load_move,
-			struct sched_domain *sd, enum cpu_idle_type idle,
-			int *all_pinned, int *this_best_prio);
-
-	int (*move_one_task) (struct rq *this_rq, int this_cpu,
-			      struct rq *busiest, struct sched_domain *sd,
-			      enum cpu_idle_type idle);
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
 	void (*task_waking) (struct rq *this_rq, struct task_struct *task);
@@ -1282,7 +1224,9 @@ struct task_struct {
 	struct plist_node pushable_tasks;
 
 	struct mm_struct *mm, *active_mm;
-
+#if defined(SPLIT_RSS_COUNTING)
+	struct task_rss_stat	rss_stat;
+#endif
 /* task state */
 	int exit_state;
 	int exit_code, exit_signal;
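SPLIT_RSS_COUNTING pairs this per-task cache with the atomic counters in mm_struct: RSS updates accumulate locklessly in current->rss_stat and are only folded back into the mm once enough events pile up. A sketch of the idea; the threshold value and the exact sync_mm_rss() signature are assumptions drawn from the same 2.6.34 rework:

	/* sketch: buffer RSS deltas per task, flush to the mm occasionally */
	static void add_rss_counter(struct task_struct *task, int member, int val)
	{
		struct task_rss_stat *s = &task->rss_stat;

		s->count[member] += val;
		if (++s->events >= 64)	/* illustrative threshold */
			sync_mm_rss(task, task->mm);	/* fold and reset the cache */
	}
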
@@ -1369,7 +1313,7 @@ struct task_struct {
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
 				       it with task_lock())
-				     - initialized normally by flush_old_exec */
+				     - initialized normally by setup_new_exec */
 /* file system info */
 	int link_count, total_link_count;
 #ifdef CONFIG_SYSVIPC
@@ -1462,7 +1406,7 @@ struct task_struct {
 	void *journal_info;
 
 /* stacked block device info */
-	struct bio *bio_list, **bio_tail;
+	struct bio_list *bio_list;
 
 /* VM state */
 	struct reclaim_state *reclaim_state;
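struct bio_list (a simple head/tail pair with helpers in include/linux/bio.h) replaces the open-coded list that generic_make_request used to flatten recursive bio submission into iteration. A sketch of that pattern using bio_list_init()/bio_list_pop(); __submit_one() stands in for the real per-bio worker:

	/* sketch: nested submissions append to current->bio_list instead of recursing */
	static void submit_flattened(struct bio *bio)
	{
		struct bio_list list;

		bio_list_init(&list);
		current->bio_list = &list;
		do {
			__submit_one(bio);	/* may bio_list_add() more bios */
		} while ((bio = bio_list_pop(&list)) != NULL);
		current->bio_list = NULL;
	}
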
@@ -1533,7 +1477,7 @@ struct task_struct {
 
 	struct list_head *scm_work_list;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	/* Index of current stored adress in ret_stack */
+	/* Index of current stored address in ret_stack */
 	int curr_ret_stack;
 	/* Stack of return addresses for return function tracing */
 	struct ftrace_ret_stack	*ret_stack;
@@ -2451,9 +2395,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
 
 static inline void thread_group_cputime_init(struct signal_struct *sig)
 {
-	sig->cputimer.cputime = INIT_CPUTIME;
 	spin_lock_init(&sig->cputimer.lock);
-	sig->cputimer.running = 0;
 }
 
 static inline void thread_group_cputime_free(struct signal_struct *sig)
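Dropping the two assignments only works if the cputimer reaches this helper already zeroed: INIT_CPUTIME and running = 0 are both all-zero state, so zeroed allocation of the containing signal_struct would cover them. That appears to be the assumption here; a sketch of the allocation side it relies on (the zeroing call, not its placement, is the assumption to check):

	/* sketch: zeroed allocation makes explicit zero-init redundant */
	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
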
@@ -2513,13 +2455,9 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
 extern void normalize_rt_tasks(void);
 
-#ifdef CONFIG_GROUP_SCHED
+#ifdef CONFIG_CGROUP_SCHED
 
 extern struct task_group init_task_group;
-#ifdef CONFIG_USER_SCHED
-extern struct task_group root_task_group;
-extern void set_tg_uid(struct user_struct *user);
-#endif
 
 extern struct task_group *sched_create_group(struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);
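With the user-based group scheduler gone (see the user_struct and root_task_group removals above), control groups are the only grouping mechanism left, hence the narrowing from CONFIG_GROUP_SCHED to CONFIG_CGROUP_SCHED. A minimal sketch against the surviving declarations, creating a group under the initial one and tearing it down; error handling is elided:

	/* sketch: exercise the remaining group-scheduling API */
	struct task_group *tg;

	tg = sched_create_group(&init_task_group);
	if (!IS_ERR(tg))
		sched_destroy_group(tg);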