author		Ingo Molnar <mingo@elte.hu>	2010-04-02 14:02:55 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-04-02 14:03:08 -0400
commit		c9494727cf293ae2ec66af57547a3e79c724fec2 (patch)
tree		44ae197b64fa7530ee695a90ad31326dda06f1e1 /include/linux/sched.h
parent		6427462bfa50f50dc6c088c07037264fcc73eca1 (diff)
parent		42be79e37e264557f12860fa4cc84b4de3685954 (diff)
Merge branch 'linus' into sched/core
Merge reason: update to latest upstream

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	66
1 file changed, 8 insertions(+), 58 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8604884cee8..43c94515273 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -258,6 +258,10 @@ extern spinlock_t mmlist_lock;
 
 struct task_struct;
 
+#ifdef CONFIG_PROVE_RCU
+extern int lockdep_tasklist_lock_is_held(void);
+#endif /* #ifdef CONFIG_PROVE_RCU */
+
 extern void sched_init(void);
 extern void sched_init_smp(void);
 extern asmlinkage void schedule_tail(struct task_struct *prev);
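
The hunk above pulls in the new CONFIG_PROVE_RCU hook from upstream: lockdep_tasklist_lock_is_held() gives RCU-lockdep a way to accept "holding tasklist_lock" as an alternative to rcu_read_lock() when task-list pointers are dereferenced. Below is a minimal kernel-context sketch of how such a helper is typically backed and consumed; the definition site and the next_task_checked() macro are illustrative assumptions, not part of this patch.

/*
 * Sketch only (kernel context, not standalone): the helper is plausibly
 * backed by lockdep_is_held() on tasklist_lock, defined outside this header.
 */
#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
#endif

/*
 * Hypothetical caller: walking the RCU-protected task list is legitimate
 * either inside rcu_read_lock() or while holding tasklist_lock, and
 * rcu_dereference_check() lets lockdep enforce exactly that condition.
 */
#define next_task_checked(p)						   \
	list_entry(rcu_dereference_check((p)->tasks.next,		   \
					 rcu_read_lock_held() ||	   \
					 lockdep_tasklist_lock_is_held()), \
		   struct task_struct, tasks)
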
@@ -402,60 +406,6 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
 #endif
 
-#if USE_SPLIT_PTLOCKS
-/*
- * The mm counters are not protected by its page_table_lock,
- * so must be incremented atomically.
- */
-#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
-#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
-#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
-#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
-#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
-
-#else /* !USE_SPLIT_PTLOCKS */
-/*
- * The mm counters are protected by its page_table_lock,
- * so can be incremented directly.
- */
-#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
-#define get_mm_counter(mm, member) ((mm)->_##member)
-#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
-#define inc_mm_counter(mm, member) (mm)->_##member++
-#define dec_mm_counter(mm, member) (mm)->_##member--
-
-#endif /* !USE_SPLIT_PTLOCKS */
-
-#define get_mm_rss(mm)					\
-	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
-#define update_hiwater_rss(mm)	do {			\
-	unsigned long _rss = get_mm_rss(mm);		\
-	if ((mm)->hiwater_rss < _rss)			\
-		(mm)->hiwater_rss = _rss;		\
-} while (0)
-#define update_hiwater_vm(mm)	do {			\
-	if ((mm)->hiwater_vm < (mm)->total_vm)		\
-		(mm)->hiwater_vm = (mm)->total_vm;	\
-} while (0)
-
-static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
-{
-	return max(mm->hiwater_rss, get_mm_rss(mm));
-}
-
-static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
-					 struct mm_struct *mm)
-{
-	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
-
-	if (*maxrss < hiwater_rss)
-		*maxrss = hiwater_rss;
-}
-
-static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
-{
-	return max(mm->hiwater_vm, mm->total_vm);
-}
 
 extern void set_dumpable(struct mm_struct *mm, int value);
 extern int get_dumpable(struct mm_struct *mm);
@@ -1270,7 +1220,9 @@ struct task_struct {
 	struct plist_node pushable_tasks;
 
 	struct mm_struct *mm, *active_mm;
-
+#if defined(SPLIT_RSS_COUNTING)
+	struct task_rss_stat	rss_stat;
+#endif
 /* task state */
 	int exit_state;
 	int exit_code, exit_signal;
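
The new rss_stat member only exists when SPLIT_RSS_COUNTING is defined, and it pairs with the removal of the macro-based mm counters in the earlier hunk: giving each task its own RSS bookkeeping lets hot paths accumulate counter deltas locally and fold them into the shared per-mm counters only occasionally, instead of doing a contended atomic update on every page fault. The stand-alone user-space sketch below models that batching idea only; the counter names, flush threshold, and helpers are illustrative and are not taken from the kernel.

/*
 * Illustrative model of split counting: each thread batches deltas in
 * thread-local storage and folds them into shared atomic counters only
 * when the batch grows past a threshold.  Build with: cc -pthread rss.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum { FILE_RSS, ANON_RSS, NR_COUNTERS };
#define FLUSH_THRESHOLD 64				/* arbitrary batching threshold */

static atomic_long shared_counter[NR_COUNTERS];		/* shared "per-mm" state */
static _Thread_local long local_delta[NR_COUNTERS];	/* private "per-task" batch */
static _Thread_local int local_events;

static void flush_local(void)
{
	for (int i = 0; i < NR_COUNTERS; i++) {
		if (local_delta[i]) {
			atomic_fetch_add(&shared_counter[i], local_delta[i]);
			local_delta[i] = 0;
		}
	}
	local_events = 0;
}

static void add_rss(int member, long value)
{
	local_delta[member] += value;		/* no shared-memory traffic here */
	if (++local_events >= FLUSH_THRESHOLD)
		flush_local();			/* occasional fold into shared state */
}

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		add_rss(ANON_RSS, 1);		/* e.g. one anonymous page mapped */
	flush_local();				/* final fold, as on task exit */
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	printf("anon rss: %ld\n", atomic_load(&shared_counter[ANON_RSS]));
	return 0;
}
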
@@ -1521,7 +1473,7 @@ struct task_struct {
 
 	struct list_head *scm_work_list;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	/* Index of current stored adress in ret_stack */
+	/* Index of current stored address in ret_stack */
 	int curr_ret_stack;
 	/* Stack of return addresses for return function tracing */
 	struct ftrace_ret_stack *ret_stack;
@@ -2439,9 +2391,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
 
 static inline void thread_group_cputime_init(struct signal_struct *sig)
 {
-	sig->cputimer.cputime = INIT_CPUTIME;
 	spin_lock_init(&sig->cputimer.lock);
-	sig->cputimer.running = 0;
 }
 
 static inline void thread_group_cputime_free(struct signal_struct *sig)
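
The last hunk shrinks thread_group_cputime_init() to just the spin_lock_init(): the explicit cputime reset (INIT_CPUTIME is effectively an all-zero initializer) and the running = 0 store go away. That is only equivalent if those fields are already zero, or are initialized elsewhere, by the time this helper runs; the usual way to get the former for free is a zeroing allocation of the enclosing structure. The small user-space stand-in below illustrates that pattern only; the types and the calloc() allocation are simplified placeholders, not the kernel's signal_struct handling.

/*
 * Simplified stand-in (user space, build with cc -pthread): with a zeroing
 * allocator, plain scalar members need no explicit reset, but a lock object
 * still needs its own init call.  These types are placeholders.
 */
#include <assert.h>
#include <pthread.h>
#include <stdlib.h>

struct cputimer_like {
	long long utime, stime, sum_exec_runtime;	/* stand-ins for cputime fields */
	int running;
	pthread_spinlock_t lock;
};

struct signal_like {
	struct cputimer_like cputimer;
};

static void cputime_init_like(struct signal_like *sig)
{
	/* explicit "= 0"-style stores would be redundant on zeroed memory */
	pthread_spin_init(&sig->cputimer.lock, PTHREAD_PROCESS_PRIVATE);
}

int main(void)
{
	struct signal_like *sig = calloc(1, sizeof(*sig));	/* zero-filled */

	if (!sig)
		return 1;
	cputime_init_like(sig);
	assert(sig->cputimer.running == 0 && sig->cputimer.utime == 0);
	pthread_spin_destroy(&sig->cputimer.lock);
	free(sig);
	return 0;
}
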