Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h | 58
1 file changed, 45 insertions, 13 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 294eb2f80144..6f7bba93929b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -192,6 +192,12 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #define TASK_DEAD		64
 #define TASK_WAKEKILL		128
 #define TASK_WAKING		256
+#define TASK_STATE_MAX		512
+
+#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
+
+extern char ___assert_task_state[1 - 2*!!(
+		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
 
 /* Convenience macros for the sake of set_task_state */
 #define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
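The ___assert_task_state declaration added in this hunk is a build-time check: TASK_STATE_TO_CHAR_STR must supply exactly one character per task-state bit, i.e. ilog2(TASK_STATE_MAX)+1 of them, or the array size goes negative and the build fails. Below is a minimal user-space sketch of the same negative-array-size trick; the names and the hard-coded bit count are illustrative only, not kernel API.

/* Sketch only: if the lengths disagree, the array size evaluates to -1 and
 * the compiler rejects the build; if they agree, it is a harmless extern
 * declaration of a one-element array that is never defined or used. */
#include <stdio.h>

#define STATE_STR   "RSDTtZXxKW"   /* one character per state bit */
#define STATE_BITS  10             /* ilog2(512) + 1, hard-coded here */

extern char assert_state_str_len[1 - 2 * !!(sizeof(STATE_STR) - 1 != STATE_BITS)];

int main(void)
{
	printf("%zu state characters: %s\n", sizeof(STATE_STR) - 1, STATE_STR);
	return 0;
}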
@@ -371,6 +377,8 @@ extern int sysctl_max_map_count;
 
 #include <linux/aio.h>
 
+#ifdef CONFIG_MMU
+extern void arch_pick_mmap_layout(struct mm_struct *mm);
 extern unsigned long
 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
 		       unsigned long, unsigned long);
@@ -380,6 +388,9 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 			  unsigned long flags);
 extern void arch_unmap_area(struct mm_struct *, unsigned long);
 extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
+#else
+static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
+#endif
 
 #if USE_SPLIT_PTLOCKS
 /*
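These two hunks move the arch_pick_mmap_layout() prototype under CONFIG_MMU and give !MMU builds an empty static inline, so call sites need no #ifdefs (the old unconditional prototype is removed further down in this diff). A minimal sketch of the same pattern follows, using a made-up CONFIG_FOO option, foo_setup() helper and foo_ctx type rather than any real kernel interface.

/* Real prototype when the option is enabled, empty inline stub when it is
 * not; either way the call site compiles unchanged.  CONFIG_FOO, foo_setup()
 * and struct foo_ctx are hypothetical names used only for illustration. */
struct foo_ctx;					/* opaque to callers */

#ifdef CONFIG_FOO
extern void foo_setup(struct foo_ctx *ctx);	/* implemented when enabled */
#else
static inline void foo_setup(struct foo_ctx *ctx) { (void)ctx; }
#endif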
@@ -1091,7 +1102,8 @@ struct sched_class {
 			      enum cpu_idle_type idle);
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
-	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
+	void (*task_waking) (struct rq *this_rq, struct task_struct *task);
+	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
 				 const struct cpumask *newmask);
@@ -1115,7 +1127,7 @@ struct sched_class {
 			     struct task_struct *task);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	void (*moved_group) (struct task_struct *p);
+	void (*moved_group) (struct task_struct *p, int on_rq);
 #endif
 };
 
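In the sched_class hunks above, the single task_wake_up hook is split in two: task_waking runs while a wakeup is still being set up, before the task is enqueued, and task_woken runs once the task is back on a runqueue; moved_group additionally receives the new on_rq flag. The following is a purely illustrative sketch of how a scheduling class might wire up the renamed callbacks; demo_* names are made up and the remaining members are elided.

/* Hypothetical stubs for the renamed hooks; only .task_waking and
 * .task_woken are shown, all other sched_class callbacks are omitted. */
static void demo_task_waking(struct rq *this_rq, struct task_struct *task)
{
	/* per-class bookkeeping before the task is enqueued anywhere */
}

static void demo_task_woken(struct rq *this_rq, struct task_struct *task)
{
	/* react once the task is runnable, e.g. push it to another CPU */
}

static const struct sched_class demo_sched_class = {
	.task_waking	= demo_task_waking,
	.task_woken	= demo_task_woken,
	/* ... remaining callbacks ... */
};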
@@ -1409,7 +1421,7 @@ struct task_struct {
 #endif
 
 	/* Protection of the PI data structures: */
-	spinlock_t pi_lock;
+	raw_spinlock_t pi_lock;
 
 #ifdef CONFIG_RT_MUTEXES
 	/* PI waiters blocked on a rt_mutex held by this task */
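pi_lock becomes a raw_spinlock_t, a lock that always spins even on configurations where a plain spinlock_t may be turned into a sleeping lock (as on PREEMPT_RT), so users of the field switch to the raw_spin_* API. A minimal sketch of taking the lock under that API; the surrounding function is hypothetical.

/* raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pair with
 * raw_spinlock_t the way spin_lock_irqsave() pairs with spinlock_t. */
static void demo_touch_pi_state(struct task_struct *p)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	/* ... examine or update PI state while the lock is held ... */
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}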
@@ -1542,10 +1554,18 @@ struct task_struct {
 	unsigned long trace_recursion;
 #endif /* CONFIG_TRACING */
 	unsigned long stack_start;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
+	struct memcg_batch_info {
+		int do_batch;	/* incremented when batch uncharge started */
+		struct mem_cgroup *memcg; /* target memcg of uncharge */
+		unsigned long bytes;		/* uncharged usage */
+		unsigned long memsw_bytes;	/* uncharged mem+swap usage */
+	} memcg_batch;
+#endif
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
-#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
+#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
 
 /*
  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
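Besides the per-task memcg batch-uncharge bookkeeping, this hunk renames the affinity accessor from tsk_cpumask() to tsk_cpus_allowed(); callers are expected to go through the macro rather than dereference ->cpus_allowed directly, which keeps them working if the field's representation ever changes. A small sketch of the accessor in use; the wrapper function is made up, cpumask_test_cpu() is the standard cpumask helper.

/* Check whether task p may run on the given CPU via the accessor macro. */
static bool demo_task_allowed_on(struct task_struct *p, int cpu)
{
	return cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
}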
@@ -2073,7 +2093,6 @@ extern int kill_proc_info(int, struct siginfo *, pid_t);
 extern int do_notify_parent(struct task_struct *, int);
 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
 extern void force_sig(int, struct task_struct *);
-extern void force_sig_specific(int, struct task_struct *);
 extern int send_sig(int, struct task_struct *, int);
 extern void zap_other_threads(struct task_struct *p);
 extern struct sigqueue *sigqueue_alloc(void);
@@ -2092,11 +2111,6 @@ static inline int kill_cad_pid(int sig, int priv)
 #define SEND_SIG_PRIV	((struct siginfo *) 1)
 #define SEND_SIG_FORCED	((struct siginfo *) 2)
 
-static inline int is_si_special(const struct siginfo *info)
-{
-	return info <= SEND_SIG_FORCED;
-}
-
 /*
  * True if we are on the alternate signal stack.
  */
@@ -2482,8 +2496,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 #endif /* CONFIG_SMP */
 
-extern void arch_pick_mmap_layout(struct mm_struct *mm);
-
 #ifdef CONFIG_TRACING
 extern void
 __trace_special(void *__tr, void *__data,
@@ -2592,7 +2604,27 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 }
 #endif /* CONFIG_MM_OWNER */
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
+static inline unsigned long task_rlimit(const struct task_struct *tsk,
+		unsigned int limit)
+{
+	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
+}
+
+static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
+		unsigned int limit)
+{
+	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
+}
+
+static inline unsigned long rlimit(unsigned int limit)
+{
+	return task_rlimit(current, limit);
+}
+
+static inline unsigned long rlimit_max(unsigned int limit)
+{
+	return task_rlimit_max(current, limit);
+}
 
 #endif /* __KERNEL__ */
 
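The task_rlimit()/task_rlimit_max() helpers added in the final hunk wrap the open-coded tsk->signal->rlim[limit] reads behind ACCESS_ONCE so the value is fetched exactly once, and the short forms rlimit()/rlimit_max() default to current. A sketch of the helpers in use; the reporting function is hypothetical.

/* Read the soft and hard RLIMIT_NOFILE limits of the current task through
 * the new helpers instead of open-coding current->signal->rlim[...] access. */
static void demo_report_nofile(void)
{
	unsigned long soft = rlimit(RLIMIT_NOFILE);
	unsigned long hard = rlimit_max(RLIMIT_NOFILE);

	pr_info("RLIMIT_NOFILE: soft=%lu hard=%lu\n", soft, hard);
}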