Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 35 ++++++++++++++++-------------------
 1 file changed, 16 insertions(+), 19 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 777d8a5ed06b..83bd2e2982fc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -99,6 +99,7 @@ struct robust_list_head;
 struct bio_list;
 struct fs_struct;
 struct perf_event_context;
+struct blk_plug;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -516,7 +517,7 @@ struct thread_group_cputimer {
 struct autogroup;
 
 /*
- * NOTE! "signal_struct" does not have it's own
+ * NOTE! "signal_struct" does not have its own
  * locking, because a shared signal_struct always
  * implies a shared sighand_struct, so locking
  * sighand_struct is always a proper superset of
@@ -1058,6 +1059,7 @@ struct sched_class {
 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
 	void (*yield_task) (struct rq *rq);
+	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
 
 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
 
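
The new yield_to_task hook gives the core scheduler a way to ask a scheduling class to hand the current task's remaining slice to a chosen task. A minimal sketch of how the core side might dispatch to it, assuming a hypothetical helper name (the real caller is the yield_to() export added further down):

    /* Sketch only: try_yield_to() is an illustrative name, not from
     * this patch; cross-class yields are declined. */
    static bool try_yield_to(struct rq *rq, struct task_struct *p, bool preempt)
    {
            const struct sched_class *class = rq->curr->sched_class;

            if (!class->yield_to_task || p->sched_class != class)
                    return false;
            return class->yield_to_task(rq, p, preempt);
    }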
@@ -1084,12 +1086,10 @@ struct sched_class {
 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
 	void (*task_fork) (struct task_struct *p);
 
-	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
-			       int running);
-	void (*switched_to) (struct rq *this_rq, struct task_struct *task,
-			     int running);
+	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
+	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
-			     int oldprio, int running);
+			     int oldprio);
 
 	unsigned int (*get_rr_interval) (struct rq *rq,
 					 struct task_struct *task);
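
Dropping the int running argument from switched_from/switched_to/prio_changed works because an implementation can recover the same fact from the runqueue itself. A sketch of a callback adapted to the new signature, loosely modeled on the fair class (illustrative, not the patched body):

    /* Sketch: "running" is now derived from rq->curr == p instead of
     * being passed in by the caller. */
    static void switched_to_sketch(struct rq *rq, struct task_struct *p)
    {
            if (rq->curr == p)
                    resched_task(rq->curr);         /* we were running */
            else
                    check_preempt_curr(rq, p, 0);
    }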
@@ -1429,6 +1429,11 @@ struct task_struct {
 /* stacked block device info */
 	struct bio_list *bio_list;
 
+#ifdef CONFIG_BLOCK
+/* stack plugging */
+	struct blk_plug *plug;
+#endif
+
 /* VM state */
 	struct reclaim_state *reclaim_state;
 
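
The new task_struct::plug pointer backs on-stack block plugging: the plug itself lives on the submitter's stack and current->plug points at it while batching is active. For context, submission sites use the plugging API from the same series roughly like this (a sketch; the bio setup is elided):

    #include <linux/blkdev.h>

    /* Sketch: batch several bios behind one plug, flushing on finish. */
    static void submit_batch(struct bio **bios, int nr)
    {
            struct blk_plug plug;
            int i;

            blk_start_plug(&plug);          /* sets current->plug */
            for (i = 0; i < nr; i++)
                    submit_bio(READ, bios[i]);
            blk_finish_plug(&plug);         /* flushes the batch */
    }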
@@ -1472,6 +1477,7 @@ struct task_struct {
 #ifdef CONFIG_NUMA
 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
 	short il_next;
+	short pref_node_fork;
 #endif
 	atomic_t fs_excl;	/* holding fs exclusive resources */
 	struct rcu_head rcu;
@@ -1524,8 +1530,8 @@ struct task_struct {
 	struct memcg_batch_info {
 		int do_batch;	/* incremented when batch uncharge started */
 		struct mem_cgroup *memcg; /* target memcg of uncharge */
-		unsigned long bytes; 		/* uncharged usage */
-		unsigned long memsw_bytes; /* uncharged mem+swap usage */
+		unsigned long nr_pages;	/* uncharged usage */
+		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
 	} memcg_batch;
 #endif
 };
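
The memcg batch counters move from byte to page granularity, matching the renamed fields. Where a byte figure is still wanted, it follows from the page count; a trivial sketch, assuming the usual PAGE_SHIFT relationship:

    /* Sketch: recover bytes from the page-granular batch counter. */
    static unsigned long batch_uncharged_bytes(struct task_struct *tsk)
    {
            return tsk->memcg_batch.nr_pages << PAGE_SHIFT;
    }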
@@ -1715,7 +1721,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 /*
  * Per process flags
  */
-#define PF_KSOFTIRQD	0x00000001	/* I am ksoftirqd */
 #define PF_STARTING	0x00000002	/* being created */
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
@@ -1945,8 +1950,6 @@ int sched_rt_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos);
 
-extern unsigned int sysctl_sched_compat_yield;
-
 #ifdef CONFIG_SCHED_AUTOGROUP
 extern unsigned int sysctl_sched_autogroup_enabled;
 
@@ -1977,6 +1980,7 @@ static inline int rt_mutex_getprio(struct task_struct *p)
 # define rt_mutex_adjust_pi(p)		do { } while (0)
 #endif
 
+extern bool yield_to(struct task_struct *p, bool preempt);
 extern void set_user_nice(struct task_struct *p, long nice);
 extern int task_prio(const struct task_struct *p);
 extern int task_nice(const struct task_struct *p);
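
yield_to() is the exported entry point in front of the per-class yield_to_task hook; it returns true when the caller's slice was actually handed to p. A hedged usage sketch of the directed-yield pattern (task_is_done() is a hypothetical predicate; the intended consumer spins on a preempted lock holder):

    /* Sketch: donate our slice to the task we are blocked on instead
     * of spinning; fall back to a plain reschedule if that fails. */
    static void wait_on_task(struct task_struct *target)
    {
            while (!task_is_done(target)) {         /* hypothetical */
                    if (!yield_to(target, true))
                            cond_resched();
            }
    }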
@@ -2049,7 +2053,7 @@ extern void release_uids(struct user_namespace *ns);
 
 #include <asm/current.h>
 
-extern void do_timer(unsigned long ticks);
+extern void xtime_update(unsigned long ticks);
 
 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
 extern int wake_up_process(struct task_struct *tsk);
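
Architectures stop calling do_timer() directly and call xtime_update() instead, the point being that the timekeeping lock is taken in exactly one place. The companion timekeeping change defines it roughly as follows (a sketch, not part of this header):

    /* Sketch: xtime_update() wraps do_timer() in xtime_lock so tick
     * callers no longer manage the lock themselves. */
    void xtime_update(unsigned long ticks)
    {
            write_seqlock(&xtime_lock);
            do_timer(ticks);
            write_sequnlock(&xtime_lock);
    }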
@@ -2578,13 +2582,6 @@ static inline void inc_syscw(struct task_struct *tsk)
 #define TASK_SIZE_OF(tsk)	TASK_SIZE
 #endif
 
-/*
- * Call the function if the target task is executing on a CPU right now:
- */
-extern void task_oncpu_function_call(struct task_struct *p,
-				     void (*func) (void *info), void *info);
-
-
 #ifdef CONFIG_MM_OWNER
 extern void mm_update_next_owner(struct mm_struct *mm);
 extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
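
task_oncpu_function_call() is removed without a replacement in this header; remaining users are expected to open-code the generic cross-CPU call. A hedged sketch of the equivalent pattern (mirrors what the removed helper did, modulo preemption details):

    #include <linux/smp.h>

    /* Sketch: run func(info) on the CPU where p is executing, if it
     * is on a CPU at all. */
    static void call_if_oncpu(struct task_struct *p,
                              void (*func)(void *info), void *info)
    {
            if (task_curr(p))
                    smp_call_function_single(task_cpu(p), func, info, 1);
    }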