diff options
author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2011-05-24 03:06:26 -0400 |
---|---|---|
committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2011-05-24 03:06:26 -0400 |
commit | b73077eb03f510a84b102fb97640e595a958403c (patch) | |
tree | 8b639000418e2756bf6baece4e00e07d2534bccc /include/linux/sched.h | |
parent | 28350e330cfab46b60a1dbf763b678d859f9f3d9 (diff) | |
parent | 9d2e173644bb5c42ff1b280fbdda3f195a7cf1f7 (diff) |
Merge branch 'next' into for-linus
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r-- | include/linux/sched.h | 42 |
1 file changed, 21 insertions, 21 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index d747f948b34e..18d63cea2848 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -99,6 +99,7 @@ struct robust_list_head; | |||
99 | struct bio_list; | 99 | struct bio_list; |
100 | struct fs_struct; | 100 | struct fs_struct; |
101 | struct perf_event_context; | 101 | struct perf_event_context; |
102 | struct blk_plug; | ||
102 | 103 | ||
103 | /* | 104 | /* |
104 | * List of flags we want to share for kernel threads, | 105 | * List of flags we want to share for kernel threads, |
@@ -516,7 +517,7 @@ struct thread_group_cputimer { | |||
516 | struct autogroup; | 517 | struct autogroup; |
517 | 518 | ||
518 | /* | 519 | /* |
519 | * NOTE! "signal_struct" does not have it's own | 520 | * NOTE! "signal_struct" does not have its own |
520 | * locking, because a shared signal_struct always | 521 | * locking, because a shared signal_struct always |
521 | * implies a shared sighand_struct, so locking | 522 | * implies a shared sighand_struct, so locking |
522 | * sighand_struct is always a proper superset of | 523 | * sighand_struct is always a proper superset of |
@@ -853,7 +854,7 @@ extern int __weak arch_sd_sibiling_asym_packing(void); | |||
853 | 854 | ||
854 | /* | 855 | /* |
855 | * Optimise SD flags for power savings: | 856 | * Optimise SD flags for power savings: |
856 | * SD_BALANCE_NEWIDLE helps agressive task consolidation and power savings. | 857 | * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings. |
857 | * Keep default SD flags if sched_{smt,mc}_power_saving=0 | 858 | * Keep default SD flags if sched_{smt,mc}_power_saving=0 |
858 | */ | 859 | */ |
859 | 860 | ||
@@ -1058,6 +1059,7 @@ struct sched_class { | |||
1058 | void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); | 1059 | void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); |
1059 | void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); | 1060 | void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); |
1060 | void (*yield_task) (struct rq *rq); | 1061 | void (*yield_task) (struct rq *rq); |
1062 | bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt); | ||
1061 | 1063 | ||
1062 | void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); | 1064 | void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); |
1063 | 1065 | ||
@@ -1084,12 +1086,10 @@ struct sched_class { | |||
1084 | void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); | 1086 | void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); |
1085 | void (*task_fork) (struct task_struct *p); | 1087 | void (*task_fork) (struct task_struct *p); |
1086 | 1088 | ||
1087 | void (*switched_from) (struct rq *this_rq, struct task_struct *task, | 1089 | void (*switched_from) (struct rq *this_rq, struct task_struct *task); |
1088 | int running); | 1090 | void (*switched_to) (struct rq *this_rq, struct task_struct *task); |
1089 | void (*switched_to) (struct rq *this_rq, struct task_struct *task, | ||
1090 | int running); | ||
1091 | void (*prio_changed) (struct rq *this_rq, struct task_struct *task, | 1091 | void (*prio_changed) (struct rq *this_rq, struct task_struct *task, |
1092 | int oldprio, int running); | 1092 | int oldprio); |
1093 | 1093 | ||
1094 | unsigned int (*get_rr_interval) (struct rq *rq, | 1094 | unsigned int (*get_rr_interval) (struct rq *rq, |
1095 | struct task_struct *task); | 1095 | struct task_struct *task); |
@@ -1254,6 +1254,9 @@ struct task_struct { | |||
1254 | #endif | 1254 | #endif |
1255 | 1255 | ||
1256 | struct mm_struct *mm, *active_mm; | 1256 | struct mm_struct *mm, *active_mm; |
1257 | #ifdef CONFIG_COMPAT_BRK | ||
1258 | unsigned brk_randomized:1; | ||
1259 | #endif | ||
1257 | #if defined(SPLIT_RSS_COUNTING) | 1260 | #if defined(SPLIT_RSS_COUNTING) |
1258 | struct task_rss_stat rss_stat; | 1261 | struct task_rss_stat rss_stat; |
1259 | #endif | 1262 | #endif |
@@ -1429,6 +1432,11 @@ struct task_struct { | |||
1429 | /* stacked block device info */ | 1432 | /* stacked block device info */ |
1430 | struct bio_list *bio_list; | 1433 | struct bio_list *bio_list; |
1431 | 1434 | ||
1435 | #ifdef CONFIG_BLOCK | ||
1436 | /* stack plugging */ | ||
1437 | struct blk_plug *plug; | ||
1438 | #endif | ||
1439 | |||
1432 | /* VM state */ | 1440 | /* VM state */ |
1433 | struct reclaim_state *reclaim_state; | 1441 | struct reclaim_state *reclaim_state; |
1434 | 1442 | ||
@@ -1472,6 +1480,7 @@ struct task_struct { | |||
1472 | #ifdef CONFIG_NUMA | 1480 | #ifdef CONFIG_NUMA |
1473 | struct mempolicy *mempolicy; /* Protected by alloc_lock */ | 1481 | struct mempolicy *mempolicy; /* Protected by alloc_lock */ |
1474 | short il_next; | 1482 | short il_next; |
1483 | short pref_node_fork; | ||
1475 | #endif | 1484 | #endif |
1476 | atomic_t fs_excl; /* holding fs exclusive resources */ | 1485 | atomic_t fs_excl; /* holding fs exclusive resources */ |
1477 | struct rcu_head rcu; | 1486 | struct rcu_head rcu; |
@@ -1524,8 +1533,8 @@ struct task_struct { | |||
1524 | struct memcg_batch_info { | 1533 | struct memcg_batch_info { |
1525 | int do_batch; /* incremented when batch uncharge started */ | 1534 | int do_batch; /* incremented when batch uncharge started */ |
1526 | struct mem_cgroup *memcg; /* target memcg of uncharge */ | 1535 | struct mem_cgroup *memcg; /* target memcg of uncharge */ |
1527 | unsigned long bytes; /* uncharged usage */ | 1536 | unsigned long nr_pages; /* uncharged usage */ |
1528 | unsigned long memsw_bytes; /* uncharged mem+swap usage */ | 1537 | unsigned long memsw_nr_pages; /* uncharged mem+swap usage */ |
1529 | } memcg_batch; | 1538 | } memcg_batch; |
1530 | #endif | 1539 | #endif |
1531 | }; | 1540 | }; |
@@ -1715,7 +1724,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * | |||
1715 | /* | 1724 | /* |
1716 | * Per process flags | 1725 | * Per process flags |
1717 | */ | 1726 | */ |
1718 | #define PF_KSOFTIRQD 0x00000001 /* I am ksoftirqd */ | ||
1719 | #define PF_STARTING 0x00000002 /* being created */ | 1727 | #define PF_STARTING 0x00000002 /* being created */ |
1720 | #define PF_EXITING 0x00000004 /* getting shut down */ | 1728 | #define PF_EXITING 0x00000004 /* getting shut down */ |
1721 | #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ | 1729 | #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ |
@@ -1744,7 +1752,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * | |||
1744 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ | 1752 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ |
1745 | #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ | 1753 | #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ |
1746 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ | 1754 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ |
1747 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ | 1755 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ |
1748 | #define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */ | 1756 | #define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */ |
1749 | 1757 | ||
1750 | /* | 1758 | /* |
@@ -1945,8 +1953,6 @@ int sched_rt_handler(struct ctl_table *table, int write, | |||
1945 | void __user *buffer, size_t *lenp, | 1953 | void __user *buffer, size_t *lenp, |
1946 | loff_t *ppos); | 1954 | loff_t *ppos); |
1947 | 1955 | ||
1948 | extern unsigned int sysctl_sched_compat_yield; | ||
1949 | |||
1950 | #ifdef CONFIG_SCHED_AUTOGROUP | 1956 | #ifdef CONFIG_SCHED_AUTOGROUP |
1951 | extern unsigned int sysctl_sched_autogroup_enabled; | 1957 | extern unsigned int sysctl_sched_autogroup_enabled; |
1952 | 1958 | ||
@@ -1977,6 +1983,7 @@ static inline int rt_mutex_getprio(struct task_struct *p) | |||
1977 | # define rt_mutex_adjust_pi(p) do { } while (0) | 1983 | # define rt_mutex_adjust_pi(p) do { } while (0) |
1978 | #endif | 1984 | #endif |
1979 | 1985 | ||
1986 | extern bool yield_to(struct task_struct *p, bool preempt); | ||
1980 | extern void set_user_nice(struct task_struct *p, long nice); | 1987 | extern void set_user_nice(struct task_struct *p, long nice); |
1981 | extern int task_prio(const struct task_struct *p); | 1988 | extern int task_prio(const struct task_struct *p); |
1982 | extern int task_nice(const struct task_struct *p); | 1989 | extern int task_nice(const struct task_struct *p); |
@@ -2049,7 +2056,7 @@ extern void release_uids(struct user_namespace *ns); | |||
2049 | 2056 | ||
2050 | #include <asm/current.h> | 2057 | #include <asm/current.h> |
2051 | 2058 | ||
2052 | extern void do_timer(unsigned long ticks); | 2059 | extern void xtime_update(unsigned long ticks); |
2053 | 2060 | ||
2054 | extern int wake_up_state(struct task_struct *tsk, unsigned int state); | 2061 | extern int wake_up_state(struct task_struct *tsk, unsigned int state); |
2055 | extern int wake_up_process(struct task_struct *tsk); | 2062 | extern int wake_up_process(struct task_struct *tsk); |
@@ -2578,13 +2585,6 @@ static inline void inc_syscw(struct task_struct *tsk) | |||
2578 | #define TASK_SIZE_OF(tsk) TASK_SIZE | 2585 | #define TASK_SIZE_OF(tsk) TASK_SIZE |
2579 | #endif | 2586 | #endif |
2580 | 2587 | ||
2581 | /* | ||
2582 | * Call the function if the target task is executing on a CPU right now: | ||
2583 | */ | ||
2584 | extern void task_oncpu_function_call(struct task_struct *p, | ||
2585 | void (*func) (void *info), void *info); | ||
2586 | |||
2587 | |||
2588 | #ifdef CONFIG_MM_OWNER | 2588 | #ifdef CONFIG_MM_OWNER |
2589 | extern void mm_update_next_owner(struct mm_struct *mm); | 2589 | extern void mm_update_next_owner(struct mm_struct *mm); |
2590 | extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); | 2590 | extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); |