author		Ingo Molnar <mingo@kernel.org>	2012-04-14 07:18:27 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-04-14 07:19:04 -0400
commit		6ac1ef482d7ae0c690f1640bf6eb818ff9a2d91e (patch)
tree		021cc9f6b477146fcebe6f3be4752abfa2ba18a9 /include/linux/sched.h
parent		682968e0c425c60f0dde37977e5beb2b12ddc4cc (diff)
parent		a385ec4f11bdcf81af094c03e2444ee9b7fad2e5 (diff)
Merge branch 'perf/core' into perf/uprobes
Merge in latest upstream (and the latest perf development tree), to prepare
for tooling changes, and also to pick up v3.4 MM changes that the uprobes
code needs to take care of.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	| 72
1 file changed, 54 insertions(+), 18 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8379e3771690..cff94cda34b2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -63,7 +63,6 @@ struct sched_param {
 #include <linux/nodemask.h>
 #include <linux/mm_types.h>
 
-#include <asm/system.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
 #include <asm/cputime.h>
@@ -361,6 +360,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
+extern void schedule_preempt_disabled(void);
 extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
 
 struct nsproxy;
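The newly declared schedule_preempt_disabled() wraps the common "schedule
while holding a preempt count" pattern. A minimal sketch of the expected
behaviour, assuming the conventional implementation (the real body lives in
the scheduler core, not in this header):

	/*
	 * Sketch only: enter the scheduler with preemption already
	 * disabled, and return with preemption disabled again.
	 */
	void __sched schedule_preempt_disabled(void)
	{
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}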
@@ -552,6 +552,18 @@ struct signal_struct {
 	int			group_stop_count;
 	unsigned int		flags; /* see SIGNAL_* flags below */
 
+	/*
+	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
+	 * manager, to re-parent orphan (double-forking) child processes
+	 * to this process instead of 'init'. The service manager is
+	 * able to receive SIGCHLD signals and is able to investigate
+	 * the process until it calls wait(). All children of this
+	 * process will inherit a flag if they should look for a
+	 * child_subreaper process at exit.
+	 */
+	unsigned int		is_child_subreaper:1;
+	unsigned int		has_child_subreaper:1;
+
 	/* POSIX.1b Interval Timers */
 	struct list_head posix_timers;
 
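The comment above describes the kernel side; userspace opts in through
prctl(2). A minimal sketch of a service manager marking itself as a
subreaper (error handling trimmed to a perror):

	#include <stdio.h>
	#include <sys/prctl.h>

	#ifndef PR_SET_CHILD_SUBREAPER	/* new in v3.4; older headers lack it */
	#define PR_SET_CHILD_SUBREAPER 36
	#endif

	int main(void)
	{
		/*
		 * Mark this process as a child subreaper: orphaned
		 * descendants are re-parented here instead of to init,
		 * so a waitpid() loop can still reap them.
		 */
		if (prctl(PR_SET_CHILD_SUBREAPER, 1) != 0)
			perror("prctl");
		return 0;
	}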
@@ -905,6 +917,7 @@ struct sched_group_power {
  * single CPU.
  */
 	unsigned int power, power_orig;
+	unsigned long next_update;
 	/*
 	 * Number of busy cpus in this group.
 	 */
@@ -1052,6 +1065,8 @@ static inline int test_sd_parent(struct sched_domain *sd, int flag)
 unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
 unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
 
+bool cpus_share_cache(int this_cpu, int that_cpu);
+
 #else /* CONFIG_SMP */
 
 struct sched_domain_attr;
@@ -1061,6 +1076,12 @@ partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 			struct sched_domain_attr *dattr_new)
 {
 }
+
+static inline bool cpus_share_cache(int this_cpu, int that_cpu)
+{
+	return true;
+}
+
 #endif	/* !CONFIG_SMP */
 
 
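cpus_share_cache() gives callers a cheap cache-topology test, and the !SMP
stub above can honestly answer "true" since a single CPU trivially shares its
own cache. A hypothetical caller, for illustration only (pick_wake_cpu() is
not an existing kernel function):

	/*
	 * Hypothetical sketch: prefer the task's previous CPU when it
	 * shares a last-level cache with the waking CPU, since the
	 * task's working set is likely still warm there.
	 */
	static int pick_wake_cpu(int waker_cpu, int prev_cpu)
	{
		if (cpus_share_cache(waker_cpu, prev_cpu))
			return prev_cpu;
		return waker_cpu;
	}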
@@ -1225,6 +1246,12 @@ struct sched_rt_entity {
 #endif
 };
 
+/*
+ * default timeslice is 100 msecs (used only for SCHED_RR tasks).
+ * Timeslices get refilled after they expire.
+ */
+#define RR_TIMESLICE		(100 * HZ / 1000)
+
 struct rcu_node;
 
 enum perf_event_task_context {
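The macro arithmetic converts milliseconds to jiffies: with HZ=1000 it yields
100 ticks, with HZ=250 it yields 25. Userspace can read back the effective
SCHED_RR quantum through the existing sched_rr_get_interval(2) call, as in
this sketch:

	#include <stdio.h>
	#include <sched.h>

	int main(void)
	{
		struct timespec ts;

		/*
		 * Query the SCHED_RR timeslice of the calling process
		 * (pid 0); on kernels with this change it should report
		 * roughly 100 ms.
		 */
		if (sched_rr_get_interval(0, &ts) == 0)
			printf("RR timeslice: %ld.%09ld s\n",
			       (long)ts.tv_sec, ts.tv_nsec);
		return 0;
	}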
@@ -1319,6 +1346,11 @@ struct task_struct {
 	unsigned sched_reset_on_fork:1;
 	unsigned sched_contributes_to_load:1;
 
+#ifdef CONFIG_GENERIC_HARDIRQS
+	/* IRQ handler threads */
+	unsigned irq_thread:1;
+#endif
+
 	pid_t pid;
 	pid_t tgid;
 
@@ -1427,11 +1459,6 @@ struct task_struct {
 				  * mempolicy */
 	spinlock_t alloc_lock;
 
-#ifdef CONFIG_GENERIC_HARDIRQS
-	/* IRQ handler threads */
-	struct irqaction *irqaction;
-#endif
-
 	/* Protection of the PI data structures: */
 	raw_spinlock_t pi_lock;
 
@@ -1498,7 +1525,7 @@ struct task_struct {
 #endif
 #ifdef CONFIG_CPUSETS
 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
-	int mems_allowed_change_disable;
+	seqcount_t mems_allowed_seq;	/* Seqence no to catch updates */
 	int cpuset_mem_spread_rotor;
 	int cpuset_slab_spread_rotor;
 #endif
@@ -1781,7 +1808,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 /*
  * Per process flags
  */
-#define PF_STARTING	0x00000002	/* being created */
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
@@ -1868,8 +1894,7 @@ extern void task_clear_jobctl_pending(struct task_struct *task,
 #ifdef CONFIG_PREEMPT_RCU
 
 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
-#define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */
-#define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. */
+#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
@@ -2053,7 +2078,7 @@ extern void sched_autogroup_fork(struct signal_struct *sig);
 extern void sched_autogroup_exit(struct signal_struct *sig);
 #ifdef CONFIG_PROC_FS
 extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
-extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice);
+extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
 #endif
 #else
 static inline void sched_autogroup_create_attach(struct task_struct *p) { }
@@ -2070,12 +2095,20 @@ extern unsigned int sysctl_sched_cfs_bandwidth_slice;
 extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
 extern void rt_mutex_adjust_pi(struct task_struct *p);
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+	return tsk->pi_blocked_on != NULL;
+}
 #else
 static inline int rt_mutex_getprio(struct task_struct *p)
 {
 	return p->normal_prio;
 }
 # define rt_mutex_adjust_pi(p)		do { } while (0)
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+	return false;
+}
 #endif
 
 extern bool yield_to(struct task_struct *p, bool preempt);
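tsk_is_pi_blocked() lets generic code ask whether a task is currently blocked
on an rtmutex; pi_blocked_on is only maintained under CONFIG_RT_MUTEXES, hence
the stub that returns false. A hedged example of the kind of guard it enables
(the surrounding function is illustrative, not an existing kernel function):

	/*
	 * Illustrative sketch: skip optional bookkeeping while a task
	 * is blocked on an rtmutex, where taking further locks could
	 * interfere with the priority-inheritance chain walk.
	 */
	static void maybe_do_housekeeping(struct task_struct *tsk)
	{
		if (tsk_is_pi_blocked(tsk))
			return;
		/* ... safe to do extra work here ... */
	}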
@@ -2375,7 +2408,7 @@ static inline int thread_group_empty(struct task_struct *p)
  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
  * pins the final release of task.io_context.  Also protects ->cpuset and
- * ->cgroup.subsys[].
+ * ->cgroup.subsys[]. And ->vfork_done.
  *
  * Nests both inside and outside of read_lock(&tasklist_lock).
  * It must not be nested with write_lock_irq(&tasklist_lock),
@@ -2394,12 +2427,15 @@ static inline void task_unlock(struct task_struct *p)
 extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
 							unsigned long *flags);
 
-#define lock_task_sighand(tsk, flags)					\
-({	struct sighand_struct *__ss;					\
-	__cond_lock(&(tsk)->sighand->siglock,				\
-		    (__ss = __lock_task_sighand(tsk, flags)));		\
-	__ss;								\
-})								\
+static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+						       unsigned long *flags)
+{
+	struct sighand_struct *ret;
+
+	ret = __lock_task_sighand(tsk, flags);
+	(void)__cond_lock(&tsk->sighand->siglock, ret);
+	return ret;
+}
 
 static inline void unlock_task_sighand(struct task_struct *tsk,
 						unsigned long *flags)
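Converting the macro to a static inline keeps the __cond_lock() annotation
for sparse while adding real type checking; the calling convention is
unchanged. A sketch of the usual pattern (inspect_signals() is illustrative,
not an existing kernel function):

	static void inspect_signals(struct task_struct *task)
	{
		unsigned long flags;

		if (!lock_task_sighand(task, &flags))
			return;		/* task is exiting; ->sighand is gone */

		/* ... examine task->signal under siglock here ... */

		unlock_task_sighand(task, &flags);
	}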