Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  |  99
1 file changed, 52 insertions(+), 47 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b0ad6f30679e..0cfcd1c7865e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -34,6 +34,7 @@
 #include <linux/percpu.h>
 #include <linux/topology.h>
 #include <linux/seccomp.h>
+#include <linux/rcupdate.h>
 
 #include <linux/auxvec.h>	/* For AT_VECTOR_SIZE */
 
@@ -104,6 +105,7 @@ extern unsigned long nr_iowait(void);
 #include <linux/param.h>
 #include <linux/resource.h>
 #include <linux/timer.h>
+#include <linux/hrtimer.h>
 
 #include <asm/processor.h>
 
@@ -158,6 +160,7 @@ extern unsigned long nr_iowait(void);
 #define SCHED_NORMAL	0
 #define SCHED_FIFO	1
 #define SCHED_RR	2
+#define SCHED_BATCH	3
 
 struct sched_param {
 	int sched_priority;
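The new SCHED_BATCH policy (value 3) slots in beside the existing non-RT SCHED_NORMAL; batch tasks use priority 0 just like normal ones. A minimal userspace sketch of opting a process into it via sched_setscheduler(2), assuming a libc that exposes SCHED_BATCH (pid 0 means the calling process):

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		/* Non-RT policies require sched_priority == 0. */
		struct sched_param param = { .sched_priority = 0 };

		if (sched_setscheduler(0, SCHED_BATCH, &param) == -1) {
			perror("sched_setscheduler");
			return 1;
		}
		return 0;
	}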
@@ -254,25 +257,12 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
  * The mm counters are not protected by its page_table_lock,
  * so must be incremented atomically.
  */
-#ifdef ATOMIC64_INIT
-#define set_mm_counter(mm, member, value) atomic64_set(&(mm)->_##member, value)
-#define get_mm_counter(mm, member) ((unsigned long)atomic64_read(&(mm)->_##member))
-#define add_mm_counter(mm, member, value) atomic64_add(value, &(mm)->_##member)
-#define inc_mm_counter(mm, member) atomic64_inc(&(mm)->_##member)
-#define dec_mm_counter(mm, member) atomic64_dec(&(mm)->_##member)
-typedef atomic64_t mm_counter_t;
-#else /* !ATOMIC64_INIT */
-/*
- * The counters wrap back to 0 at 2^32 * PAGE_SIZE,
- * that is, at 16TB if using 4kB page size.
- */
-#define set_mm_counter(mm, member, value) atomic_set(&(mm)->_##member, value)
-#define get_mm_counter(mm, member) ((unsigned long)atomic_read(&(mm)->_##member))
-#define add_mm_counter(mm, member, value) atomic_add(value, &(mm)->_##member)
-#define inc_mm_counter(mm, member) atomic_inc(&(mm)->_##member)
-#define dec_mm_counter(mm, member) atomic_dec(&(mm)->_##member)
-typedef atomic_t mm_counter_t;
-#endif /* !ATOMIC64_INIT */
+#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
+#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
+#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
+#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
+#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
+typedef atomic_long_t mm_counter_t;
 
 #else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
 /*
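Switching to atomic_long_t collapses the separate 64-bit and 32-bit fallback definitions into one: atomic_long_t is machine-word sized on every architecture, so the ATOMIC64_INIT conditional disappears (32-bit builds keep the same word-sized wrap behavior as before). Caller usage is unchanged; an illustrative sketch, assuming an mm_counter_t field named _anon_rss as in the mm_struct of this era:

	/* Expands to atomic_long_inc(&mm->_anon_rss), and so on. */
	inc_mm_counter(mm, anon_rss);
	add_mm_counter(mm, anon_rss, -nr_freed);
	pages = get_mm_counter(mm, anon_rss);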
@@ -363,8 +353,16 @@ struct sighand_struct {
 	atomic_t		count;
 	struct k_sigaction	action[_NSIG];
 	spinlock_t		siglock;
+	struct rcu_head		rcu;
 };
 
+extern void sighand_free_cb(struct rcu_head *rhp);
+
+static inline void sighand_free(struct sighand_struct *sp)
+{
+	call_rcu(&sp->rcu, sighand_free_cb);
+}
+
 /*
  * NOTE! "signal_struct" does not have it's own
  * locking, because a shared signal_struct always
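struct sighand_struct gains an embedded rcu_head so frees can be deferred until concurrent RCU readers are done; sighand_free() simply queues the out-of-line callback declared above. A hedged sketch of the shape that callback takes, assuming the sighand_cachep slab cache from kernel/fork.c:

	/* Sketch: recover the enclosing sighand_struct from its rcu_head
	 * and return it to its slab cache once a grace period has elapsed. */
	void sighand_free_cb(struct rcu_head *rhp)
	{
		struct sighand_struct *sp;

		sp = container_of(rhp, struct sighand_struct, rcu);
		kmem_cache_free(sighand_cachep, sp);
	}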
@@ -402,8 +400,8 @@ struct signal_struct {
 	struct list_head posix_timers;
 
 	/* ITIMER_REAL timer for the process */
-	struct timer_list real_timer;
-	unsigned long it_real_value, it_real_incr;
+	struct hrtimer real_timer;
+	ktime_t it_real_incr;
 
 	/* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
 	cputime_t it_prof_expires, it_virt_expires;
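ITIMER_REAL moves from the jiffies-based timer_list to a high-resolution hrtimer, and the interval becomes a ktime_t rather than an unsigned long tick count. A hedged sketch of arming such a timer, using current mainline hrtimer API names (the 2.6.16-era spellings differ slightly) and a hypothetical expiry handler it_real_fn:

	hrtimer_init(&sig->real_timer, CLOCK_REALTIME, HRTIMER_MODE_REL);
	sig->real_timer.function = it_real_fn;	/* delivers SIGALRM */
	sig->it_real_incr = ktime_set(1, 0);	/* 1 s interval */
	hrtimer_start(&sig->real_timer, sig->it_real_incr, HRTIMER_MODE_REL);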
@@ -473,9 +471,9 @@ struct signal_struct {
 
 /*
  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
- * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL tasks are
- * in the range MAX_RT_PRIO..MAX_PRIO-1. Priority values
- * are inverted: lower p->prio value means higher priority.
+ * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
+ * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
+ * values are inverted: lower p->prio value means higher priority.
  *
  * The MAX_USER_RT_PRIO value allows the actual maximum
  * RT priority to be separate from the value exported to
@@ -634,7 +632,14 @@ struct sched_domain {
 
 extern void partition_sched_domains(cpumask_t *partition1,
 				    cpumask_t *partition2);
+
+/*
+ * Maximum cache size the migration-costs auto-tuning code will
+ * search from:
+ */
+extern unsigned int max_cache_size;
+
 #endif /* CONFIG_SMP */
 
 
 struct io_context;	/* See blkdev.h */
@@ -692,9 +697,12 @@ struct task_struct {
 
 	int lock_depth;		/* BKL lock depth */
 
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
+	int last_waker_cpu;	/* CPU that last woke this task up */
+#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	int oncpu;
 #endif
+#endif
 	int prio, static_prio;
 	struct list_head run_list;
 	prio_array_t *array;
@@ -775,6 +783,7 @@ struct task_struct {
 	unsigned keep_capabilities:1;
 	struct user_struct *user;
 #ifdef CONFIG_KEYS
+	struct key *request_key_auth;	/* assumed request_key authority */
 	struct key *thread_keyring;	/* keyring private to this thread */
 	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
 #endif
@@ -800,6 +809,7 @@ struct task_struct {
 	struct sighand_struct *sighand;
 
 	sigset_t blocked, real_blocked;
+	sigset_t saved_sigmask;		/* To be restored with TIF_RESTORE_SIGMASK */
 	struct sigpending pending;
 
 	unsigned long sas_ss_sp;
@@ -820,6 +830,11 @@ struct task_struct {
 /* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
 	spinlock_t proc_lock;
 
+#ifdef CONFIG_DEBUG_MUTEXES
+	/* mutex deadlock detection */
+	struct mutex_waiter *blocked_on;
+#endif
+
 /* journalling filesystem info */
 	void *journal_info;
 
@@ -857,6 +872,7 @@ struct task_struct {
 	int cpuset_mems_generation;
 #endif
 	atomic_t fs_excl;	/* holding fs exclusive resources */
+	struct rcu_head rcu;
 };
 
 static inline pid_t process_group(struct task_struct *tsk)
@@ -880,8 +896,14 @@ static inline int pid_alive(struct task_struct *p)
 extern void free_task(struct task_struct *tsk);
 extern void __put_task_struct(struct task_struct *tsk);
 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
-#define put_task_struct(tsk) \
-do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
+
+extern void __put_task_struct_cb(struct rcu_head *rhp);
+
+static inline void put_task_struct(struct task_struct *t)
+{
+	if (atomic_dec_and_test(&t->usage))
+		call_rcu(&t->rcu, __put_task_struct_cb);
+}
 
 /*
  * Per process flags
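Dropping the task_struct is now RCU-deferred as well: the old put_task_struct() macro freed synchronously on the final reference drop, while the new inline queues __put_task_struct_cb through the rcu_head added to task_struct above, so RCU-protected task lookups can never trip over a freed task. A hedged sketch of the callback's likely shape:

	/* Sketch: recover the task from its embedded rcu_head and hand it
	 * to the existing synchronous destructor after the grace period. */
	void __put_task_struct_cb(struct rcu_head *rhp)
	{
		struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

		__put_task_struct(tsk);
	}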
@@ -908,6 +930,7 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
 #define PF_SYNCWRITE	0x00200000	/* I am doing a sync write */
 #define PF_BORROWED_MM	0x00400000	/* I am a kthread doing use_mm */
 #define PF_RANDOMIZE	0x00800000	/* randomize virtual address space */
+#define PF_SWAPWRITE	0x01000000	/* Allowed to write to swap */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
@@ -1101,21 +1124,6 @@ static inline int sas_ss_flags(unsigned long sp)
 	     : on_sig_stack(sp) ? SS_ONSTACK : 0);
 }
 
-
-#ifdef CONFIG_SECURITY
-/* code is in security.c */
-extern int capable(int cap);
-#else
-static inline int capable(int cap)
-{
-	if (cap_raised(current->cap_effective, cap)) {
-		current->flags |= PF_SUPERPRIV;
-		return 1;
-	}
-	return 0;
-}
-#endif
-
 /*
  * Routines for handling mm_structs
  */
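The CONFIG_SECURITY-conditional definition of capable() leaves sched.h; the function (including the PF_SUPERPRIV bookkeeping visible in the removed inline) is now provided unconditionally by the capability code. Callers are untouched, since the usual idiom stays the same:

	/* Typical permission check, unchanged by the move: */
	if (!capable(CAP_SYS_NICE))
		return -EPERM;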
@@ -1234,6 +1242,7 @@ static inline void task_unlock(struct task_struct *p)
 #ifndef __HAVE_THREAD_FUNCTIONS
 
 #define task_thread_info(task) (task)->thread_info
+#define task_stack_page(task) ((void*)((task)->thread_info))
 
 static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
 {
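task_stack_page() makes explicit what this layout already implies: the thread_info sits at the base of the kernel stack page(s), so the two pointers alias. A hedged example of the kind of helper this enables, modeled on the stack_not_used() check of later kernels (assumes a downward-growing stack and a zero-filled unused region):

	/* Sketch: bytes still untouched between thread_info and the
	 * deepest point the stack has reached so far. */
	static unsigned long stack_headroom(struct task_struct *p)
	{
		unsigned long *n = task_stack_page(p) + sizeof(struct thread_info);

		while (!*n)	/* scan up past never-written words */
			n++;
		return (void *)n - task_stack_page(p);
	}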
@@ -1379,12 +1388,8 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
 extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
 extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
 
-#ifdef CONFIG_MAGIC_SYSRQ
-
 extern void normalize_rt_tasks(void);
 
-#endif
-
 #ifdef CONFIG_PM
 /*
  * Check if a process has been frozen