diff options
Diffstat (limited to 'include/linux/sched.h')
| -rw-r--r-- | include/linux/sched.h | 42 |
1 file changed, 14 insertions, 28 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index c5d3f847ca8d..835b6c6fcc56 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -134,7 +134,6 @@ extern unsigned long nr_running(void); | |||
| 134 | extern unsigned long nr_uninterruptible(void); | 134 | extern unsigned long nr_uninterruptible(void); |
| 135 | extern unsigned long nr_active(void); | 135 | extern unsigned long nr_active(void); |
| 136 | extern unsigned long nr_iowait(void); | 136 | extern unsigned long nr_iowait(void); |
| 137 | extern unsigned long weighted_cpuload(const int cpu); | ||
| 138 | 137 | ||
| 139 | struct seq_file; | 138 | struct seq_file; |
| 140 | struct cfs_rq; | 139 | struct cfs_rq; |
| @@ -784,6 +783,8 @@ struct sched_domain { | |||
| 784 | unsigned int balance_interval; /* initialise to 1. units in ms. */ | 783 | unsigned int balance_interval; /* initialise to 1. units in ms. */ |
| 785 | unsigned int nr_balance_failed; /* initialise to 0 */ | 784 | unsigned int nr_balance_failed; /* initialise to 0 */ |
| 786 | 785 | ||
| 786 | u64 last_update; | ||
| 787 | |||
| 787 | #ifdef CONFIG_SCHEDSTATS | 788 | #ifdef CONFIG_SCHEDSTATS |
| 788 | /* load_balance() stats */ | 789 | /* load_balance() stats */ |
| 789 | unsigned int lb_count[CPU_MAX_IDLE_TYPES]; | 790 | unsigned int lb_count[CPU_MAX_IDLE_TYPES]; |
| @@ -823,23 +824,6 @@ extern int arch_reinit_sched_domains(void); | |||
| 823 | 824 | ||
| 824 | #endif /* CONFIG_SMP */ | 825 | #endif /* CONFIG_SMP */ |
| 825 | 826 | ||
| 826 | /* | ||
| 827 | * A runqueue laden with a single nice 0 task scores a weighted_cpuload of | ||
| 828 | * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a | ||
| 829 | * task of nice 0 or enough lower priority tasks to bring up the | ||
| 830 | * weighted_cpuload | ||
| 831 | */ | ||
| 832 | static inline int above_background_load(void) | ||
| 833 | { | ||
| 834 | unsigned long cpu; | ||
| 835 | |||
| 836 | for_each_online_cpu(cpu) { | ||
| 837 | if (weighted_cpuload(cpu) >= SCHED_LOAD_SCALE) | ||
| 838 | return 1; | ||
| 839 | } | ||
| 840 | return 0; | ||
| 841 | } | ||
| 842 | |||
| 843 | struct io_context; /* See blkdev.h */ | 827 | struct io_context; /* See blkdev.h */ |
| 844 | #define NGROUPS_SMALL 32 | 828 | #define NGROUPS_SMALL 32 |
| 845 | #define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(gid_t))) | 829 | #define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(gid_t))) |
| @@ -921,8 +905,8 @@ struct sched_class { | |||
| 921 | void (*set_cpus_allowed)(struct task_struct *p, | 905 | void (*set_cpus_allowed)(struct task_struct *p, |
| 922 | const cpumask_t *newmask); | 906 | const cpumask_t *newmask); |
| 923 | 907 | ||
| 924 | void (*join_domain)(struct rq *rq); | 908 | void (*rq_online)(struct rq *rq); |
| 925 | void (*leave_domain)(struct rq *rq); | 909 | void (*rq_offline)(struct rq *rq); |
| 926 | 910 | ||
| 927 | void (*switched_from) (struct rq *this_rq, struct task_struct *task, | 911 | void (*switched_from) (struct rq *this_rq, struct task_struct *task, |
| 928 | int running); | 912 | int running); |
| @@ -1039,6 +1023,7 @@ struct task_struct { | |||
| 1039 | #endif | 1023 | #endif |
| 1040 | 1024 | ||
| 1041 | int prio, static_prio, normal_prio; | 1025 | int prio, static_prio, normal_prio; |
| 1026 | unsigned int rt_priority; | ||
| 1042 | const struct sched_class *sched_class; | 1027 | const struct sched_class *sched_class; |
| 1043 | struct sched_entity se; | 1028 | struct sched_entity se; |
| 1044 | struct sched_rt_entity rt; | 1029 | struct sched_rt_entity rt; |
| @@ -1122,7 +1107,6 @@ struct task_struct { | |||
| 1122 | int __user *set_child_tid; /* CLONE_CHILD_SETTID */ | 1107 | int __user *set_child_tid; /* CLONE_CHILD_SETTID */ |
| 1123 | int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ | 1108 | int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ |
| 1124 | 1109 | ||
| 1125 | unsigned int rt_priority; | ||
| 1126 | cputime_t utime, stime, utimescaled, stimescaled; | 1110 | cputime_t utime, stime, utimescaled, stimescaled; |
| 1127 | cputime_t gtime; | 1111 | cputime_t gtime; |
| 1128 | cputime_t prev_utime, prev_stime; | 1112 | cputime_t prev_utime, prev_stime; |
| @@ -1141,12 +1125,12 @@ struct task_struct { | |||
| 1141 | gid_t gid,egid,sgid,fsgid; | 1125 | gid_t gid,egid,sgid,fsgid; |
| 1142 | struct group_info *group_info; | 1126 | struct group_info *group_info; |
| 1143 | kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset; | 1127 | kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset; |
| 1144 | unsigned securebits; | ||
| 1145 | struct user_struct *user; | 1128 | struct user_struct *user; |
| 1129 | unsigned securebits; | ||
| 1146 | #ifdef CONFIG_KEYS | 1130 | #ifdef CONFIG_KEYS |
| 1131 | unsigned char jit_keyring; /* default keyring to attach requested keys to */ | ||
| 1147 | struct key *request_key_auth; /* assumed request_key authority */ | 1132 | struct key *request_key_auth; /* assumed request_key authority */ |
| 1148 | struct key *thread_keyring; /* keyring private to this thread */ | 1133 | struct key *thread_keyring; /* keyring private to this thread */ |
| 1149 | unsigned char jit_keyring; /* default keyring to attach requested keys to */ | ||
| 1150 | #endif | 1134 | #endif |
| 1151 | char comm[TASK_COMM_LEN]; /* executable name excluding path | 1135 | char comm[TASK_COMM_LEN]; /* executable name excluding path |
| 1152 | - access with [gs]et_task_comm (which lock | 1136 | - access with [gs]et_task_comm (which lock |
| @@ -1233,8 +1217,8 @@ struct task_struct { | |||
| 1233 | # define MAX_LOCK_DEPTH 48UL | 1217 | # define MAX_LOCK_DEPTH 48UL |
| 1234 | u64 curr_chain_key; | 1218 | u64 curr_chain_key; |
| 1235 | int lockdep_depth; | 1219 | int lockdep_depth; |
| 1236 | struct held_lock held_locks[MAX_LOCK_DEPTH]; | ||
| 1237 | unsigned int lockdep_recursion; | 1220 | unsigned int lockdep_recursion; |
| 1221 | struct held_lock held_locks[MAX_LOCK_DEPTH]; | ||
| 1238 | #endif | 1222 | #endif |
| 1239 | 1223 | ||
| 1240 | /* journalling filesystem info */ | 1224 | /* journalling filesystem info */ |
| @@ -1262,10 +1246,6 @@ struct task_struct { | |||
| 1262 | u64 acct_vm_mem1; /* accumulated virtual memory usage */ | 1246 | u64 acct_vm_mem1; /* accumulated virtual memory usage */ |
| 1263 | cputime_t acct_stimexpd;/* stime since last update */ | 1247 | cputime_t acct_stimexpd;/* stime since last update */ |
| 1264 | #endif | 1248 | #endif |
| 1265 | #ifdef CONFIG_NUMA | ||
| 1266 | struct mempolicy *mempolicy; | ||
| 1267 | short il_next; | ||
| 1268 | #endif | ||
| 1269 | #ifdef CONFIG_CPUSETS | 1249 | #ifdef CONFIG_CPUSETS |
| 1270 | nodemask_t mems_allowed; | 1250 | nodemask_t mems_allowed; |
| 1271 | int cpuset_mems_generation; | 1251 | int cpuset_mems_generation; |
| @@ -1285,6 +1265,10 @@ struct task_struct { | |||
| 1285 | struct list_head pi_state_list; | 1265 | struct list_head pi_state_list; |
| 1286 | struct futex_pi_state *pi_state_cache; | 1266 | struct futex_pi_state *pi_state_cache; |
| 1287 | #endif | 1267 | #endif |
| 1268 | #ifdef CONFIG_NUMA | ||
| 1269 | struct mempolicy *mempolicy; | ||
| 1270 | short il_next; | ||
| 1271 | #endif | ||
| 1288 | atomic_t fs_excl; /* holding fs exclusive resources */ | 1272 | atomic_t fs_excl; /* holding fs exclusive resources */ |
| 1289 | struct rcu_head rcu; | 1273 | struct rcu_head rcu; |
| 1290 | 1274 | ||
| @@ -1504,6 +1488,7 @@ static inline void put_task_struct(struct task_struct *t) | |||
| 1504 | #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ | 1488 | #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ |
| 1505 | #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ | 1489 | #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ |
| 1506 | #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ | 1490 | #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ |
| 1491 | #define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */ | ||
| 1507 | #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ | 1492 | #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ |
| 1508 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ | 1493 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ |
| 1509 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ | 1494 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ |
| @@ -1622,6 +1607,7 @@ extern unsigned int sysctl_sched_child_runs_first; | |||
| 1622 | extern unsigned int sysctl_sched_features; | 1607 | extern unsigned int sysctl_sched_features; |
| 1623 | extern unsigned int sysctl_sched_migration_cost; | 1608 | extern unsigned int sysctl_sched_migration_cost; |
| 1624 | extern unsigned int sysctl_sched_nr_migrate; | 1609 | extern unsigned int sysctl_sched_nr_migrate; |
| 1610 | extern unsigned int sysctl_sched_shares_ratelimit; | ||
| 1625 | 1611 | ||
| 1626 | int sched_nr_latency_handler(struct ctl_table *table, int write, | 1612 | int sched_nr_latency_handler(struct ctl_table *table, int write, |
| 1627 | struct file *file, void __user *buffer, size_t *length, | 1613 | struct file *file, void __user *buffer, size_t *length, |
