Diffstat (limited to 'kernel/sched/sched.h')
 kernel/sched/sched.h | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 52 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b3c5653e1dca..88c85b21d633 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -6,6 +6,7 @@
 #include <linux/spinlock.h>
 #include <linux/stop_machine.h>
 #include <linux/tick.h>
+#include <linux/slab.h>
 
 #include "cpupri.h"
 #include "cpuacct.h"
@@ -408,6 +409,10 @@ struct rq {
 	 * remote CPUs use both these fields when doing load calculation.
 	 */
 	unsigned int nr_running;
+#ifdef CONFIG_NUMA_BALANCING
+	unsigned int nr_numa_running;
+	unsigned int nr_preferred_running;
+#endif
 #define CPU_LOAD_IDX_MAX 5
 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 	unsigned long last_load_update_tick;
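
Note: the two new counters let the load balancer classify a runqueue by how
many of its tasks participate in NUMA balancing and how many already run on
their preferred node. A sketch of the accounting on enqueue/dequeue, per the
fair.c side of this series (numa_preferred_nid and task_node() are assumed
from the same patch set):

#ifdef CONFIG_NUMA_BALANCING
/* Illustrative sketch: keep the counters in sync as tasks (de)queue. */
static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
	rq->nr_numa_running += (p->numa_preferred_nid != -1);
	rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
}

static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
	rq->nr_numa_running -= (p->numa_preferred_nid != -1);
	rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
}
#endif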
@@ -476,6 +481,9 @@ struct rq {
 	u64 age_stamp;
 	u64 idle_stamp;
 	u64 avg_idle;
+
+	/* This is used to determine avg_idle's max value */
+	u64 max_idle_balance_cost;
 #endif
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
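
Note: the new field caps rq->avg_idle so a CPU does not keep attempting
newidle balance passes that cost more than the idle time they could recover.
A sketch of the clamp applied on wakeup, per the matching core.c change in
this series (update_avg() is the existing averaging helper):

	if (rq->idle_stamp) {
		u64 delta = rq_clock(rq) - rq->idle_stamp;
		u64 max = 2*rq->max_idle_balance_cost;

		update_avg(&rq->avg_idle, delta);

		/* Never let avg_idle grow far beyond a balance pass's cost. */
		if (rq->avg_idle > max)
			rq->avg_idle = max;

		rq->idle_stamp = 0;
	}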
@@ -552,6 +560,12 @@ static inline u64 rq_clock_task(struct rq *rq)
 	return rq->clock_task;
 }
 
+#ifdef CONFIG_NUMA_BALANCING
+extern void sched_setnuma(struct task_struct *p, int node);
+extern int migrate_task_to(struct task_struct *p, int cpu);
+extern int migrate_swap(struct task_struct *, struct task_struct *);
+#endif /* CONFIG_NUMA_BALANCING */
+
 #ifdef CONFIG_SMP
 
 #define rcu_dereference_check_sched_domain(p) \
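
Note: these helpers back the NUMA placement code in fair.c: a task is either
moved to a better CPU outright or swapped with a task already there when both
ends benefit. A rough usage sketch (the env.best_task/env.best_cpu names
follow the task_numa_migrate() code in this series; treat them as
illustrative):

	/* After evaluating candidate CPUs on the preferred node: */
	if (env.best_task == NULL)
		return migrate_task_to(p, env.best_cpu);	/* one-way move */

	return migrate_swap(p, env.best_task);			/* swap both tasks */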
@@ -593,9 +607,24 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
 	return hsd;
 }
 
+static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
+{
+	struct sched_domain *sd;
+
+	for_each_domain(cpu, sd) {
+		if (sd->flags & flag)
+			break;
+	}
+
+	return sd;
+}
+
 DECLARE_PER_CPU(struct sched_domain *, sd_llc);
 DECLARE_PER_CPU(int, sd_llc_size);
 DECLARE_PER_CPU(int, sd_llc_id);
+DECLARE_PER_CPU(struct sched_domain *, sd_numa);
+DECLARE_PER_CPU(struct sched_domain *, sd_busy);
+DECLARE_PER_CPU(struct sched_domain *, sd_asym);
 
 struct sched_group_power {
 	atomic_t ref;
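
Note: lowest_flag_domain() mirrors highest_flag_domain() above, but returns
the first (smallest) domain with the flag set, walking up from the CPU. The
new per-CPU pointers are cached once per topology update rather than being
recomputed in hot paths; a sketch of how they might be populated in
update_top_cache_domain(), based on the core.c side of this series:

	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
	if (sd)
		busy_sd = sd->parent;	/* domain just above the LLC */
	rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);

	sd = lowest_flag_domain(cpu, SD_NUMA);
	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);

	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
	rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);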
@@ -605,6 +634,7 @@ struct sched_group_power {
 	 */
 	unsigned int power, power_orig;
 	unsigned long next_update;
+	int imbalance; /* XXX unrelated to power but shared group state */
 	/*
 	 * Number of busy cpus in this group.
 	 */
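
Note: as the XXX comment admits, the flag lives in sched_group_power only
because that is the per-group state shared across the domain hierarchy. It is
set when affinity pinning defeats a balance attempt and cleared once balance
succeeds; roughly, per the load_balance() change in this series (field and
flag names shown as a sketch):

	/* load_balance(): remember that this group could not be balanced. */
	int *group_imbalance = &sd_parent->groups->sgp->imbalance;

	if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
		*group_imbalance = 1;
	else if (*group_imbalance)
		*group_imbalance = 0;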
@@ -719,6 +749,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 	 */
 	smp_wmb();
 	task_thread_info(p)->cpu = cpu;
+	p->wake_cpu = cpu;
 #endif
 }
 
@@ -974,7 +1005,7 @@ struct sched_class {
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
+	int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
 	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
 
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
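
Note: the two hunks above work together: __set_task_cpu() now mirrors the
chosen CPU into p->wake_cpu, and select_task_rq() takes an explicit starting
CPU instead of always using task_cpu(p). The wakeup path then reads, per the
matching core.c change in this series:

	/* try_to_wake_up(): start placement from where the task last ran. */
	cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);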
@@ -1220,6 +1251,24 @@ static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
 	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 }
 
+static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
+{
+	if (l1 > l2)
+		swap(l1, l2);
+
+	spin_lock(l1);
+	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
+}
+
+static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
+{
+	if (l1 > l2)
+		swap(l1, l2);
+
+	raw_spin_lock(l1);
+	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
+}
+
 /*
  * double_rq_lock - safely lock two runqueues
  *
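
Note: both helpers impose a single global lock order by pointer address: the
lower-addressed lock is always taken first, so two CPUs locking the same pair
in opposite argument order cannot ABBA-deadlock, and the *_nested() call with
SINGLE_DEPTH_NESTING tells lockdep the second acquisition is intentional. In
this series the raw variant serves migrate_swap(), which must pin both tasks
before exchanging their CPUs; a sketch of that call site:

	/* migrate_swap_stop(): take both pi_locks in address order,
	 * then both runqueue locks. */
	double_raw_lock(&arg->src_task->pi_lock, &arg->dst_task->pi_lock);
	double_rq_lock(src_rq, dst_rq);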
@@ -1305,7 +1354,8 @@ extern void print_rt_stats(struct seq_file *m, int cpu);
 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
 extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
 
-extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
+extern void cfs_bandwidth_usage_inc(void);
+extern void cfs_bandwidth_usage_dec(void);
 
 #ifdef CONFIG_NO_HZ_COMMON
 enum rq_nohz_flag_bits {
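
Note: the old account_cfs_bandwidth_used(enabled, was_enabled) interface let
concurrent callers race on the enabled/was_enabled snapshot; the inc/dec pair
makes the underlying static key refcounted instead. The fair.c side is
essentially (per the matching change in this series):

void cfs_bandwidth_usage_inc(void)
{
	static_key_slow_inc(&__cfs_bandwidth_used);
}

void cfs_bandwidth_usage_dec(void)
{
	static_key_slow_dec(&__cfs_bandwidth_used);
}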
