diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-03 17:00:15 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-03 17:00:15 -0400 |
commit | c84a1e32ee58fc1cc9d3fd42619b917cce67e30a (patch) | |
tree | d3e5bed273f747e7c9e399864219bea76f4c30ea /include/linux/sched.h | |
parent | 3d521f9151dacab566904d1f57dcb3e7080cdd8f (diff) | |
parent | 096aa33863a5e48de52d2ff30e0801b7487944f4 (diff) |
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull scheduler updates from Ingo Molnar:
"The main scheduling related changes in this cycle were:
- various sched/numa updates, for better performance
- tree wide cleanup of open coded nice levels
- nohz fix related to rq->nr_running use
- cpuidle changes and continued consolidation to improve the
kernel/sched/idle.c high level idle scheduling logic. As part of
this effort I pulled cpuidle driver changes from Rafael as well.
- standardized idle polling amongst architectures
- continued work on preparing better power/energy aware scheduling
- sched/rt updates
- misc fixlets and cleanups"
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (49 commits)
sched/numa: Decay ->wakee_flips instead of zeroing
sched/numa: Update migrate_improves/degrades_locality()
sched/numa: Allow task switch if load imbalance improves
sched/rt: Fix 'struct sched_dl_entity' and dl_task_timer() comments, to match the current upstream code
sched: Consolidate open coded implementations of nice level frobbing into nice_to_rlimit() and rlimit_to_nice()
sched: Initialize rq->age_stamp on processor start
sched, nohz: Change rq->nr_running to always use wrappers
sched: Fix the rq->next_balance logic in rebalance_domains() and idle_balance()
sched: Use clamp() and clamp_val() to make sys_nice() more readable
sched: Do not zero sg->cpumask and sg->sgp->power in build_sched_groups()
sched/numa: Fix initialization of sched_domain_topology for NUMA
sched: Call select_idle_sibling() when not affine_sd
sched: Simplify return logic in sched_read_attr()
sched: Simplify return logic in sched_copy_attr()
sched: Fix exec_start/task_hot on migrated tasks
arm64: Remove TIF_POLLING_NRFLAG
metag: Remove TIF_POLLING_NRFLAG
sched/idle: Make cpuidle_idle_call() void
sched/idle: Reflow cpuidle_idle_call()
sched/idle: Delay clearing the polling bit
...
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r-- | include/linux/sched.h | 104 |
1 file changed, 57 insertions, 47 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index 4dce5d844b74..70f67e4e6156 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -870,6 +870,7 @@ enum cpu_idle_type { | |||
870 | #define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ | 870 | #define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ |
871 | #define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ | 871 | #define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ |
872 | #define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */ | 872 | #define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */ |
873 | #define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */ | ||
873 | #define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ | 874 | #define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ |
874 | #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ | 875 | #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ |
875 | #define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ | 876 | #define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ |
@@ -877,7 +878,26 @@ enum cpu_idle_type { | |||
877 | #define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ | 878 | #define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ |
878 | #define SD_NUMA 0x4000 /* cross-node balancing */ | 879 | #define SD_NUMA 0x4000 /* cross-node balancing */ |
879 | 880 | ||
880 | extern int __weak arch_sd_sibiling_asym_packing(void); | 881 | #ifdef CONFIG_SCHED_SMT |
882 | static inline const int cpu_smt_flags(void) | ||
883 | { | ||
884 | return SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES; | ||
885 | } | ||
886 | #endif | ||
887 | |||
888 | #ifdef CONFIG_SCHED_MC | ||
889 | static inline const int cpu_core_flags(void) | ||
890 | { | ||
891 | return SD_SHARE_PKG_RESOURCES; | ||
892 | } | ||
893 | #endif | ||
894 | |||
895 | #ifdef CONFIG_NUMA | ||
896 | static inline const int cpu_numa_flags(void) | ||
897 | { | ||
898 | return SD_NUMA; | ||
899 | } | ||
900 | #endif | ||
881 | 901 | ||
882 | struct sched_domain_attr { | 902 | struct sched_domain_attr { |
883 | int relax_domain_level; | 903 | int relax_domain_level; |
@@ -985,6 +1005,38 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); | |||
985 | 1005 | ||
986 | bool cpus_share_cache(int this_cpu, int that_cpu); | 1006 | bool cpus_share_cache(int this_cpu, int that_cpu); |
987 | 1007 | ||
1008 | typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); | ||
1009 | typedef const int (*sched_domain_flags_f)(void); | ||
1010 | |||
1011 | #define SDTL_OVERLAP 0x01 | ||
1012 | |||
1013 | struct sd_data { | ||
1014 | struct sched_domain **__percpu sd; | ||
1015 | struct sched_group **__percpu sg; | ||
1016 | struct sched_group_power **__percpu sgp; | ||
1017 | }; | ||
1018 | |||
1019 | struct sched_domain_topology_level { | ||
1020 | sched_domain_mask_f mask; | ||
1021 | sched_domain_flags_f sd_flags; | ||
1022 | int flags; | ||
1023 | int numa_level; | ||
1024 | struct sd_data data; | ||
1025 | #ifdef CONFIG_SCHED_DEBUG | ||
1026 | char *name; | ||
1027 | #endif | ||
1028 | }; | ||
1029 | |||
1030 | extern struct sched_domain_topology_level *sched_domain_topology; | ||
1031 | |||
1032 | extern void set_sched_topology(struct sched_domain_topology_level *tl); | ||
1033 | |||
1034 | #ifdef CONFIG_SCHED_DEBUG | ||
1035 | # define SD_INIT_NAME(type) .name = #type | ||
1036 | #else | ||
1037 | # define SD_INIT_NAME(type) | ||
1038 | #endif | ||
1039 | |||
988 | #else /* CONFIG_SMP */ | 1040 | #else /* CONFIG_SMP */ |
989 | 1041 | ||
990 | struct sched_domain_attr; | 1042 | struct sched_domain_attr; |
@@ -1123,8 +1175,8 @@ struct sched_dl_entity { | |||
1123 | 1175 | ||
1124 | /* | 1176 | /* |
1125 | * Original scheduling parameters. Copied here from sched_attr | 1177 | * Original scheduling parameters. Copied here from sched_attr |
1126 | * during sched_setscheduler2(), they will remain the same until | 1178 | * during sched_setattr(), they will remain the same until |
1127 | * the next sched_setscheduler2(). | 1179 | * the next sched_setattr(). |
1128 | */ | 1180 | */ |
1129 | u64 dl_runtime; /* maximum runtime for each instance */ | 1181 | u64 dl_runtime; /* maximum runtime for each instance */ |
1130 | u64 dl_deadline; /* relative deadline of each instance */ | 1182 | u64 dl_deadline; /* relative deadline of each instance */ |
@@ -2723,51 +2775,9 @@ static inline int spin_needbreak(spinlock_t *lock) | |||
2723 | 2775 | ||
2724 | /* | 2776 | /* |
2725 | * Idle thread specific functions to determine the need_resched | 2777 | * Idle thread specific functions to determine the need_resched |
2726 | * polling state. We have two versions, one based on TS_POLLING in | 2778 | * polling state. |
2727 | * thread_info.status and one based on TIF_POLLING_NRFLAG in | ||
2728 | * thread_info.flags | ||
2729 | */ | 2779 | */ |
2730 | #ifdef TS_POLLING | 2780 | #ifdef TIF_POLLING_NRFLAG |
2731 | static inline int tsk_is_polling(struct task_struct *p) | ||
2732 | { | ||
2733 | return task_thread_info(p)->status & TS_POLLING; | ||
2734 | } | ||
2735 | static inline void __current_set_polling(void) | ||
2736 | { | ||
2737 | current_thread_info()->status |= TS_POLLING; | ||
2738 | } | ||
2739 | |||
2740 | static inline bool __must_check current_set_polling_and_test(void) | ||
2741 | { | ||
2742 | __current_set_polling(); | ||
2743 | |||
2744 | /* | ||
2745 | * Polling state must be visible before we test NEED_RESCHED, | ||
2746 | * paired by resched_task() | ||
2747 | */ | ||
2748 | smp_mb(); | ||
2749 | |||
2750 | return unlikely(tif_need_resched()); | ||
2751 | } | ||
2752 | |||
2753 | static inline void __current_clr_polling(void) | ||
2754 | { | ||
2755 | current_thread_info()->status &= ~TS_POLLING; | ||
2756 | } | ||
2757 | |||
2758 | static inline bool __must_check current_clr_polling_and_test(void) | ||
2759 | { | ||
2760 | __current_clr_polling(); | ||
2761 | |||
2762 | /* | ||
2763 | * Polling state must be visible before we test NEED_RESCHED, | ||
2764 | * paired by resched_task() | ||
2765 | */ | ||
2766 | smp_mb(); | ||
2767 | |||
2768 | return unlikely(tif_need_resched()); | ||
2769 | } | ||
2770 | #elif defined(TIF_POLLING_NRFLAG) | ||
2771 | static inline int tsk_is_polling(struct task_struct *p) | 2781 | static inline int tsk_is_polling(struct task_struct *p) |
2772 | { | 2782 | { |
2773 | return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG); | 2783 | return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG); |