author    Linus Torvalds <torvalds@linux-foundation.org>  2014-06-03 17:00:15 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-06-03 17:00:15 -0400
commit    c84a1e32ee58fc1cc9d3fd42619b917cce67e30a
tree      d3e5bed273f747e7c9e399864219bea76f4c30ea /arch/arm/kernel
parent    3d521f9151dacab566904d1f57dcb3e7080cdd8f
parent    096aa33863a5e48de52d2ff30e0801b7487944f4
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull scheduler updates from Ingo Molnar:
 "The main scheduling related changes in this cycle were:

   - various sched/numa updates, for better performance

   - tree wide cleanup of open coded nice levels

   - nohz fix related to rq->nr_running use

   - cpuidle changes and continued consolidation to improve the
     kernel/sched/idle.c high level idle scheduling logic.  As part of
     this effort I pulled cpuidle driver changes from Rafael as well.

   - standardized idle polling amongst architectures

   - continued work on preparing better power/energy aware scheduling

   - sched/rt updates

   - misc fixlets and cleanups"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (49 commits)
  sched/numa: Decay ->wakee_flips instead of zeroing
  sched/numa: Update migrate_improves/degrades_locality()
  sched/numa: Allow task switch if load imbalance improves
  sched/rt: Fix 'struct sched_dl_entity' and dl_task_time() comments, to match the current upstream code
  sched: Consolidate open coded implementations of nice level frobbing into nice_to_rlimit() and rlimit_to_nice()
  sched: Initialize rq->age_stamp on processor start
  sched, nohz: Change rq->nr_running to always use wrappers
  sched: Fix the rq->next_balance logic in rebalance_domains() and idle_balance()
  sched: Use clamp() and clamp_val() to make sys_nice() more readable
  sched: Do not zero sg->cpumask and sg->sgp->power in build_sched_groups()
  sched/numa: Fix initialization of sched_domain_topology for NUMA
  sched: Call select_idle_sibling() when not affine_sd
  sched: Simplify return logic in sched_read_attr()
  sched: Simplify return logic in sched_copy_attr()
  sched: Fix exec_start/task_hot on migrated tasks
  arm64: Remove TIF_POLLING_NRFLAG
  metag: Remove TIF_POLLING_NRFLAG
  sched/idle: Make cpuidle_idle_call() void
  sched/idle: Reflow cpuidle_idle_call()
  sched/idle: Delay clearing the polling bit
  ...
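One of the commits pulled above, "sched: Consolidate open coded implementations of nice level frobbing into nice_to_rlimit() and rlimit_to_nice()", replaces repeated ad hoc arithmetic with two helpers. As an illustrative sketch of what that conversion amounts to, assuming the usual MAX_NICE of 19 and the RLIMIT_NICE convention that rlimit values 1..40 correspond to nice 19..-20 (this mirrors the upstream helpers but is not quoted from the patch):

/*
 * Sketch of the nice <-> RLIMIT_NICE conversion the consolidation
 * commit factors out; assumes MAX_NICE == 19.
 */
#define MAX_NICE	19

static inline long nice_to_rlimit(long nice)
{
	return MAX_NICE - nice + 1;	/* nice -20 -> rlimit 40, nice 19 -> rlimit 1 */
}

static inline long rlimit_to_nice(long prio)
{
	return MAX_NICE - prio + 1;	/* exact inverse of the above */
}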
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/topology.c  26
1 file changed, 26 insertions, 0 deletions
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 0bc94b1fd1ae..71e1fec6d31a 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -185,6 +185,15 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
 	return &cpu_topology[cpu].core_sibling;
 }
 
+/*
+ * The current assumption is that we can power gate each core independently.
+ * This will be superseded by DT binding once available.
+ */
+const struct cpumask *cpu_corepower_mask(int cpu)
+{
+	return &cpu_topology[cpu].thread_sibling;
+}
+
 static void update_siblings_masks(unsigned int cpuid)
 {
 	struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
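An aside on the hunk above: cpu_corepower_mask() returns the thread_sibling mask, encoding the stated assumption that each core can be power gated independently. A hypothetical debug helper that walks the resulting domains (invented here for illustration, not part of the patch) could look like:

/*
 * Hypothetical helper, not in the patch: print which CPUs share a
 * power-gating domain according to cpu_corepower_mask() above.
 */
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/printk.h>

static void __init dump_corepower_domains(void)
{
	unsigned int cpu, sibling;

	for_each_possible_cpu(cpu) {
		pr_info("CPU%u power domain:", cpu);
		for_each_cpu(sibling, cpu_corepower_mask(cpu))
			pr_cont(" %u", sibling);
		pr_cont("\n");
	}
}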
@@ -266,6 +275,20 @@ void store_cpu_topology(unsigned int cpuid)
 		cpu_topology[cpuid].socket_id, mpidr);
 }
 
+static inline const int cpu_corepower_flags(void)
+{
+	return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
+}
+
+static struct sched_domain_topology_level arm_topology[] = {
+#ifdef CONFIG_SCHED_MC
+	{ cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) },
+	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
+#endif
+	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
+	{ NULL, },
+};
+
 /*
  * init_cpu_topology is called at boot when only one cpu is running
  * which prevent simultaneous write access to cpu_topology array
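For orientation, the initializers in arm_topology[] above fill in the scheduler's per-level descriptor. Its approximate shape in kernels of this era, paraphrased from include/linux/sched.h rather than quoted, is:

/* Approximate, paraphrased shape of the per-level descriptor. */
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef const int (*sched_domain_flags_f)(void);

struct sched_domain_topology_level {
	sched_domain_mask_f	mask;		/* e.g. cpu_corepower_mask */
	sched_domain_flags_f	sd_flags;	/* e.g. cpu_corepower_flags */
	int			flags;
	int			numa_level;
	struct sd_data		data;
#ifdef CONFIG_SCHED_DEBUG
	char			*name;		/* filled in by SD_INIT_NAME() */
#endif
};

SD_INIT_NAME(GMC) expands to .name = "GMC" when CONFIG_SCHED_DEBUG is enabled, and to nothing otherwise, so the level names only exist in debug builds.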
@@ -289,4 +312,7 @@ void __init init_cpu_topology(void)
 	smp_wmb();
 
 	parse_dt_topology();
+
+	/* Set scheduler topology descriptor */
+	set_sched_topology(arm_topology);
 }
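On the core-scheduler side, set_sched_topology() is little more than a pointer assignment that build_sched_domains() consults later; roughly (paraphrased, not verbatim):

/* Paraphrased sketch of the consumer side in kernel/sched/core.c. */
static struct sched_domain_topology_level *sched_domain_topology;

void set_sched_topology(struct sched_domain_topology_level *tl)
{
	sched_domain_topology = tl;	/* read later by build_sched_domains() */
}

Because the call sits in init_cpu_topology(), which runs while only one CPU is up, the ARM-specific table is in place before the scheduler builds its domains, so the GMC/MC levels take effect on the first domain build.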