path: root/include/linux/topology.h
author	Mike Travis <travis@sgi.com>	2008-04-04 21:11:11 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-04-19 13:44:59 -0400
commit	7c16ec585c558960a508ccf9a08fcb9ed49b3754 (patch)
tree	cca2b12203a10944d7095a07df7292421f578dc9 /include/linux/topology.h
parent	c5f59f0833df945eef7ff35f3dc6ba61c5f293dd (diff)
cpumask: reduce stack usage in SD_x_INIT initializers
* Remove empty cpumask_t (and all non-zero/non-null) variables in
  SD_*_INIT macros.  Use memset(0) to clear.  Also, don't inline the
  initializer functions to save on stack space in build_sched_domains().

* Merge change to include/linux/topology.h that uses the new
  node_to_cpumask_ptr function in the nr_cpus_node macro into this patch.

Depends on:
	[mm-patch]: asm-generic-add-node_to_cpumask_ptr-macro.patch
	[sched-devel]: sched: add new set_cpus_allowed_ptr function

Cc: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
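
Why it is safe to drop the explicit zero/NULL fields from the SD_*_INIT initializers: a C designated initializer zero-initializes every member that is not named, so the trimmed macros yield the same struct contents as before. A minimal, self-contained sketch of that rule; the struct and field names below are illustrative stand-ins, not the real struct sched_domain:

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* Illustrative stand-in for struct sched_domain; the real fields differ. */
struct toy_domain {
	unsigned long span;		/* stands in for the old .span = CPU_MASK_NONE */
	struct toy_domain *parent;	/* stands in for .parent/.child/.groups = NULL */
	unsigned int min_interval;
	unsigned int nr_balance_failed;	/* stands in for .nr_balance_failed = 0 */
};

/* Trimmed initializer in the style of the patched SD_*_INIT macros:
 * only the non-zero field is named. */
#define TOY_INIT (struct toy_domain) {	\
	.min_interval = 1,		\
}

int main(void)
{
	struct toy_domain a = TOY_INIT;

	/* Members not named in the designated initializer are zero/NULL. */
	assert(a.span == 0);
	assert(a.parent == NULL);
	assert(a.nr_balance_failed == 0);
	assert(a.min_interval == 1);

	/* The runtime-clearing alternative the log mentions: memset(0),
	 * then set only the fields that need non-zero values. */
	struct toy_domain b;
	memset(&b, 0, sizeof(b));
	b.min_interval = 1;
	assert(b.parent == NULL && b.min_interval == 1);

	return 0;
}
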
Diffstat (limited to 'include/linux/topology.h')
-rw-r--r--	include/linux/topology.h | 46
1 file changed, 8 insertions(+), 38 deletions(-)
diff --git a/include/linux/topology.h b/include/linux/topology.h
index bd14f8b30f09..4bb7074a2c3a 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -38,16 +38,15 @@
 #endif
 
 #ifndef nr_cpus_node
 #define nr_cpus_node(node) \
 	({ \
-		cpumask_t __tmp__; \
-		__tmp__ = node_to_cpumask(node); \
-		cpus_weight(__tmp__); \
+		node_to_cpumask_ptr(__tmp__, node); \
+		cpus_weight(*__tmp__); \
 	})
 #endif
 
 #define for_each_node_with_cpus(node) \
 	for_each_online_node(node) \
 		if (nr_cpus_node(node))
 
 void arch_update_cpu_topology(void);
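
The hunk above is the nr_cpus_node() change from the log: the old body copied a whole cpumask_t onto the caller's stack (NR_CPUS bits, e.g. 512 bytes when NR_CPUS=4096) just to count bits, while the new body only needs a pointer. The real node_to_cpumask_ptr() is added by the asm-generic-add-node_to_cpumask_ptr-macro.patch dependency listed above; the kernel-context sketch below (cpumask_t and node_to_cpumask() come from <linux/cpumask.h> and <asm/topology.h>) only shows the rough shape such a macro could take, not the actual definition:

/*
 * Sketch only: two plausible shapes for node_to_cpumask_ptr(). The real
 * macro comes from the dependency patch and may differ.
 */

/* Arch with a per-node cpumask table: only a pointer lands on the stack. */
extern cpumask_t node_to_cpumask_map[];
#define node_to_cpumask_ptr(v, node)				\
		cpumask_t *v = &node_to_cpumask_map[node]

/* Generic fallback: still a local copy, but hidden behind the same
 * pointer-style interface so callers like nr_cpus_node() stay unchanged. */
#define node_to_cpumask_ptr_fallback(v, node)			\
		cpumask_t _##v = node_to_cpumask(node), *v = &_##v
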
@@ -80,7 +79,9 @@ void arch_update_cpu_topology(void);
  * by defining their own arch-specific initializer in include/asm/topology.h.
  * A definition there will automagically override these default initializers
  * and allow arch-specific performance tuning of sched_domains.
+ * (Only non-zero and non-null fields need be specified.)
  */
+
 #ifdef CONFIG_SCHED_SMT
 /* MCD - Do we really need this? It is always on if CONFIG_SCHED_SMT is,
  * so can't we drop this in favor of CONFIG_SCHED_SMT?
@@ -89,20 +90,10 @@ void arch_update_cpu_topology(void);
 /* Common values for SMT siblings */
 #ifndef SD_SIBLING_INIT
 #define SD_SIBLING_INIT (struct sched_domain) { \
-	.span = CPU_MASK_NONE, \
-	.parent = NULL, \
-	.child = NULL, \
-	.groups = NULL, \
 	.min_interval = 1, \
 	.max_interval = 2, \
 	.busy_factor = 64, \
 	.imbalance_pct = 110, \
-	.cache_nice_tries = 0, \
-	.busy_idx = 0, \
-	.idle_idx = 0, \
-	.newidle_idx = 0, \
-	.wake_idx = 0, \
-	.forkexec_idx = 0, \
 	.flags = SD_LOAD_BALANCE \
 		| SD_BALANCE_NEWIDLE \
 		| SD_BALANCE_FORK \
@@ -112,7 +103,6 @@ void arch_update_cpu_topology(void);
 		| SD_SHARE_CPUPOWER, \
 	.last_balance = jiffies, \
 	.balance_interval = 1, \
-	.nr_balance_failed = 0, \
 }
 #endif
 #endif /* CONFIG_SCHED_SMT */
@@ -121,18 +111,12 @@ void arch_update_cpu_topology(void);
 /* Common values for MC siblings. for now mostly derived from SD_CPU_INIT */
 #ifndef SD_MC_INIT
 #define SD_MC_INIT (struct sched_domain) { \
-	.span = CPU_MASK_NONE, \
-	.parent = NULL, \
-	.child = NULL, \
-	.groups = NULL, \
 	.min_interval = 1, \
 	.max_interval = 4, \
 	.busy_factor = 64, \
 	.imbalance_pct = 125, \
 	.cache_nice_tries = 1, \
 	.busy_idx = 2, \
-	.idle_idx = 0, \
-	.newidle_idx = 0, \
 	.wake_idx = 1, \
 	.forkexec_idx = 1, \
 	.flags = SD_LOAD_BALANCE \
@@ -144,7 +128,6 @@ void arch_update_cpu_topology(void);
 		| BALANCE_FOR_MC_POWER, \
 	.last_balance = jiffies, \
 	.balance_interval = 1, \
-	.nr_balance_failed = 0, \
 }
 #endif
 #endif /* CONFIG_SCHED_MC */
@@ -152,10 +135,6 @@ void arch_update_cpu_topology(void);
 /* Common values for CPUs */
 #ifndef SD_CPU_INIT
 #define SD_CPU_INIT (struct sched_domain) { \
-	.span = CPU_MASK_NONE, \
-	.parent = NULL, \
-	.child = NULL, \
-	.groups = NULL, \
 	.min_interval = 1, \
 	.max_interval = 4, \
 	.busy_factor = 64, \
@@ -174,16 +153,11 @@ void arch_update_cpu_topology(void);
 		| BALANCE_FOR_PKG_POWER,\
 	.last_balance = jiffies, \
 	.balance_interval = 1, \
-	.nr_balance_failed = 0, \
 }
 #endif
 
 /* sched_domains SD_ALLNODES_INIT for NUMA machines */
 #define SD_ALLNODES_INIT (struct sched_domain) { \
-	.span = CPU_MASK_NONE, \
-	.parent = NULL, \
-	.child = NULL, \
-	.groups = NULL, \
 	.min_interval = 64, \
 	.max_interval = 64*num_online_cpus(), \
 	.busy_factor = 128, \
@@ -191,14 +165,10 @@ void arch_update_cpu_topology(void);
 	.cache_nice_tries = 1, \
 	.busy_idx = 3, \
 	.idle_idx = 3, \
-	.newidle_idx = 0, /* unused */ \
-	.wake_idx = 0, /* unused */ \
-	.forkexec_idx = 0, /* unused */ \
 	.flags = SD_LOAD_BALANCE \
 		| SD_SERIALIZE, \
 	.last_balance = jiffies, \
 	.balance_interval = 64, \
-	.nr_balance_failed = 0, \
 }
 
 #ifdef CONFIG_NUMA