path: root/arch/metag
author     Paul Gortmaker <paul.gortmaker@windriver.com>   2013-06-18 17:40:47 -0400
committer  Paul Gortmaker <paul.gortmaker@windriver.com>   2013-07-14 19:36:54 -0400
commit     54be16e7b2c96793bee4bf472409e9d31bc77c78 (patch)
tree       1684f258dfee6300191efdb35b452451ed5a14cb /arch/metag
parent     18f894c145e8515c26da09dd95e06d6b1158775c (diff)
metag: delete __cpuinit usage from all metag files
The __cpuinit type of throwaway sections might have made sense some time ago when RAM was more constrained, but now the savings do not offset the cost and complications. For example, the fix in commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time") is a good example of the nasty type of bugs that can be created with improper use of the various __init prefixes.

After a discussion on LKML[1] it was decided that cpuinit should go the way of devinit and be phased out. Once all the users are gone, we can then finally remove the macros themselves from linux/init.h.

Note that some harmless section mismatch warnings may result, since notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c) and are flagged as __cpuinit -- so if we remove the __cpuinit from arch specific callers, we will also get section mismatch warnings. As an intermediate step, we intend to turn the linux/init.h cpuinit content into no-ops as early as possible, since that will get rid of these warnings. In any case, they are temporary and harmless.

This removes all the arch/metag uses of the __cpuinit macros from all C files. Currently metag does not have any __CPUINIT used in assembly files.

[1] https://lkml.org/lkml/2013/5/20/589

Cc: James Hogan <james.hogan@imgtec.com>
Acked-by: James Hogan <james.hogan@imgtec.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
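For readers unfamiliar with these annotations, the sketch below illustrates the idea in simplified form. It is not copied from linux/init.h; the exact attribute text and section names are only indicative of how the historical definitions worked, and the "no-op" step shown is the intermediate step the message describes.

    /* Illustrative sketch only -- simplified, not the exact linux/init.h text. */

    /* Historically, the __cpuinit family placed CPU-bringup code and data into
     * dedicated sections that could be discarded when they were no longer
     * needed, roughly along these lines:
     */
    #define __cpuinit      __attribute__((__section__(".cpuinit.text")))
    #define __cpuinitdata  __attribute__((__section__(".cpuinit.data")))
    #define __cpuexit      __attribute__((__section__(".cpuexit.text")))

    /* The intermediate step mentioned above turns the markers into no-ops, so
     * any remaining annotated callers still compile while their code stays in
     * the normal .text/.data sections; removals like the ones in this patch
     * are then purely mechanical:
     */
    #undef  __cpuinit
    #undef  __cpuinitdata
    #undef  __cpuexit
    #define __cpuinit
    #define __cpuinitdata
    #define __cpuexit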
Diffstat (limited to 'arch/metag')
-rw-r--r--   arch/metag/kernel/perf/perf_event.c    6
-rw-r--r--   arch/metag/kernel/smp.c               22
-rw-r--r--   arch/metag/kernel/traps.c              2
3 files changed, 14 insertions, 16 deletions
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
index 5b18888ee364..5cc4d4dcf3cf 100644
--- a/arch/metag/kernel/perf/perf_event.c
+++ b/arch/metag/kernel/perf/perf_event.c
@@ -813,8 +813,8 @@ static struct metag_pmu _metag_pmu = {
 };
 
 /* PMU CPU hotplug notifier */
-static int __cpuinit metag_pmu_cpu_notify(struct notifier_block *b,
-		unsigned long action, void *hcpu)
+static int metag_pmu_cpu_notify(struct notifier_block *b, unsigned long action,
+		void *hcpu)
 {
 	unsigned int cpu = (unsigned int)hcpu;
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
@@ -828,7 +828,7 @@ static int __cpuinit metag_pmu_cpu_notify(struct notifier_block *b,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata metag_pmu_notifier = {
+static struct notifier_block metag_pmu_notifier = {
 	.notifier_call = metag_pmu_cpu_notify,
 };
 
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
index e413875cf6d2..7c0113142981 100644
--- a/arch/metag/kernel/smp.c
+++ b/arch/metag/kernel/smp.c
@@ -68,7 +68,7 @@ static DECLARE_COMPLETION(cpu_running);
 /*
  * "thread" is assumed to be a valid Meta hardware thread ID.
  */
-int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle)
+int boot_secondary(unsigned int thread, struct task_struct *idle)
 {
 	u32 val;
 
@@ -118,11 +118,9 @@ int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle)
  * If the cache partition has changed, prints a message to the log describing
  * those changes.
  */
-static __cpuinit void describe_cachepart_change(unsigned int thread,
-						const char *label,
-						unsigned int sz,
-						unsigned int old,
-						unsigned int new)
+static void describe_cachepart_change(unsigned int thread, const char *label,
+				      unsigned int sz, unsigned int old,
+				      unsigned int new)
 {
 	unsigned int lor1, land1, gor1, gand1;
 	unsigned int lor2, land2, gor2, gand2;
@@ -170,7 +168,7 @@ static __cpuinit void describe_cachepart_change(unsigned int thread,
  * Ensures that coherency is enabled and that the threads share the same cache
  * partitions.
  */
-static __cpuinit void setup_smp_cache(unsigned int thread)
+static void setup_smp_cache(unsigned int thread)
 {
 	unsigned int this_thread, lflags;
 	unsigned int dcsz, dcpart_this, dcpart_old, dcpart_new;
@@ -215,7 +213,7 @@ static __cpuinit void setup_smp_cache(unsigned int thread)
 				  icpart_old, icpart_new);
 }
 
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
 	unsigned int thread = cpu_2_hwthread_id[cpu];
 	int ret;
@@ -268,7 +266,7 @@ static DECLARE_COMPLETION(cpu_killed);
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
-int __cpuexit __cpu_disable(void)
+int __cpu_disable(void)
 {
 	unsigned int cpu = smp_processor_id();
 
@@ -299,7 +297,7 @@ int __cpuexit __cpu_disable(void)
  * called on the thread which is asking for a CPU to be shutdown -
  * waits until shutdown has completed, or it is timed out.
  */
-void __cpuexit __cpu_die(unsigned int cpu)
+void __cpu_die(unsigned int cpu)
 {
 	if (!wait_for_completion_timeout(&cpu_killed, msecs_to_jiffies(1)))
 		pr_err("CPU%u: unable to kill\n", cpu);
@@ -311,7 +309,7 @@ void __cpuexit __cpu_die(unsigned int cpu)
  * Note that we do not return from this function. If this cpu is
  * brought online again it will need to run secondary_startup().
  */
-void __cpuexit cpu_die(void)
+void cpu_die(void)
 {
 	local_irq_disable();
 	idle_task_exit();
@@ -326,7 +324,7 @@ void __cpuexit cpu_die(void)
  * Called by both boot and secondaries to move global data into
  * per-processor storage.
  */
-void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+void smp_store_cpu_info(unsigned int cpuid)
 {
 	struct cpuinfo_metag *cpu_info = &per_cpu(cpu_data, cpuid);
 
diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c
index c00ade0228ef..25f9d1c2ffec 100644
--- a/arch/metag/kernel/traps.c
+++ b/arch/metag/kernel/traps.c
@@ -812,7 +812,7 @@ static void set_trigger_mask(unsigned int mask)
 }
 #endif
 
-void __cpuinit per_cpu_trap_init(unsigned long cpu)
+void per_cpu_trap_init(unsigned long cpu)
 {
 	TBIRES int_context;
 	unsigned int thread = cpu_2_hwthread_id[cpu];