author     Paul Mundt <lethal@linux-sh.org>    2010-03-29 23:38:01 -0400
committer  Paul Mundt <lethal@linux-sh.org>    2010-04-20 23:23:25 -0400
commit     3366e3585fbf0d40ce6f2382b544851cf4df1654 (patch)
tree       2d0e01291d103d28bdb67afffb816fa1d7023fbb /arch/sh/kernel
parent     4a6feab0ee5240c4bd5378d9f8a46b85718c68a7 (diff)
sh: Move platform smp ops into their own structure.
This cribs the MIPS plat_smp_ops approach for wrapping up the platform
ops. This will allow for mixing and matching different ops on the same
platform in the future.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
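
The hook table introduced by this patch can be read off the shx3_smp_ops initializer and the mp_ops call sites in the diff below. A rough sketch of its shape follows; the authoritative struct declaration lives in an arch header that is not part of this diff, so field order and annotations may differ:

struct plat_smp_ops {
        void            (*smp_setup)(void);
        void            (*prepare_cpus)(unsigned int max_cpus);
        void            (*start_cpu)(unsigned int cpu, unsigned long entry_point);
        unsigned int    (*smp_processor_id)(void);
        void            (*send_ipi)(unsigned int cpu, unsigned int message);
};

/* Platforms register their ops table once; generic SMP code then calls
 * through mp_ops-> instead of the old fixed plat_*() entry points. */
void register_smp_ops(struct plat_smp_ops *ops);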
Diffstat (limited to 'arch/sh/kernel')
 -rw-r--r--  arch/sh/kernel/cpu/sh4a/smp-shx3.c | 20
 -rw-r--r--  arch/sh/kernel/localtimer.c        |  2
 -rw-r--r--  arch/sh/kernel/setup.c             |  2
 -rw-r--r--  arch/sh/kernel/smp.c               | 24
 4 files changed, 32 insertions, 16 deletions
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
index c98b4574c44e..5c5d50ccbfcd 100644
--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -1,7 +1,7 @@
 /*
  * SH-X3 SMP
  *
- * Copyright (C) 2007 - 2008 Paul Mundt
+ * Copyright (C) 2007 - 2010 Paul Mundt
  * Copyright (C) 2007 Magnus Damm
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -37,7 +37,7 @@ static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
-void __init plat_smp_setup(void)
+static void shx3_smp_setup(void)
 {
 	unsigned int cpu = 0;
 	int i, num;
@@ -63,7 +63,7 @@ void __init plat_smp_setup(void)
 	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
 }
 
-void __init plat_prepare_cpus(unsigned int max_cpus)
+static void shx3_prepare_cpus(unsigned int max_cpus)
 {
 	int i;
 
@@ -76,7 +76,7 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
 			  IRQF_DISABLED | IRQF_PERCPU, "IPI", (void *)(long)i);
 }
 
-void plat_start_cpu(unsigned int cpu, unsigned long entry_point)
+static void shx3_start_cpu(unsigned int cpu, unsigned long entry_point)
 {
 	if (__in_29bit_mode())
 		__raw_writel(entry_point, RESET_REG(cpu));
@@ -93,12 +93,12 @@ void plat_start_cpu(unsigned int cpu, unsigned long entry_point)
 	__raw_writel(STBCR_RESET | STBCR_LTSLP, STBCR_REG(cpu));
 }
 
-int plat_smp_processor_id(void)
+static unsigned int shx3_smp_processor_id(void)
 {
 	return __raw_readl(0xff000048); /* CPIDR */
 }
 
-void plat_send_ipi(unsigned int cpu, unsigned int message)
+static void shx3_send_ipi(unsigned int cpu, unsigned int message)
 {
 	unsigned long addr = 0xfe410070 + (cpu * 4);
 
@@ -106,3 +106,11 @@ void plat_send_ipi(unsigned int cpu, unsigned int message)
 
 	__raw_writel(1 << (message << 2), addr); /* C0INTICI..CnINTICI */
 }
+
+struct plat_smp_ops shx3_smp_ops = {
+	.smp_setup		= shx3_smp_setup,
+	.prepare_cpus		= shx3_prepare_cpus,
+	.start_cpu		= shx3_start_cpu,
+	.smp_processor_id	= shx3_smp_processor_id,
+	.send_ipi		= shx3_send_ipi,
+};
diff --git a/arch/sh/kernel/localtimer.c b/arch/sh/kernel/localtimer.c
index 0b04e7d4a9b9..865a2f1029b1 100644
--- a/arch/sh/kernel/localtimer.c
+++ b/arch/sh/kernel/localtimer.c
@@ -44,7 +44,7 @@ static void dummy_timer_set_mode(enum clock_event_mode mode,
 {
 }
 
-void __cpuinit local_timer_setup(unsigned int cpu)
+void local_timer_setup(unsigned int cpu)
 {
 	struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
 
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 8870d6ba64bf..29155384d5a8 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -459,9 +459,7 @@ void __init setup_arch(char **cmdline_p)
 	if (likely(sh_mv.mv_setup))
 		sh_mv.mv_setup(cmdline_p);
 
-#ifdef CONFIG_SMP
 	plat_smp_setup();
-#endif
 }
 
 /* processor boot mode configuration */
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 002cc612deef..2f348fda0159 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -3,7 +3,7 @@
  *
  * SMP support for the SuperH processors.
  *
- * Copyright (C) 2002 - 2008 Paul Mundt
+ * Copyright (C) 2002 - 2010 Paul Mundt
  * Copyright (C) 2006 - 2007 Akio Idehara
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -31,6 +31,16 @@
 int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
 int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
 
+struct plat_smp_ops *mp_ops = NULL;
+
+void __cpuinit register_smp_ops(struct plat_smp_ops *ops)
+{
+	if (mp_ops)
+		printk(KERN_WARNING "Overriding previously set SMP ops\n");
+
+	mp_ops = ops;
+}
+
 static inline void __init smp_store_cpu_info(unsigned int cpu)
 {
 	struct sh_cpuinfo *c = cpu_data + cpu;
@@ -46,7 +56,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 
 	init_new_context(current, &init_mm);
 	current_thread_info()->cpu = cpu;
-	plat_prepare_cpus(max_cpus);
+	mp_ops->prepare_cpus(max_cpus);
 
 #ifndef CONFIG_HOTPLUG_CPU
 	init_cpu_present(&cpu_possible_map);
@@ -127,7 +137,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 		      (unsigned long)&stack_start + sizeof(stack_start));
 	wmb();
 
-	plat_start_cpu(cpu, (unsigned long)_stext);
+	mp_ops->start_cpu(cpu, (unsigned long)_stext);
 
 	timeout = jiffies + HZ;
 	while (time_before(jiffies, timeout)) {
@@ -159,7 +169,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 void smp_send_reschedule(int cpu)
 {
-	plat_send_ipi(cpu, SMP_MSG_RESCHEDULE);
+	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
 }
 
 void smp_send_stop(void)
@@ -172,12 +182,12 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 	int cpu;
 
 	for_each_cpu(cpu, mask)
-		plat_send_ipi(cpu, SMP_MSG_FUNCTION);
+		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
+	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
 }
 
 void smp_timer_broadcast(const struct cpumask *mask)
@@ -185,7 +195,7 @@ void smp_timer_broadcast(const struct cpumask *mask)
 	int cpu;
 
 	for_each_cpu(cpu, mask)
-		plat_send_ipi(cpu, SMP_MSG_TIMER);
+		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
 }
 
 static void ipi_timer(void)
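
For illustration only, a stand-alone sketch of the ops-table pattern this commit adopts. It is plain user-space C, not kernel code: the names mirror the patch, but printf stands in for the register writes and the kernel's real declarations live in its own headers.

#include <stdio.h>

/* Table of platform hooks; only one hook is shown to keep the sketch short. */
struct plat_smp_ops {
	void (*send_ipi)(unsigned int cpu, unsigned int message);
};

/* Platform-specific implementation; the real one writes the CnINTICI registers. */
static void shx3_send_ipi(unsigned int cpu, unsigned int message)
{
	printf("IPI %u -> CPU%u\n", message, cpu);
}

static struct plat_smp_ops shx3_smp_ops = {
	.send_ipi = shx3_send_ipi,
};

static struct plat_smp_ops *mp_ops;

/* Generic layer records whichever ops table the platform hands it. */
static void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		fprintf(stderr, "Overriding previously set SMP ops\n");
	mp_ops = ops;
}

int main(void)
{
	register_smp_ops(&shx3_smp_ops);	/* platform picks its ops */
	mp_ops->send_ipi(1, 0);			/* generic code dispatches through the table */
	return 0;
}

This keeps the generic SMP code free of fixed plat_*() entry points, so several platforms (or several ops tables for one platform) can coexist in a single kernel image.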