author     Ingo Molnar <mingo@elte.hu>   2008-01-30 07:30:10 -0500
committer  Ingo Molnar <mingo@elte.hu>   2008-01-30 07:30:10 -0500
commit     a4ec1effce83796209a0258602b0cf50026d86f2 (patch)
tree       557df52f7a541b7b3e9762b3ad379bf98349bfb8 /arch/x86/mach-voyager/voyager_smp.c
parent     31183ba8fd05b6ddc67ab4d726167cbc52e1b346 (diff)
x86: mach-voyager, lindent
lindent the mach-voyager files to get rid of more than 300 style errors:
                                 errors   lines of code   errors/KLOC
 arch/x86/mach-voyager/ [old]       409            3729         109.6
 arch/x86/mach-voyager/ [new]        71            3678          19.3
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
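
The cleanup is mechanical: the numbers in the table above come from running the tree's own style tooling over the directory rather than from hand edits. A minimal reproduction sketch, assuming a 2.6.24-era tree where scripts/Lindent still exists and using scripts/checkpatch.pl in --file mode as the error counter (exact totals depend on the indent(1) and checkpatch versions, so treat it as an illustration only):

```
# Hypothetical sketch -- paths and counts are approximate, not the exact
# commands used for this commit.
cd linux-2.6                                  # a 2.6.24-era checkout

count_errors() {                              # checkpatch ERROR lines across the subarch
        for f in arch/x86/mach-voyager/*.c; do
                ./scripts/checkpatch.pl --file "$f"
        done | grep -c '^ERROR'
}

before=$(count_errors)                        # roughly the 409 in the table above
./scripts/Lindent arch/x86/mach-voyager/*.c   # the kernel's indent(1) wrapper
after=$(count_errors)                         # roughly the 71 in the table above

loc=$(cat arch/x86/mach-voyager/*.c | wc -l)
# integer approximation of the errors/KLOC column
echo "errors/KLOC: before $((before * 1000 / loc)), after $((after * 1000 / loc))"
```
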
Diffstat (limited to 'arch/x86/mach-voyager/voyager_smp.c')
-rw-r--r--   arch/x86/mach-voyager/voyager_smp.c   672
1 file changed, 307 insertions, 365 deletions
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 88124dd35406..73c435ce10fd 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -32,7 +32,8 @@ | |||
32 | DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 }; | 32 | DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 }; |
33 | 33 | ||
34 | /* CPU IRQ affinity -- set to all ones initially */ | 34 | /* CPU IRQ affinity -- set to all ones initially */ |
35 | static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = ~0UL }; | 35 | static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = |
36 | {[0 ... NR_CPUS-1] = ~0UL }; | ||
36 | 37 | ||
37 | /* per CPU data structure (for /proc/cpuinfo et al), visible externally | 38 | /* per CPU data structure (for /proc/cpuinfo et al), visible externally |
38 | * indexed physically */ | 39 | * indexed physically */ |
@@ -76,7 +77,6 @@ EXPORT_SYMBOL(cpu_online_map); | |||
76 | * by scheduler but indexed physically */ | 77 | * by scheduler but indexed physically */ |
77 | cpumask_t phys_cpu_present_map = CPU_MASK_NONE; | 78 | cpumask_t phys_cpu_present_map = CPU_MASK_NONE; |
78 | 79 | ||
79 | |||
80 | /* The internal functions */ | 80 | /* The internal functions */ |
81 | static void send_CPI(__u32 cpuset, __u8 cpi); | 81 | static void send_CPI(__u32 cpuset, __u8 cpi); |
82 | static void ack_CPI(__u8 cpi); | 82 | static void ack_CPI(__u8 cpi); |
@@ -101,94 +101,86 @@ int hard_smp_processor_id(void); | |||
101 | int safe_smp_processor_id(void); | 101 | int safe_smp_processor_id(void); |
102 | 102 | ||
103 | /* Inline functions */ | 103 | /* Inline functions */ |
104 | static inline void | 104 | static inline void send_one_QIC_CPI(__u8 cpu, __u8 cpi) |
105 | send_one_QIC_CPI(__u8 cpu, __u8 cpi) | ||
106 | { | 105 | { |
107 | voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi = | 106 | voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi = |
108 | (smp_processor_id() << 16) + cpi; | 107 | (smp_processor_id() << 16) + cpi; |
109 | } | 108 | } |
110 | 109 | ||
111 | static inline void | 110 | static inline void send_QIC_CPI(__u32 cpuset, __u8 cpi) |
112 | send_QIC_CPI(__u32 cpuset, __u8 cpi) | ||
113 | { | 111 | { |
114 | int cpu; | 112 | int cpu; |
115 | 113 | ||
116 | for_each_online_cpu(cpu) { | 114 | for_each_online_cpu(cpu) { |
117 | if(cpuset & (1<<cpu)) { | 115 | if (cpuset & (1 << cpu)) { |
118 | #ifdef VOYAGER_DEBUG | 116 | #ifdef VOYAGER_DEBUG |
119 | if(!cpu_isset(cpu, cpu_online_map)) | 117 | if (!cpu_isset(cpu, cpu_online_map)) |
120 | VDEBUG(("CPU%d sending cpi %d to CPU%d not in cpu_online_map\n", hard_smp_processor_id(), cpi, cpu)); | 118 | VDEBUG(("CPU%d sending cpi %d to CPU%d not in " |
119 | "cpu_online_map\n", | ||
120 | hard_smp_processor_id(), cpi, cpu)); | ||
121 | #endif | 121 | #endif |
122 | send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET); | 122 | send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET); |
123 | } | 123 | } |
124 | } | 124 | } |
125 | } | 125 | } |
126 | 126 | ||
127 | static inline void | 127 | static inline void wrapper_smp_local_timer_interrupt(void) |
128 | wrapper_smp_local_timer_interrupt(void) | ||
129 | { | 128 | { |
130 | irq_enter(); | 129 | irq_enter(); |
131 | smp_local_timer_interrupt(); | 130 | smp_local_timer_interrupt(); |
132 | irq_exit(); | 131 | irq_exit(); |
133 | } | 132 | } |
134 | 133 | ||
135 | static inline void | 134 | static inline void send_one_CPI(__u8 cpu, __u8 cpi) |
136 | send_one_CPI(__u8 cpu, __u8 cpi) | ||
137 | { | 135 | { |
138 | if(voyager_quad_processors & (1<<cpu)) | 136 | if (voyager_quad_processors & (1 << cpu)) |
139 | send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET); | 137 | send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET); |
140 | else | 138 | else |
141 | send_CPI(1<<cpu, cpi); | 139 | send_CPI(1 << cpu, cpi); |
142 | } | 140 | } |
143 | 141 | ||
144 | static inline void | 142 | static inline void send_CPI_allbutself(__u8 cpi) |
145 | send_CPI_allbutself(__u8 cpi) | ||
146 | { | 143 | { |
147 | __u8 cpu = smp_processor_id(); | 144 | __u8 cpu = smp_processor_id(); |
148 | __u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu); | 145 | __u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu); |
149 | send_CPI(mask, cpi); | 146 | send_CPI(mask, cpi); |
150 | } | 147 | } |
151 | 148 | ||
152 | static inline int | 149 | static inline int is_cpu_quad(void) |
153 | is_cpu_quad(void) | ||
154 | { | 150 | { |
155 | __u8 cpumask = inb(VIC_PROC_WHO_AM_I); | 151 | __u8 cpumask = inb(VIC_PROC_WHO_AM_I); |
156 | return ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER); | 152 | return ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER); |
157 | } | 153 | } |
158 | 154 | ||
159 | static inline int | 155 | static inline int is_cpu_extended(void) |
160 | is_cpu_extended(void) | ||
161 | { | 156 | { |
162 | __u8 cpu = hard_smp_processor_id(); | 157 | __u8 cpu = hard_smp_processor_id(); |
163 | 158 | ||
164 | return(voyager_extended_vic_processors & (1<<cpu)); | 159 | return (voyager_extended_vic_processors & (1 << cpu)); |
165 | } | 160 | } |
166 | 161 | ||
167 | static inline int | 162 | static inline int is_cpu_vic_boot(void) |
168 | is_cpu_vic_boot(void) | ||
169 | { | 163 | { |
170 | __u8 cpu = hard_smp_processor_id(); | 164 | __u8 cpu = hard_smp_processor_id(); |
171 | 165 | ||
172 | return(voyager_extended_vic_processors | 166 | return (voyager_extended_vic_processors |
173 | & voyager_allowed_boot_processors & (1<<cpu)); | 167 | & voyager_allowed_boot_processors & (1 << cpu)); |
174 | } | 168 | } |
175 | 169 | ||
176 | 170 | static inline void ack_CPI(__u8 cpi) | |
177 | static inline void | ||
178 | ack_CPI(__u8 cpi) | ||
179 | { | 171 | { |
180 | switch(cpi) { | 172 | switch (cpi) { |
181 | case VIC_CPU_BOOT_CPI: | 173 | case VIC_CPU_BOOT_CPI: |
182 | if(is_cpu_quad() && !is_cpu_vic_boot()) | 174 | if (is_cpu_quad() && !is_cpu_vic_boot()) |
183 | ack_QIC_CPI(cpi); | 175 | ack_QIC_CPI(cpi); |
184 | else | 176 | else |
185 | ack_VIC_CPI(cpi); | 177 | ack_VIC_CPI(cpi); |
186 | break; | 178 | break; |
187 | case VIC_SYS_INT: | 179 | case VIC_SYS_INT: |
188 | case VIC_CMN_INT: | 180 | case VIC_CMN_INT: |
189 | /* These are slightly strange. Even on the Quad card, | 181 | /* These are slightly strange. Even on the Quad card, |
190 | * They are vectored as VIC CPIs */ | 182 | * They are vectored as VIC CPIs */ |
191 | if(is_cpu_quad()) | 183 | if (is_cpu_quad()) |
192 | ack_special_QIC_CPI(cpi); | 184 | ack_special_QIC_CPI(cpi); |
193 | else | 185 | else |
194 | ack_VIC_CPI(cpi); | 186 | ack_VIC_CPI(cpi); |
@@ -205,11 +197,11 @@ ack_CPI(__u8 cpi) | |||
205 | * 8259 IRQs except that masks and things must be kept per processor | 197 | * 8259 IRQs except that masks and things must be kept per processor |
206 | */ | 198 | */ |
207 | static struct irq_chip vic_chip = { | 199 | static struct irq_chip vic_chip = { |
208 | .name = "VIC", | 200 | .name = "VIC", |
209 | .startup = startup_vic_irq, | 201 | .startup = startup_vic_irq, |
210 | .mask = mask_vic_irq, | 202 | .mask = mask_vic_irq, |
211 | .unmask = unmask_vic_irq, | 203 | .unmask = unmask_vic_irq, |
212 | .set_affinity = set_vic_irq_affinity, | 204 | .set_affinity = set_vic_irq_affinity, |
213 | }; | 205 | }; |
214 | 206 | ||
215 | /* used to count up as CPUs are brought on line (starts at 0) */ | 207 | /* used to count up as CPUs are brought on line (starts at 0) */ |
@@ -223,7 +215,7 @@ static __u32 trampoline_base; | |||
223 | /* The per cpu profile stuff - used in smp_local_timer_interrupt */ | 215 | /* The per cpu profile stuff - used in smp_local_timer_interrupt */ |
224 | static DEFINE_PER_CPU(int, prof_multiplier) = 1; | 216 | static DEFINE_PER_CPU(int, prof_multiplier) = 1; |
225 | static DEFINE_PER_CPU(int, prof_old_multiplier) = 1; | 217 | static DEFINE_PER_CPU(int, prof_old_multiplier) = 1; |
226 | static DEFINE_PER_CPU(int, prof_counter) = 1; | 218 | static DEFINE_PER_CPU(int, prof_counter) = 1; |
227 | 219 | ||
228 | /* the map used to check if a CPU has booted */ | 220 | /* the map used to check if a CPU has booted */ |
229 | static __u32 cpu_booted_map; | 221 | static __u32 cpu_booted_map; |
@@ -246,9 +238,9 @@ static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned; | |||
246 | static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 }; | 238 | static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 }; |
247 | 239 | ||
248 | /* Lock for enable/disable of VIC interrupts */ | 240 | /* Lock for enable/disable of VIC interrupts */ |
249 | static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock); | 241 | static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock); |
250 | 242 | ||
251 | /* The boot processor is correctly set up in PC mode when it | 243 | /* The boot processor is correctly set up in PC mode when it |
252 | * comes up, but the secondaries need their master/slave 8259 | 244 | * comes up, but the secondaries need their master/slave 8259 |
253 | * pairs initializing correctly */ | 245 | * pairs initializing correctly */ |
254 | 246 | ||
@@ -262,8 +254,7 @@ static unsigned long vic_tick[NR_CPUS] __cacheline_aligned = { 0 }; | |||
262 | static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned; | 254 | static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned; |
263 | 255 | ||
264 | /* debugging routine to read the isr of the cpu's pic */ | 256 | /* debugging routine to read the isr of the cpu's pic */ |
265 | static inline __u16 | 257 | static inline __u16 vic_read_isr(void) |
266 | vic_read_isr(void) | ||
267 | { | 258 | { |
268 | __u16 isr; | 259 | __u16 isr; |
269 | 260 | ||
@@ -275,17 +266,16 @@ vic_read_isr(void) | |||
275 | return isr; | 266 | return isr; |
276 | } | 267 | } |
277 | 268 | ||
278 | static __init void | 269 | static __init void qic_setup(void) |
279 | qic_setup(void) | ||
280 | { | 270 | { |
281 | if(!is_cpu_quad()) { | 271 | if (!is_cpu_quad()) { |
282 | /* not a quad, no setup */ | 272 | /* not a quad, no setup */ |
283 | return; | 273 | return; |
284 | } | 274 | } |
285 | outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0); | 275 | outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0); |
286 | outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1); | 276 | outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1); |
287 | 277 | ||
288 | if(is_cpu_extended()) { | 278 | if (is_cpu_extended()) { |
289 | /* the QIC duplicate of the VIC base register */ | 279 | /* the QIC duplicate of the VIC base register */ |
290 | outb(VIC_DEFAULT_CPI_BASE, QIC_VIC_CPI_BASE_REGISTER); | 280 | outb(VIC_DEFAULT_CPI_BASE, QIC_VIC_CPI_BASE_REGISTER); |
291 | outb(QIC_DEFAULT_CPI_BASE, QIC_CPI_BASE_REGISTER); | 281 | outb(QIC_DEFAULT_CPI_BASE, QIC_CPI_BASE_REGISTER); |
@@ -295,8 +285,7 @@ qic_setup(void) | |||
295 | } | 285 | } |
296 | } | 286 | } |
297 | 287 | ||
298 | static __init void | 288 | static __init void vic_setup_pic(void) |
299 | vic_setup_pic(void) | ||
300 | { | 289 | { |
301 | outb(1, VIC_REDIRECT_REGISTER_1); | 290 | outb(1, VIC_REDIRECT_REGISTER_1); |
302 | /* clear the claim registers for dynamic routing */ | 291 | /* clear the claim registers for dynamic routing */ |
@@ -333,7 +322,7 @@ vic_setup_pic(void) | |||
333 | 322 | ||
334 | /* ICW2: slave vector base */ | 323 | /* ICW2: slave vector base */ |
335 | outb(FIRST_EXTERNAL_VECTOR + 8, 0xA1); | 324 | outb(FIRST_EXTERNAL_VECTOR + 8, 0xA1); |
336 | 325 | ||
337 | /* ICW3: slave ID */ | 326 | /* ICW3: slave ID */ |
338 | outb(0x02, 0xA1); | 327 | outb(0x02, 0xA1); |
339 | 328 | ||
@@ -341,19 +330,18 @@ vic_setup_pic(void) | |||
341 | outb(0x01, 0xA1); | 330 | outb(0x01, 0xA1); |
342 | } | 331 | } |
343 | 332 | ||
344 | static void | 333 | static void do_quad_bootstrap(void) |
345 | do_quad_bootstrap(void) | ||
346 | { | 334 | { |
347 | if(is_cpu_quad() && is_cpu_vic_boot()) { | 335 | if (is_cpu_quad() && is_cpu_vic_boot()) { |
348 | int i; | 336 | int i; |
349 | unsigned long flags; | 337 | unsigned long flags; |
350 | __u8 cpuid = hard_smp_processor_id(); | 338 | __u8 cpuid = hard_smp_processor_id(); |
351 | 339 | ||
352 | local_irq_save(flags); | 340 | local_irq_save(flags); |
353 | 341 | ||
354 | for(i = 0; i<4; i++) { | 342 | for (i = 0; i < 4; i++) { |
355 | /* FIXME: this would be >>3 &0x7 on the 32 way */ | 343 | /* FIXME: this would be >>3 &0x7 on the 32 way */ |
356 | if(((cpuid >> 2) & 0x03) == i) | 344 | if (((cpuid >> 2) & 0x03) == i) |
357 | /* don't lower our own mask! */ | 345 | /* don't lower our own mask! */ |
358 | continue; | 346 | continue; |
359 | 347 | ||
@@ -368,12 +356,10 @@ do_quad_bootstrap(void) | |||
368 | } | 356 | } |
369 | } | 357 | } |
370 | 358 | ||
371 | |||
372 | /* Set up all the basic stuff: read the SMP config and make all the | 359 | /* Set up all the basic stuff: read the SMP config and make all the |
373 | * SMP information reflect only the boot cpu. All others will be | 360 | * SMP information reflect only the boot cpu. All others will be |
374 | * brought on-line later. */ | 361 | * brought on-line later. */ |
375 | void __init | 362 | void __init find_smp_config(void) |
376 | find_smp_config(void) | ||
377 | { | 363 | { |
378 | int i; | 364 | int i; |
379 | 365 | ||
@@ -382,24 +368,31 @@ find_smp_config(void) | |||
382 | printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id); | 368 | printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id); |
383 | 369 | ||
384 | /* initialize the CPU structures (moved from smp_boot_cpus) */ | 370 | /* initialize the CPU structures (moved from smp_boot_cpus) */ |
385 | for(i=0; i<NR_CPUS; i++) { | 371 | for (i = 0; i < NR_CPUS; i++) { |
386 | cpu_irq_affinity[i] = ~0; | 372 | cpu_irq_affinity[i] = ~0; |
387 | } | 373 | } |
388 | cpu_online_map = cpumask_of_cpu(boot_cpu_id); | 374 | cpu_online_map = cpumask_of_cpu(boot_cpu_id); |
389 | 375 | ||
390 | /* The boot CPU must be extended */ | 376 | /* The boot CPU must be extended */ |
391 | voyager_extended_vic_processors = 1<<boot_cpu_id; | 377 | voyager_extended_vic_processors = 1 << boot_cpu_id; |
392 | /* initially, all of the first 8 CPUs can boot */ | 378 | /* initially, all of the first 8 CPUs can boot */ |
393 | voyager_allowed_boot_processors = 0xff; | 379 | voyager_allowed_boot_processors = 0xff; |
394 | /* set up everything for just this CPU, we can alter | 380 | /* set up everything for just this CPU, we can alter |
395 | * this as we start the other CPUs later */ | 381 | * this as we start the other CPUs later */ |
396 | /* now get the CPU disposition from the extended CMOS */ | 382 | /* now get the CPU disposition from the extended CMOS */ |
397 | cpus_addr(phys_cpu_present_map)[0] = voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK); | 383 | cpus_addr(phys_cpu_present_map)[0] = |
398 | cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8; | 384 | voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK); |
399 | cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16; | 385 | cpus_addr(phys_cpu_present_map)[0] |= |
400 | cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24; | 386 | voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8; |
387 | cpus_addr(phys_cpu_present_map)[0] |= | ||
388 | voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + | ||
389 | 2) << 16; | ||
390 | cpus_addr(phys_cpu_present_map)[0] |= | ||
391 | voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + | ||
392 | 3) << 24; | ||
401 | cpu_possible_map = phys_cpu_present_map; | 393 | cpu_possible_map = phys_cpu_present_map; |
402 | printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_addr(phys_cpu_present_map)[0]); | 394 | printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", |
395 | cpus_addr(phys_cpu_present_map)[0]); | ||
403 | /* Here we set up the VIC to enable SMP */ | 396 | /* Here we set up the VIC to enable SMP */ |
404 | /* enable the CPIs by writing the base vector to their register */ | 397 | /* enable the CPIs by writing the base vector to their register */ |
405 | outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER); | 398 | outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER); |
@@ -427,8 +420,7 @@ find_smp_config(void) | |||
427 | /* | 420 | /* |
428 | * The bootstrap kernel entry code has set these up. Save them | 421 | * The bootstrap kernel entry code has set these up. Save them |
429 | * for a given CPU, id is physical */ | 422 | * for a given CPU, id is physical */ |
430 | void __init | 423 | void __init smp_store_cpu_info(int id) |
431 | smp_store_cpu_info(int id) | ||
432 | { | 424 | { |
433 | struct cpuinfo_x86 *c = &cpu_data(id); | 425 | struct cpuinfo_x86 *c = &cpu_data(id); |
434 | 426 | ||
@@ -438,21 +430,19 @@ smp_store_cpu_info(int id) | |||
438 | } | 430 | } |
439 | 431 | ||
440 | /* set up the trampoline and return the physical address of the code */ | 432 | /* set up the trampoline and return the physical address of the code */ |
441 | static __u32 __init | 433 | static __u32 __init setup_trampoline(void) |
442 | setup_trampoline(void) | ||
443 | { | 434 | { |
444 | /* these two are global symbols in trampoline.S */ | 435 | /* these two are global symbols in trampoline.S */ |
445 | extern const __u8 trampoline_end[]; | 436 | extern const __u8 trampoline_end[]; |
446 | extern const __u8 trampoline_data[]; | 437 | extern const __u8 trampoline_data[]; |
447 | 438 | ||
448 | memcpy((__u8 *)trampoline_base, trampoline_data, | 439 | memcpy((__u8 *) trampoline_base, trampoline_data, |
449 | trampoline_end - trampoline_data); | 440 | trampoline_end - trampoline_data); |
450 | return virt_to_phys((__u8 *)trampoline_base); | 441 | return virt_to_phys((__u8 *) trampoline_base); |
451 | } | 442 | } |
452 | 443 | ||
453 | /* Routine initially called when a non-boot CPU is brought online */ | 444 | /* Routine initially called when a non-boot CPU is brought online */ |
454 | static void __init | 445 | static void __init start_secondary(void *unused) |
455 | start_secondary(void *unused) | ||
456 | { | 446 | { |
457 | __u8 cpuid = hard_smp_processor_id(); | 447 | __u8 cpuid = hard_smp_processor_id(); |
458 | /* external functions not defined in the headers */ | 448 | /* external functions not defined in the headers */ |
@@ -464,17 +454,18 @@ start_secondary(void *unused) | |||
464 | ack_CPI(VIC_CPU_BOOT_CPI); | 454 | ack_CPI(VIC_CPU_BOOT_CPI); |
465 | 455 | ||
466 | /* setup the 8259 master slave pair belonging to this CPU --- | 456 | /* setup the 8259 master slave pair belonging to this CPU --- |
467 | * we won't actually receive any until the boot CPU | 457 | * we won't actually receive any until the boot CPU |
468 | * relinquishes it's static routing mask */ | 458 | * relinquishes it's static routing mask */ |
469 | vic_setup_pic(); | 459 | vic_setup_pic(); |
470 | 460 | ||
471 | qic_setup(); | 461 | qic_setup(); |
472 | 462 | ||
473 | if(is_cpu_quad() && !is_cpu_vic_boot()) { | 463 | if (is_cpu_quad() && !is_cpu_vic_boot()) { |
474 | /* clear the boot CPI */ | 464 | /* clear the boot CPI */ |
475 | __u8 dummy; | 465 | __u8 dummy; |
476 | 466 | ||
477 | dummy = voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi; | 467 | dummy = |
468 | voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi; | ||
478 | printk("read dummy %d\n", dummy); | 469 | printk("read dummy %d\n", dummy); |
479 | } | 470 | } |
480 | 471 | ||
@@ -516,7 +507,6 @@ start_secondary(void *unused) | |||
516 | cpu_idle(); | 507 | cpu_idle(); |
517 | } | 508 | } |
518 | 509 | ||
519 | |||
520 | /* Routine to kick start the given CPU and wait for it to report ready | 510 | /* Routine to kick start the given CPU and wait for it to report ready |
521 | * (or timeout in startup). When this routine returns, the requested | 511 | * (or timeout in startup). When this routine returns, the requested |
522 | * CPU is either fully running and configured or known to be dead. | 512 | * CPU is either fully running and configured or known to be dead. |
@@ -524,15 +514,14 @@ start_secondary(void *unused) | |||
524 | * We call this routine sequentially 1 CPU at a time, so no need for | 514 | * We call this routine sequentially 1 CPU at a time, so no need for |
525 | * locking */ | 515 | * locking */ |
526 | 516 | ||
527 | static void __init | 517 | static void __init do_boot_cpu(__u8 cpu) |
528 | do_boot_cpu(__u8 cpu) | ||
529 | { | 518 | { |
530 | struct task_struct *idle; | 519 | struct task_struct *idle; |
531 | int timeout; | 520 | int timeout; |
532 | unsigned long flags; | 521 | unsigned long flags; |
533 | int quad_boot = (1<<cpu) & voyager_quad_processors | 522 | int quad_boot = (1 << cpu) & voyager_quad_processors |
534 | & ~( voyager_extended_vic_processors | 523 | & ~(voyager_extended_vic_processors |
535 | & voyager_allowed_boot_processors); | 524 | & voyager_allowed_boot_processors); |
536 | 525 | ||
537 | /* This is an area in head.S which was used to set up the | 526 | /* This is an area in head.S which was used to set up the |
538 | * initial kernel stack. We need to alter this to give the | 527 | * initial kernel stack. We need to alter this to give the |
@@ -543,10 +532,10 @@ do_boot_cpu(__u8 cpu) | |||
543 | } stack_start; | 532 | } stack_start; |
544 | /* This is the format of the CPI IDT gate (in real mode) which | 533 | /* This is the format of the CPI IDT gate (in real mode) which |
545 | * we're hijacking to boot the CPU */ | 534 | * we're hijacking to boot the CPU */ |
546 | union IDTFormat { | 535 | union IDTFormat { |
547 | struct seg { | 536 | struct seg { |
548 | __u16 Offset; | 537 | __u16 Offset; |
549 | __u16 Segment; | 538 | __u16 Segment; |
550 | } idt; | 539 | } idt; |
551 | __u32 val; | 540 | __u32 val; |
552 | } hijack_source; | 541 | } hijack_source; |
@@ -565,19 +554,19 @@ do_boot_cpu(__u8 cpu) | |||
565 | alternatives_smp_switch(1); | 554 | alternatives_smp_switch(1); |
566 | 555 | ||
567 | idle = fork_idle(cpu); | 556 | idle = fork_idle(cpu); |
568 | if(IS_ERR(idle)) | 557 | if (IS_ERR(idle)) |
569 | panic("failed fork for CPU%d", cpu); | 558 | panic("failed fork for CPU%d", cpu); |
570 | idle->thread.eip = (unsigned long) start_secondary; | 559 | idle->thread.eip = (unsigned long)start_secondary; |
571 | /* init_tasks (in sched.c) is indexed logically */ | 560 | /* init_tasks (in sched.c) is indexed logically */ |
572 | stack_start.esp = (void *) idle->thread.esp; | 561 | stack_start.esp = (void *)idle->thread.esp; |
573 | 562 | ||
574 | init_gdt(cpu); | 563 | init_gdt(cpu); |
575 | per_cpu(current_task, cpu) = idle; | 564 | per_cpu(current_task, cpu) = idle; |
576 | early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); | 565 | early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); |
577 | irq_ctx_init(cpu); | 566 | irq_ctx_init(cpu); |
578 | 567 | ||
579 | /* Note: Don't modify initial ss override */ | 568 | /* Note: Don't modify initial ss override */ |
580 | VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu, | 569 | VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu, |
581 | (unsigned long)hijack_source.val, hijack_source.idt.Segment, | 570 | (unsigned long)hijack_source.val, hijack_source.idt.Segment, |
582 | hijack_source.idt.Offset, stack_start.esp)); | 571 | hijack_source.idt.Offset, stack_start.esp)); |
583 | 572 | ||
@@ -586,16 +575,23 @@ do_boot_cpu(__u8 cpu) | |||
586 | min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS)); | 575 | min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS)); |
587 | flush_tlb_all(); | 576 | flush_tlb_all(); |
588 | 577 | ||
589 | if(quad_boot) { | 578 | if (quad_boot) { |
590 | printk("CPU %d: non extended Quad boot\n", cpu); | 579 | printk("CPU %d: non extended Quad boot\n", cpu); |
591 | hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE)*4); | 580 | hijack_vector = |
581 | (__u32 *) | ||
582 | phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE) * 4); | ||
592 | *hijack_vector = hijack_source.val; | 583 | *hijack_vector = hijack_source.val; |
593 | } else { | 584 | } else { |
594 | printk("CPU%d: extended VIC boot\n", cpu); | 585 | printk("CPU%d: extended VIC boot\n", cpu); |
595 | hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE)*4); | 586 | hijack_vector = |
587 | (__u32 *) | ||
588 | phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE) * 4); | ||
596 | *hijack_vector = hijack_source.val; | 589 | *hijack_vector = hijack_source.val; |
597 | /* VIC errata, may also receive interrupt at this address */ | 590 | /* VIC errata, may also receive interrupt at this address */ |
598 | hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI + VIC_DEFAULT_CPI_BASE)*4); | 591 | hijack_vector = |
592 | (__u32 *) | ||
593 | phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI + | ||
594 | VIC_DEFAULT_CPI_BASE) * 4); | ||
599 | *hijack_vector = hijack_source.val; | 595 | *hijack_vector = hijack_source.val; |
600 | } | 596 | } |
601 | /* All non-boot CPUs start with interrupts fully masked. Need | 597 | /* All non-boot CPUs start with interrupts fully masked. Need |
@@ -603,73 +599,76 @@ do_boot_cpu(__u8 cpu) | |||
603 | * this in the VIC by masquerading as the processor we're | 599 | * this in the VIC by masquerading as the processor we're |
604 | * about to boot and lowering its interrupt mask */ | 600 | * about to boot and lowering its interrupt mask */ |
605 | local_irq_save(flags); | 601 | local_irq_save(flags); |
606 | if(quad_boot) { | 602 | if (quad_boot) { |
607 | send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI); | 603 | send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI); |
608 | } else { | 604 | } else { |
609 | outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID); | 605 | outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID); |
610 | /* here we're altering registers belonging to `cpu' */ | 606 | /* here we're altering registers belonging to `cpu' */ |
611 | 607 | ||
612 | outb(VIC_BOOT_INTERRUPT_MASK, 0x21); | 608 | outb(VIC_BOOT_INTERRUPT_MASK, 0x21); |
613 | /* now go back to our original identity */ | 609 | /* now go back to our original identity */ |
614 | outb(boot_cpu_id, VIC_PROCESSOR_ID); | 610 | outb(boot_cpu_id, VIC_PROCESSOR_ID); |
615 | 611 | ||
616 | /* and boot the CPU */ | 612 | /* and boot the CPU */ |
617 | 613 | ||
618 | send_CPI((1<<cpu), VIC_CPU_BOOT_CPI); | 614 | send_CPI((1 << cpu), VIC_CPU_BOOT_CPI); |
619 | } | 615 | } |
620 | cpu_booted_map = 0; | 616 | cpu_booted_map = 0; |
621 | local_irq_restore(flags); | 617 | local_irq_restore(flags); |
622 | 618 | ||
623 | /* now wait for it to become ready (or timeout) */ | 619 | /* now wait for it to become ready (or timeout) */ |
624 | for(timeout = 0; timeout < 50000; timeout++) { | 620 | for (timeout = 0; timeout < 50000; timeout++) { |
625 | if(cpu_booted_map) | 621 | if (cpu_booted_map) |
626 | break; | 622 | break; |
627 | udelay(100); | 623 | udelay(100); |
628 | } | 624 | } |
629 | /* reset the page table */ | 625 | /* reset the page table */ |
630 | zap_low_mappings(); | 626 | zap_low_mappings(); |
631 | 627 | ||
632 | if (cpu_booted_map) { | 628 | if (cpu_booted_map) { |
633 | VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n", | 629 | VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n", |
634 | cpu, smp_processor_id())); | 630 | cpu, smp_processor_id())); |
635 | 631 | ||
636 | printk("CPU%d: ", cpu); | 632 | printk("CPU%d: ", cpu); |
637 | print_cpu_info(&cpu_data(cpu)); | 633 | print_cpu_info(&cpu_data(cpu)); |
638 | wmb(); | 634 | wmb(); |
639 | cpu_set(cpu, cpu_callout_map); | 635 | cpu_set(cpu, cpu_callout_map); |
640 | cpu_set(cpu, cpu_present_map); | 636 | cpu_set(cpu, cpu_present_map); |
641 | } | 637 | } else { |
642 | else { | ||
643 | printk("CPU%d FAILED TO BOOT: ", cpu); | 638 | printk("CPU%d FAILED TO BOOT: ", cpu); |
644 | if (*((volatile unsigned char *)phys_to_virt(start_phys_address))==0xA5) | 639 | if (* |
640 | ((volatile unsigned char *)phys_to_virt(start_phys_address)) | ||
641 | == 0xA5) | ||
645 | printk("Stuck.\n"); | 642 | printk("Stuck.\n"); |
646 | else | 643 | else |
647 | printk("Not responding.\n"); | 644 | printk("Not responding.\n"); |
648 | 645 | ||
649 | cpucount--; | 646 | cpucount--; |
650 | } | 647 | } |
651 | } | 648 | } |
652 | 649 | ||
653 | void __init | 650 | void __init smp_boot_cpus(void) |
654 | smp_boot_cpus(void) | ||
655 | { | 651 | { |
656 | int i; | 652 | int i; |
657 | 653 | ||
658 | /* CAT BUS initialisation must be done after the memory */ | 654 | /* CAT BUS initialisation must be done after the memory */ |
659 | /* FIXME: The L4 has a catbus too, it just needs to be | 655 | /* FIXME: The L4 has a catbus too, it just needs to be |
660 | * accessed in a totally different way */ | 656 | * accessed in a totally different way */ |
661 | if(voyager_level == 5) { | 657 | if (voyager_level == 5) { |
662 | voyager_cat_init(); | 658 | voyager_cat_init(); |
663 | 659 | ||
664 | /* now that the cat has probed the Voyager System Bus, sanity | 660 | /* now that the cat has probed the Voyager System Bus, sanity |
665 | * check the cpu map */ | 661 | * check the cpu map */ |
666 | if( ((voyager_quad_processors | voyager_extended_vic_processors) | 662 | if (((voyager_quad_processors | voyager_extended_vic_processors) |
667 | & cpus_addr(phys_cpu_present_map)[0]) != cpus_addr(phys_cpu_present_map)[0]) { | 663 | & cpus_addr(phys_cpu_present_map)[0]) != |
664 | cpus_addr(phys_cpu_present_map)[0]) { | ||
668 | /* should panic */ | 665 | /* should panic */ |
669 | printk("\n\n***WARNING*** Sanity check of CPU present map FAILED\n"); | 666 | printk("\n\n***WARNING*** " |
667 | "Sanity check of CPU present map FAILED\n"); | ||
670 | } | 668 | } |
671 | } else if(voyager_level == 4) | 669 | } else if (voyager_level == 4) |
672 | voyager_extended_vic_processors = cpus_addr(phys_cpu_present_map)[0]; | 670 | voyager_extended_vic_processors = |
671 | cpus_addr(phys_cpu_present_map)[0]; | ||
673 | 672 | ||
674 | /* this sets up the idle task to run on the current cpu */ | 673 | /* this sets up the idle task to run on the current cpu */ |
675 | voyager_extended_cpus = 1; | 674 | voyager_extended_cpus = 1; |
@@ -678,14 +677,14 @@ smp_boot_cpus(void) | |||
678 | //global_irq_holder = boot_cpu_id; | 677 | //global_irq_holder = boot_cpu_id; |
679 | 678 | ||
680 | /* FIXME: Need to do something about this but currently only works | 679 | /* FIXME: Need to do something about this but currently only works |
681 | * on CPUs with a tsc which none of mine have. | 680 | * on CPUs with a tsc which none of mine have. |
682 | smp_tune_scheduling(); | 681 | smp_tune_scheduling(); |
683 | */ | 682 | */ |
684 | smp_store_cpu_info(boot_cpu_id); | 683 | smp_store_cpu_info(boot_cpu_id); |
685 | printk("CPU%d: ", boot_cpu_id); | 684 | printk("CPU%d: ", boot_cpu_id); |
686 | print_cpu_info(&cpu_data(boot_cpu_id)); | 685 | print_cpu_info(&cpu_data(boot_cpu_id)); |
687 | 686 | ||
688 | if(is_cpu_quad()) { | 687 | if (is_cpu_quad()) { |
689 | /* booting on a Quad CPU */ | 688 | /* booting on a Quad CPU */ |
690 | printk("VOYAGER SMP: Boot CPU is Quad\n"); | 689 | printk("VOYAGER SMP: Boot CPU is Quad\n"); |
691 | qic_setup(); | 690 | qic_setup(); |
@@ -697,11 +696,11 @@ smp_boot_cpus(void) | |||
697 | 696 | ||
698 | cpu_set(boot_cpu_id, cpu_online_map); | 697 | cpu_set(boot_cpu_id, cpu_online_map); |
699 | cpu_set(boot_cpu_id, cpu_callout_map); | 698 | cpu_set(boot_cpu_id, cpu_callout_map); |
700 | 699 | ||
701 | /* loop over all the extended VIC CPUs and boot them. The | 700 | /* loop over all the extended VIC CPUs and boot them. The |
702 | * Quad CPUs must be bootstrapped by their extended VIC cpu */ | 701 | * Quad CPUs must be bootstrapped by their extended VIC cpu */ |
703 | for(i = 0; i < NR_CPUS; i++) { | 702 | for (i = 0; i < NR_CPUS; i++) { |
704 | if(i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map)) | 703 | if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map)) |
705 | continue; | 704 | continue; |
706 | do_boot_cpu(i); | 705 | do_boot_cpu(i); |
707 | /* This udelay seems to be needed for the Quad boots | 706 | /* This udelay seems to be needed for the Quad boots |
@@ -715,25 +714,26 @@ smp_boot_cpus(void) | |||
715 | for (i = 0; i < NR_CPUS; i++) | 714 | for (i = 0; i < NR_CPUS; i++) |
716 | if (cpu_isset(i, cpu_online_map)) | 715 | if (cpu_isset(i, cpu_online_map)) |
717 | bogosum += cpu_data(i).loops_per_jiffy; | 716 | bogosum += cpu_data(i).loops_per_jiffy; |
718 | printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", | 717 | printk(KERN_INFO "Total of %d processors activated " |
719 | cpucount+1, | 718 | "(%lu.%02lu BogoMIPS).\n", |
720 | bogosum/(500000/HZ), | 719 | cpucount + 1, bogosum / (500000 / HZ), |
721 | (bogosum/(5000/HZ))%100); | 720 | (bogosum / (5000 / HZ)) % 100); |
722 | } | 721 | } |
723 | voyager_extended_cpus = hweight32(voyager_extended_vic_processors); | 722 | voyager_extended_cpus = hweight32(voyager_extended_vic_processors); |
724 | printk("VOYAGER: Extended (interrupt handling CPUs): %d, non-extended: %d\n", voyager_extended_cpus, num_booting_cpus() - voyager_extended_cpus); | 723 | printk("VOYAGER: Extended (interrupt handling CPUs): " |
724 | "%d, non-extended: %d\n", voyager_extended_cpus, | ||
725 | num_booting_cpus() - voyager_extended_cpus); | ||
725 | /* that's it, switch to symmetric mode */ | 726 | /* that's it, switch to symmetric mode */ |
726 | outb(0, VIC_PRIORITY_REGISTER); | 727 | outb(0, VIC_PRIORITY_REGISTER); |
727 | outb(0, VIC_CLAIM_REGISTER_0); | 728 | outb(0, VIC_CLAIM_REGISTER_0); |
728 | outb(0, VIC_CLAIM_REGISTER_1); | 729 | outb(0, VIC_CLAIM_REGISTER_1); |
729 | 730 | ||
730 | VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus())); | 731 | VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus())); |
731 | } | 732 | } |
732 | 733 | ||
733 | /* Reload the secondary CPUs task structure (this function does not | 734 | /* Reload the secondary CPUs task structure (this function does not |
734 | * return ) */ | 735 | * return ) */ |
735 | void __init | 736 | void __init initialize_secondary(void) |
736 | initialize_secondary(void) | ||
737 | { | 737 | { |
738 | #if 0 | 738 | #if 0 |
739 | // AC kernels only | 739 | // AC kernels only |
@@ -745,11 +745,9 @@ initialize_secondary(void) | |||
745 | * basically just the stack pointer and the eip. | 745 | * basically just the stack pointer and the eip. |
746 | */ | 746 | */ |
747 | 747 | ||
748 | asm volatile( | 748 | asm volatile ("movl %0,%%esp\n\t" |
749 | "movl %0,%%esp\n\t" | 749 | "jmp *%1"::"r" (current->thread.esp), |
750 | "jmp *%1" | 750 | "r"(current->thread.eip)); |
751 | : | ||
752 | :"r" (current->thread.esp),"r" (current->thread.eip)); | ||
753 | } | 751 | } |
754 | 752 | ||
755 | /* handle a Voyager SYS_INT -- If we don't, the base board will | 753 | /* handle a Voyager SYS_INT -- If we don't, the base board will |
@@ -758,25 +756,23 @@ initialize_secondary(void) | |||
758 | * System interrupts occur because some problem was detected on the | 756 | * System interrupts occur because some problem was detected on the |
759 | * various busses. To find out what you have to probe all the | 757 | * various busses. To find out what you have to probe all the |
760 | * hardware via the CAT bus. FIXME: At the moment we do nothing. */ | 758 | * hardware via the CAT bus. FIXME: At the moment we do nothing. */ |
761 | fastcall void | 759 | fastcall void smp_vic_sys_interrupt(struct pt_regs *regs) |
762 | smp_vic_sys_interrupt(struct pt_regs *regs) | ||
763 | { | 760 | { |
764 | ack_CPI(VIC_SYS_INT); | 761 | ack_CPI(VIC_SYS_INT); |
765 | printk("Voyager SYSTEM INTERRUPT\n"); | 762 | printk("Voyager SYSTEM INTERRUPT\n"); |
766 | } | 763 | } |
767 | 764 | ||
768 | /* Handle a voyager CMN_INT; These interrupts occur either because of | 765 | /* Handle a voyager CMN_INT; These interrupts occur either because of |
769 | * a system status change or because a single bit memory error | 766 | * a system status change or because a single bit memory error |
770 | * occurred. FIXME: At the moment, ignore all this. */ | 767 | * occurred. FIXME: At the moment, ignore all this. */ |
771 | fastcall void | 768 | fastcall void smp_vic_cmn_interrupt(struct pt_regs *regs) |
772 | smp_vic_cmn_interrupt(struct pt_regs *regs) | ||
773 | { | 769 | { |
774 | static __u8 in_cmn_int = 0; | 770 | static __u8 in_cmn_int = 0; |
775 | static DEFINE_SPINLOCK(cmn_int_lock); | 771 | static DEFINE_SPINLOCK(cmn_int_lock); |
776 | 772 | ||
777 | /* common ints are broadcast, so make sure we only do this once */ | 773 | /* common ints are broadcast, so make sure we only do this once */ |
778 | _raw_spin_lock(&cmn_int_lock); | 774 | _raw_spin_lock(&cmn_int_lock); |
779 | if(in_cmn_int) | 775 | if (in_cmn_int) |
780 | goto unlock_end; | 776 | goto unlock_end; |
781 | 777 | ||
782 | in_cmn_int++; | 778 | in_cmn_int++; |
@@ -784,12 +780,12 @@ smp_vic_cmn_interrupt(struct pt_regs *regs) | |||
784 | 780 | ||
785 | VDEBUG(("Voyager COMMON INTERRUPT\n")); | 781 | VDEBUG(("Voyager COMMON INTERRUPT\n")); |
786 | 782 | ||
787 | if(voyager_level == 5) | 783 | if (voyager_level == 5) |
788 | voyager_cat_do_common_interrupt(); | 784 | voyager_cat_do_common_interrupt(); |
789 | 785 | ||
790 | _raw_spin_lock(&cmn_int_lock); | 786 | _raw_spin_lock(&cmn_int_lock); |
791 | in_cmn_int = 0; | 787 | in_cmn_int = 0; |
792 | unlock_end: | 788 | unlock_end: |
793 | _raw_spin_unlock(&cmn_int_lock); | 789 | _raw_spin_unlock(&cmn_int_lock); |
794 | ack_CPI(VIC_CMN_INT); | 790 | ack_CPI(VIC_CMN_INT); |
795 | } | 791 | } |
@@ -797,26 +793,24 @@ smp_vic_cmn_interrupt(struct pt_regs *regs) | |||
797 | /* | 793 | /* |
798 | * Reschedule call back. Nothing to do, all the work is done | 794 | * Reschedule call back. Nothing to do, all the work is done |
799 | * automatically when we return from the interrupt. */ | 795 | * automatically when we return from the interrupt. */ |
800 | static void | 796 | static void smp_reschedule_interrupt(void) |
801 | smp_reschedule_interrupt(void) | ||
802 | { | 797 | { |
803 | /* do nothing */ | 798 | /* do nothing */ |
804 | } | 799 | } |
805 | 800 | ||
806 | static struct mm_struct * flush_mm; | 801 | static struct mm_struct *flush_mm; |
807 | static unsigned long flush_va; | 802 | static unsigned long flush_va; |
808 | static DEFINE_SPINLOCK(tlbstate_lock); | 803 | static DEFINE_SPINLOCK(tlbstate_lock); |
809 | #define FLUSH_ALL 0xffffffff | 804 | #define FLUSH_ALL 0xffffffff |
810 | 805 | ||
811 | /* | 806 | /* |
812 | * We cannot call mmdrop() because we are in interrupt context, | 807 | * We cannot call mmdrop() because we are in interrupt context, |
813 | * instead update mm->cpu_vm_mask. | 808 | * instead update mm->cpu_vm_mask. |
814 | * | 809 | * |
815 | * We need to reload %cr3 since the page tables may be going | 810 | * We need to reload %cr3 since the page tables may be going |
816 | * away from under us.. | 811 | * away from under us.. |
817 | */ | 812 | */ |
818 | static inline void | 813 | static inline void leave_mm(unsigned long cpu) |
819 | leave_mm (unsigned long cpu) | ||
820 | { | 814 | { |
821 | if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) | 815 | if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) |
822 | BUG(); | 816 | BUG(); |
@@ -824,12 +818,10 @@ leave_mm (unsigned long cpu) | |||
824 | load_cr3(swapper_pg_dir); | 818 | load_cr3(swapper_pg_dir); |
825 | } | 819 | } |
826 | 820 | ||
827 | |||
828 | /* | 821 | /* |
829 | * Invalidate call-back | 822 | * Invalidate call-back |
830 | */ | 823 | */ |
831 | static void | 824 | static void smp_invalidate_interrupt(void) |
832 | smp_invalidate_interrupt(void) | ||
833 | { | 825 | { |
834 | __u8 cpu = smp_processor_id(); | 826 | __u8 cpu = smp_processor_id(); |
835 | 827 | ||
@@ -837,9 +829,9 @@ smp_invalidate_interrupt(void) | |||
837 | return; | 829 | return; |
838 | /* This will flood messages. Don't uncomment unless you see | 830 | /* This will flood messages. Don't uncomment unless you see |
839 | * Problems with cross cpu invalidation | 831 | * Problems with cross cpu invalidation |
840 | VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n", | 832 | VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n", |
841 | smp_processor_id())); | 833 | smp_processor_id())); |
842 | */ | 834 | */ |
843 | 835 | ||
844 | if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) { | 836 | if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) { |
845 | if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) { | 837 | if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) { |
@@ -857,11 +849,10 @@ smp_invalidate_interrupt(void) | |||
857 | 849 | ||
858 | /* All the new flush operations for 2.4 */ | 850 | /* All the new flush operations for 2.4 */ |
859 | 851 | ||
860 | |||
861 | /* This routine is called with a physical cpu mask */ | 852 | /* This routine is called with a physical cpu mask */ |
862 | static void | 853 | static void |
863 | voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm, | 854 | voyager_flush_tlb_others(unsigned long cpumask, struct mm_struct *mm, |
864 | unsigned long va) | 855 | unsigned long va) |
865 | { | 856 | { |
866 | int stuck = 50000; | 857 | int stuck = 50000; |
867 | 858 | ||
@@ -875,7 +866,7 @@ voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm, | |||
875 | BUG(); | 866 | BUG(); |
876 | 867 | ||
877 | spin_lock(&tlbstate_lock); | 868 | spin_lock(&tlbstate_lock); |
878 | 869 | ||
879 | flush_mm = mm; | 870 | flush_mm = mm; |
880 | flush_va = va; | 871 | flush_va = va; |
881 | atomic_set_mask(cpumask, &smp_invalidate_needed); | 872 | atomic_set_mask(cpumask, &smp_invalidate_needed); |
@@ -887,23 +878,23 @@ voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm, | |||
887 | 878 | ||
888 | while (smp_invalidate_needed) { | 879 | while (smp_invalidate_needed) { |
889 | mb(); | 880 | mb(); |
890 | if(--stuck == 0) { | 881 | if (--stuck == 0) { |
891 | printk("***WARNING*** Stuck doing invalidate CPI (CPU%d)\n", smp_processor_id()); | 882 | printk("***WARNING*** Stuck doing invalidate CPI " |
883 | "(CPU%d)\n", smp_processor_id()); | ||
892 | break; | 884 | break; |
893 | } | 885 | } |
894 | } | 886 | } |
895 | 887 | ||
896 | /* Uncomment only to debug invalidation problems | 888 | /* Uncomment only to debug invalidation problems |
897 | VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu)); | 889 | VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu)); |
898 | */ | 890 | */ |
899 | 891 | ||
900 | flush_mm = NULL; | 892 | flush_mm = NULL; |
901 | flush_va = 0; | 893 | flush_va = 0; |
902 | spin_unlock(&tlbstate_lock); | 894 | spin_unlock(&tlbstate_lock); |
903 | } | 895 | } |
904 | 896 | ||
905 | void | 897 | void flush_tlb_current_task(void) |
906 | flush_tlb_current_task(void) | ||
907 | { | 898 | { |
908 | struct mm_struct *mm = current->mm; | 899 | struct mm_struct *mm = current->mm; |
909 | unsigned long cpu_mask; | 900 | unsigned long cpu_mask; |
@@ -918,9 +909,7 @@ flush_tlb_current_task(void) | |||
918 | preempt_enable(); | 909 | preempt_enable(); |
919 | } | 910 | } |
920 | 911 | ||
921 | 912 | void flush_tlb_mm(struct mm_struct *mm) | |
922 | void | ||
923 | flush_tlb_mm (struct mm_struct * mm) | ||
924 | { | 913 | { |
925 | unsigned long cpu_mask; | 914 | unsigned long cpu_mask; |
926 | 915 | ||
@@ -940,7 +929,7 @@ flush_tlb_mm (struct mm_struct * mm) | |||
940 | preempt_enable(); | 929 | preempt_enable(); |
941 | } | 930 | } |
942 | 931 | ||
943 | void flush_tlb_page(struct vm_area_struct * vma, unsigned long va) | 932 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) |
944 | { | 933 | { |
945 | struct mm_struct *mm = vma->vm_mm; | 934 | struct mm_struct *mm = vma->vm_mm; |
946 | unsigned long cpu_mask; | 935 | unsigned long cpu_mask; |
@@ -949,10 +938,10 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va) | |||
949 | 938 | ||
950 | cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id()); | 939 | cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id()); |
951 | if (current->active_mm == mm) { | 940 | if (current->active_mm == mm) { |
952 | if(current->mm) | 941 | if (current->mm) |
953 | __flush_tlb_one(va); | 942 | __flush_tlb_one(va); |
954 | else | 943 | else |
955 | leave_mm(smp_processor_id()); | 944 | leave_mm(smp_processor_id()); |
956 | } | 945 | } |
957 | 946 | ||
958 | if (cpu_mask) | 947 | if (cpu_mask) |
@@ -960,21 +949,21 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va) | |||
960 | 949 | ||
961 | preempt_enable(); | 950 | preempt_enable(); |
962 | } | 951 | } |
952 | |||
963 | EXPORT_SYMBOL(flush_tlb_page); | 953 | EXPORT_SYMBOL(flush_tlb_page); |
964 | 954 | ||
965 | /* enable the requested IRQs */ | 955 | /* enable the requested IRQs */ |
966 | static void | 956 | static void smp_enable_irq_interrupt(void) |
967 | smp_enable_irq_interrupt(void) | ||
968 | { | 957 | { |
969 | __u8 irq; | 958 | __u8 irq; |
970 | __u8 cpu = get_cpu(); | 959 | __u8 cpu = get_cpu(); |
971 | 960 | ||
972 | VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu, | 961 | VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu, |
973 | vic_irq_enable_mask[cpu])); | 962 | vic_irq_enable_mask[cpu])); |
974 | 963 | ||
975 | spin_lock(&vic_irq_lock); | 964 | spin_lock(&vic_irq_lock); |
976 | for(irq = 0; irq < 16; irq++) { | 965 | for (irq = 0; irq < 16; irq++) { |
977 | if(vic_irq_enable_mask[cpu] & (1<<irq)) | 966 | if (vic_irq_enable_mask[cpu] & (1 << irq)) |
978 | enable_local_vic_irq(irq); | 967 | enable_local_vic_irq(irq); |
979 | } | 968 | } |
980 | vic_irq_enable_mask[cpu] = 0; | 969 | vic_irq_enable_mask[cpu] = 0; |
@@ -982,17 +971,16 @@ smp_enable_irq_interrupt(void) | |||
982 | 971 | ||
983 | put_cpu_no_resched(); | 972 | put_cpu_no_resched(); |
984 | } | 973 | } |
985 | 974 | ||
986 | /* | 975 | /* |
987 | * CPU halt call-back | 976 | * CPU halt call-back |
988 | */ | 977 | */ |
989 | static void | 978 | static void smp_stop_cpu_function(void *dummy) |
990 | smp_stop_cpu_function(void *dummy) | ||
991 | { | 979 | { |
992 | VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id())); | 980 | VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id())); |
993 | cpu_clear(smp_processor_id(), cpu_online_map); | 981 | cpu_clear(smp_processor_id(), cpu_online_map); |
994 | local_irq_disable(); | 982 | local_irq_disable(); |
995 | for(;;) | 983 | for (;;) |
996 | halt(); | 984 | halt(); |
997 | } | 985 | } |
998 | 986 | ||
@@ -1006,14 +994,13 @@ struct call_data_struct { | |||
1006 | int wait; | 994 | int wait; |
1007 | }; | 995 | }; |
1008 | 996 | ||
1009 | static struct call_data_struct * call_data; | 997 | static struct call_data_struct *call_data; |
1010 | 998 | ||
1011 | /* execute a thread on a new CPU. The function to be called must be | 999 | /* execute a thread on a new CPU. The function to be called must be |
1012 | * previously set up. This is used to schedule a function for | 1000 | * previously set up. This is used to schedule a function for |
1013 | * execution on all CPUs - set up the function then broadcast a | 1001 | * execution on all CPUs - set up the function then broadcast a |
1014 | * function_interrupt CPI to come here on each CPU */ | 1002 | * function_interrupt CPI to come here on each CPU */ |
1015 | static void | 1003 | static void smp_call_function_interrupt(void) |
1016 | smp_call_function_interrupt(void) | ||
1017 | { | 1004 | { |
1018 | void (*func) (void *info) = call_data->func; | 1005 | void (*func) (void *info) = call_data->func; |
1019 | void *info = call_data->info; | 1006 | void *info = call_data->info; |
@@ -1027,16 +1014,17 @@ smp_call_function_interrupt(void) | |||
1027 | * about to execute the function | 1014 | * about to execute the function |
1028 | */ | 1015 | */ |
1029 | mb(); | 1016 | mb(); |
1030 | if(!test_and_clear_bit(cpu, &call_data->started)) { | 1017 | if (!test_and_clear_bit(cpu, &call_data->started)) { |
1031 | /* If the bit wasn't set, this could be a replay */ | 1018 | /* If the bit wasn't set, this could be a replay */ |
1032 | printk(KERN_WARNING "VOYAGER SMP: CPU %d received call funtion with no call pending\n", cpu); | 1019 | printk(KERN_WARNING "VOYAGER SMP: CPU %d received call funtion" |
1020 | " with no call pending\n", cpu); | ||
1033 | return; | 1021 | return; |
1034 | } | 1022 | } |
1035 | /* | 1023 | /* |
1036 | * At this point the info structure may be out of scope unless wait==1 | 1024 | * At this point the info structure may be out of scope unless wait==1 |
1037 | */ | 1025 | */ |
1038 | irq_enter(); | 1026 | irq_enter(); |
1039 | (*func)(info); | 1027 | (*func) (info); |
1040 | __get_cpu_var(irq_stat).irq_call_count++; | 1028 | __get_cpu_var(irq_stat).irq_call_count++; |
1041 | irq_exit(); | 1029 | irq_exit(); |
1042 | if (wait) { | 1030 | if (wait) { |
@@ -1046,14 +1034,13 @@ smp_call_function_interrupt(void) | |||
1046 | } | 1034 | } |
1047 | 1035 | ||
1048 | static int | 1036 | static int |
1049 | voyager_smp_call_function_mask (cpumask_t cpumask, | 1037 | voyager_smp_call_function_mask(cpumask_t cpumask, |
1050 | void (*func) (void *info), void *info, | 1038 | void (*func) (void *info), void *info, int wait) |
1051 | int wait) | ||
1052 | { | 1039 | { |
1053 | struct call_data_struct data; | 1040 | struct call_data_struct data; |
1054 | u32 mask = cpus_addr(cpumask)[0]; | 1041 | u32 mask = cpus_addr(cpumask)[0]; |
1055 | 1042 | ||
1056 | mask &= ~(1<<smp_processor_id()); | 1043 | mask &= ~(1 << smp_processor_id()); |
1057 | 1044 | ||
1058 | if (!mask) | 1045 | if (!mask) |
1059 | return 0; | 1046 | return 0; |
@@ -1093,7 +1080,7 @@ voyager_smp_call_function_mask (cpumask_t cpumask, | |||
1093 | * so we use the system clock to interrupt one processor, which in | 1080 | * so we use the system clock to interrupt one processor, which in |
1094 | * turn, broadcasts a timer CPI to all the others --- we receive that | 1081 | * turn, broadcasts a timer CPI to all the others --- we receive that |
1095 | * CPI here. We don't use this actually for counting so losing | 1082 | * CPI here. We don't use this actually for counting so losing |
1096 | * ticks doesn't matter | 1083 | * ticks doesn't matter |
1097 | * | 1084 | * |
1098 | * FIXME: For those CPUs which actually have a local APIC, we could | 1085 | * FIXME: For those CPUs which actually have a local APIC, we could |
1099 | * try to use it to trigger this interrupt instead of having to | 1086 | * try to use it to trigger this interrupt instead of having to |
@@ -1101,8 +1088,7 @@ voyager_smp_call_function_mask (cpumask_t cpumask, | |||
1101 | * no local APIC, so I can't do this | 1088 | * no local APIC, so I can't do this |
1102 | * | 1089 | * |
1103 | * This function is currently a placeholder and is unused in the code */ | 1090 | * This function is currently a placeholder and is unused in the code */ |
1104 | fastcall void | 1091 | fastcall void smp_apic_timer_interrupt(struct pt_regs *regs) |
1105 | smp_apic_timer_interrupt(struct pt_regs *regs) | ||
1106 | { | 1092 | { |
1107 | struct pt_regs *old_regs = set_irq_regs(regs); | 1093 | struct pt_regs *old_regs = set_irq_regs(regs); |
1108 | wrapper_smp_local_timer_interrupt(); | 1094 | wrapper_smp_local_timer_interrupt(); |
@@ -1110,8 +1096,7 @@ smp_apic_timer_interrupt(struct pt_regs *regs) | |||
1110 | } | 1096 | } |
1111 | 1097 | ||
1112 | /* All of the QUAD interrupt GATES */ | 1098 | /* All of the QUAD interrupt GATES */ |
1113 | fastcall void | 1099 | fastcall void smp_qic_timer_interrupt(struct pt_regs *regs) |
1114 | smp_qic_timer_interrupt(struct pt_regs *regs) | ||
1115 | { | 1100 | { |
1116 | struct pt_regs *old_regs = set_irq_regs(regs); | 1101 | struct pt_regs *old_regs = set_irq_regs(regs); |
1117 | ack_QIC_CPI(QIC_TIMER_CPI); | 1102 | ack_QIC_CPI(QIC_TIMER_CPI); |
@@ -1119,60 +1104,54 @@ smp_qic_timer_interrupt(struct pt_regs *regs) | |||
1119 | set_irq_regs(old_regs); | 1104 | set_irq_regs(old_regs); |
1120 | } | 1105 | } |
1121 | 1106 | ||
1122 | fastcall void | 1107 | fastcall void smp_qic_invalidate_interrupt(struct pt_regs *regs) |
1123 | smp_qic_invalidate_interrupt(struct pt_regs *regs) | ||
1124 | { | 1108 | { |
1125 | ack_QIC_CPI(QIC_INVALIDATE_CPI); | 1109 | ack_QIC_CPI(QIC_INVALIDATE_CPI); |
1126 | smp_invalidate_interrupt(); | 1110 | smp_invalidate_interrupt(); |
1127 | } | 1111 | } |
1128 | 1112 | ||
1129 | fastcall void | 1113 | fastcall void smp_qic_reschedule_interrupt(struct pt_regs *regs) |
1130 | smp_qic_reschedule_interrupt(struct pt_regs *regs) | ||
1131 | { | 1114 | { |
1132 | ack_QIC_CPI(QIC_RESCHEDULE_CPI); | 1115 | ack_QIC_CPI(QIC_RESCHEDULE_CPI); |
1133 | smp_reschedule_interrupt(); | 1116 | smp_reschedule_interrupt(); |
1134 | } | 1117 | } |
1135 | 1118 | ||
1136 | fastcall void | 1119 | fastcall void smp_qic_enable_irq_interrupt(struct pt_regs *regs) |
1137 | smp_qic_enable_irq_interrupt(struct pt_regs *regs) | ||
1138 | { | 1120 | { |
1139 | ack_QIC_CPI(QIC_ENABLE_IRQ_CPI); | 1121 | ack_QIC_CPI(QIC_ENABLE_IRQ_CPI); |
1140 | smp_enable_irq_interrupt(); | 1122 | smp_enable_irq_interrupt(); |
1141 | } | 1123 | } |
1142 | 1124 | ||
1143 | fastcall void | 1125 | fastcall void smp_qic_call_function_interrupt(struct pt_regs *regs) |
1144 | smp_qic_call_function_interrupt(struct pt_regs *regs) | ||
1145 | { | 1126 | { |
1146 | ack_QIC_CPI(QIC_CALL_FUNCTION_CPI); | 1127 | ack_QIC_CPI(QIC_CALL_FUNCTION_CPI); |
1147 | smp_call_function_interrupt(); | 1128 | smp_call_function_interrupt(); |
1148 | } | 1129 | } |
1149 | 1130 | ||
1150 | fastcall void | 1131 | fastcall void smp_vic_cpi_interrupt(struct pt_regs *regs) |
1151 | smp_vic_cpi_interrupt(struct pt_regs *regs) | ||
1152 | { | 1132 | { |
1153 | struct pt_regs *old_regs = set_irq_regs(regs); | 1133 | struct pt_regs *old_regs = set_irq_regs(regs); |
1154 | __u8 cpu = smp_processor_id(); | 1134 | __u8 cpu = smp_processor_id(); |
1155 | 1135 | ||
1156 | if(is_cpu_quad()) | 1136 | if (is_cpu_quad()) |
1157 | ack_QIC_CPI(VIC_CPI_LEVEL0); | 1137 | ack_QIC_CPI(VIC_CPI_LEVEL0); |
1158 | else | 1138 | else |
1159 | ack_VIC_CPI(VIC_CPI_LEVEL0); | 1139 | ack_VIC_CPI(VIC_CPI_LEVEL0); |
1160 | 1140 | ||
1161 | if(test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu])) | 1141 | if (test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu])) |
1162 | wrapper_smp_local_timer_interrupt(); | 1142 | wrapper_smp_local_timer_interrupt(); |
1163 | if(test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu])) | 1143 | if (test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu])) |
1164 | smp_invalidate_interrupt(); | 1144 | smp_invalidate_interrupt(); |
1165 | if(test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu])) | 1145 | if (test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu])) |
1166 | smp_reschedule_interrupt(); | 1146 | smp_reschedule_interrupt(); |
1167 | if(test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu])) | 1147 | if (test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu])) |
1168 | smp_enable_irq_interrupt(); | 1148 | smp_enable_irq_interrupt(); |
1169 | if(test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu])) | 1149 | if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu])) |
1170 | smp_call_function_interrupt(); | 1150 | smp_call_function_interrupt(); |
1171 | set_irq_regs(old_regs); | 1151 | set_irq_regs(old_regs); |
1172 | } | 1152 | } |
1173 | 1153 | ||
1174 | static void | 1154 | static void do_flush_tlb_all(void *info) |
1175 | do_flush_tlb_all(void* info) | ||
1176 | { | 1155 | { |
1177 | unsigned long cpu = smp_processor_id(); | 1156 | unsigned long cpu = smp_processor_id(); |
1178 | 1157 | ||
@@ -1181,65 +1160,56 @@ do_flush_tlb_all(void* info) | |||
1181 | leave_mm(cpu); | 1160 | leave_mm(cpu); |
1182 | } | 1161 | } |
1183 | 1162 | ||
1184 | |||
1185 | /* flush the TLB of every active CPU in the system */ | 1163 | /* flush the TLB of every active CPU in the system */ |
1186 | void | 1164 | void flush_tlb_all(void) |
1187 | flush_tlb_all(void) | ||
1188 | { | 1165 | { |
1189 | on_each_cpu(do_flush_tlb_all, 0, 1, 1); | 1166 | on_each_cpu(do_flush_tlb_all, 0, 1, 1); |
1190 | } | 1167 | } |
1191 | 1168 | ||
1192 | /* used to set up the trampoline for other CPUs when the memory manager | 1169 | /* used to set up the trampoline for other CPUs when the memory manager |
1193 | * is sorted out */ | 1170 | * is sorted out */ |
1194 | void __init | 1171 | void __init smp_alloc_memory(void) |
1195 | smp_alloc_memory(void) | ||
1196 | { | 1172 | { |
1197 | trampoline_base = (__u32)alloc_bootmem_low_pages(PAGE_SIZE); | 1173 | trampoline_base = (__u32) alloc_bootmem_low_pages(PAGE_SIZE); |
1198 | if(__pa(trampoline_base) >= 0x93000) | 1174 | if (__pa(trampoline_base) >= 0x93000) |
1199 | BUG(); | 1175 | BUG(); |
1200 | } | 1176 | } |
1201 | 1177 | ||
1202 | /* send a reschedule CPI to one CPU by physical CPU number*/ | 1178 | /* send a reschedule CPI to one CPU by physical CPU number*/ |
1203 | static void | 1179 | static void voyager_smp_send_reschedule(int cpu) |
1204 | voyager_smp_send_reschedule(int cpu) | ||
1205 | { | 1180 | { |
1206 | send_one_CPI(cpu, VIC_RESCHEDULE_CPI); | 1181 | send_one_CPI(cpu, VIC_RESCHEDULE_CPI); |
1207 | } | 1182 | } |
1208 | 1183 | ||
1209 | 1184 | int hard_smp_processor_id(void) | |
1210 | int | ||
1211 | hard_smp_processor_id(void) | ||
1212 | { | 1185 | { |
1213 | __u8 i; | 1186 | __u8 i; |
1214 | __u8 cpumask = inb(VIC_PROC_WHO_AM_I); | 1187 | __u8 cpumask = inb(VIC_PROC_WHO_AM_I); |
1215 | if((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER) | 1188 | if ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER) |
1216 | return cpumask & 0x1F; | 1189 | return cpumask & 0x1F; |
1217 | 1190 | ||
1218 | for(i = 0; i < 8; i++) { | 1191 | for (i = 0; i < 8; i++) { |
1219 | if(cpumask & (1<<i)) | 1192 | if (cpumask & (1 << i)) |
1220 | return i; | 1193 | return i; |
1221 | } | 1194 | } |
1222 | printk("** WARNING ** Illegal cpuid returned by VIC: %d", cpumask); | 1195 | printk("** WARNING ** Illegal cpuid returned by VIC: %d", cpumask); |
1223 | return 0; | 1196 | return 0; |
1224 | } | 1197 | } |
1225 | 1198 | ||
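hard_smp_processor_id() above decodes the VIC "who am I" register in two ways: quad-capable boards return an identifier in the low five bits (flagged by the QUAD_IDENTIFIER bits), while other boards return a one-hot mask that has to be scanned. A minimal sketch of that decode; the QUAD_IDENTIFIER value here is invented, the real constant lives in the Voyager headers.

#include <stdio.h>

#define QUAD_IDENTIFIER 0xC0    /* invented value for the sketch */

/* Decode a who-am-I byte: quad boards encode an id, others a one-hot mask. */
static int decode_who_am_i(unsigned char who)
{
        int i;

        if ((who & QUAD_IDENTIFIER) == QUAD_IDENTIFIER)
                return who & 0x1F;

        for (i = 0; i < 8; i++)
                if (who & (1 << i))
                        return i;

        return 0;       /* illegal value: fall back to CPU 0 */
}

int main(void)
{
        printf("%d\n", decode_who_am_i(0x08));  /* one-hot mask: CPU 3 */
        printf("%d\n", decode_who_am_i(0xC5));  /* quad identifier: CPU 5 */
        return 0;
}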
1226 | int | 1199 | int safe_smp_processor_id(void) |
1227 | safe_smp_processor_id(void) | ||
1228 | { | 1200 | { |
1229 | return hard_smp_processor_id(); | 1201 | return hard_smp_processor_id(); |
1230 | } | 1202 | } |
1231 | 1203 | ||
1232 | /* broadcast a halt to all other CPUs */ | 1204 | /* broadcast a halt to all other CPUs */ |
1233 | static void | 1205 | static void voyager_smp_send_stop(void) |
1234 | voyager_smp_send_stop(void) | ||
1235 | { | 1206 | { |
1236 | smp_call_function(smp_stop_cpu_function, NULL, 1, 1); | 1207 | smp_call_function(smp_stop_cpu_function, NULL, 1, 1); |
1237 | } | 1208 | } |
1238 | 1209 | ||
1239 | /* this function is triggered in time.c when a clock tick fires | 1210 | /* this function is triggered in time.c when a clock tick fires |
1240 | * we need to re-broadcast the tick to all CPUs */ | 1211 | * we need to re-broadcast the tick to all CPUs */ |
1241 | void | 1212 | void smp_vic_timer_interrupt(void) |
1242 | smp_vic_timer_interrupt(void) | ||
1243 | { | 1213 | { |
1244 | send_CPI_allbutself(VIC_TIMER_CPI); | 1214 | send_CPI_allbutself(VIC_TIMER_CPI); |
1245 | smp_local_timer_interrupt(); | 1215 | smp_local_timer_interrupt(); |
@@ -1253,8 +1223,7 @@ smp_vic_timer_interrupt(void) | |||
1253 | * multiplier is 1 and it can be changed by writing the new multiplier | 1223 | * multiplier is 1 and it can be changed by writing the new multiplier |
1254 | * value into /proc/profile. | 1224 | * value into /proc/profile. |
1255 | */ | 1225 | */ |
1256 | void | 1226 | void smp_local_timer_interrupt(void) |
1257 | smp_local_timer_interrupt(void) | ||
1258 | { | 1227 | { |
1259 | int cpu = smp_processor_id(); | 1228 | int cpu = smp_processor_id(); |
1260 | long weight; | 1229 | long weight; |
@@ -1269,18 +1238,18 @@ smp_local_timer_interrupt(void) | |||
1269 | * | 1238 | * |
1270 | * Interrupts are already masked off at this point. | 1239 | * Interrupts are already masked off at this point. |
1271 | */ | 1240 | */ |
1272 | per_cpu(prof_counter,cpu) = per_cpu(prof_multiplier, cpu); | 1241 | per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu); |
1273 | if (per_cpu(prof_counter, cpu) != | 1242 | if (per_cpu(prof_counter, cpu) != |
1274 | per_cpu(prof_old_multiplier, cpu)) { | 1243 | per_cpu(prof_old_multiplier, cpu)) { |
1275 | /* FIXME: need to update the vic timer tick here */ | 1244 | /* FIXME: need to update the vic timer tick here */ |
1276 | per_cpu(prof_old_multiplier, cpu) = | 1245 | per_cpu(prof_old_multiplier, cpu) = |
1277 | per_cpu(prof_counter, cpu); | 1246 | per_cpu(prof_counter, cpu); |
1278 | } | 1247 | } |
1279 | 1248 | ||
1280 | update_process_times(user_mode_vm(get_irq_regs())); | 1249 | update_process_times(user_mode_vm(get_irq_regs())); |
1281 | } | 1250 | } |
1282 | 1251 | ||
1283 | if( ((1<<cpu) & voyager_extended_vic_processors) == 0) | 1252 | if (((1 << cpu) & voyager_extended_vic_processors) == 0) |
1284 | /* only extended VIC processors participate in | 1253 | /* only extended VIC processors participate in |
1285 | * interrupt distribution */ | 1254 | * interrupt distribution */ |
1286 | return; | 1255 | return; |
@@ -1296,12 +1265,12 @@ smp_local_timer_interrupt(void) | |||
1296 | * we can take more than 100K local irqs per second on a 100 MHz P5. | 1265 | * we can take more than 100K local irqs per second on a 100 MHz P5. |
1297 | */ | 1266 | */ |
1298 | 1267 | ||
1299 | if((++vic_tick[cpu] & 0x7) != 0) | 1268 | if ((++vic_tick[cpu] & 0x7) != 0) |
1300 | return; | 1269 | return; |
1301 | /* get here every 16 ticks (about every 1/6 of a second) */ | 1270 | /* get here every 16 ticks (about every 1/6 of a second) */ |
1302 | 1271 | ||
1303 | /* Change our priority to give someone else a chance at getting | 1272 | /* Change our priority to give someone else a chance at getting |
1304 | * the IRQ. The algorithm goes like this: | 1273 | * the IRQ. The algorithm goes like this: |
1305 | * | 1274 | * |
1306 | * In the VIC, the dynamically routed interrupt is always | 1275 | * In the VIC, the dynamically routed interrupt is always |
1307 | * handled by the lowest priority eligible (i.e. receiving | 1276 | * handled by the lowest priority eligible (i.e. receiving |
@@ -1325,18 +1294,18 @@ smp_local_timer_interrupt(void) | |||
1325 | * affinity code since we now try to even up the interrupt | 1294 | * affinity code since we now try to even up the interrupt |
1326 | * counts when an affinity binding is keeping them on a | 1295 | * counts when an affinity binding is keeping them on a |
1327 | * particular CPU*/ | 1296 | * particular CPU*/ |
1328 | weight = (vic_intr_count[cpu]*voyager_extended_cpus | 1297 | weight = (vic_intr_count[cpu] * voyager_extended_cpus |
1329 | - vic_intr_total) >> 4; | 1298 | - vic_intr_total) >> 4; |
1330 | weight += 4; | 1299 | weight += 4; |
1331 | if(weight > 7) | 1300 | if (weight > 7) |
1332 | weight = 7; | 1301 | weight = 7; |
1333 | if(weight < 0) | 1302 | if (weight < 0) |
1334 | weight = 0; | 1303 | weight = 0; |
1335 | 1304 | ||
1336 | outb((__u8)weight, VIC_PRIORITY_REGISTER); | 1305 | outb((__u8) weight, VIC_PRIORITY_REGISTER); |
1337 | 1306 | ||
1338 | #ifdef VOYAGER_DEBUG | 1307 | #ifdef VOYAGER_DEBUG |
1339 | if((vic_tick[cpu] & 0xFFF) == 0) { | 1308 | if ((vic_tick[cpu] & 0xFFF) == 0) { |
1340 | /* print this message roughly every 25 secs */ | 1309 | /* print this message roughly every 25 secs */ |
1341 | printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n", | 1310 | printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n", |
1342 | cpu, vic_tick[cpu], weight); | 1311 | cpu, vic_tick[cpu], weight); |
@@ -1345,15 +1314,14 @@ smp_local_timer_interrupt(void) | |||
1345 | } | 1314 | } |
1346 | 1315 | ||
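The weight computed at the end of smp_local_timer_interrupt() above is a crude load-balancing feedback term: a CPU that has taken more than its share of interrupts (its count times the number of extended CPUs exceeding the global total) writes a larger value into the VIC priority register, and since the dynamically routed interrupt goes to the lowest-priority eligible CPU, the line tends to land elsewhere. A standalone sketch of the arithmetic; the shift, the +4 bias and the 0..7 clamp mirror the code above, the sample counts are invented.

#include <stdio.h>

/* Map per-CPU and total interrupt counts to a 0..7 VIC priority value. */
static long vic_priority_weight(unsigned long cpu_count,
                                unsigned long total_count,
                                int nr_extended_cpus)
{
        /* Same arithmetic as the code above, including the signed shift. */
        long weight = ((long)(cpu_count * nr_extended_cpus) - (long)total_count) >> 4;

        weight += 4;
        if (weight > 7)
                weight = 7;
        if (weight < 0)
                weight = 0;
        return weight;
}

int main(void)
{
        /* A CPU that took 400 of 1000 interrupts on a 4-CPU box: clamps to 7. */
        printf("%ld\n", vic_priority_weight(400, 1000, 4));
        /* A CPU that took 100 of 1000: clamps to 0. */
        printf("%ld\n", vic_priority_weight(100, 1000, 4));
        return 0;
}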
1347 | /* setup the profiling timer */ | 1316 | /* setup the profiling timer */ |
1348 | int | 1317 | int setup_profiling_timer(unsigned int multiplier) |
1349 | setup_profiling_timer(unsigned int multiplier) | ||
1350 | { | 1318 | { |
1351 | int i; | 1319 | int i; |
1352 | 1320 | ||
1353 | if ( (!multiplier)) | 1321 | if ((!multiplier)) |
1354 | return -EINVAL; | 1322 | return -EINVAL; |
1355 | 1323 | ||
1356 | /* | 1324 | /* |
1357 | * Set the new multiplier for each CPU. CPUs don't start using the | 1325 | * Set the new multiplier for each CPU. CPUs don't start using the |
1358 | * new values until the next timer interrupt in which they do process | 1326 | * new values until the next timer interrupt in which they do process |
1359 | * accounting. | 1327 | * accounting. |
@@ -1367,15 +1335,13 @@ setup_profiling_timer(unsigned int multiplier) | |||
1367 | /* This is a bit of a mess, but forced on us by the genirq changes | 1335 | /* This is a bit of a mess, but forced on us by the genirq changes |
1368 | * there's no genirq handler that really does what voyager wants | 1336 | * there's no genirq handler that really does what voyager wants |
1369 | * so hack it up with the simple IRQ handler */ | 1337 | * so hack it up with the simple IRQ handler */ |
1370 | static void fastcall | 1338 | static void fastcall handle_vic_irq(unsigned int irq, struct irq_desc *desc) |
1371 | handle_vic_irq(unsigned int irq, struct irq_desc *desc) | ||
1372 | { | 1339 | { |
1373 | before_handle_vic_irq(irq); | 1340 | before_handle_vic_irq(irq); |
1374 | handle_simple_irq(irq, desc); | 1341 | handle_simple_irq(irq, desc); |
1375 | after_handle_vic_irq(irq); | 1342 | after_handle_vic_irq(irq); |
1376 | } | 1343 | } |
1377 | 1344 | ||
1378 | |||
1379 | /* The CPIs are handled in the per cpu 8259s, so they must be | 1345 | /* The CPIs are handled in the per cpu 8259s, so they must be |
1380 | * enabled to be received: FIX: enabling the CPIs in the early | 1346 | * enabled to be received: FIX: enabling the CPIs in the early |
1381 | * boot sequence interferes with bug checking; enable them later | 1347 | * boot sequence interferes with bug checking; enable them later |
@@ -1385,13 +1351,12 @@ handle_vic_irq(unsigned int irq, struct irq_desc *desc) | |||
1385 | #define QIC_SET_GATE(cpi, vector) \ | 1351 | #define QIC_SET_GATE(cpi, vector) \ |
1386 | set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector)) | 1352 | set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector)) |
1387 | 1353 | ||
1388 | void __init | 1354 | void __init smp_intr_init(void) |
1389 | smp_intr_init(void) | ||
1390 | { | 1355 | { |
1391 | int i; | 1356 | int i; |
1392 | 1357 | ||
1393 | /* initialize the per cpu irq mask to all disabled */ | 1358 | /* initialize the per cpu irq mask to all disabled */ |
1394 | for(i = 0; i < NR_CPUS; i++) | 1359 | for (i = 0; i < NR_CPUS; i++) |
1395 | vic_irq_mask[i] = 0xFFFF; | 1360 | vic_irq_mask[i] = 0xFFFF; |
1396 | 1361 | ||
1397 | VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt); | 1362 | VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt); |
@@ -1404,42 +1369,40 @@ smp_intr_init(void) | |||
1404 | QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt); | 1369 | QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt); |
1405 | QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt); | 1370 | QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt); |
1406 | QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt); | 1371 | QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt); |
1407 | |||
1408 | 1372 | ||
1409 | /* now put the VIC descriptor into the first 48 IRQs | 1373 | /* now put the VIC descriptor into the first 48 IRQs |
1410 | * | 1374 | * |
1411 | * This is for later: first 16 correspond to PC IRQs; next 16 | 1375 | * This is for later: first 16 correspond to PC IRQs; next 16 |
1412 | * are Primary MC IRQs and final 16 are Secondary MC IRQs */ | 1376 | * are Primary MC IRQs and final 16 are Secondary MC IRQs */ |
1413 | for(i = 0; i < 48; i++) | 1377 | for (i = 0; i < 48; i++) |
1414 | set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq); | 1378 | set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq); |
1415 | } | 1379 | } |
1416 | 1380 | ||
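smp_intr_init() above wires each CPI to an IDT vector by plain offset arithmetic: QIC_SET_GATE() (and its VIC counterpart) adds the CPI number to a fixed vector base before handing it to set_intr_gate(). A trivial illustration of the mapping; the base value and the CPI numbers are placeholders for the real QIC_*_CPI constants, and printf stands in for set_intr_gate().

#include <stdio.h>

#define QIC_DEFAULT_CPI_BASE    0x90    /* invented base vector for the sketch */

/* Report which IDT vector a given CPI would be wired to. */
static void qic_set_gate(int cpi, const char *handler)
{
        printf("vector 0x%02x -> %s\n", QIC_DEFAULT_CPI_BASE + cpi, handler);
}

int main(void)
{
        qic_set_gate(0, "qic_reschedule_interrupt");
        qic_set_gate(1, "qic_enable_irq_interrupt");
        qic_set_gate(2, "qic_call_function_interrupt");
        return 0;
}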
1417 | /* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per | 1381 | /* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per |
1418 | * processor to receive CPI */ | 1382 | * processor to receive CPI */ |
1419 | static void | 1383 | static void send_CPI(__u32 cpuset, __u8 cpi) |
1420 | send_CPI(__u32 cpuset, __u8 cpi) | ||
1421 | { | 1384 | { |
1422 | int cpu; | 1385 | int cpu; |
1423 | __u32 quad_cpuset = (cpuset & voyager_quad_processors); | 1386 | __u32 quad_cpuset = (cpuset & voyager_quad_processors); |
1424 | 1387 | ||
1425 | if(cpi < VIC_START_FAKE_CPI) { | 1388 | if (cpi < VIC_START_FAKE_CPI) { |
1426 | /* fake CPI are only used for booting, so send to the | 1389 | /* fake CPI are only used for booting, so send to the |
1427 | * extended quads as well---Quads must be VIC booted */ | 1390 | * extended quads as well---Quads must be VIC booted */ |
1428 | outb((__u8)(cpuset), VIC_CPI_Registers[cpi]); | 1391 | outb((__u8) (cpuset), VIC_CPI_Registers[cpi]); |
1429 | return; | 1392 | return; |
1430 | } | 1393 | } |
1431 | if(quad_cpuset) | 1394 | if (quad_cpuset) |
1432 | send_QIC_CPI(quad_cpuset, cpi); | 1395 | send_QIC_CPI(quad_cpuset, cpi); |
1433 | cpuset &= ~quad_cpuset; | 1396 | cpuset &= ~quad_cpuset; |
1434 | cpuset &= 0xff; /* only first 8 CPUs valid for VIC CPI */ | 1397 | cpuset &= 0xff; /* only first 8 CPUs valid for VIC CPI */ |
1435 | if(cpuset == 0) | 1398 | if (cpuset == 0) |
1436 | return; | 1399 | return; |
1437 | for_each_online_cpu(cpu) { | 1400 | for_each_online_cpu(cpu) { |
1438 | if(cpuset & (1<<cpu)) | 1401 | if (cpuset & (1 << cpu)) |
1439 | set_bit(cpi, &vic_cpi_mailbox[cpu]); | 1402 | set_bit(cpi, &vic_cpi_mailbox[cpu]); |
1440 | } | 1403 | } |
1441 | if(cpuset) | 1404 | if (cpuset) |
1442 | outb((__u8)cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]); | 1405 | outb((__u8) cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]); |
1443 | } | 1406 | } |
1444 | 1407 | ||
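send_CPI() above splits the request in two: quad (QIC) processors get the interrupt delivered directly, while the remaining CPUs (only the first eight are reachable this way) have the CPI number posted in their mailbox and are then poked with a single VIC level-0 write. The boot-time fake-CPI path is omitted here. A small model of that split, with the hardware pokes replaced by printf and all masks invented.

#include <stdio.h>

#define NR_CPUS 8

static unsigned long mailbox[NR_CPUS];

/* Deliver 'cpi' to every CPU whose bit is set in 'cpuset'. */
static void send_cpi(unsigned int cpuset, int cpi, unsigned int quad_processors)
{
        unsigned int quad_set = cpuset & quad_processors;
        int cpu;

        if (quad_set)
                printf("QIC delivery of CPI %d to set 0x%x\n", cpi, quad_set);

        cpuset &= ~quad_set;
        cpuset &= 0xff;         /* only the first 8 CPUs have VIC mailboxes */
        if (!cpuset)
                return;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpuset & (1u << cpu))
                        mailbox[cpu] |= 1UL << cpi;     /* post the CPI number */

        printf("VIC level-0 kick to set 0x%x\n", cpuset);
}

int main(void)
{
        send_cpi(0x0f, 3, 0x0c);        /* CPUs 2,3 are quads; 0,1 go via the VIC */
        return 0;
}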
1445 | /* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and | 1408 | /* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and |
@@ -1448,20 +1411,19 @@ send_CPI(__u32 cpuset, __u8 cpi) | |||
1448 | * DON'T make this inline otherwise the cache line read will be | 1411 | * DON'T make this inline otherwise the cache line read will be |
1449 | * optimised away | 1412 | * optimised away |
1450 | * */ | 1413 | * */ |
1451 | static int | 1414 | static int ack_QIC_CPI(__u8 cpi) |
1452 | ack_QIC_CPI(__u8 cpi) { | 1415 | { |
1453 | __u8 cpu = hard_smp_processor_id(); | 1416 | __u8 cpu = hard_smp_processor_id(); |
1454 | 1417 | ||
1455 | cpi &= 7; | 1418 | cpi &= 7; |
1456 | 1419 | ||
1457 | outb(1<<cpi, QIC_INTERRUPT_CLEAR1); | 1420 | outb(1 << cpi, QIC_INTERRUPT_CLEAR1); |
1458 | return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi; | 1421 | return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi; |
1459 | } | 1422 | } |
1460 | 1423 | ||
1461 | static void | 1424 | static void ack_special_QIC_CPI(__u8 cpi) |
1462 | ack_special_QIC_CPI(__u8 cpi) | ||
1463 | { | 1425 | { |
1464 | switch(cpi) { | 1426 | switch (cpi) { |
1465 | case VIC_CMN_INT: | 1427 | case VIC_CMN_INT: |
1466 | outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0); | 1428 | outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0); |
1467 | break; | 1429 | break; |
@@ -1474,8 +1436,7 @@ ack_special_QIC_CPI(__u8 cpi) | |||
1474 | } | 1436 | } |
1475 | 1437 | ||
1476 | /* Acknowledge receipt of CPI in the VIC (essentially an EOI) */ | 1438 | /* Acknowledge receipt of CPI in the VIC (essentially an EOI) */ |
1477 | static void | 1439 | static void ack_VIC_CPI(__u8 cpi) |
1478 | ack_VIC_CPI(__u8 cpi) | ||
1479 | { | 1440 | { |
1480 | #ifdef VOYAGER_DEBUG | 1441 | #ifdef VOYAGER_DEBUG |
1481 | unsigned long flags; | 1442 | unsigned long flags; |
@@ -1484,17 +1445,17 @@ ack_VIC_CPI(__u8 cpi) | |||
1484 | 1445 | ||
1485 | local_irq_save(flags); | 1446 | local_irq_save(flags); |
1486 | isr = vic_read_isr(); | 1447 | isr = vic_read_isr(); |
1487 | if((isr & (1<<(cpi &7))) == 0) { | 1448 | if ((isr & (1 << (cpi & 7))) == 0) { |
1488 | printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi); | 1449 | printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi); |
1489 | } | 1450 | } |
1490 | #endif | 1451 | #endif |
1491 | /* send specific EOI; the two system interrupts have | 1452 | /* send specific EOI; the two system interrupts have |
1492 | * bit 4 set for a separate vector but behave as the | 1453 | * bit 4 set for a separate vector but behave as the |
1493 | * corresponding 3 bit intr */ | 1454 | * corresponding 3 bit intr */ |
1494 | outb_p(0x60|(cpi & 7),0x20); | 1455 | outb_p(0x60 | (cpi & 7), 0x20); |
1495 | 1456 | ||
1496 | #ifdef VOYAGER_DEBUG | 1457 | #ifdef VOYAGER_DEBUG |
1497 | if((vic_read_isr() & (1<<(cpi &7))) != 0) { | 1458 | if ((vic_read_isr() & (1 << (cpi & 7))) != 0) { |
1498 | printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi); | 1459 | printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi); |
1499 | } | 1460 | } |
1500 | local_irq_restore(flags); | 1461 | local_irq_restore(flags); |
@@ -1502,12 +1463,11 @@ ack_VIC_CPI(__u8 cpi) | |||
1502 | } | 1463 | } |
1503 | 1464 | ||
1504 | /* cribbed with thanks from irq.c */ | 1465 | /* cribbed with thanks from irq.c */ |
1505 | #define __byte(x,y) (((unsigned char *)&(y))[x]) | 1466 | #define __byte(x,y) (((unsigned char *)&(y))[x]) |
1506 | #define cached_21(cpu) (__byte(0,vic_irq_mask[cpu])) | 1467 | #define cached_21(cpu) (__byte(0,vic_irq_mask[cpu])) |
1507 | #define cached_A1(cpu) (__byte(1,vic_irq_mask[cpu])) | 1468 | #define cached_A1(cpu) (__byte(1,vic_irq_mask[cpu])) |
1508 | 1469 | ||
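The __byte()/cached_21()/cached_A1() macros above simply address the low and high bytes of the 16-bit per-CPU mask as the master (port 0x21) and slave (port 0xA1) 8259 mask bytes, which enable_local_vic_irq() and disable_local_vic_irq() below rely on. A quick demonstration of the same trick; note it is endian-dependent, which is fine on x86.

#include <stdio.h>

#define __byte(x, y)    (((unsigned char *)&(y))[x])

int main(void)
{
        unsigned short irq_mask = 0xBEEF;

        /* On little-endian x86, byte 0 is the low (master 8259) half. */
        printf("master mask: 0x%02x\n", __byte(0, irq_mask));   /* 0xef */
        printf("slave  mask: 0x%02x\n", __byte(1, irq_mask));   /* 0xbe */
        return 0;
}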
1509 | static unsigned int | 1470 | static unsigned int startup_vic_irq(unsigned int irq) |
1510 | startup_vic_irq(unsigned int irq) | ||
1511 | { | 1471 | { |
1512 | unmask_vic_irq(irq); | 1472 | unmask_vic_irq(irq); |
1513 | 1473 | ||
@@ -1535,13 +1495,12 @@ startup_vic_irq(unsigned int irq) | |||
1535 | * broadcast an Interrupt enable CPI which causes all other CPUs to | 1495 | * broadcast an Interrupt enable CPI which causes all other CPUs to |
1536 | * adjust their masks accordingly. */ | 1496 | * adjust their masks accordingly. */ |
1537 | 1497 | ||
1538 | static void | 1498 | static void unmask_vic_irq(unsigned int irq) |
1539 | unmask_vic_irq(unsigned int irq) | ||
1540 | { | 1499 | { |
1541 | /* linux doesn't do processor-irq affinity, so enable on | 1500 | /* linux doesn't do processor-irq affinity, so enable on |
1542 | * all CPUs we know about */ | 1501 | * all CPUs we know about */ |
1543 | int cpu = smp_processor_id(), real_cpu; | 1502 | int cpu = smp_processor_id(), real_cpu; |
1544 | __u16 mask = (1<<irq); | 1503 | __u16 mask = (1 << irq); |
1545 | __u32 processorList = 0; | 1504 | __u32 processorList = 0; |
1546 | unsigned long flags; | 1505 | unsigned long flags; |
1547 | 1506 | ||
@@ -1549,78 +1508,72 @@ unmask_vic_irq(unsigned int irq) | |||
1549 | irq, cpu, cpu_irq_affinity[cpu])); | 1508 | irq, cpu, cpu_irq_affinity[cpu])); |
1550 | spin_lock_irqsave(&vic_irq_lock, flags); | 1509 | spin_lock_irqsave(&vic_irq_lock, flags); |
1551 | for_each_online_cpu(real_cpu) { | 1510 | for_each_online_cpu(real_cpu) { |
1552 | if(!(voyager_extended_vic_processors & (1<<real_cpu))) | 1511 | if (!(voyager_extended_vic_processors & (1 << real_cpu))) |
1553 | continue; | 1512 | continue; |
1554 | if(!(cpu_irq_affinity[real_cpu] & mask)) { | 1513 | if (!(cpu_irq_affinity[real_cpu] & mask)) { |
1555 | /* irq has no affinity for this CPU, ignore */ | 1514 | /* irq has no affinity for this CPU, ignore */ |
1556 | continue; | 1515 | continue; |
1557 | } | 1516 | } |
1558 | if(real_cpu == cpu) { | 1517 | if (real_cpu == cpu) { |
1559 | enable_local_vic_irq(irq); | 1518 | enable_local_vic_irq(irq); |
1560 | } | 1519 | } else if (vic_irq_mask[real_cpu] & mask) { |
1561 | else if(vic_irq_mask[real_cpu] & mask) { | ||
1562 | vic_irq_enable_mask[real_cpu] |= mask; | 1520 | vic_irq_enable_mask[real_cpu] |= mask; |
1563 | processorList |= (1<<real_cpu); | 1521 | processorList |= (1 << real_cpu); |
1564 | } | 1522 | } |
1565 | } | 1523 | } |
1566 | spin_unlock_irqrestore(&vic_irq_lock, flags); | 1524 | spin_unlock_irqrestore(&vic_irq_lock, flags); |
1567 | if(processorList) | 1525 | if (processorList) |
1568 | send_CPI(processorList, VIC_ENABLE_IRQ_CPI); | 1526 | send_CPI(processorList, VIC_ENABLE_IRQ_CPI); |
1569 | } | 1527 | } |
1570 | 1528 | ||
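unmask_vic_irq() above cannot reach into another CPU's 8259 directly, so it enables the line locally when it can, records the wish in vic_irq_enable_mask for CPUs that still have the line masked, and sends them an enable-IRQ CPI so they adjust their own masks. A simplified model of that bookkeeping; the arrays, the fixed CPU count and the printf standing in for the CPI are all inventions of the sketch, and the extended-processor check is omitted.

#include <stdio.h>

#define NR_CPUS 4

static unsigned short irq_mask[NR_CPUS]     = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
static unsigned short enable_pending[NR_CPUS];
static unsigned int   irq_affinity[NR_CPUS] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };

/* Enable 'irq' on every CPU allowed to take it, deferring remote CPUs to a CPI. */
static void unmask_irq(int irq, int this_cpu)
{
        unsigned short mask = 1 << irq;
        unsigned int kick_set = 0;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                if (!(irq_affinity[cpu] & mask))
                        continue;               /* irq not bound to this CPU */
                if (cpu == this_cpu) {
                        irq_mask[cpu] &= ~mask; /* enable locally, right now */
                } else if (irq_mask[cpu] & mask) {
                        enable_pending[cpu] |= mask;    /* let that CPU do it */
                        kick_set |= 1u << cpu;
                }
        }
        if (kick_set)
                printf("send enable-IRQ CPI to set 0x%x\n", kick_set);
}

int main(void)
{
        unmask_irq(5, 0);
        return 0;
}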
1571 | static void | 1529 | static void mask_vic_irq(unsigned int irq) |
1572 | mask_vic_irq(unsigned int irq) | ||
1573 | { | 1530 | { |
1574 | /* lazy disable, do nothing */ | 1531 | /* lazy disable, do nothing */ |
1575 | } | 1532 | } |
1576 | 1533 | ||
1577 | static void | 1534 | static void enable_local_vic_irq(unsigned int irq) |
1578 | enable_local_vic_irq(unsigned int irq) | ||
1579 | { | 1535 | { |
1580 | __u8 cpu = smp_processor_id(); | 1536 | __u8 cpu = smp_processor_id(); |
1581 | __u16 mask = ~(1 << irq); | 1537 | __u16 mask = ~(1 << irq); |
1582 | __u16 old_mask = vic_irq_mask[cpu]; | 1538 | __u16 old_mask = vic_irq_mask[cpu]; |
1583 | 1539 | ||
1584 | vic_irq_mask[cpu] &= mask; | 1540 | vic_irq_mask[cpu] &= mask; |
1585 | if(vic_irq_mask[cpu] == old_mask) | 1541 | if (vic_irq_mask[cpu] == old_mask) |
1586 | return; | 1542 | return; |
1587 | 1543 | ||
1588 | VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n", | 1544 | VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n", |
1589 | irq, cpu)); | 1545 | irq, cpu)); |
1590 | 1546 | ||
1591 | if (irq & 8) { | 1547 | if (irq & 8) { |
1592 | outb_p(cached_A1(cpu),0xA1); | 1548 | outb_p(cached_A1(cpu), 0xA1); |
1593 | (void)inb_p(0xA1); | 1549 | (void)inb_p(0xA1); |
1594 | } | 1550 | } else { |
1595 | else { | 1551 | outb_p(cached_21(cpu), 0x21); |
1596 | outb_p(cached_21(cpu),0x21); | ||
1597 | (void)inb_p(0x21); | 1552 | (void)inb_p(0x21); |
1598 | } | 1553 | } |
1599 | } | 1554 | } |
1600 | 1555 | ||
1601 | static void | 1556 | static void disable_local_vic_irq(unsigned int irq) |
1602 | disable_local_vic_irq(unsigned int irq) | ||
1603 | { | 1557 | { |
1604 | __u8 cpu = smp_processor_id(); | 1558 | __u8 cpu = smp_processor_id(); |
1605 | __u16 mask = (1 << irq); | 1559 | __u16 mask = (1 << irq); |
1606 | __u16 old_mask = vic_irq_mask[cpu]; | 1560 | __u16 old_mask = vic_irq_mask[cpu]; |
1607 | 1561 | ||
1608 | if(irq == 7) | 1562 | if (irq == 7) |
1609 | return; | 1563 | return; |
1610 | 1564 | ||
1611 | vic_irq_mask[cpu] |= mask; | 1565 | vic_irq_mask[cpu] |= mask; |
1612 | if(old_mask == vic_irq_mask[cpu]) | 1566 | if (old_mask == vic_irq_mask[cpu]) |
1613 | return; | 1567 | return; |
1614 | 1568 | ||
1615 | VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n", | 1569 | VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n", |
1616 | irq, cpu)); | 1570 | irq, cpu)); |
1617 | 1571 | ||
1618 | if (irq & 8) { | 1572 | if (irq & 8) { |
1619 | outb_p(cached_A1(cpu),0xA1); | 1573 | outb_p(cached_A1(cpu), 0xA1); |
1620 | (void)inb_p(0xA1); | 1574 | (void)inb_p(0xA1); |
1621 | } | 1575 | } else { |
1622 | else { | 1576 | outb_p(cached_21(cpu), 0x21); |
1623 | outb_p(cached_21(cpu),0x21); | ||
1624 | (void)inb_p(0x21); | 1577 | (void)inb_p(0x21); |
1625 | } | 1578 | } |
1626 | } | 1579 | } |
@@ -1631,8 +1584,7 @@ disable_local_vic_irq(unsigned int irq) | |||
1631 | * interrupt in the vic, so we merely set a flag (IRQ_DISABLED). If | 1584 | * interrupt in the vic, so we merely set a flag (IRQ_DISABLED). If |
1632 | * this interrupt actually comes in, then we mask and ack here to push | 1585 | * this interrupt actually comes in, then we mask and ack here to push |
1633 | * the interrupt off to another CPU */ | 1586 | * the interrupt off to another CPU */ |
1634 | static void | 1587 | static void before_handle_vic_irq(unsigned int irq) |
1635 | before_handle_vic_irq(unsigned int irq) | ||
1636 | { | 1588 | { |
1637 | irq_desc_t *desc = irq_desc + irq; | 1589 | irq_desc_t *desc = irq_desc + irq; |
1638 | __u8 cpu = smp_processor_id(); | 1590 | __u8 cpu = smp_processor_id(); |
@@ -1641,16 +1593,16 @@ before_handle_vic_irq(unsigned int irq) | |||
1641 | vic_intr_total++; | 1593 | vic_intr_total++; |
1642 | vic_intr_count[cpu]++; | 1594 | vic_intr_count[cpu]++; |
1643 | 1595 | ||
1644 | if(!(cpu_irq_affinity[cpu] & (1<<irq))) { | 1596 | if (!(cpu_irq_affinity[cpu] & (1 << irq))) { |
1645 | /* The irq is not in our affinity mask, push it off | 1597 | /* The irq is not in our affinity mask, push it off |
1646 | * onto another CPU */ | 1598 | * onto another CPU */ |
1647 | VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d on cpu %d\n", | 1599 | VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d " |
1648 | irq, cpu)); | 1600 | "on cpu %d\n", irq, cpu)); |
1649 | disable_local_vic_irq(irq); | 1601 | disable_local_vic_irq(irq); |
1650 | /* set IRQ_INPROGRESS to prevent the handler in irq.c from | 1602 | /* set IRQ_INPROGRESS to prevent the handler in irq.c from |
1651 | * actually calling the interrupt routine */ | 1603 | * actually calling the interrupt routine */ |
1652 | desc->status |= IRQ_REPLAY | IRQ_INPROGRESS; | 1604 | desc->status |= IRQ_REPLAY | IRQ_INPROGRESS; |
1653 | } else if(desc->status & IRQ_DISABLED) { | 1605 | } else if (desc->status & IRQ_DISABLED) { |
1654 | /* Damn, the interrupt actually arrived, do the lazy | 1606 | /* Damn, the interrupt actually arrived, do the lazy |
1655 | * disable thing. The interrupt routine in irq.c will | 1607 | * disable thing. The interrupt routine in irq.c will |
1656 | * not handle an IRQ_DISABLED interrupt, so nothing more | 1608 | * not handle an IRQ_DISABLED interrupt, so nothing more |
@@ -1667,8 +1619,7 @@ before_handle_vic_irq(unsigned int irq) | |||
1667 | } | 1619 | } |
1668 | 1620 | ||
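before_handle_vic_irq() above is where the lazy affinity policy bites: if an interrupt arrives on a CPU outside its affinity mask, that CPU masks the line locally and sets IRQ_REPLAY | IRQ_INPROGRESS so the generic handler skips it, leaving the line for a CPU that does want it. A stripped-down model of just that branch; the flag values and the descriptor type are simplified stand-ins for the genirq ones, and the lazy-disable branch is omitted.

#include <stdio.h>

#define IRQ_REPLAY      0x1     /* simplified stand-ins for the genirq flags */
#define IRQ_INPROGRESS  0x2

struct desc { unsigned int status; };

/* Model of the affinity check: push the irq away if this CPU does not want it. */
static void before_handle(struct desc *desc, int irq, int cpu,
                          unsigned int cpu_affinity)
{
        if (!(cpu_affinity & (1u << irq))) {
                printf("cpu%d: mask irq %d locally, let another CPU take it\n",
                       cpu, irq);
                /* Suppress the generic handler for this occurrence. */
                desc->status |= IRQ_REPLAY | IRQ_INPROGRESS;
        }
}

int main(void)
{
        struct desc d = { 0 };

        before_handle(&d, 9, 0, 0x0200);        /* irq 9 is in cpu0's mask: nothing to do */
        before_handle(&d, 5, 0, 0x0200);        /* irq 5 is not: pushed away */
        return 0;
}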
1669 | /* Finish the VIC interrupt: basically mask */ | 1621 | /* Finish the VIC interrupt: basically mask */ |
1670 | static void | 1622 | static void after_handle_vic_irq(unsigned int irq) |
1671 | after_handle_vic_irq(unsigned int irq) | ||
1672 | { | 1623 | { |
1673 | irq_desc_t *desc = irq_desc + irq; | 1624 | irq_desc_t *desc = irq_desc + irq; |
1674 | 1625 | ||
@@ -1685,11 +1636,11 @@ after_handle_vic_irq(unsigned int irq) | |||
1685 | #ifdef VOYAGER_DEBUG | 1636 | #ifdef VOYAGER_DEBUG |
1686 | /* DEBUG: before we ack, check what's in progress */ | 1637 | /* DEBUG: before we ack, check what's in progress */ |
1687 | isr = vic_read_isr(); | 1638 | isr = vic_read_isr(); |
1688 | if((isr & (1<<irq) && !(status & IRQ_REPLAY)) == 0) { | 1639 | if ((isr & (1 << irq) && !(status & IRQ_REPLAY)) == 0) { |
1689 | int i; | 1640 | int i; |
1690 | __u8 cpu = smp_processor_id(); | 1641 | __u8 cpu = smp_processor_id(); |
1691 | __u8 real_cpu; | 1642 | __u8 real_cpu; |
1692 | int mask; /* Um... initialize me??? --RR */ | 1643 | int mask; /* Um... initialize me??? --RR */ |
1693 | 1644 | ||
1694 | printk("VOYAGER SMP: CPU%d lost interrupt %d\n", | 1645 | printk("VOYAGER SMP: CPU%d lost interrupt %d\n", |
1695 | cpu, irq); | 1646 | cpu, irq); |
@@ -1698,9 +1649,10 @@ after_handle_vic_irq(unsigned int irq) | |||
1698 | outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu, | 1649 | outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu, |
1699 | VIC_PROCESSOR_ID); | 1650 | VIC_PROCESSOR_ID); |
1700 | isr = vic_read_isr(); | 1651 | isr = vic_read_isr(); |
1701 | if(isr & (1<<irq)) { | 1652 | if (isr & (1 << irq)) { |
1702 | printk("VOYAGER SMP: CPU%d ack irq %d\n", | 1653 | printk |
1703 | real_cpu, irq); | 1654 | ("VOYAGER SMP: CPU%d ack irq %d\n", |
1655 | real_cpu, irq); | ||
1704 | ack_vic_irq(irq); | 1656 | ack_vic_irq(irq); |
1705 | } | 1657 | } |
1706 | outb(cpu, VIC_PROCESSOR_ID); | 1658 | outb(cpu, VIC_PROCESSOR_ID); |
@@ -1711,7 +1663,7 @@ after_handle_vic_irq(unsigned int irq) | |||
1711 | * receipt by another CPU so everything must be in | 1663 | * receipt by another CPU so everything must be in |
1712 | * order here */ | 1664 | * order here */ |
1713 | ack_vic_irq(irq); | 1665 | ack_vic_irq(irq); |
1714 | if(status & IRQ_REPLAY) { | 1666 | if (status & IRQ_REPLAY) { |
1715 | /* replay is set if we disable the interrupt | 1667 | /* replay is set if we disable the interrupt |
1716 | * in the before_handle_vic_irq() routine, so | 1668 | * in the before_handle_vic_irq() routine, so |
1717 | * clear the in progress bit here to allow the | 1669 | * clear the in progress bit here to allow the |
@@ -1720,9 +1672,9 @@ after_handle_vic_irq(unsigned int irq) | |||
1720 | } | 1672 | } |
1721 | #ifdef VOYAGER_DEBUG | 1673 | #ifdef VOYAGER_DEBUG |
1722 | isr = vic_read_isr(); | 1674 | isr = vic_read_isr(); |
1723 | if((isr & (1<<irq)) != 0) | 1675 | if ((isr & (1 << irq)) != 0) |
1724 | printk("VOYAGER SMP: after_handle_vic_irq() after ack irq=%d, isr=0x%x\n", | 1676 | printk("VOYAGER SMP: after_handle_vic_irq() after " |
1725 | irq, isr); | 1677 | "ack irq=%d, isr=0x%x\n", irq, isr); |
1726 | #endif /* VOYAGER_DEBUG */ | 1678 | #endif /* VOYAGER_DEBUG */ |
1727 | } | 1679 | } |
1728 | _raw_spin_unlock(&vic_irq_lock); | 1680 | _raw_spin_unlock(&vic_irq_lock); |
@@ -1731,7 +1683,6 @@ after_handle_vic_irq(unsigned int irq) | |||
1731 | * may be intercepted by another CPU if reasserted */ | 1683 | * may be intercepted by another CPU if reasserted */ |
1732 | } | 1684 | } |
1733 | 1685 | ||
1734 | |||
1735 | /* Linux processor - interrupt affinity manipulations. | 1686 | /* Linux processor - interrupt affinity manipulations. |
1736 | * | 1687 | * |
1737 | * For each processor, we maintain a 32 bit irq affinity mask. | 1688 | * For each processor, we maintain a 32 bit irq affinity mask. |
@@ -1748,8 +1699,7 @@ after_handle_vic_irq(unsigned int irq) | |||
1748 | * change the mask and then do an interrupt enable CPI to re-enable on | 1699 | * change the mask and then do an interrupt enable CPI to re-enable on |
1749 | * the selected processors */ | 1700 | * the selected processors */ |
1750 | 1701 | ||
1751 | void | 1702 | void set_vic_irq_affinity(unsigned int irq, cpumask_t mask) |
1752 | set_vic_irq_affinity(unsigned int irq, cpumask_t mask) | ||
1753 | { | 1703 | { |
1754 | /* Only extended processors handle interrupts */ | 1704 | /* Only extended processors handle interrupts */ |
1755 | unsigned long real_mask; | 1705 | unsigned long real_mask; |
@@ -1757,13 +1707,13 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask) | |||
1757 | int cpu; | 1707 | int cpu; |
1758 | 1708 | ||
1759 | real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors; | 1709 | real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors; |
1760 | 1710 | ||
1761 | if(cpus_addr(mask)[0] == 0) | 1711 | if (cpus_addr(mask)[0] == 0) |
1762 | /* can't have no CPUs to accept the interrupt -- extremely | 1712 | /* can't have no CPUs to accept the interrupt -- extremely |
1763 | * bad things will happen */ | 1713 | * bad things will happen */ |
1764 | return; | 1714 | return; |
1765 | 1715 | ||
1766 | if(irq == 0) | 1716 | if (irq == 0) |
1767 | /* can't change the affinity of the timer IRQ. This | 1717 | /* can't change the affinity of the timer IRQ. This |
1768 | * is due to the constraint in the voyager | 1718 | * is due to the constraint in the voyager |
1769 | * architecture that the CPI also comes in on an IRQ | 1719 | * architecture that the CPI also comes in on an IRQ |
@@ -1772,7 +1722,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask) | |||
1772 | * will no longer be able to accept VIC CPIs */ | 1722 | * will no longer be able to accept VIC CPIs */ |
1773 | return; | 1723 | return; |
1774 | 1724 | ||
1775 | if(irq >= 32) | 1725 | if (irq >= 32) |
1776 | /* You can only have 32 interrupts in a voyager system | 1726 | /* You can only have 32 interrupts in a voyager system |
1777 | * (and 32 only if you have a secondary microchannel | 1727 | * (and 32 only if you have a secondary microchannel |
1778 | * bus) */ | 1728 | * bus) */ |
@@ -1780,8 +1730,8 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask) | |||
1780 | 1730 | ||
1781 | for_each_online_cpu(cpu) { | 1731 | for_each_online_cpu(cpu) { |
1782 | unsigned long cpu_mask = 1 << cpu; | 1732 | unsigned long cpu_mask = 1 << cpu; |
1783 | 1733 | ||
1784 | if(cpu_mask & real_mask) { | 1734 | if (cpu_mask & real_mask) { |
1785 | /* enable the interrupt for this cpu */ | 1735 | /* enable the interrupt for this cpu */ |
1786 | cpu_irq_affinity[cpu] |= irq_mask; | 1736 | cpu_irq_affinity[cpu] |= irq_mask; |
1787 | } else { | 1737 | } else { |
@@ -1800,25 +1750,23 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask) | |||
1800 | unmask_vic_irq(irq); | 1750 | unmask_vic_irq(irq); |
1801 | } | 1751 | } |
1802 | 1752 | ||
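set_vic_irq_affinity() above reduces the requested cpumask to the extended (interrupt-handling) processors and then just sets or clears one bit per CPU in cpu_irq_affinity[]; the actual masking happens lazily the next time the interrupt fires on a CPU outside the mask, and the final unmask propagates the enables. A compact model of the per-CPU update; the CPU count, masks and bounds checks are illustrative only.

#include <stdio.h>

#define NR_CPUS 4

static unsigned int irq_affinity[NR_CPUS] = { ~0u, ~0u, ~0u, ~0u };

/* Record which CPUs may handle 'irq'; rejects empty masks, IRQ 0 and IRQs >= 32. */
static void set_affinity(int irq, unsigned int requested, unsigned int extended_cpus)
{
        unsigned int real_mask = requested & extended_cpus;
        unsigned int irq_bit = 1u << irq;
        int cpu;

        if (!real_mask || irq == 0 || irq >= 32)
                return;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                if (real_mask & (1u << cpu))
                        irq_affinity[cpu] |= irq_bit;   /* may handle this irq */
                else
                        irq_affinity[cpu] &= ~irq_bit;  /* will push it away */
        }
}

int main(void)
{
        set_affinity(9, 0x2, 0xf);      /* bind IRQ 9 to CPU 1 only */
        printf("cpu0 affinity: 0x%x\n", irq_affinity[0]);
        printf("cpu1 affinity: 0x%x\n", irq_affinity[1]);
        return 0;
}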
1803 | static void | 1753 | static void ack_vic_irq(unsigned int irq) |
1804 | ack_vic_irq(unsigned int irq) | ||
1805 | { | 1754 | { |
1806 | if (irq & 8) { | 1755 | if (irq & 8) { |
1807 | outb(0x62,0x20); /* Specific EOI to cascade */ | 1756 | outb(0x62, 0x20); /* Specific EOI to cascade */ |
1808 | outb(0x60|(irq & 7),0xA0); | 1757 | outb(0x60 | (irq & 7), 0xA0); |
1809 | } else { | 1758 | } else { |
1810 | outb(0x60 | (irq & 7),0x20); | 1759 | outb(0x60 | (irq & 7), 0x20); |
1811 | } | 1760 | } |
1812 | } | 1761 | } |
1813 | 1762 | ||
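ack_vic_irq() above issues 8259 specific-EOI commands: the OCW2 byte is 0x60 plus the low three bits of the IRQ, written to the slave controller (port 0xA0) after first EOI-ing the cascade line on the master, or to the master (port 0x20) directly for IRQs 0-7. A tiny sketch of how the command byte and the target port are chosen, with printf standing in for the outb.

#include <stdio.h>

/* Print the specific-EOI sequence an 8259 pair would need for 'irq'. */
static void ack_irq(unsigned int irq)
{
        if (irq & 8) {
                printf("outb(0x62, 0x20)   /* EOI the cascade (IRQ2) on the master */\n");
                printf("outb(0x%02x, 0xA0) /* specific EOI on the slave */\n",
                       0x60 | (irq & 7));
        } else {
                printf("outb(0x%02x, 0x20) /* specific EOI on the master */\n",
                       0x60 | (irq & 7));
        }
}

int main(void)
{
        ack_irq(3);     /* master-side IRQ */
        ack_irq(12);    /* slave-side IRQ  */
        return 0;
}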
1814 | /* enable the CPIs. In the VIC, the CPIs are delivered by the 8259 | 1763 | /* enable the CPIs. In the VIC, the CPIs are delivered by the 8259 |
1815 | * but are not vectored by it. This means that the 8259 mask must be | 1764 | * but are not vectored by it. This means that the 8259 mask must be |
1816 | * lowered to receive them */ | 1765 | * lowered to receive them */ |
1817 | static __init void | 1766 | static __init void vic_enable_cpi(void) |
1818 | vic_enable_cpi(void) | ||
1819 | { | 1767 | { |
1820 | __u8 cpu = smp_processor_id(); | 1768 | __u8 cpu = smp_processor_id(); |
1821 | 1769 | ||
1822 | /* just take a copy of the current mask (nop for boot cpu) */ | 1770 | /* just take a copy of the current mask (nop for boot cpu) */ |
1823 | vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id]; | 1771 | vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id]; |
1824 | 1772 | ||
@@ -1827,7 +1775,7 @@ vic_enable_cpi(void) | |||
1827 | /* for sys int and cmn int */ | 1775 | /* for sys int and cmn int */ |
1828 | enable_local_vic_irq(7); | 1776 | enable_local_vic_irq(7); |
1829 | 1777 | ||
1830 | if(is_cpu_quad()) { | 1778 | if (is_cpu_quad()) { |
1831 | outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0); | 1779 | outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0); |
1832 | outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1); | 1780 | outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1); |
1833 | VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n", | 1781 | VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n", |
@@ -1838,8 +1786,7 @@ vic_enable_cpi(void) | |||
1838 | cpu, vic_irq_mask[cpu])); | 1786 | cpu, vic_irq_mask[cpu])); |
1839 | } | 1787 | } |
1840 | 1788 | ||
1841 | void | 1789 | void voyager_smp_dump() |
1842 | voyager_smp_dump() | ||
1843 | { | 1790 | { |
1844 | int old_cpu = smp_processor_id(), cpu; | 1791 | int old_cpu = smp_processor_id(), cpu; |
1845 | 1792 | ||
@@ -1865,10 +1812,10 @@ voyager_smp_dump() | |||
1865 | cpu, vic_irq_mask[cpu], imr, irr, isr); | 1812 | cpu, vic_irq_mask[cpu], imr, irr, isr); |
1866 | #if 0 | 1813 | #if 0 |
1867 | /* These lines are put in to try to unstick an un-ack'd irq */ | 1814 | /* These lines are put in to try to unstick an un-ack'd irq */ |
1868 | if(isr != 0) { | 1815 | if (isr != 0) { |
1869 | int irq; | 1816 | int irq; |
1870 | for(irq=0; irq<16; irq++) { | 1817 | for (irq = 0; irq < 16; irq++) { |
1871 | if(isr & (1<<irq)) { | 1818 | if (isr & (1 << irq)) { |
1872 | printk("\tCPU%d: ack irq %d\n", | 1819 | printk("\tCPU%d: ack irq %d\n", |
1873 | cpu, irq); | 1820 | cpu, irq); |
1874 | local_irq_save(flags); | 1821 | local_irq_save(flags); |
@@ -1884,17 +1831,15 @@ voyager_smp_dump() | |||
1884 | } | 1831 | } |
1885 | } | 1832 | } |
1886 | 1833 | ||
1887 | void | 1834 | void smp_voyager_power_off(void *dummy) |
1888 | smp_voyager_power_off(void *dummy) | ||
1889 | { | 1835 | { |
1890 | if(smp_processor_id() == boot_cpu_id) | 1836 | if (smp_processor_id() == boot_cpu_id) |
1891 | voyager_power_off(); | 1837 | voyager_power_off(); |
1892 | else | 1838 | else |
1893 | smp_stop_cpu_function(NULL); | 1839 | smp_stop_cpu_function(NULL); |
1894 | } | 1840 | } |
1895 | 1841 | ||
1896 | static void __init | 1842 | static void __init voyager_smp_prepare_cpus(unsigned int max_cpus) |
1897 | voyager_smp_prepare_cpus(unsigned int max_cpus) | ||
1898 | { | 1843 | { |
1899 | /* FIXME: ignore max_cpus for now */ | 1844 | /* FIXME: ignore max_cpus for now */ |
1900 | smp_boot_cpus(); | 1845 | smp_boot_cpus(); |
@@ -1911,8 +1856,7 @@ static void __cpuinit voyager_smp_prepare_boot_cpu(void) | |||
1911 | cpu_set(smp_processor_id(), cpu_present_map); | 1856 | cpu_set(smp_processor_id(), cpu_present_map); |
1912 | } | 1857 | } |
1913 | 1858 | ||
1914 | static int __cpuinit | 1859 | static int __cpuinit voyager_cpu_up(unsigned int cpu) |
1915 | voyager_cpu_up(unsigned int cpu) | ||
1916 | { | 1860 | { |
1917 | /* This only works at boot for x86. See "rewrite" above. */ | 1861 | /* This only works at boot for x86. See "rewrite" above. */ |
1918 | if (cpu_isset(cpu, smp_commenced_mask)) | 1862 | if (cpu_isset(cpu, smp_commenced_mask)) |
@@ -1928,14 +1872,12 @@ voyager_cpu_up(unsigned int cpu) | |||
1928 | return 0; | 1872 | return 0; |
1929 | } | 1873 | } |
1930 | 1874 | ||
1931 | static void __init | 1875 | static void __init voyager_smp_cpus_done(unsigned int max_cpus) |
1932 | voyager_smp_cpus_done(unsigned int max_cpus) | ||
1933 | { | 1876 | { |
1934 | zap_low_mappings(); | 1877 | zap_low_mappings(); |
1935 | } | 1878 | } |
1936 | 1879 | ||
1937 | void __init | 1880 | void __init smp_setup_processor_id(void) |
1938 | smp_setup_processor_id(void) | ||
1939 | { | 1881 | { |
1940 | current_thread_info()->cpu = hard_smp_processor_id(); | 1882 | current_thread_info()->cpu = hard_smp_processor_id(); |
1941 | x86_write_percpu(cpu_number, hard_smp_processor_id()); | 1883 | x86_write_percpu(cpu_number, hard_smp_processor_id()); |