Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/dma.c              18
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S   28
-rw-r--r--  arch/powerpc/kernel/head_32.S           9
-rw-r--r--  arch/powerpc/kernel/head_64.S           7
-rw-r--r--  arch/powerpc/kernel/idle_power4.S      21
-rw-r--r--  arch/powerpc/kernel/irq.c               3
-rw-r--r--  arch/powerpc/kernel/smp.c             153
-rw-r--r--  arch/powerpc/kernel/time.c             17
8 files changed, 159 insertions(+), 97 deletions(-)
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index cf02cad62d9a..d238c082c3c5 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -179,3 +179,21 @@ static int __init dma_init(void)
 	return 0;
 }
 fs_initcall(dma_init);
+
+int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+		      void *cpu_addr, dma_addr_t handle, size_t size)
+{
+	unsigned long pfn;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
+#else
+	pfn = page_to_pfn(virt_to_page(cpu_addr));
+#endif
+	return remap_pfn_range(vma, vma->vm_start,
+			       pfn + vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start,
+			       vma->vm_page_prot);
+}
+EXPORT_SYMBOL_GPL(dma_mmap_coherent);
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 8a817995b4cd..c532cb2c927a 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -977,20 +977,6 @@ _GLOBAL(do_stab_bolted)
 	rfid
 	b	.	/* prevent speculative execution */
 
-/*
- * Space for CPU0's segment table.
- *
- * On iSeries, the hypervisor must fill in at least one entry before
- * we get control (with relocate on). The address is given to the hv
- * as a page number (see xLparMap below), so this must be at a
- * fixed address (the linker can't compute (u64)&initial_stab >>
- * PAGE_SHIFT).
- */
-	. = STAB0_OFFSET	/* 0x6000 */
-	.globl initial_stab
-initial_stab:
-	.space	4096
-
 #ifdef CONFIG_PPC_PSERIES
 /*
  * Data area reserved for FWNMI option.
@@ -1027,3 +1013,17 @@ xLparMap:
 #ifdef CONFIG_PPC_PSERIES
 	. = 0x8000
 #endif /* CONFIG_PPC_PSERIES */
+
+/*
+ * Space for CPU0's segment table.
+ *
+ * On iSeries, the hypervisor must fill in at least one entry before
+ * we get control (with relocate on). The address is given to the hv
+ * as a page number (see xLparMap above), so this must be at a
+ * fixed address (the linker can't compute (u64)&initial_stab >>
+ * PAGE_SHIFT).
+ */
+	. = STAB0_OFFSET	/* 0x8000 */
+	.globl initial_stab
+initial_stab:
+	.space	4096
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 98c4b29a56f4..c5c24beb8387 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -890,6 +890,15 @@ __secondary_start:
 	mtspr	SPRN_SRR1,r4
 	SYNC
 	RFI
+
+_GLOBAL(start_secondary_resume)
+	/* Reset stack */
+	rlwinm	r1,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
+	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+	li	r3,0
+	stw	r3,0(r1)	/* Zero the stack frame pointer */
+	bl	start_secondary
+	b	.
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_KVM_BOOK3S_HANDLER
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 782f23df7c85..271140b38b6f 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -536,6 +536,13 @@ _GLOBAL(pmac_secondary_start)
 	add	r13,r13,r4		/* for this processor.		*/
 	mtspr	SPRN_SPRG_PACA,r13	/* Save vaddr of paca in an SPRG*/
 
+	/* Mark interrupts soft and hard disabled (they might be enabled
+	 * in the PACA when doing hotplug)
+	 */
+	li	r0,0
+	stb	r0,PACASOFTIRQEN(r13)
+	stb	r0,PACAHARDIRQEN(r13)
+
 	/* Create a temp kernel stack for use before relocation is on. */
 	ld	r1,PACAEMERGSP(r13)
 	subi	r1,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index 5328709eeedc..ba3195478600 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -53,24 +53,3 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	isync
 	b	1b
 
-_GLOBAL(power4_cpu_offline_powersave)
-	/* Go to NAP now */
-	mfmsr	r7
-	rldicl	r0,r7,48,1
-	rotldi	r0,r0,16
-	mtmsrd	r0,1			/* hard-disable interrupts */
-	li	r0,1
-	li	r6,0
-	stb	r0,PACAHARDIRQEN(r13)	/* we'll hard-enable shortly */
-	stb	r6,PACASOFTIRQEN(r13)	/* soft-disable irqs */
-BEGIN_FTR_SECTION
-	DSSALL
-	sync
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-	ori	r7,r7,MSR_EE
-	oris	r7,r7,MSR_POW@h
-	sync
-	isync
-	mtmsrd	r7
-	isync
-	blr
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 63625e0650b5..f621b7d2d869 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -246,12 +246,13 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void fixup_irqs(const struct cpumask *map)
+void migrate_irqs(void)
 {
 	struct irq_desc *desc;
 	unsigned int irq;
 	static int warned;
 	cpumask_var_t mask;
+	const struct cpumask *map = cpu_online_mask;
 
 	alloc_cpumask_var(&mask, GFP_KERNEL);
 
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 981360509172..cbdbb14be4b0 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -57,6 +57,25 @@
 #define DBG(fmt...)
 #endif
 
+
+/* Store all idle threads, this can be reused instead of creating
+ * a new thread. Also avoids complicated thread destroy functionality
+ * for idle threads.
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
+ * removed after init for !CONFIG_HOTPLUG_CPU.
+ */
+static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
+#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
+#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
+#else
+static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
+#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
+#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
+#endif
+
 struct thread_info *secondary_ti;
 
 DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
@@ -238,23 +257,6 @@ static void __devinit smp_store_cpu_info(int id)
 	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
 }
 
-static void __init smp_create_idle(unsigned int cpu)
-{
-	struct task_struct *p;
-
-	/* create a process for the processor */
-	p = fork_idle(cpu);
-	if (IS_ERR(p))
-		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-#ifdef CONFIG_PPC64
-	paca[cpu].__current = p;
-	paca[cpu].kstack = (unsigned long) task_thread_info(p)
-		+ THREAD_SIZE - STACK_FRAME_OVERHEAD;
-#endif
-	current_set[cpu] = task_thread_info(p);
-	task_thread_info(p)->cpu = cpu;
-}
-
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned int cpu;
@@ -288,10 +290,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		max_cpus = NR_CPUS;
 	else
 		max_cpus = 1;
-
-	for_each_possible_cpu(cpu)
-		if (cpu != boot_cpuid)
-			smp_create_idle(cpu);
 }
 
 void __devinit smp_prepare_boot_cpu(void)
@@ -305,7 +303,7 @@ void __devinit smp_prepare_boot_cpu(void)
 
 #ifdef CONFIG_HOTPLUG_CPU
 /* State of each CPU during hotplug phases */
-DEFINE_PER_CPU(int, cpu_state) = { 0 };
+static DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
 int generic_cpu_disable(void)
 {
@@ -317,30 +315,8 @@ int generic_cpu_disable(void)
 	set_cpu_online(cpu, false);
 #ifdef CONFIG_PPC64
 	vdso_data->processorCount--;
-	fixup_irqs(cpu_online_mask);
-#endif
-	return 0;
-}
-
-int generic_cpu_enable(unsigned int cpu)
-{
-	/* Do the normal bootup if we haven't
-	 * already bootstrapped. */
-	if (system_state != SYSTEM_RUNNING)
-		return -ENOSYS;
-
-	/* get the target out of it's holding state */
-	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-	smp_wmb();
-
-	while (!cpu_online(cpu))
-		cpu_relax();
-
-#ifdef CONFIG_PPC64
-	fixup_irqs(cpu_online_mask);
-	/* counter the irq disable in fixup_irqs */
-	local_irq_enable();
 #endif
+	migrate_irqs();
 	return 0;
 }
 
@@ -362,37 +338,89 @@ void generic_mach_cpu_die(void)
 	unsigned int cpu;
 
 	local_irq_disable();
+	idle_task_exit();
 	cpu = smp_processor_id();
 	printk(KERN_DEBUG "CPU%d offline\n", cpu);
 	__get_cpu_var(cpu_state) = CPU_DEAD;
 	smp_wmb();
 	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
 		cpu_relax();
-	set_cpu_online(cpu, true);
-	local_irq_enable();
+}
+
+void generic_set_cpu_dead(unsigned int cpu)
+{
+	per_cpu(cpu_state, cpu) = CPU_DEAD;
 }
 #endif
 
-static int __devinit cpu_enable(unsigned int cpu)
+struct create_idle {
+	struct work_struct work;
+	struct task_struct *idle;
+	struct completion done;
+	int cpu;
+};
+
+static void __cpuinit do_fork_idle(struct work_struct *work)
 {
-	if (smp_ops && smp_ops->cpu_enable)
-		return smp_ops->cpu_enable(cpu);
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
+
+	c_idle->idle = fork_idle(c_idle->cpu);
+	complete(&c_idle->done);
+}
+
+static int __cpuinit create_idle(unsigned int cpu)
+{
+	struct thread_info *ti;
+	struct create_idle c_idle = {
+		.cpu	= cpu,
+		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+	};
+	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
+
+	c_idle.idle = get_idle_for_cpu(cpu);
+
+	/* We can't use kernel_thread since we must avoid to
+	 * reschedule the child. We use a workqueue because
+	 * we want to fork from a kernel thread, not whatever
+	 * userspace process happens to be trying to online us.
+	 */
+	if (!c_idle.idle) {
+		schedule_work(&c_idle.work);
+		wait_for_completion(&c_idle.done);
+	} else
+		init_idle(c_idle.idle, cpu);
+	if (IS_ERR(c_idle.idle)) {
+		pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle));
+		return PTR_ERR(c_idle.idle);
+	}
+	ti = task_thread_info(c_idle.idle);
+
+#ifdef CONFIG_PPC64
+	paca[cpu].__current = c_idle.idle;
+	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
+#endif
+	ti->cpu = cpu;
+	current_set[cpu] = ti;
 
-	return -ENOSYS;
+	return 0;
 }
 
 int __cpuinit __cpu_up(unsigned int cpu)
 {
-	int c;
+	int rc, c;
 
 	secondary_ti = current_set[cpu];
-	if (!cpu_enable(cpu))
-		return 0;
 
 	if (smp_ops == NULL ||
 	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
 		return -EINVAL;
 
+	/* Make sure we have an idle thread */
+	rc = create_idle(cpu);
+	if (rc)
+		return rc;
+
 	/* Make sure callin-map entry is 0 (can be leftover a CPU
 	 * hotplug
 	 */
@@ -502,7 +530,7 @@ static struct device_node *cpu_to_l2cache(int cpu)
 }
 
 /* Activate a secondary processor. */
-int __devinit start_secondary(void *unused)
+void __devinit start_secondary(void *unused)
 {
 	unsigned int cpu = smp_processor_id();
 	struct device_node *l2_cache;
@@ -523,6 +551,10 @@ int __devinit start_secondary(void *unused)
 
 	secondary_cpu_time_init();
 
+#ifdef CONFIG_PPC64
+	if (system_state == SYSTEM_RUNNING)
+		vdso_data->processorCount++;
+#endif
 	ipi_call_lock();
 	notify_cpu_starting(cpu);
 	set_cpu_online(cpu, true);
@@ -558,7 +590,8 @@ int __devinit start_secondary(void *unused)
 	local_irq_enable();
 
 	cpu_idle();
-	return 0;
+
+	BUG();
 }
 
 int setup_profiling_timer(unsigned int multiplier)
@@ -585,7 +618,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 	free_cpumask_var(old_mask);
 
+	if (smp_ops && smp_ops->bringup_done)
+		smp_ops->bringup_done();
+
 	dump_numa_cpu_topology();
+
 }
 
 int arch_sd_sibling_asym_packing(void)
@@ -660,5 +697,9 @@ void cpu_die(void)
 {
 	if (ppc_md.cpu_die)
 		ppc_md.cpu_die();
+
+	/* If we return, we re-enter start_secondary */
+	start_secondary_resume();
 }
+
 #endif
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 09d31dbf43f9..375480c56eb9 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -356,7 +356,7 @@ void account_system_vtime(struct task_struct *tsk)
 	}
 	get_paca()->user_time_scaled += user_scaled;
 
-	if (in_irq() || idle_task(smp_processor_id()) != tsk) {
+	if (in_interrupt() || idle_task(smp_processor_id()) != tsk) {
 		account_system_time(tsk, 0, delta, sys_scaled);
 		if (stolen)
 			account_steal_time(stolen);
@@ -577,14 +577,21 @@ void timer_interrupt(struct pt_regs * regs)
 	struct clock_event_device *evt = &decrementer->event;
 	u64 now;
 
+	/* Ensure a positive value is written to the decrementer, or else
+	 * some CPUs will continue to take decrementer exceptions.
+	 */
+	set_dec(DECREMENTER_MAX);
+
+	/* Some implementations of hotplug will get timer interrupts while
+	 * offline, just ignore these
+	 */
+	if (!cpu_online(smp_processor_id()))
+		return;
+
 	trace_timer_interrupt_entry(regs);
 
 	__get_cpu_var(irq_stat).timer_irqs++;
 
-	/* Ensure a positive value is written to the decrementer, or else
-	 * some CPUs will continuue to take decrementer exceptions */
-	set_dec(DECREMENTER_MAX);
-
 #if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
 		do_IRQ(regs);