Diffstat (limited to 'arch/powerpc/kernel')
 arch/powerpc/kernel/Makefile            |   4
 arch/powerpc/kernel/cpu_setup_power4.S  |   2
 arch/powerpc/kernel/crash.c             |  11
 arch/powerpc/kernel/fpu.S               |   5
 arch/powerpc/kernel/head_64.S           |  35
 arch/powerpc/kernel/irq.c               |  86
 arch/powerpc/kernel/kprobes.c           |   2
 arch/powerpc/kernel/legacy_serial.c     |  21
 arch/powerpc/kernel/machine_kexec.c     |  56
 arch/powerpc/kernel/machine_kexec_64.c  |  57
 arch/powerpc/kernel/misc_64.S           |   2
 arch/powerpc/kernel/pci_64.c            |  11
 arch/powerpc/kernel/ppc_ksyms.c         |   4
 arch/powerpc/kernel/prom_init.c         |  46
 arch/powerpc/kernel/prom_parse.c        |  30
 arch/powerpc/kernel/rtas.c              |  21
 arch/powerpc/kernel/smp-tbsync.c        |   5
 arch/powerpc/kernel/smp.c               |  24
 arch/powerpc/kernel/sysfs.c             |   4
 arch/powerpc/kernel/time.c              |  48
 arch/powerpc/kernel/traps.c             |  32
 arch/powerpc/kernel/vdso32/Makefile     |   3
 arch/powerpc/kernel/vdso32/vdso32.lds.S |   1
 arch/powerpc/kernel/vdso64/Makefile     |   3
 arch/powerpc/kernel/vdso64/vdso64.lds.S |   1
26 files changed, 360 insertions, 160 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 814f242aeb8c..956c2e5564b7 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -67,9 +67,9 @@ pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o pci_iommu.o \
 		pci_direct_iommu.o iomap.o
 pci32-$(CONFIG_PPC32)	:= pci_32.o
 obj-$(CONFIG_PCI)	+= $(pci64-y) $(pci32-y)
-kexec-$(CONFIG_PPC64)	:= machine_kexec_64.o crash.o
+kexec-$(CONFIG_PPC64)	:= machine_kexec_64.o
 kexec-$(CONFIG_PPC32)	:= machine_kexec_32.o
-obj-$(CONFIG_KEXEC)	+= machine_kexec.o $(kexec-y)
+obj-$(CONFIG_KEXEC)	+= machine_kexec.o crash.o $(kexec-y)
 
 ifeq ($(CONFIG_PPC_ISERIES),y)
 $(obj)/head_64.o: $(obj)/lparmap.s
diff --git a/arch/powerpc/kernel/cpu_setup_power4.S b/arch/powerpc/kernel/cpu_setup_power4.S
index f69af2c5d7b3..76e97aa71c45 100644
--- a/arch/powerpc/kernel/cpu_setup_power4.S
+++ b/arch/powerpc/kernel/cpu_setup_power4.S
@@ -76,6 +76,8 @@ _GLOBAL(__setup_cpu_ppc970)
 	mfspr	r0,SPRN_HID0
 	li	r11,5			/* clear DOZE and SLEEP */
 	rldimi	r0,r11,52,8		/* set NAP and DPM */
+	li	r11,0
+	rldimi	r0,r11,32,31		/* clear EN_ATTN */
 	mtspr	SPRN_HID0,r0
 	mfspr	r0,SPRN_HID0
 	mfspr	r0,SPRN_HID0
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 358cecdc6aef..f04c18e08b8b 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -44,6 +44,7 @@
 /* This keeps a track of which one is crashing cpu. */
 int crashing_cpu = -1;
 static cpumask_t cpus_in_crash = CPU_MASK_NONE;
+cpumask_t cpus_in_sr = CPU_MASK_NONE;
 
 static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
 			    size_t data_len)
@@ -139,7 +140,13 @@ void crash_ipi_callback(struct pt_regs *regs)
 
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(1, 1);
+
+#ifdef CONFIG_PPC64
 	kexec_smp_wait();
+#else
+	for (;;);	/* FIXME */
+#endif
+
 	/* NOTREACHED */
 }
 
@@ -255,7 +262,11 @@ static void crash_kexec_prepare_cpus(int cpu)
 	 *
 	 * do this if kexec in setup.c ?
 	 */
+#ifdef CONFIG_PPC64
 	smp_release_cpus();
+#else
+	/* FIXME */
+#endif
 }
 
 void crash_kexec_secondary(struct pt_regs *regs)
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 7e2c9fe44ac1..821e152e093c 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -2,6 +2,11 @@
  * FPU support code, moved here from head.S so that it can be used
  * by chips which use other head-whatever.S files.
  *
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
+ * Copyright (C) 1996 Paul Mackerras.
+ * Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index e16eb2a33173..6ff3cf506088 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -191,6 +191,37 @@ exception_marker:
 	ori	reg,reg,(label)@l;	/* virt addr of handler ... */
 #endif
 
+/*
+ * Equal to EXCEPTION_PROLOG_PSERIES, except that it forces 64bit mode.
+ * The firmware calls the registered system_reset_fwnmi and
+ * machine_check_fwnmi handlers in 32bit mode if the cpu happens to run
+ * a 32bit application at the time of the event.
+ * This firmware bug is present on POWER4 and JS20.
+ */
+#define EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(area, label)		\
+	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
+	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
+	std	r10,area+EX_R10(r13);					\
+	std	r11,area+EX_R11(r13);					\
+	std	r12,area+EX_R12(r13);					\
+	mfspr	r9,SPRN_SPRG1;						\
+	std	r9,area+EX_R13(r13);					\
+	mfcr	r9;							\
+	clrrdi	r12,r13,32;		/* get high part of &label */	\
+	mfmsr	r10;							\
+	/* force 64bit mode */						\
+	li	r11,5;			/* MSR_SF_LG|MSR_ISF_LG */	\
+	rldimi	r10,r11,61,0;		/* insert into top 3 bits */	\
+	/* done 64bit mode */						\
+	mfspr	r11,SPRN_SRR0;		/* save SRR0 */			\
+	LOAD_HANDLER(r12,label)						\
+	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
+	mtspr	SPRN_SRR0,r12;						\
+	mfspr	r12,SPRN_SRR1;		/* and SRR1 */			\
+	mtspr	SPRN_SRR1,r10;						\
+	rfid;								\
+	b	.	/* prevent speculative execution */
+
 #define EXCEPTION_PROLOG_PSERIES(area, label)				\
 	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
 	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
@@ -604,14 +635,14 @@ slb_miss_user_pseries:
 system_reset_fwnmi:
 	HMT_MEDIUM
 	mtspr	SPRN_SPRG1,r13		/* save r13 */
-	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
+	EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXGEN, system_reset_common)
 
 	.globl machine_check_fwnmi
 	.align 7
 machine_check_fwnmi:
 	HMT_MEDIUM
 	mtspr	SPRN_SPRG1,r13		/* save r13 */
-	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
+	EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXMC, machine_check_common)
 
 #ifdef CONFIG_PPC_ISERIES
 /*** ISeries-LPAR interrupt handlers ***/
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 01bdae35cb55..12c5971d6565 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -322,7 +322,8 @@ EXPORT_SYMBOL(do_softirq);
 
 static LIST_HEAD(irq_hosts);
 static spinlock_t irq_big_lock = SPIN_LOCK_UNLOCKED;
-
+static DEFINE_PER_CPU(unsigned int, irq_radix_reader);
+static unsigned int irq_radix_writer;
 struct irq_map_entry irq_map[NR_IRQS];
 static unsigned int irq_virq_count = NR_IRQS;
 static struct irq_host *irq_default_host;
@@ -455,6 +456,58 @@ void irq_set_virq_count(unsigned int count)
 	irq_virq_count = count;
 }
 
+/* radix tree not lockless safe ! we use a brlock-type mecanism
+ * for now, until we can use a lockless radix tree
+ */
+static void irq_radix_wrlock(unsigned long *flags)
+{
+	unsigned int cpu, ok;
+
+	spin_lock_irqsave(&irq_big_lock, *flags);
+	irq_radix_writer = 1;
+	smp_mb();
+	do {
+		barrier();
+		ok = 1;
+		for_each_possible_cpu(cpu) {
+			if (per_cpu(irq_radix_reader, cpu)) {
+				ok = 0;
+				break;
+			}
+		}
+		if (!ok)
+			cpu_relax();
+	} while(!ok);
+}
+
+static void irq_radix_wrunlock(unsigned long flags)
+{
+	smp_wmb();
+	irq_radix_writer = 0;
+	spin_unlock_irqrestore(&irq_big_lock, flags);
+}
+
+static void irq_radix_rdlock(unsigned long *flags)
+{
+	local_irq_save(*flags);
+	__get_cpu_var(irq_radix_reader) = 1;
+	smp_mb();
+	if (likely(irq_radix_writer == 0))
+		return;
+	__get_cpu_var(irq_radix_reader) = 0;
+	smp_wmb();
+	spin_lock(&irq_big_lock);
+	__get_cpu_var(irq_radix_reader) = 1;
+	spin_unlock(&irq_big_lock);
+}
+
+static void irq_radix_rdunlock(unsigned long flags)
+{
+	__get_cpu_var(irq_radix_reader) = 0;
+	local_irq_restore(flags);
+}
+
+
 unsigned int irq_create_mapping(struct irq_host *host,
 				irq_hw_number_t hwirq)
 {
@@ -604,13 +657,9 @@ void irq_dispose_mapping(unsigned int virq)
 		/* Check if radix tree allocated yet */
 		if (host->revmap_data.tree.gfp_mask == 0)
 			break;
-		/* XXX radix tree not safe ! remove lock whem it becomes safe
-		 * and use some RCU sync to make sure everything is ok before we
-		 * can re-use that map entry
-		 */
-		spin_lock_irqsave(&irq_big_lock, flags);
+		irq_radix_wrlock(&flags);
 		radix_tree_delete(&host->revmap_data.tree, hwirq);
-		spin_unlock_irqrestore(&irq_big_lock, flags);
+		irq_radix_wrunlock(flags);
 		break;
 	}
 
@@ -677,25 +726,24 @@ unsigned int irq_radix_revmap(struct irq_host *host,
 	if (tree->gfp_mask == 0)
 		return irq_find_mapping(host, hwirq);
 
-	/* XXX Current radix trees are NOT SMP safe !!! Remove that lock
-	 * when that is fixed (when Nick's patch gets in
-	 */
-	spin_lock_irqsave(&irq_big_lock, flags);
-
 	/* Now try to resolve */
+	irq_radix_rdlock(&flags);
 	ptr = radix_tree_lookup(tree, hwirq);
+	irq_radix_rdunlock(flags);
+
 	/* Found it, return */
 	if (ptr) {
 		virq = ptr - irq_map;
-		goto bail;
+		return virq;
 	}
 
 	/* If not there, try to insert it */
 	virq = irq_find_mapping(host, hwirq);
-	if (virq != NO_IRQ)
-		radix_tree_insert(tree, virq, &irq_map[virq]);
- bail:
-	spin_unlock_irqrestore(&irq_big_lock, flags);
+	if (virq != NO_IRQ) {
+		irq_radix_wrlock(&flags);
+		radix_tree_insert(tree, hwirq, &irq_map[virq]);
+		irq_radix_wrunlock(flags);
+	}
 	return virq;
 }
 
@@ -806,12 +854,12 @@ static int irq_late_init(void)
 	struct irq_host *h;
 	unsigned long flags;
 
-	spin_lock_irqsave(&irq_big_lock, flags);
+	irq_radix_wrlock(&flags);
 	list_for_each_entry(h, &irq_hosts, link) {
 		if (h->revmap_type == IRQ_HOST_MAP_TREE)
 			INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
 	}
-	spin_unlock_irqrestore(&irq_big_lock, flags);
+	irq_radix_wrunlock(flags);
 
 	return 0;
 }
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 9f0898c89759..cd65c367b8b6 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -61,6 +61,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	if (!ret) {
 		memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 		p->opcode = *p->addr;
+		flush_icache_range((unsigned long)p->ainsn.insn,
+			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
 	}
 
 	return ret;
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 7e98e778b52f..40a39291861f 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -112,9 +112,10 @@ static int __init add_legacy_port(struct device_node *np, int want_index,
 static int __init add_legacy_soc_port(struct device_node *np,
 				      struct device_node *soc_dev)
 {
-	phys_addr_t addr;
+	u64 addr;
 	u32 *addrp;
 	upf_t flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ;
+	struct device_node *tsi = of_get_parent(np);
 
 	/* We only support ports that have a clock frequency properly
 	 * encoded in the device-tree.
@@ -134,7 +135,10 @@ static int __init add_legacy_soc_port(struct device_node *np,
 	/* Add port, irq will be dealt with later. We passed a translated
 	 * IO port value. It will be fixed up later along with the irq
 	 */
-	return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags, 0);
+	if (tsi && !strcmp(tsi->type, "tsi-bridge"))
+		return add_legacy_port(np, -1, UPIO_TSI, addr, addr, NO_IRQ, flags, 0);
+	else
+		return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags, 0);
 }
 
 static int __init add_legacy_isa_port(struct device_node *np,
@@ -143,7 +147,7 @@ static int __init add_legacy_isa_port(struct device_node *np,
 	u32 *reg;
 	char *typep;
 	int index = -1;
-	phys_addr_t taddr;
+	u64 taddr;
 
 	DBG(" -> add_legacy_isa_port(%s)\n", np->full_name);
 
@@ -165,10 +169,13 @@ static int __init add_legacy_isa_port(struct device_node *np,
 	if (typep && *typep == 'S')
 		index = simple_strtol(typep+1, NULL, 0) - 1;
 
-	/* Translate ISA address */
+	/* Translate ISA address. If it fails, we still register the port
+	 * with no translated address so that it can be picked up as an IO
+	 * port later by the serial driver
+	 */
 	taddr = of_translate_address(np, reg);
 	if (taddr == OF_BAD_ADDR)
-		return -1;
+		taddr = 0;
 
 	/* Add port, irq will be dealt with later */
 	return add_legacy_port(np, index, UPIO_PORT, reg[1], taddr,
@@ -180,7 +187,7 @@ static int __init add_legacy_isa_port(struct device_node *np,
 static int __init add_legacy_pci_port(struct device_node *np,
 				      struct device_node *pci_dev)
 {
-	phys_addr_t addr, base;
+	u64 addr, base;
 	u32 *addrp;
 	unsigned int flags;
 	int iotype, index = -1, lindex = 0;
@@ -461,7 +468,7 @@ static int __init serial_dev_init(void)
 		fixup_port_irq(i, np, port);
 		if (port->iotype == UPIO_PORT)
 			fixup_port_pio(i, np, port);
-		if (port->iotype == UPIO_MEM)
+		if ((port->iotype == UPIO_MEM) || (port->iotype == UPIO_TSI))
 			fixup_port_mmio(i, np, port);
 	}
 
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index a81ca1b841ec..e60a0c544d63 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -13,6 +13,7 @@
 #include <linux/reboot.h>
 #include <linux/threads.h>
 #include <asm/machdep.h>
+#include <asm/lmb.h>
 
 void machine_crash_shutdown(struct pt_regs *regs)
 {
@@ -59,3 +60,58 @@ NORET_TYPE void machine_kexec(struct kimage *image)
 	}
 	for(;;);
 }
+
+static int __init early_parse_crashk(char *p)
+{
+	unsigned long size;
+
+	if (!p)
+		return 1;
+
+	size = memparse(p, &p);
+
+	if (*p == '@')
+		crashk_res.start = memparse(p + 1, &p);
+	else
+		crashk_res.start = KDUMP_KERNELBASE;
+
+	crashk_res.end = crashk_res.start + size - 1;
+
+	return 0;
+}
+early_param("crashkernel", early_parse_crashk);
+
+void __init reserve_crashkernel(void)
+{
+	unsigned long size;
+
+	if (crashk_res.start == 0)
+		return;
+
+	/* We might have got these values via the command line or the
+	 * device tree, either way sanitise them now. */
+
+	size = crashk_res.end - crashk_res.start + 1;
+
+	if (crashk_res.start != KDUMP_KERNELBASE)
+		printk("Crash kernel location must be 0x%x\n",
+				KDUMP_KERNELBASE);
+
+	crashk_res.start = KDUMP_KERNELBASE;
+	size = PAGE_ALIGN(size);
+	crashk_res.end = crashk_res.start + size - 1;
+
+	/* Crash kernel trumps memory limit */
+	if (memory_limit && memory_limit <= crashk_res.end) {
+		memory_limit = crashk_res.end + 1;
+		printk("Adjusted memory limit for crashkernel, now 0x%lx\n",
+				memory_limit);
+	}
+
+	lmb_reserve(crashk_res.start, size);
+}
+
+int overlaps_crashkernel(unsigned long start, unsigned long size)
+{
+	return (start + size) > crashk_res.start && start <= crashk_res.end;
+}
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index b438d45a068c..be58985c7681 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -10,7 +10,6 @@
  */
 
 
-#include <linux/cpumask.h>
 #include <linux/kexec.h>
 #include <linux/smp.h>
 #include <linux/thread_info.h>
@@ -21,7 +20,6 @@
 #include <asm/machdep.h>
 #include <asm/cacheflush.h>
 #include <asm/paca.h>
-#include <asm/lmb.h>
 #include <asm/mmu.h>
 #include <asm/sections.h>	/* _end */
 #include <asm/prom.h>
@@ -385,58 +383,3 @@ static int __init kexec_setup(void)
 	return 0;
 }
 __initcall(kexec_setup);
-
-static int __init early_parse_crashk(char *p)
-{
-	unsigned long size;
-
-	if (!p)
-		return 1;
-
-	size = memparse(p, &p);
-
-	if (*p == '@')
-		crashk_res.start = memparse(p + 1, &p);
-	else
-		crashk_res.start = KDUMP_KERNELBASE;
-
-	crashk_res.end = crashk_res.start + size - 1;
-
-	return 0;
-}
-early_param("crashkernel", early_parse_crashk);
-
-void __init reserve_crashkernel(void)
-{
-	unsigned long size;
-
-	if (crashk_res.start == 0)
-		return;
-
-	/* We might have got these values via the command line or the
-	 * device tree, either way sanitise them now. */
-
-	size = crashk_res.end - crashk_res.start + 1;
-
-	if (crashk_res.start != KDUMP_KERNELBASE)
-		printk("Crash kernel location must be 0x%x\n",
-				KDUMP_KERNELBASE);
-
-	crashk_res.start = KDUMP_KERNELBASE;
-	size = PAGE_ALIGN(size);
-	crashk_res.end = crashk_res.start + size - 1;
-
-	/* Crash kernel trumps memory limit */
-	if (memory_limit && memory_limit <= crashk_res.end) {
-		memory_limit = crashk_res.end + 1;
-		printk("Adjusted memory limit for crashkernel, now 0x%lx\n",
-				memory_limit);
-	}
-
-	lmb_reserve(crashk_res.start, size);
-}
-
-int overlaps_crashkernel(unsigned long start, unsigned long size)
-{
-	return (start + size) > crashk_res.start && start <= crashk_res.end;
-}
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index bfb407fc1aa1..e3ed21cd3d94 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -687,7 +687,7 @@ _GLOBAL(kexec_sequence)
 	/* clear out hardware hash page table and tlb */
 	ld	r5,0(r27)		/* deref function descriptor */
 	mtctr	r5
-	bctrl				/* ppc_md.hash_clear_all(void); */
+	bctrl				/* ppc_md.hpte_clear_all(void); */
 
 /*
  * kexec image calling is:
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 2fce7738e9e2..138134c8c17d 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -1289,6 +1289,9 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
 
 	DBG("Try to map irq for %s...\n", pci_name(pci_dev));
 
+#ifdef DEBUG
+	memset(&oirq, 0xff, sizeof(oirq));
+#endif
 	/* Try to get a mapping from the device-tree */
 	if (of_irq_map_pci(pci_dev, &oirq)) {
 		u8 line, pin;
@@ -1314,8 +1317,9 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
 		if (virq != NO_IRQ)
 			set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
 	} else {
-		DBG(" -> got one, spec %d cells (0x%08x...) on %s\n",
-		    oirq.size, oirq.specifier[0], oirq.controller->full_name);
+		DBG(" -> got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
+		    oirq.size, oirq.specifier[0], oirq.specifier[1],
+		    oirq.controller->full_name);
 
 		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
 					     oirq.size);
@@ -1324,6 +1328,9 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
 		DBG(" -> failed to map !\n");
 		return -1;
 	}
+
+	DBG(" -> mapped to linux irq %d\n", virq);
+
 	pci_dev->irq = virq;
 	pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, virq);
 
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index f6a05f090b25..39d3bfcabcd2 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -126,10 +126,6 @@ EXPORT_SYMBOL(pci_bus_mem_base_phys);
 EXPORT_SYMBOL(pci_bus_to_hose);
 #endif /* CONFIG_PCI */
 
-#ifdef CONFIG_NOT_COHERENT_CACHE
-EXPORT_SYMBOL(flush_dcache_all);
-#endif
-
 EXPORT_SYMBOL(start_thread);
 EXPORT_SYMBOL(kernel_thread);
 
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index ebd501a59abd..4394e545f9f7 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -557,7 +557,9 @@ unsigned long prom_memparse(const char *ptr, const char **retptr)
 static void __init early_cmdline_parse(void)
 {
 	struct prom_t *_prom = &RELOC(prom);
+#ifdef CONFIG_PPC64
 	const char *opt;
+#endif
 	char *p;
 	int l = 0;
 
@@ -644,13 +646,13 @@ static unsigned char ibm_architecture_vec[] = {
 	5 - 1,				/* 5 option vectors */
 
 	/* option vector 1: processor architectures supported */
-	3 - 1,				/* length */
+	3 - 2,				/* length */
 	0,				/* don't ignore, don't halt */
 	OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
 	OV1_PPC_2_04 | OV1_PPC_2_05,
 
 	/* option vector 2: Open Firmware options supported */
-	34 - 1,				/* length */
+	34 - 2,				/* length */
 	OV2_REAL_MODE,
 	0, 0,
 	W(0xffffffff),			/* real_base */
@@ -664,16 +666,16 @@ static unsigned char ibm_architecture_vec[] = {
 	48,				/* max log_2(hash table size) */
 
 	/* option vector 3: processor options supported */
-	3 - 1,				/* length */
+	3 - 2,				/* length */
 	0,				/* don't ignore, don't halt */
 	OV3_FP | OV3_VMX,
 
 	/* option vector 4: IBM PAPR implementation */
-	2 - 1,				/* length */
+	2 - 2,				/* length */
 	0,				/* don't halt */
 
 	/* option vector 5: PAPR/OF options */
-	3 - 1,				/* length */
+	3 - 2,				/* length */
 	0,				/* don't ignore, don't halt */
 	OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES,
 };
@@ -2030,6 +2032,39 @@ static void __init fixup_device_tree_maple(void)
 #define fixup_device_tree_maple()
 #endif
 
+#ifdef CONFIG_PPC_CHRP
+/* Pegasos lacks the "ranges" property in the isa node */
+static void __init fixup_device_tree_chrp(void)
+{
+	phandle isa;
+	u32 isa_ranges[6];
+	char *name;
+	int rc;
+
+	name = "/pci@80000000/isa@c";
+	isa = call_prom("finddevice", 1, 1, ADDR(name));
+	if (!PHANDLE_VALID(isa))
+		return;
+
+	rc = prom_getproplen(isa, "ranges");
+	if (rc != 0 && rc != PROM_ERROR)
+		return;
+
+	prom_printf("Fixing up missing ISA range on Pegasos...\n");
+
+	isa_ranges[0] = 0x1;
+	isa_ranges[1] = 0x0;
+	isa_ranges[2] = 0x01006000;
+	isa_ranges[3] = 0x0;
+	isa_ranges[4] = 0x0;
+	isa_ranges[5] = 0x00010000;
+	prom_setprop(isa, name, "ranges",
+			isa_ranges, sizeof(isa_ranges));
+}
+#else
+#define fixup_device_tree_chrp()
+#endif
+
 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
 static void __init fixup_device_tree_pmac(void)
 {
@@ -2077,6 +2112,7 @@ static void __init fixup_device_tree_pmac(void)
 static void __init fixup_device_tree(void)
 {
 	fixup_device_tree_maple();
+	fixup_device_tree_chrp();
 	fixup_device_tree_pmac();
 }
 
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 6a7e997c401d..a10825a5dfe6 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -598,11 +598,6 @@ static struct device_node *of_irq_find_parent(struct device_node *child)
 	return p;
 }
 
-static u8 of_irq_pci_swizzle(u8 slot, u8 pin)
-{
-	return (((pin - 1) + slot) % 4) + 1;
-}
-
 /* This doesn't need to be called if you don't have any special workaround
  * flags to pass
  */
@@ -644,14 +639,17 @@ void of_irq_map_init(unsigned int flags)
 
 }
 
-int of_irq_map_raw(struct device_node *parent, u32 *intspec, u32 *addr,
-		   struct of_irq *out_irq)
+int of_irq_map_raw(struct device_node *parent, u32 *intspec, u32 ointsize,
+		   u32 *addr, struct of_irq *out_irq)
 {
 	struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL;
 	u32 *tmp, *imap, *imask;
 	u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0;
 	int imaplen, match, i;
 
+	DBG("of_irq_map_raw: par=%s,intspec=[0x%08x 0x%08x...],ointsize=%d\n",
+	    parent->full_name, intspec[0], intspec[1], ointsize);
+
 	ipar = of_node_get(parent);
 
 	/* First get the #interrupt-cells property of the current cursor
@@ -675,6 +673,9 @@ int of_irq_map_raw(struct device_node *parent, u32 *intspec, u32 *addr,
 
 	DBG("of_irq_map_raw: ipar=%s, size=%d\n", ipar->full_name, intsize);
 
+	if (ointsize != intsize)
+		return -EINVAL;
+
 	/* Look for this #address-cells. We have to implement the old linux
 	 * trick of looking for the parent here as some device-trees rely on it
 	 */
@@ -880,17 +881,26 @@ int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq
 	}
 	intsize = *tmp;
 
+	DBG(" intsize=%d intlen=%d\n", intsize, intlen);
+
 	/* Check index */
 	if ((index + 1) * intsize > intlen)
 		return -EINVAL;
 
 	/* Get new specifier and map it */
-	res = of_irq_map_raw(p, intspec + index * intsize, addr, out_irq);
+	res = of_irq_map_raw(p, intspec + index * intsize, intsize,
+			     addr, out_irq);
 	of_node_put(p);
 	return res;
 }
 EXPORT_SYMBOL_GPL(of_irq_map_one);
 
+#ifdef CONFIG_PCI
+static u8 of_irq_pci_swizzle(u8 slot, u8 pin)
+{
+	return (((pin - 1) + slot) % 4) + 1;
+}
+
 int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
 {
 	struct device_node *dn, *ppnode;
@@ -964,7 +974,7 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
 	laddr[0] = (pdev->bus->number << 16)
 		 | (pdev->devfn << 8);
 	laddr[1] = laddr[2] = 0;
-	return of_irq_map_raw(ppnode, &lspec, laddr, out_irq);
+	return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq);
 }
 EXPORT_SYMBOL_GPL(of_irq_map_pci);
-
+#endif /* CONFIG_PCI */
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 4a4cb5598402..77f1e06d208d 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -569,6 +569,27 @@ int rtas_set_indicator(int indicator, int index, int new_value)
 }
 EXPORT_SYMBOL(rtas_set_indicator);
 
+/*
+ * Ignoring RTAS extended delay
+ */
+int rtas_set_indicator_fast(int indicator, int index, int new_value)
+{
+	int rc;
+	int token = rtas_token("set-indicator");
+
+	if (token == RTAS_UNKNOWN_SERVICE)
+		return -ENOENT;
+
+	rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
+
+	WARN_ON(rc == -2 || (rc >= 9900 && rc <= 9905));
+
+	if (rc < 0)
+		return rtas_error_rc(rc);
+
+	return rc;
+}
+
 void rtas_restart(char *cmd)
 {
 	if (rtas_flash_term_hook)
diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c
index f19e2e0e61e7..de59c6c31a5b 100644
--- a/arch/powerpc/kernel/smp-tbsync.c
+++ b/arch/powerpc/kernel/smp-tbsync.c
@@ -45,8 +45,9 @@ void __devinit smp_generic_take_timebase(void)
 {
 	int cmd;
 	u64 tb;
+	unsigned long flags;
 
-	local_irq_disable();
+	local_irq_save(flags);
 	while (!running)
 		barrier();
 	rmb();
@@ -70,7 +71,7 @@ void __devinit smp_generic_take_timebase(void)
 		set_tb(tb >> 32, tb & 0xfffffffful);
 		enter_contest(tbsync->mark, -1);
 	}
-	local_irq_enable();
+	local_irq_restore(flags);
 }
 
 static int __devinit start_contest(int cmd, long offset, int num)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 46c56cfd1b2f..6a9bc9ce54e0 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -144,13 +144,15 @@ void smp_message_recv(int msg, struct pt_regs *regs)
 
 void smp_send_reschedule(int cpu)
 {
-	smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
+	if (likely(smp_ops))
+		smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
 }
 
 #ifdef CONFIG_DEBUGGER
 void smp_send_debugger_break(int cpu)
 {
-	smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
+	if (likely(smp_ops))
+		smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
 }
 #endif
 
@@ -158,7 +160,7 @@ void smp_send_debugger_break(int cpu)
 void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
 {
 	crash_ipi_function_ptr = crash_ipi_callback;
-	if (crash_ipi_callback) {
+	if (crash_ipi_callback && smp_ops) {
 		mb();
 		smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
 	}
@@ -220,6 +222,9 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
 
+	if (unlikely(smp_ops == NULL))
+		return -1;
+
 	data.func = func;
 	data.info = info;
 	atomic_set(&data.started, 0);
@@ -357,7 +362,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	smp_store_cpu_info(boot_cpuid);
 	cpu_callin_map[boot_cpuid] = 1;
 
-	max_cpus = smp_ops->probe();
+	if (smp_ops)
+		max_cpus = smp_ops->probe();
+	else
+		max_cpus = 1;
 
 	smp_space_timers(max_cpus);
 
@@ -453,7 +461,7 @@ void generic_mach_cpu_die(void)
 
 static int __devinit cpu_enable(unsigned int cpu)
 {
-	if (smp_ops->cpu_enable)
+	if (smp_ops && smp_ops->cpu_enable)
 		return smp_ops->cpu_enable(cpu);
 
 	return -ENOSYS;
@@ -467,7 +475,8 @@ int __devinit __cpu_up(unsigned int cpu)
 	if (!cpu_enable(cpu))
 		return 0;
 
-	if (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))
+	if (smp_ops == NULL ||
+	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
 		return -EINVAL;
 
 	/* Make sure callin-map entry is 0 (can be leftover a CPU
@@ -568,7 +577,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
 	old_mask = current->cpus_allowed;
 	set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));
 
-	smp_ops->setup_cpu(boot_cpuid);
+	if (smp_ops)
+		smp_ops->setup_cpu(boot_cpuid);
 
 	set_cpus_allowed(current, old_mask);
 
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 010435095550..fec228cd0163 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -278,7 +278,7 @@ static void unregister_cpu_online(unsigned int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int __devinit sysfs_cpu_notify(struct notifier_block *self,
+static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
 				      unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned int)(long)hcpu;
@@ -296,7 +296,7 @@ static int __devinit sysfs_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __devinitdata sysfs_cpu_nb = {
+static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
 	.notifier_call	= sysfs_cpu_notify,
 };
 
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 774c0a3c5019..a124499e65d9 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -125,15 +125,8 @@ static long timezone_offset;
 unsigned long ppc_proc_freq;
 unsigned long ppc_tb_freq;
 
-u64 tb_last_jiffy __cacheline_aligned_in_smp;
-unsigned long tb_last_stamp;
-
-/*
- * Note that on ppc32 this only stores the bottom 32 bits of
- * the timebase value, but that's enough to tell when a jiffy
- * has passed.
- */
-DEFINE_PER_CPU(unsigned long, last_jiffy);
+static u64 tb_last_jiffy __cacheline_aligned_in_smp;
+static DEFINE_PER_CPU(u64, last_jiffy);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 /*
@@ -417,7 +410,7 @@ static __inline__ void timer_check_rtc(void)
 /*
  * This version of gettimeofday has microsecond resolution.
  */
-static inline void __do_gettimeofday(struct timeval *tv, u64 tb_val)
+static inline void __do_gettimeofday(struct timeval *tv)
 {
 	unsigned long sec, usec;
 	u64 tb_ticks, xsec;
@@ -431,7 +424,12 @@ static inline void __do_gettimeofday(struct timeval *tv, u64 tb_val)
 	 * without a divide (and in fact, without a multiply)
 	 */
 	temp_varp = do_gtod.varp;
-	tb_ticks = tb_val - temp_varp->tb_orig_stamp;
+
+	/* Sampling the time base must be done after loading
+	 * do_gtod.varp in order to avoid racing with update_gtod.
+	 */
+	data_barrier(temp_varp);
+	tb_ticks = get_tb() - temp_varp->tb_orig_stamp;
 	temp_tb_to_xs = temp_varp->tb_to_xs;
 	temp_stamp_xsec = temp_varp->stamp_xsec;
 	xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);
@@ -453,7 +451,7 @@ void do_gettimeofday(struct timeval *tv)
 	do {
 		seq = read_seqbegin_irqsave(&xtime_lock, flags);
 		sec = xtime.tv_sec;
-		nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp);
+		nsec = xtime.tv_nsec + tb_ticks_since(tb_last_jiffy);
 	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
 	usec = nsec / 1000;
 	while (usec >= 1000000) {
@@ -464,7 +462,7 @@ void do_gettimeofday(struct timeval *tv)
 		tv->tv_usec = usec;
 		return;
 	}
-	__do_gettimeofday(tv, get_tb());
+	__do_gettimeofday(tv);
 }
 
 EXPORT_SYMBOL(do_gettimeofday);
@@ -650,6 +648,7 @@ void timer_interrupt(struct pt_regs * regs)
 	int next_dec;
 	int cpu = smp_processor_id();
 	unsigned long ticks;
+	u64 tb_next_jiffy;
 
 #ifdef CONFIG_PPC32
 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
@@ -691,11 +690,13 @@ void timer_interrupt(struct pt_regs * regs)
 			continue;
 
 		write_seqlock(&xtime_lock);
-		tb_last_jiffy += tb_ticks_per_jiffy;
-		tb_last_stamp = per_cpu(last_jiffy, cpu);
-		do_timer(regs);
-		timer_recalc_offset(tb_last_jiffy);
-		timer_check_rtc();
+		tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy;
+		if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) {
+			tb_last_jiffy = tb_next_jiffy;
+			do_timer(regs);
+			timer_recalc_offset(tb_last_jiffy);
+			timer_check_rtc();
+		}
 		write_sequnlock(&xtime_lock);
 	}
 
@@ -740,7 +741,7 @@ void __init smp_space_timers(unsigned int max_cpus)
 	int i;
 	unsigned long half = tb_ticks_per_jiffy / 2;
 	unsigned long offset = tb_ticks_per_jiffy / max_cpus;
-	unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);
+	u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);
 
 	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
 	previous_tb -= tb_ticks_per_jiffy;
@@ -821,7 +822,7 @@ int do_settimeofday(struct timespec *tv)
 	 * and therefore the (jiffies - wall_jiffies) computation
 	 * has been removed.
 	 */
-	tb_delta = tb_ticks_since(tb_last_stamp);
+	tb_delta = tb_ticks_since(tb_last_jiffy);
 	tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs);	/* in xsec */
 	new_nsec -= SCALE_XSEC(tb_delta, 1000000000);
 
@@ -941,8 +942,7 @@ void __init time_init(void)
 	if (__USE_RTC()) {
 		/* 601 processor: dec counts down by 128 every 128ns */
 		ppc_tb_freq = 1000000000;
-		tb_last_stamp = get_rtcl();
-		tb_last_jiffy = tb_last_stamp;
+		tb_last_jiffy = get_rtcl();
 	} else {
 		/* Normal PowerPC with timebase register */
 		ppc_md.calibrate_decr();
@@ -950,7 +950,7 @@ void __init time_init(void)
 		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
 		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
 		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
-		tb_last_stamp = tb_last_jiffy = get_tb();
+		tb_last_jiffy = get_tb();
 	}
 
 	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
@@ -1027,7 +1027,7 @@ void __init time_init(void)
 	do_gtod.varp = &do_gtod.vars[0];
 	do_gtod.var_idx = 0;
 	do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
-	__get_cpu_var(last_jiffy) = tb_last_stamp;
+	__get_cpu_var(last_jiffy) = tb_last_jiffy;
 	do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
 	do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
 	do_gtod.varp->tb_to_xs = tb_to_xs;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 3c668078e524..9b352bd0a460 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -55,9 +55,6 @@
 
 #ifdef CONFIG_PPC64	/* XXX */
 #define _IO_BASE	pci_io_base
-#ifdef CONFIG_KEXEC
-cpumask_t cpus_in_sr = CPU_MASK_NONE;
-#endif
 #endif
 
 #ifdef CONFIG_DEBUGGER
@@ -150,13 +147,9 @@ int die(const char *str, struct pt_regs *regs, long err)
 	if (in_interrupt())
 		panic("Fatal exception in interrupt");
 
-	if (panic_on_oops) {
-#ifdef CONFIG_PPC64
-		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
-		ssleep(5);
-#endif
+	if (panic_on_oops)
 		panic("Fatal exception");
-	}
+
 	do_exit(err);
 
 	return 0;
@@ -215,6 +208,19 @@ void system_reset_exception(struct pt_regs *regs)
 
 	die("System Reset", regs, SIGABRT);
 
+	/*
+	 * Some CPUs when released from the debugger will execute this path.
+	 * These CPUs entered the debugger via a soft-reset. If the CPU was
+	 * hung before entering the debugger it will return to the hung
+	 * state when exiting this function. This causes a problem in
+	 * kdump since the hung CPU(s) will not respond to the IPI sent
+	 * from kdump. To prevent the problem we call crash_kexec_secondary()
+	 * here. If a kdump had not been initiated or we exit the debugger
+	 * with the "exit and recover" command (x) crash_kexec_secondary()
+	 * will return after 5ms and the CPU returns to its previous state.
+	 */
+	crash_kexec_secondary(regs);
+
 	/* Must die if the interrupt is not recoverable */
 	if (!(regs->msr & MSR_RI))
 		panic("Unrecoverable System Reset");
@@ -579,14 +585,14 @@ static void parse_fpe(struct pt_regs *regs)
 #define INST_MFSPR_PVR_MASK	0xfc1fffff
 
 #define INST_DCBA		0x7c0005ec
-#define INST_DCBA_MASK		0x7c0007fe
+#define INST_DCBA_MASK		0xfc0007fe
 
 #define INST_MCRXR		0x7c000400
-#define INST_MCRXR_MASK		0x7c0007fe
+#define INST_MCRXR_MASK		0xfc0007fe
 
 #define INST_STRING		0x7c00042a
-#define INST_STRING_MASK	0x7c0007fe
-#define INST_STRING_GEN_MASK	0x7c00067e
+#define INST_STRING_MASK	0xfc0007fe
+#define INST_STRING_GEN_MASK	0xfc00067e
 #define INST_LSWI		0x7c0004aa
 #define INST_LSWX		0x7c00042a
 #define INST_STSWI		0x7c0005aa
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 0835b4841dea..2d17f2b8eda7 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -81,10 +81,14 @@ static int udbg_550_getc(void)
 void udbg_init_uart(void __iomem *comport, unsigned int speed,
 		    unsigned int clock)
 {
-	unsigned int dll, base_bauds = clock / 16;
+	unsigned int dll, base_bauds;
 
+	if (clock == 0)
+		clock = 1843200;
 	if (speed == 0)
 		speed = 9600;
+
+	base_bauds = clock / 16;
 	dll = base_bauds / speed;
 
 	if (comport) {
diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile
index 8a3bed5f143a..3726358faae8 100644
--- a/arch/powerpc/kernel/vdso32/Makefile
+++ b/arch/powerpc/kernel/vdso32/Makefile
@@ -14,7 +14,8 @@ obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
 
 
 EXTRA_CFLAGS := -shared -s -fno-common -fno-builtin
-EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso32.so.1
+EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso32.so.1 \
+		$(call ld-option, -Wl$(comma)--hash-style=sysv)
 EXTRA_AFLAGS := -D__VDSO32__ -s
 
 obj-y += vdso32_wrapper.o
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index f4bad720cb0a..6187af2d54c3 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -14,6 +14,7 @@ SECTIONS
 {
 	. = VDSO32_LBASE + SIZEOF_HEADERS;
 	.hash           : { *(.hash) }			:text
+	.gnu.hash       : { *(.gnu.hash) }
 	.dynsym         : { *(.dynsym) }
 	.dynstr         : { *(.dynstr) }
 	.gnu.version    : { *(.gnu.version) }
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile
index ab39988452cc..43af9b2a6f3b 100644
--- a/arch/powerpc/kernel/vdso64/Makefile
+++ b/arch/powerpc/kernel/vdso64/Makefile
@@ -8,7 +8,8 @@ targets := $(obj-vdso64) vdso64.so
 obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
 
 EXTRA_CFLAGS := -shared -s -fno-common -fno-builtin
-EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso64.so.1
+EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
+		$(call ld-option, -Wl$(comma)--hash-style=sysv)
 EXTRA_AFLAGS := -D__VDSO64__ -s
 
 obj-y += vdso64_wrapper.o
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index 4bdf224464ab..4a2b6dc0960c 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -12,6 +12,7 @@ SECTIONS
 {
 	. = VDSO64_LBASE + SIZEOF_HEADERS;
 	.hash           : { *(.hash) }			:text
+	.gnu.hash       : { *(.gnu.hash) }
 	.dynsym         : { *(.dynsym) }
 	.dynstr         : { *(.dynstr) }
 	.gnu.version    : { *(.gnu.version) }