diff options
author | Tony Lindgren <tony@atomide.com> | 2010-03-01 17:19:05 -0500 |
---|---|---|
committer | Tony Lindgren <tony@atomide.com> | 2010-03-01 17:19:05 -0500 |
commit | d702d12167a2c05a346f49aac7a311d597762495 (patch) | |
tree | baae42c299cce34d6df24b5d01f8b1d0b481bd9a /arch/powerpc/kernel | |
parent | 9418c65f9bd861d0f7e39aab9cfb3aa6f2275d11 (diff) | |
parent | ac0f6f927db539e03e1f3f61bcd4ed57d5cde7a9 (diff) |
Merge with mainline to remove plat-omap/Kconfig conflict
Conflicts:
arch/arm/plat-omap/Kconfig
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r-- | arch/powerpc/kernel/entry_64.S | 3 | ||||
-rw-r--r-- | arch/powerpc/kernel/firmware.c | 2 | ||||
-rw-r--r-- | arch/powerpc/kernel/head_fsl_booke.S | 4 | ||||
-rw-r--r-- | arch/powerpc/kernel/irq.c | 140 | ||||
-rw-r--r-- | arch/powerpc/kernel/kgdb.c | 2 | ||||
-rw-r--r-- | arch/powerpc/kernel/kprobes.c | 4 | ||||
-rw-r--r-- | arch/powerpc/kernel/lparcfg.c | 10 | ||||
-rw-r--r-- | arch/powerpc/kernel/nvram_64.c | 6 | ||||
-rw-r--r-- | arch/powerpc/kernel/of_platform.c | 2 | ||||
-rw-r--r-- | arch/powerpc/kernel/pci-common.c | 24 | ||||
-rw-r--r-- | arch/powerpc/kernel/pci_64.c | 2 | ||||
-rw-r--r-- | arch/powerpc/kernel/pci_of_scan.c | 2 | ||||
-rw-r--r-- | arch/powerpc/kernel/perf_callchain.c | 3 | ||||
-rw-r--r-- | arch/powerpc/kernel/perf_event.c | 10 | ||||
-rw-r--r-- | arch/powerpc/kernel/pmc.c | 10 | ||||
-rw-r--r-- | arch/powerpc/kernel/process.c | 116 | ||||
-rw-r--r-- | arch/powerpc/kernel/prom.c | 887 | ||||
-rw-r--r-- | arch/powerpc/kernel/prom_init.c | 81 | ||||
-rw-r--r-- | arch/powerpc/kernel/ptrace.c | 516 | ||||
-rw-r--r-- | arch/powerpc/kernel/signal.c | 6 | ||||
-rw-r--r-- | arch/powerpc/kernel/signal_32.c | 16 | ||||
-rw-r--r-- | arch/powerpc/kernel/time.c | 10 | ||||
-rw-r--r-- | arch/powerpc/kernel/traps.c | 128 |
23 files changed, 956 insertions, 1028 deletions
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index bdcb557d470a..07109d843787 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
@@ -791,9 +791,8 @@ _GLOBAL(enter_rtas) | |||
791 | 791 | ||
792 | li r9,1 | 792 | li r9,1 |
793 | rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG) | 793 | rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG) |
794 | ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP | 794 | ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI |
795 | andc r6,r0,r9 | 795 | andc r6,r0,r9 |
796 | ori r6,r6,MSR_RI | ||
797 | sync /* disable interrupts so SRR0/1 */ | 796 | sync /* disable interrupts so SRR0/1 */ |
798 | mtmsrd r0 /* don't get trashed */ | 797 | mtmsrd r0 /* don't get trashed */ |
799 | 798 | ||
diff --git a/arch/powerpc/kernel/firmware.c b/arch/powerpc/kernel/firmware.c index 1679a70bbcad..6b1f4271eb53 100644 --- a/arch/powerpc/kernel/firmware.c +++ b/arch/powerpc/kernel/firmware.c | |||
@@ -17,5 +17,5 @@ | |||
17 | 17 | ||
18 | #include <asm/firmware.h> | 18 | #include <asm/firmware.h> |
19 | 19 | ||
20 | unsigned long powerpc_firmware_features; | 20 | unsigned long powerpc_firmware_features __read_mostly; |
21 | EXPORT_SYMBOL_GPL(powerpc_firmware_features); | 21 | EXPORT_SYMBOL_GPL(powerpc_firmware_features); |
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 7f4bd7f3b6af..25793bb0e782 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S | |||
@@ -214,11 +214,11 @@ skpinv: addi r6,r6,1 /* Increment */ | |||
214 | bl 1f /* Find our address */ | 214 | bl 1f /* Find our address */ |
215 | 1: mflr r9 | 215 | 1: mflr r9 |
216 | rlwimi r7,r9,0,20,31 | 216 | rlwimi r7,r9,0,20,31 |
217 | addi r7,r7,24 | 217 | addi r7,r7,(2f - 1b) |
218 | mtspr SPRN_SRR0,r7 | 218 | mtspr SPRN_SRR0,r7 |
219 | mtspr SPRN_SRR1,r6 | 219 | mtspr SPRN_SRR1,r6 |
220 | rfi | 220 | rfi |
221 | 221 | 2: | |
222 | /* 4. Clear out PIDs & Search info */ | 222 | /* 4. Clear out PIDs & Search info */ |
223 | li r6,0 | 223 | li r6,0 |
224 | mtspr SPRN_MAS6,r6 | 224 | mtspr SPRN_MAS6,r6 |
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 9040330b0530..64f6f2031c22 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -73,8 +73,10 @@ | |||
73 | #define CREATE_TRACE_POINTS | 73 | #define CREATE_TRACE_POINTS |
74 | #include <asm/trace.h> | 74 | #include <asm/trace.h> |
75 | 75 | ||
76 | DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); | ||
77 | EXPORT_PER_CPU_SYMBOL(irq_stat); | ||
78 | |||
76 | int __irq_offset_value; | 79 | int __irq_offset_value; |
77 | static int ppc_spurious_interrupts; | ||
78 | 80 | ||
79 | #ifdef CONFIG_PPC32 | 81 | #ifdef CONFIG_PPC32 |
80 | EXPORT_SYMBOL(__irq_offset_value); | 82 | EXPORT_SYMBOL(__irq_offset_value); |
@@ -180,30 +182,64 @@ notrace void raw_local_irq_restore(unsigned long en) | |||
180 | EXPORT_SYMBOL(raw_local_irq_restore); | 182 | EXPORT_SYMBOL(raw_local_irq_restore); |
181 | #endif /* CONFIG_PPC64 */ | 183 | #endif /* CONFIG_PPC64 */ |
182 | 184 | ||
185 | static int show_other_interrupts(struct seq_file *p, int prec) | ||
186 | { | ||
187 | int j; | ||
188 | |||
189 | #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT) | ||
190 | if (tau_initialized) { | ||
191 | seq_printf(p, "%*s: ", prec, "TAU"); | ||
192 | for_each_online_cpu(j) | ||
193 | seq_printf(p, "%10u ", tau_interrupts(j)); | ||
194 | seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); | ||
195 | } | ||
196 | #endif /* CONFIG_PPC32 && CONFIG_TAU_INT */ | ||
197 | |||
198 | seq_printf(p, "%*s: ", prec, "LOC"); | ||
199 | for_each_online_cpu(j) | ||
200 | seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs); | ||
201 | seq_printf(p, " Local timer interrupts\n"); | ||
202 | |||
203 | seq_printf(p, "%*s: ", prec, "SPU"); | ||
204 | for_each_online_cpu(j) | ||
205 | seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs); | ||
206 | seq_printf(p, " Spurious interrupts\n"); | ||
207 | |||
208 | seq_printf(p, "%*s: ", prec, "CNT"); | ||
209 | for_each_online_cpu(j) | ||
210 | seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs); | ||
211 | seq_printf(p, " Performance monitoring interrupts\n"); | ||
212 | |||
213 | seq_printf(p, "%*s: ", prec, "MCE"); | ||
214 | for_each_online_cpu(j) | ||
215 | seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions); | ||
216 | seq_printf(p, " Machine check exceptions\n"); | ||
217 | |||
218 | return 0; | ||
219 | } | ||
220 | |||
183 | int show_interrupts(struct seq_file *p, void *v) | 221 | int show_interrupts(struct seq_file *p, void *v) |
184 | { | 222 | { |
185 | int i = *(loff_t *)v, j; | 223 | unsigned long flags, any_count = 0; |
224 | int i = *(loff_t *) v, j, prec; | ||
186 | struct irqaction *action; | 225 | struct irqaction *action; |
187 | struct irq_desc *desc; | 226 | struct irq_desc *desc; |
188 | unsigned long flags; | ||
189 | 227 | ||
228 | if (i > nr_irqs) | ||
229 | return 0; | ||
230 | |||
231 | for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec) | ||
232 | j *= 10; | ||
233 | |||
234 | if (i == nr_irqs) | ||
235 | return show_other_interrupts(p, prec); | ||
236 | |||
237 | /* print header */ | ||
190 | if (i == 0) { | 238 | if (i == 0) { |
191 | seq_puts(p, " "); | 239 | seq_printf(p, "%*s", prec + 8, ""); |
192 | for_each_online_cpu(j) | 240 | for_each_online_cpu(j) |
193 | seq_printf(p, "CPU%d ", j); | 241 | seq_printf(p, "CPU%-8d", j); |
194 | seq_putc(p, '\n'); | 242 | seq_putc(p, '\n'); |
195 | } else if (i == nr_irqs) { | ||
196 | #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT) | ||
197 | if (tau_initialized){ | ||
198 | seq_puts(p, "TAU: "); | ||
199 | for_each_online_cpu(j) | ||
200 | seq_printf(p, "%10u ", tau_interrupts(j)); | ||
201 | seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); | ||
202 | } | ||
203 | #endif /* CONFIG_PPC32 && CONFIG_TAU_INT*/ | ||
204 | seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts); | ||
205 | |||
206 | return 0; | ||
207 | } | 243 | } |
208 | 244 | ||
209 | desc = irq_to_desc(i); | 245 | desc = irq_to_desc(i); |
@@ -211,37 +247,48 @@ int show_interrupts(struct seq_file *p, void *v) | |||
211 | return 0; | 247 | return 0; |
212 | 248 | ||
213 | raw_spin_lock_irqsave(&desc->lock, flags); | 249 | raw_spin_lock_irqsave(&desc->lock, flags); |
214 | 250 | for_each_online_cpu(j) | |
251 | any_count |= kstat_irqs_cpu(i, j); | ||
215 | action = desc->action; | 252 | action = desc->action; |
216 | if (!action || !action->handler) | 253 | if (!action && !any_count) |
217 | goto skip; | 254 | goto out; |
218 | 255 | ||
219 | seq_printf(p, "%3d: ", i); | 256 | seq_printf(p, "%*d: ", prec, i); |
220 | #ifdef CONFIG_SMP | ||
221 | for_each_online_cpu(j) | 257 | for_each_online_cpu(j) |
222 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | 258 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); |
223 | #else | ||
224 | seq_printf(p, "%10u ", kstat_irqs(i)); | ||
225 | #endif /* CONFIG_SMP */ | ||
226 | 259 | ||
227 | if (desc->chip) | 260 | if (desc->chip) |
228 | seq_printf(p, " %s ", desc->chip->name); | 261 | seq_printf(p, " %-16s", desc->chip->name); |
229 | else | 262 | else |
230 | seq_puts(p, " None "); | 263 | seq_printf(p, " %-16s", "None"); |
264 | seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge"); | ||
231 | 265 | ||
232 | seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge "); | 266 | if (action) { |
233 | seq_printf(p, " %s", action->name); | 267 | seq_printf(p, " %s", action->name); |
268 | while ((action = action->next) != NULL) | ||
269 | seq_printf(p, ", %s", action->name); | ||
270 | } | ||
234 | 271 | ||
235 | for (action = action->next; action; action = action->next) | ||
236 | seq_printf(p, ", %s", action->name); | ||
237 | seq_putc(p, '\n'); | 272 | seq_putc(p, '\n'); |
238 | 273 | out: | |
239 | skip: | ||
240 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 274 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
241 | |||
242 | return 0; | 275 | return 0; |
243 | } | 276 | } |
244 | 277 | ||
278 | /* | ||
279 | * /proc/stat helpers | ||
280 | */ | ||
281 | u64 arch_irq_stat_cpu(unsigned int cpu) | ||
282 | { | ||
283 | u64 sum = per_cpu(irq_stat, cpu).timer_irqs; | ||
284 | |||
285 | sum += per_cpu(irq_stat, cpu).pmu_irqs; | ||
286 | sum += per_cpu(irq_stat, cpu).mce_exceptions; | ||
287 | sum += per_cpu(irq_stat, cpu).spurious_irqs; | ||
288 | |||
289 | return sum; | ||
290 | } | ||
291 | |||
245 | #ifdef CONFIG_HOTPLUG_CPU | 292 | #ifdef CONFIG_HOTPLUG_CPU |
246 | void fixup_irqs(cpumask_t map) | 293 | void fixup_irqs(cpumask_t map) |
247 | { | 294 | { |
@@ -353,8 +400,7 @@ void do_IRQ(struct pt_regs *regs) | |||
353 | if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) | 400 | if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) |
354 | handle_one_irq(irq); | 401 | handle_one_irq(irq); |
355 | else if (irq != NO_IRQ_IGNORE) | 402 | else if (irq != NO_IRQ_IGNORE) |
356 | /* That's not SMP safe ... but who cares ? */ | 403 | __get_cpu_var(irq_stat).spurious_irqs++; |
357 | ppc_spurious_interrupts++; | ||
358 | 404 | ||
359 | irq_exit(); | 405 | irq_exit(); |
360 | set_irq_regs(old_regs); | 406 | set_irq_regs(old_regs); |
@@ -474,7 +520,7 @@ void do_softirq(void) | |||
474 | */ | 520 | */ |
475 | 521 | ||
476 | static LIST_HEAD(irq_hosts); | 522 | static LIST_HEAD(irq_hosts); |
477 | static DEFINE_SPINLOCK(irq_big_lock); | 523 | static DEFINE_RAW_SPINLOCK(irq_big_lock); |
478 | static unsigned int revmap_trees_allocated; | 524 | static unsigned int revmap_trees_allocated; |
479 | static DEFINE_MUTEX(revmap_trees_mutex); | 525 | static DEFINE_MUTEX(revmap_trees_mutex); |
480 | struct irq_map_entry irq_map[NR_IRQS]; | 526 | struct irq_map_entry irq_map[NR_IRQS]; |
@@ -520,14 +566,14 @@ struct irq_host *irq_alloc_host(struct device_node *of_node, | |||
520 | if (host->ops->match == NULL) | 566 | if (host->ops->match == NULL) |
521 | host->ops->match = default_irq_host_match; | 567 | host->ops->match = default_irq_host_match; |
522 | 568 | ||
523 | spin_lock_irqsave(&irq_big_lock, flags); | 569 | raw_spin_lock_irqsave(&irq_big_lock, flags); |
524 | 570 | ||
525 | /* If it's a legacy controller, check for duplicates and | 571 | /* If it's a legacy controller, check for duplicates and |
526 | * mark it as allocated (we use irq 0 host pointer for that | 572 | * mark it as allocated (we use irq 0 host pointer for that |
527 | */ | 573 | */ |
528 | if (revmap_type == IRQ_HOST_MAP_LEGACY) { | 574 | if (revmap_type == IRQ_HOST_MAP_LEGACY) { |
529 | if (irq_map[0].host != NULL) { | 575 | if (irq_map[0].host != NULL) { |
530 | spin_unlock_irqrestore(&irq_big_lock, flags); | 576 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); |
531 | /* If we are early boot, we can't free the structure, | 577 | /* If we are early boot, we can't free the structure, |
532 | * too bad... | 578 | * too bad... |
533 | * this will be fixed once slab is made available early | 579 | * this will be fixed once slab is made available early |
@@ -541,7 +587,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node, | |||
541 | } | 587 | } |
542 | 588 | ||
543 | list_add(&host->link, &irq_hosts); | 589 | list_add(&host->link, &irq_hosts); |
544 | spin_unlock_irqrestore(&irq_big_lock, flags); | 590 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); |
545 | 591 | ||
546 | /* Additional setups per revmap type */ | 592 | /* Additional setups per revmap type */ |
547 | switch(revmap_type) { | 593 | switch(revmap_type) { |
@@ -592,13 +638,13 @@ struct irq_host *irq_find_host(struct device_node *node) | |||
592 | * the absence of a device node. This isn't a problem so far | 638 | * the absence of a device node. This isn't a problem so far |
593 | * yet though... | 639 | * yet though... |
594 | */ | 640 | */ |
595 | spin_lock_irqsave(&irq_big_lock, flags); | 641 | raw_spin_lock_irqsave(&irq_big_lock, flags); |
596 | list_for_each_entry(h, &irq_hosts, link) | 642 | list_for_each_entry(h, &irq_hosts, link) |
597 | if (h->ops->match(h, node)) { | 643 | if (h->ops->match(h, node)) { |
598 | found = h; | 644 | found = h; |
599 | break; | 645 | break; |
600 | } | 646 | } |
601 | spin_unlock_irqrestore(&irq_big_lock, flags); | 647 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); |
602 | return found; | 648 | return found; |
603 | } | 649 | } |
604 | EXPORT_SYMBOL_GPL(irq_find_host); | 650 | EXPORT_SYMBOL_GPL(irq_find_host); |
@@ -967,7 +1013,7 @@ unsigned int irq_alloc_virt(struct irq_host *host, | |||
967 | if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS)) | 1013 | if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS)) |
968 | return NO_IRQ; | 1014 | return NO_IRQ; |
969 | 1015 | ||
970 | spin_lock_irqsave(&irq_big_lock, flags); | 1016 | raw_spin_lock_irqsave(&irq_big_lock, flags); |
971 | 1017 | ||
972 | /* Use hint for 1 interrupt if any */ | 1018 | /* Use hint for 1 interrupt if any */ |
973 | if (count == 1 && hint >= NUM_ISA_INTERRUPTS && | 1019 | if (count == 1 && hint >= NUM_ISA_INTERRUPTS && |
@@ -991,7 +1037,7 @@ unsigned int irq_alloc_virt(struct irq_host *host, | |||
991 | } | 1037 | } |
992 | } | 1038 | } |
993 | if (found == NO_IRQ) { | 1039 | if (found == NO_IRQ) { |
994 | spin_unlock_irqrestore(&irq_big_lock, flags); | 1040 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); |
995 | return NO_IRQ; | 1041 | return NO_IRQ; |
996 | } | 1042 | } |
997 | hint_found: | 1043 | hint_found: |
@@ -1000,7 +1046,7 @@ unsigned int irq_alloc_virt(struct irq_host *host, | |||
1000 | smp_wmb(); | 1046 | smp_wmb(); |
1001 | irq_map[i].host = host; | 1047 | irq_map[i].host = host; |
1002 | } | 1048 | } |
1003 | spin_unlock_irqrestore(&irq_big_lock, flags); | 1049 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); |
1004 | return found; | 1050 | return found; |
1005 | } | 1051 | } |
1006 | 1052 | ||
@@ -1012,7 +1058,7 @@ void irq_free_virt(unsigned int virq, unsigned int count) | |||
1012 | WARN_ON (virq < NUM_ISA_INTERRUPTS); | 1058 | WARN_ON (virq < NUM_ISA_INTERRUPTS); |
1013 | WARN_ON (count == 0 || (virq + count) > irq_virq_count); | 1059 | WARN_ON (count == 0 || (virq + count) > irq_virq_count); |
1014 | 1060 | ||
1015 | spin_lock_irqsave(&irq_big_lock, flags); | 1061 | raw_spin_lock_irqsave(&irq_big_lock, flags); |
1016 | for (i = virq; i < (virq + count); i++) { | 1062 | for (i = virq; i < (virq + count); i++) { |
1017 | struct irq_host *host; | 1063 | struct irq_host *host; |
1018 | 1064 | ||
@@ -1025,7 +1071,7 @@ void irq_free_virt(unsigned int virq, unsigned int count) | |||
1025 | smp_wmb(); | 1071 | smp_wmb(); |
1026 | irq_map[i].host = NULL; | 1072 | irq_map[i].host = NULL; |
1027 | } | 1073 | } |
1028 | spin_unlock_irqrestore(&irq_big_lock, flags); | 1074 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); |
1029 | } | 1075 | } |
1030 | 1076 | ||
1031 | int arch_early_irq_init(void) | 1077 | int arch_early_irq_init(void) |
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index b6bd1eaa1c24..41bada0298c8 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c | |||
@@ -333,7 +333,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code, | |||
333 | atomic_set(&kgdb_cpu_doing_single_step, -1); | 333 | atomic_set(&kgdb_cpu_doing_single_step, -1); |
334 | /* set the trace bit if we're stepping */ | 334 | /* set the trace bit if we're stepping */ |
335 | if (remcom_in_buffer[0] == 's') { | 335 | if (remcom_in_buffer[0] == 's') { |
336 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | 336 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
337 | mtspr(SPRN_DBCR0, | 337 | mtspr(SPRN_DBCR0, |
338 | mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); | 338 | mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); |
339 | linux_regs->msr |= MSR_DE; | 339 | linux_regs->msr |= MSR_DE; |
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index c9329786073b..3fd1af902112 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c | |||
@@ -36,7 +36,7 @@ | |||
36 | #include <asm/uaccess.h> | 36 | #include <asm/uaccess.h> |
37 | #include <asm/system.h> | 37 | #include <asm/system.h> |
38 | 38 | ||
39 | #ifdef CONFIG_BOOKE | 39 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
40 | #define MSR_SINGLESTEP (MSR_DE) | 40 | #define MSR_SINGLESTEP (MSR_DE) |
41 | #else | 41 | #else |
42 | #define MSR_SINGLESTEP (MSR_SE) | 42 | #define MSR_SINGLESTEP (MSR_SE) |
@@ -110,7 +110,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) | |||
110 | * like Decrementer or External Interrupt */ | 110 | * like Decrementer or External Interrupt */ |
111 | regs->msr &= ~MSR_EE; | 111 | regs->msr &= ~MSR_EE; |
112 | regs->msr |= MSR_SINGLESTEP; | 112 | regs->msr |= MSR_SINGLESTEP; |
113 | #ifdef CONFIG_BOOKE | 113 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
114 | regs->msr &= ~MSR_CE; | 114 | regs->msr &= ~MSR_CE; |
115 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); | 115 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); |
116 | #endif | 116 | #endif |
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index 79a00bb9c64c..d09d1c615150 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c | |||
@@ -359,7 +359,7 @@ static void parse_system_parameter_string(struct seq_file *m) | |||
359 | 359 | ||
360 | unsigned char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL); | 360 | unsigned char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL); |
361 | if (!local_buffer) { | 361 | if (!local_buffer) { |
362 | printk(KERN_ERR "%s %s kmalloc failure at line %d \n", | 362 | printk(KERN_ERR "%s %s kmalloc failure at line %d\n", |
363 | __FILE__, __func__, __LINE__); | 363 | __FILE__, __func__, __LINE__); |
364 | return; | 364 | return; |
365 | } | 365 | } |
@@ -383,13 +383,13 @@ static void parse_system_parameter_string(struct seq_file *m) | |||
383 | int idx, w_idx; | 383 | int idx, w_idx; |
384 | char *workbuffer = kzalloc(SPLPAR_MAXLENGTH, GFP_KERNEL); | 384 | char *workbuffer = kzalloc(SPLPAR_MAXLENGTH, GFP_KERNEL); |
385 | if (!workbuffer) { | 385 | if (!workbuffer) { |
386 | printk(KERN_ERR "%s %s kmalloc failure at line %d \n", | 386 | printk(KERN_ERR "%s %s kmalloc failure at line %d\n", |
387 | __FILE__, __func__, __LINE__); | 387 | __FILE__, __func__, __LINE__); |
388 | kfree(local_buffer); | 388 | kfree(local_buffer); |
389 | return; | 389 | return; |
390 | } | 390 | } |
391 | #ifdef LPARCFG_DEBUG | 391 | #ifdef LPARCFG_DEBUG |
392 | printk(KERN_INFO "success calling get-system-parameter \n"); | 392 | printk(KERN_INFO "success calling get-system-parameter\n"); |
393 | #endif | 393 | #endif |
394 | splpar_strlen = local_buffer[0] * 256 + local_buffer[1]; | 394 | splpar_strlen = local_buffer[0] * 256 + local_buffer[1]; |
395 | local_buffer += 2; /* step over strlen value */ | 395 | local_buffer += 2; /* step over strlen value */ |
@@ -440,7 +440,7 @@ static int lparcfg_count_active_processors(void) | |||
440 | 440 | ||
441 | while ((cpus_dn = of_find_node_by_type(cpus_dn, "cpu"))) { | 441 | while ((cpus_dn = of_find_node_by_type(cpus_dn, "cpu"))) { |
442 | #ifdef LPARCFG_DEBUG | 442 | #ifdef LPARCFG_DEBUG |
443 | printk(KERN_ERR "cpus_dn %p \n", cpus_dn); | 443 | printk(KERN_ERR "cpus_dn %p\n", cpus_dn); |
444 | #endif | 444 | #endif |
445 | count++; | 445 | count++; |
446 | } | 446 | } |
@@ -725,7 +725,7 @@ static int lparcfg_data(struct seq_file *m, void *v) | |||
725 | const unsigned int *lp_index_ptr; | 725 | const unsigned int *lp_index_ptr; |
726 | unsigned int lp_index = 0; | 726 | unsigned int lp_index = 0; |
727 | 727 | ||
728 | seq_printf(m, "%s %s \n", MODULE_NAME, MODULE_VERS); | 728 | seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS); |
729 | 729 | ||
730 | rootdn = of_find_node_by_path("/"); | 730 | rootdn = of_find_node_by_path("/"); |
731 | if (rootdn) { | 731 | if (rootdn) { |
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index ad461e735aec..9cf197f01e94 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c | |||
@@ -338,8 +338,8 @@ static int __init nvram_create_os_partition(void) | |||
338 | 338 | ||
339 | rc = nvram_write_header(new_part); | 339 | rc = nvram_write_header(new_part); |
340 | if (rc <= 0) { | 340 | if (rc <= 0) { |
341 | printk(KERN_ERR "nvram_create_os_partition: nvram_write_header \ | 341 | printk(KERN_ERR "nvram_create_os_partition: nvram_write_header " |
342 | failed (%d)\n", rc); | 342 | "failed (%d)\n", rc); |
343 | return rc; | 343 | return rc; |
344 | } | 344 | } |
345 | 345 | ||
@@ -349,7 +349,7 @@ static int __init nvram_create_os_partition(void) | |||
349 | rc = ppc_md.nvram_write((char *)&seq_init, sizeof(seq_init), &tmp_index); | 349 | rc = ppc_md.nvram_write((char *)&seq_init, sizeof(seq_init), &tmp_index); |
350 | if (rc <= 0) { | 350 | if (rc <= 0) { |
351 | printk(KERN_ERR "nvram_create_os_partition: nvram_write " | 351 | printk(KERN_ERR "nvram_create_os_partition: nvram_write " |
352 | "failed (%d)\n", rc); | 352 | "failed (%d)\n", rc); |
353 | return rc; | 353 | return rc; |
354 | } | 354 | } |
355 | 355 | ||
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c index 1a4fc0d11a03..666d08db319e 100644 --- a/arch/powerpc/kernel/of_platform.c +++ b/arch/powerpc/kernel/of_platform.c | |||
@@ -214,7 +214,7 @@ EXPORT_SYMBOL(of_find_device_by_node); | |||
214 | static int of_dev_phandle_match(struct device *dev, void *data) | 214 | static int of_dev_phandle_match(struct device *dev, void *data) |
215 | { | 215 | { |
216 | phandle *ph = data; | 216 | phandle *ph = data; |
217 | return to_of_device(dev)->node->linux_phandle == *ph; | 217 | return to_of_device(dev)->node->phandle == *ph; |
218 | } | 218 | } |
219 | 219 | ||
220 | struct of_device *of_find_device_by_phandle(phandle ph) | 220 | struct of_device *of_find_device_by_phandle(phandle ph) |
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index cadbed679fbb..2597f9545d8a 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -1047,10 +1047,8 @@ static void __devinit pcibios_fixup_bridge(struct pci_bus *bus) | |||
1047 | 1047 | ||
1048 | struct pci_dev *dev = bus->self; | 1048 | struct pci_dev *dev = bus->self; |
1049 | 1049 | ||
1050 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) { | 1050 | pci_bus_for_each_resource(bus, res, i) { |
1051 | if ((res = bus->resource[i]) == NULL) | 1051 | if (!res || !res->flags) |
1052 | continue; | ||
1053 | if (!res->flags) | ||
1054 | continue; | 1052 | continue; |
1055 | if (i >= 3 && bus->self->transparent) | 1053 | if (i >= 3 && bus->self->transparent) |
1056 | continue; | 1054 | continue; |
@@ -1181,21 +1179,20 @@ static int skip_isa_ioresource_align(struct pci_dev *dev) | |||
1181 | * but we want to try to avoid allocating at 0x2900-0x2bff | 1179 | * but we want to try to avoid allocating at 0x2900-0x2bff |
1182 | * which might have be mirrored at 0x0100-0x03ff.. | 1180 | * which might have be mirrored at 0x0100-0x03ff.. |
1183 | */ | 1181 | */ |
1184 | void pcibios_align_resource(void *data, struct resource *res, | 1182 | resource_size_t pcibios_align_resource(void *data, const struct resource *res, |
1185 | resource_size_t size, resource_size_t align) | 1183 | resource_size_t size, resource_size_t align) |
1186 | { | 1184 | { |
1187 | struct pci_dev *dev = data; | 1185 | struct pci_dev *dev = data; |
1186 | resource_size_t start = res->start; | ||
1188 | 1187 | ||
1189 | if (res->flags & IORESOURCE_IO) { | 1188 | if (res->flags & IORESOURCE_IO) { |
1190 | resource_size_t start = res->start; | ||
1191 | |||
1192 | if (skip_isa_ioresource_align(dev)) | 1189 | if (skip_isa_ioresource_align(dev)) |
1193 | return; | 1190 | return start; |
1194 | if (start & 0x300) { | 1191 | if (start & 0x300) |
1195 | start = (start + 0x3ff) & ~0x3ff; | 1192 | start = (start + 0x3ff) & ~0x3ff; |
1196 | res->start = start; | ||
1197 | } | ||
1198 | } | 1193 | } |
1194 | |||
1195 | return start; | ||
1199 | } | 1196 | } |
1200 | EXPORT_SYMBOL(pcibios_align_resource); | 1197 | EXPORT_SYMBOL(pcibios_align_resource); |
1201 | 1198 | ||
@@ -1278,9 +1275,8 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus) | |||
1278 | pr_debug("PCI: Allocating bus resources for %04x:%02x...\n", | 1275 | pr_debug("PCI: Allocating bus resources for %04x:%02x...\n", |
1279 | pci_domain_nr(bus), bus->number); | 1276 | pci_domain_nr(bus), bus->number); |
1280 | 1277 | ||
1281 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) { | 1278 | pci_bus_for_each_resource(bus, res, i) { |
1282 | if ((res = bus->resource[i]) == NULL || !res->flags | 1279 | if (!res || !res->flags || res->start > res->end || res->parent) |
1283 | || res->start > res->end || res->parent) | ||
1284 | continue; | 1280 | continue; |
1285 | if (bus->parent == NULL) | 1281 | if (bus->parent == NULL) |
1286 | pr = (res->flags & IORESOURCE_IO) ? | 1282 | pr = (res->flags & IORESOURCE_IO) ? |
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index ccf56ac92de5..d43fc65749c1 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c | |||
@@ -224,7 +224,7 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus, | |||
224 | * G5 machines... So when something asks for bus 0 io base | 224 | * G5 machines... So when something asks for bus 0 io base |
225 | * (bus 0 is HT root), we return the AGP one instead. | 225 | * (bus 0 is HT root), we return the AGP one instead. |
226 | */ | 226 | */ |
227 | if (in_bus == 0 && machine_is_compatible("MacRISC4")) { | 227 | if (in_bus == 0 && of_machine_is_compatible("MacRISC4")) { |
228 | struct device_node *agp; | 228 | struct device_node *agp; |
229 | 229 | ||
230 | agp = of_find_compatible_node(NULL, NULL, "u3-agp"); | 230 | agp = of_find_compatible_node(NULL, NULL, "u3-agp"); |
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index 4aa17401657b..cd11d5ca80df 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c | |||
@@ -304,7 +304,7 @@ static void __devinit __of_scan_bus(struct device_node *node, | |||
304 | int reglen, devfn; | 304 | int reglen, devfn; |
305 | struct pci_dev *dev; | 305 | struct pci_dev *dev; |
306 | 306 | ||
307 | pr_debug("of_scan_bus(%s) bus no %d... \n", | 307 | pr_debug("of_scan_bus(%s) bus no %d...\n", |
308 | node->full_name, bus->number); | 308 | node->full_name, bus->number); |
309 | 309 | ||
310 | /* Scan direct children */ | 310 | /* Scan direct children */ |
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c index a3c11cac3d71..95ad9dad298e 100644 --- a/arch/powerpc/kernel/perf_callchain.c +++ b/arch/powerpc/kernel/perf_callchain.c | |||
@@ -495,9 +495,6 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | |||
495 | 495 | ||
496 | entry->nr = 0; | 496 | entry->nr = 0; |
497 | 497 | ||
498 | if (current->pid == 0) /* idle task? */ | ||
499 | return entry; | ||
500 | |||
501 | if (!user_mode(regs)) { | 498 | if (!user_mode(regs)) { |
502 | perf_callchain_kernel(regs, entry); | 499 | perf_callchain_kernel(regs, entry); |
503 | if (current->mm) | 500 | if (current->mm) |
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index 1eb85fbf53a5..b6cf8f1f4d35 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c | |||
@@ -718,10 +718,10 @@ static int collect_events(struct perf_event *group, int max_count, | |||
718 | return n; | 718 | return n; |
719 | } | 719 | } |
720 | 720 | ||
721 | static void event_sched_in(struct perf_event *event, int cpu) | 721 | static void event_sched_in(struct perf_event *event) |
722 | { | 722 | { |
723 | event->state = PERF_EVENT_STATE_ACTIVE; | 723 | event->state = PERF_EVENT_STATE_ACTIVE; |
724 | event->oncpu = cpu; | 724 | event->oncpu = smp_processor_id(); |
725 | event->tstamp_running += event->ctx->time - event->tstamp_stopped; | 725 | event->tstamp_running += event->ctx->time - event->tstamp_stopped; |
726 | if (is_software_event(event)) | 726 | if (is_software_event(event)) |
727 | event->pmu->enable(event); | 727 | event->pmu->enable(event); |
@@ -735,7 +735,7 @@ static void event_sched_in(struct perf_event *event, int cpu) | |||
735 | */ | 735 | */ |
736 | int hw_perf_group_sched_in(struct perf_event *group_leader, | 736 | int hw_perf_group_sched_in(struct perf_event *group_leader, |
737 | struct perf_cpu_context *cpuctx, | 737 | struct perf_cpu_context *cpuctx, |
738 | struct perf_event_context *ctx, int cpu) | 738 | struct perf_event_context *ctx) |
739 | { | 739 | { |
740 | struct cpu_hw_events *cpuhw; | 740 | struct cpu_hw_events *cpuhw; |
741 | long i, n, n0; | 741 | long i, n, n0; |
@@ -766,10 +766,10 @@ int hw_perf_group_sched_in(struct perf_event *group_leader, | |||
766 | cpuhw->event[i]->hw.config = cpuhw->events[i]; | 766 | cpuhw->event[i]->hw.config = cpuhw->events[i]; |
767 | cpuctx->active_oncpu += n; | 767 | cpuctx->active_oncpu += n; |
768 | n = 1; | 768 | n = 1; |
769 | event_sched_in(group_leader, cpu); | 769 | event_sched_in(group_leader); |
770 | list_for_each_entry(sub, &group_leader->sibling_list, group_entry) { | 770 | list_for_each_entry(sub, &group_leader->sibling_list, group_entry) { |
771 | if (sub->state != PERF_EVENT_STATE_OFF) { | 771 | if (sub->state != PERF_EVENT_STATE_OFF) { |
772 | event_sched_in(sub, cpu); | 772 | event_sched_in(sub); |
773 | ++n; | 773 | ++n; |
774 | } | 774 | } |
775 | } | 775 | } |
diff --git a/arch/powerpc/kernel/pmc.c b/arch/powerpc/kernel/pmc.c index 0516e2d3e02e..461499b43cff 100644 --- a/arch/powerpc/kernel/pmc.c +++ b/arch/powerpc/kernel/pmc.c | |||
@@ -37,7 +37,7 @@ static void dummy_perf(struct pt_regs *regs) | |||
37 | } | 37 | } |
38 | 38 | ||
39 | 39 | ||
40 | static DEFINE_SPINLOCK(pmc_owner_lock); | 40 | static DEFINE_RAW_SPINLOCK(pmc_owner_lock); |
41 | static void *pmc_owner_caller; /* mostly for debugging */ | 41 | static void *pmc_owner_caller; /* mostly for debugging */ |
42 | perf_irq_t perf_irq = dummy_perf; | 42 | perf_irq_t perf_irq = dummy_perf; |
43 | 43 | ||
@@ -45,7 +45,7 @@ int reserve_pmc_hardware(perf_irq_t new_perf_irq) | |||
45 | { | 45 | { |
46 | int err = 0; | 46 | int err = 0; |
47 | 47 | ||
48 | spin_lock(&pmc_owner_lock); | 48 | raw_spin_lock(&pmc_owner_lock); |
49 | 49 | ||
50 | if (pmc_owner_caller) { | 50 | if (pmc_owner_caller) { |
51 | printk(KERN_WARNING "reserve_pmc_hardware: " | 51 | printk(KERN_WARNING "reserve_pmc_hardware: " |
@@ -59,21 +59,21 @@ int reserve_pmc_hardware(perf_irq_t new_perf_irq) | |||
59 | perf_irq = new_perf_irq ? new_perf_irq : dummy_perf; | 59 | perf_irq = new_perf_irq ? new_perf_irq : dummy_perf; |
60 | 60 | ||
61 | out: | 61 | out: |
62 | spin_unlock(&pmc_owner_lock); | 62 | raw_spin_unlock(&pmc_owner_lock); |
63 | return err; | 63 | return err; |
64 | } | 64 | } |
65 | EXPORT_SYMBOL_GPL(reserve_pmc_hardware); | 65 | EXPORT_SYMBOL_GPL(reserve_pmc_hardware); |
66 | 66 | ||
67 | void release_pmc_hardware(void) | 67 | void release_pmc_hardware(void) |
68 | { | 68 | { |
69 | spin_lock(&pmc_owner_lock); | 69 | raw_spin_lock(&pmc_owner_lock); |
70 | 70 | ||
71 | WARN_ON(! pmc_owner_caller); | 71 | WARN_ON(! pmc_owner_caller); |
72 | 72 | ||
73 | pmc_owner_caller = NULL; | 73 | pmc_owner_caller = NULL; |
74 | perf_irq = dummy_perf; | 74 | perf_irq = dummy_perf; |
75 | 75 | ||
76 | spin_unlock(&pmc_owner_lock); | 76 | raw_spin_unlock(&pmc_owner_lock); |
77 | } | 77 | } |
78 | EXPORT_SYMBOL_GPL(release_pmc_hardware); | 78 | EXPORT_SYMBOL_GPL(release_pmc_hardware); |
79 | 79 | ||
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 7b816daf3eba..e4d71ced97ef 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -245,6 +245,24 @@ void discard_lazy_cpu_state(void) | |||
245 | } | 245 | } |
246 | #endif /* CONFIG_SMP */ | 246 | #endif /* CONFIG_SMP */ |
247 | 247 | ||
248 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
249 | void do_send_trap(struct pt_regs *regs, unsigned long address, | ||
250 | unsigned long error_code, int signal_code, int breakpt) | ||
251 | { | ||
252 | siginfo_t info; | ||
253 | |||
254 | if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, | ||
255 | 11, SIGSEGV) == NOTIFY_STOP) | ||
256 | return; | ||
257 | |||
258 | /* Deliver the signal to userspace */ | ||
259 | info.si_signo = SIGTRAP; | ||
260 | info.si_errno = breakpt; /* breakpoint or watchpoint id */ | ||
261 | info.si_code = signal_code; | ||
262 | info.si_addr = (void __user *)address; | ||
263 | force_sig_info(SIGTRAP, &info, current); | ||
264 | } | ||
265 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | ||
248 | void do_dabr(struct pt_regs *regs, unsigned long address, | 266 | void do_dabr(struct pt_regs *regs, unsigned long address, |
249 | unsigned long error_code) | 267 | unsigned long error_code) |
250 | { | 268 | { |
@@ -257,12 +275,6 @@ void do_dabr(struct pt_regs *regs, unsigned long address, | |||
257 | if (debugger_dabr_match(regs)) | 275 | if (debugger_dabr_match(regs)) |
258 | return; | 276 | return; |
259 | 277 | ||
260 | /* Clear the DAC and struct entries. One shot trigger */ | ||
261 | #if defined(CONFIG_BOOKE) | ||
262 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | DBSR_DAC1W | ||
263 | | DBCR0_IDM)); | ||
264 | #endif | ||
265 | |||
266 | /* Clear the DABR */ | 278 | /* Clear the DABR */ |
267 | set_dabr(0); | 279 | set_dabr(0); |
268 | 280 | ||
@@ -273,9 +285,82 @@ void do_dabr(struct pt_regs *regs, unsigned long address, | |||
273 | info.si_addr = (void __user *)address; | 285 | info.si_addr = (void __user *)address; |
274 | force_sig_info(SIGTRAP, &info, current); | 286 | force_sig_info(SIGTRAP, &info, current); |
275 | } | 287 | } |
288 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | ||
276 | 289 | ||
277 | static DEFINE_PER_CPU(unsigned long, current_dabr); | 290 | static DEFINE_PER_CPU(unsigned long, current_dabr); |
278 | 291 | ||
292 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
293 | /* | ||
294 | * Set the debug registers back to their default "safe" values. | ||
295 | */ | ||
296 | static void set_debug_reg_defaults(struct thread_struct *thread) | ||
297 | { | ||
298 | thread->iac1 = thread->iac2 = 0; | ||
299 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | ||
300 | thread->iac3 = thread->iac4 = 0; | ||
301 | #endif | ||
302 | thread->dac1 = thread->dac2 = 0; | ||
303 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | ||
304 | thread->dvc1 = thread->dvc2 = 0; | ||
305 | #endif | ||
306 | thread->dbcr0 = 0; | ||
307 | #ifdef CONFIG_BOOKE | ||
308 | /* | ||
309 | * Force User/Supervisor bits to b11 (user-only MSR[PR]=1) | ||
310 | */ | ||
311 | thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | \ | ||
312 | DBCR1_IAC3US | DBCR1_IAC4US; | ||
313 | /* | ||
314 | * Force Data Address Compare User/Supervisor bits to be User-only | ||
315 | * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0. | ||
316 | */ | ||
317 | thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US; | ||
318 | #else | ||
319 | thread->dbcr1 = 0; | ||
320 | #endif | ||
321 | } | ||
322 | |||
323 | static void prime_debug_regs(struct thread_struct *thread) | ||
324 | { | ||
325 | mtspr(SPRN_IAC1, thread->iac1); | ||
326 | mtspr(SPRN_IAC2, thread->iac2); | ||
327 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | ||
328 | mtspr(SPRN_IAC3, thread->iac3); | ||
329 | mtspr(SPRN_IAC4, thread->iac4); | ||
330 | #endif | ||
331 | mtspr(SPRN_DAC1, thread->dac1); | ||
332 | mtspr(SPRN_DAC2, thread->dac2); | ||
333 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | ||
334 | mtspr(SPRN_DVC1, thread->dvc1); | ||
335 | mtspr(SPRN_DVC2, thread->dvc2); | ||
336 | #endif | ||
337 | mtspr(SPRN_DBCR0, thread->dbcr0); | ||
338 | mtspr(SPRN_DBCR1, thread->dbcr1); | ||
339 | #ifdef CONFIG_BOOKE | ||
340 | mtspr(SPRN_DBCR2, thread->dbcr2); | ||
341 | #endif | ||
342 | } | ||
343 | /* | ||
344 | * Unless neither the old or new thread are making use of the | ||
345 | * debug registers, set the debug registers from the values | ||
346 | * stored in the new thread. | ||
347 | */ | ||
348 | static void switch_booke_debug_regs(struct thread_struct *new_thread) | ||
349 | { | ||
350 | if ((current->thread.dbcr0 & DBCR0_IDM) | ||
351 | || (new_thread->dbcr0 & DBCR0_IDM)) | ||
352 | prime_debug_regs(new_thread); | ||
353 | } | ||
354 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | ||
355 | static void set_debug_reg_defaults(struct thread_struct *thread) | ||
356 | { | ||
357 | if (thread->dabr) { | ||
358 | thread->dabr = 0; | ||
359 | set_dabr(0); | ||
360 | } | ||
361 | } | ||
362 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | ||
363 | |||
279 | int set_dabr(unsigned long dabr) | 364 | int set_dabr(unsigned long dabr) |
280 | { | 365 | { |
281 | __get_cpu_var(current_dabr) = dabr; | 366 | __get_cpu_var(current_dabr) = dabr; |
@@ -284,7 +369,7 @@ int set_dabr(unsigned long dabr) | |||
284 | return ppc_md.set_dabr(dabr); | 369 | return ppc_md.set_dabr(dabr); |
285 | 370 | ||
286 | /* XXX should we have a CPU_FTR_HAS_DABR ? */ | 371 | /* XXX should we have a CPU_FTR_HAS_DABR ? */ |
287 | #if defined(CONFIG_BOOKE) | 372 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
288 | mtspr(SPRN_DAC1, dabr); | 373 | mtspr(SPRN_DAC1, dabr); |
289 | #elif defined(CONFIG_PPC_BOOK3S) | 374 | #elif defined(CONFIG_PPC_BOOK3S) |
290 | mtspr(SPRN_DABR, dabr); | 375 | mtspr(SPRN_DABR, dabr); |
@@ -371,10 +456,8 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
371 | 456 | ||
372 | #endif /* CONFIG_SMP */ | 457 | #endif /* CONFIG_SMP */ |
373 | 458 | ||
374 | #if defined(CONFIG_BOOKE) | 459 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
375 | /* If new thread DAC (HW breakpoint) is the same then leave it */ | 460 | switch_booke_debug_regs(&new->thread); |
376 | if (new->thread.dabr) | ||
377 | set_dabr(new->thread.dabr); | ||
378 | #else | 461 | #else |
379 | if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) | 462 | if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) |
380 | set_dabr(new->thread.dabr); | 463 | set_dabr(new->thread.dabr); |
@@ -514,7 +597,7 @@ void show_regs(struct pt_regs * regs) | |||
514 | printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); | 597 | printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); |
515 | trap = TRAP(regs); | 598 | trap = TRAP(regs); |
516 | if (trap == 0x300 || trap == 0x600) | 599 | if (trap == 0x300 || trap == 0x600) |
517 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 600 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
518 | printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr); | 601 | printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr); |
519 | #else | 602 | #else |
520 | printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr); | 603 | printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr); |
@@ -556,14 +639,7 @@ void flush_thread(void) | |||
556 | { | 639 | { |
557 | discard_lazy_cpu_state(); | 640 | discard_lazy_cpu_state(); |
558 | 641 | ||
559 | if (current->thread.dabr) { | 642 | set_debug_reg_defaults(¤t->thread); |
560 | current->thread.dabr = 0; | ||
561 | set_dabr(0); | ||
562 | |||
563 | #if defined(CONFIG_BOOKE) | ||
564 | current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W); | ||
565 | #endif | ||
566 | } | ||
567 | } | 643 | } |
568 | 644 | ||
569 | void | 645 | void |
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 4ec300862466..43238b2054b6 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c | |||
@@ -61,365 +61,12 @@ | |||
61 | #define DBG(fmt...) | 61 | #define DBG(fmt...) |
62 | #endif | 62 | #endif |
63 | 63 | ||
64 | |||
65 | static int __initdata dt_root_addr_cells; | ||
66 | static int __initdata dt_root_size_cells; | ||
67 | |||
68 | #ifdef CONFIG_PPC64 | 64 | #ifdef CONFIG_PPC64 |
69 | int __initdata iommu_is_off; | 65 | int __initdata iommu_is_off; |
70 | int __initdata iommu_force_on; | 66 | int __initdata iommu_force_on; |
71 | unsigned long tce_alloc_start, tce_alloc_end; | 67 | unsigned long tce_alloc_start, tce_alloc_end; |
72 | #endif | 68 | #endif |
73 | 69 | ||
74 | typedef u32 cell_t; | ||
75 | |||
76 | #if 0 | ||
77 | static struct boot_param_header *initial_boot_params __initdata; | ||
78 | #else | ||
79 | struct boot_param_header *initial_boot_params; | ||
80 | #endif | ||
81 | |||
82 | extern struct device_node *allnodes; /* temporary while merging */ | ||
83 | |||
84 | extern rwlock_t devtree_lock; /* temporary while merging */ | ||
85 | |||
86 | /* export that to outside world */ | ||
87 | struct device_node *of_chosen; | ||
88 | |||
89 | static inline char *find_flat_dt_string(u32 offset) | ||
90 | { | ||
91 | return ((char *)initial_boot_params) + | ||
92 | initial_boot_params->off_dt_strings + offset; | ||
93 | } | ||
94 | |||
95 | /** | ||
96 | * This function is used to scan the flattened device-tree, it is | ||
97 | * used to extract the memory informations at boot before we can | ||
98 | * unflatten the tree | ||
99 | */ | ||
100 | int __init of_scan_flat_dt(int (*it)(unsigned long node, | ||
101 | const char *uname, int depth, | ||
102 | void *data), | ||
103 | void *data) | ||
104 | { | ||
105 | unsigned long p = ((unsigned long)initial_boot_params) + | ||
106 | initial_boot_params->off_dt_struct; | ||
107 | int rc = 0; | ||
108 | int depth = -1; | ||
109 | |||
110 | do { | ||
111 | u32 tag = *((u32 *)p); | ||
112 | char *pathp; | ||
113 | |||
114 | p += 4; | ||
115 | if (tag == OF_DT_END_NODE) { | ||
116 | depth --; | ||
117 | continue; | ||
118 | } | ||
119 | if (tag == OF_DT_NOP) | ||
120 | continue; | ||
121 | if (tag == OF_DT_END) | ||
122 | break; | ||
123 | if (tag == OF_DT_PROP) { | ||
124 | u32 sz = *((u32 *)p); | ||
125 | p += 8; | ||
126 | if (initial_boot_params->version < 0x10) | ||
127 | p = _ALIGN(p, sz >= 8 ? 8 : 4); | ||
128 | p += sz; | ||
129 | p = _ALIGN(p, 4); | ||
130 | continue; | ||
131 | } | ||
132 | if (tag != OF_DT_BEGIN_NODE) { | ||
133 | printk(KERN_WARNING "Invalid tag %x scanning flattened" | ||
134 | " device tree !\n", tag); | ||
135 | return -EINVAL; | ||
136 | } | ||
137 | depth++; | ||
138 | pathp = (char *)p; | ||
139 | p = _ALIGN(p + strlen(pathp) + 1, 4); | ||
140 | if ((*pathp) == '/') { | ||
141 | char *lp, *np; | ||
142 | for (lp = NULL, np = pathp; *np; np++) | ||
143 | if ((*np) == '/') | ||
144 | lp = np+1; | ||
145 | if (lp != NULL) | ||
146 | pathp = lp; | ||
147 | } | ||
148 | rc = it(p, pathp, depth, data); | ||
149 | if (rc != 0) | ||
150 | break; | ||
151 | } while(1); | ||
152 | |||
153 | return rc; | ||
154 | } | ||
155 | |||
156 | unsigned long __init of_get_flat_dt_root(void) | ||
157 | { | ||
158 | unsigned long p = ((unsigned long)initial_boot_params) + | ||
159 | initial_boot_params->off_dt_struct; | ||
160 | |||
161 | while(*((u32 *)p) == OF_DT_NOP) | ||
162 | p += 4; | ||
163 | BUG_ON (*((u32 *)p) != OF_DT_BEGIN_NODE); | ||
164 | p += 4; | ||
165 | return _ALIGN(p + strlen((char *)p) + 1, 4); | ||
166 | } | ||
167 | |||
168 | /** | ||
169 | * This function can be used within scan_flattened_dt callback to get | ||
170 | * access to properties | ||
171 | */ | ||
172 | void* __init of_get_flat_dt_prop(unsigned long node, const char *name, | ||
173 | unsigned long *size) | ||
174 | { | ||
175 | unsigned long p = node; | ||
176 | |||
177 | do { | ||
178 | u32 tag = *((u32 *)p); | ||
179 | u32 sz, noff; | ||
180 | const char *nstr; | ||
181 | |||
182 | p += 4; | ||
183 | if (tag == OF_DT_NOP) | ||
184 | continue; | ||
185 | if (tag != OF_DT_PROP) | ||
186 | return NULL; | ||
187 | |||
188 | sz = *((u32 *)p); | ||
189 | noff = *((u32 *)(p + 4)); | ||
190 | p += 8; | ||
191 | if (initial_boot_params->version < 0x10) | ||
192 | p = _ALIGN(p, sz >= 8 ? 8 : 4); | ||
193 | |||
194 | nstr = find_flat_dt_string(noff); | ||
195 | if (nstr == NULL) { | ||
196 | printk(KERN_WARNING "Can't find property index" | ||
197 | " name !\n"); | ||
198 | return NULL; | ||
199 | } | ||
200 | if (strcmp(name, nstr) == 0) { | ||
201 | if (size) | ||
202 | *size = sz; | ||
203 | return (void *)p; | ||
204 | } | ||
205 | p += sz; | ||
206 | p = _ALIGN(p, 4); | ||
207 | } while(1); | ||
208 | } | ||
209 | |||
210 | int __init of_flat_dt_is_compatible(unsigned long node, const char *compat) | ||
211 | { | ||
212 | const char* cp; | ||
213 | unsigned long cplen, l; | ||
214 | |||
215 | cp = of_get_flat_dt_prop(node, "compatible", &cplen); | ||
216 | if (cp == NULL) | ||
217 | return 0; | ||
218 | while (cplen > 0) { | ||
219 | if (strncasecmp(cp, compat, strlen(compat)) == 0) | ||
220 | return 1; | ||
221 | l = strlen(cp) + 1; | ||
222 | cp += l; | ||
223 | cplen -= l; | ||
224 | } | ||
225 | |||
226 | return 0; | ||
227 | } | ||
228 | |||
229 | static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size, | ||
230 | unsigned long align) | ||
231 | { | ||
232 | void *res; | ||
233 | |||
234 | *mem = _ALIGN(*mem, align); | ||
235 | res = (void *)*mem; | ||
236 | *mem += size; | ||
237 | |||
238 | return res; | ||
239 | } | ||
240 | |||
241 | static unsigned long __init unflatten_dt_node(unsigned long mem, | ||
242 | unsigned long *p, | ||
243 | struct device_node *dad, | ||
244 | struct device_node ***allnextpp, | ||
245 | unsigned long fpsize) | ||
246 | { | ||
247 | struct device_node *np; | ||
248 | struct property *pp, **prev_pp = NULL; | ||
249 | char *pathp; | ||
250 | u32 tag; | ||
251 | unsigned int l, allocl; | ||
252 | int has_name = 0; | ||
253 | int new_format = 0; | ||
254 | |||
255 | tag = *((u32 *)(*p)); | ||
256 | if (tag != OF_DT_BEGIN_NODE) { | ||
257 | printk("Weird tag at start of node: %x\n", tag); | ||
258 | return mem; | ||
259 | } | ||
260 | *p += 4; | ||
261 | pathp = (char *)*p; | ||
262 | l = allocl = strlen(pathp) + 1; | ||
263 | *p = _ALIGN(*p + l, 4); | ||
264 | |||
265 | /* version 0x10 has a more compact unit name here instead of the full | ||
266 | * path. we accumulate the full path size using "fpsize", we'll rebuild | ||
267 | * it later. We detect this because the first character of the name is | ||
268 | * not '/'. | ||
269 | */ | ||
270 | if ((*pathp) != '/') { | ||
271 | new_format = 1; | ||
272 | if (fpsize == 0) { | ||
273 | /* root node: special case. fpsize accounts for path | ||
274 | * plus terminating zero. root node only has '/', so | ||
275 | * fpsize should be 2, but we want to avoid the first | ||
276 | * level nodes to have two '/' so we use fpsize 1 here | ||
277 | */ | ||
278 | fpsize = 1; | ||
279 | allocl = 2; | ||
280 | } else { | ||
281 | /* account for '/' and path size minus terminal 0 | ||
282 | * already in 'l' | ||
283 | */ | ||
284 | fpsize += l; | ||
285 | allocl = fpsize; | ||
286 | } | ||
287 | } | ||
288 | |||
289 | |||
290 | np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl, | ||
291 | __alignof__(struct device_node)); | ||
292 | if (allnextpp) { | ||
293 | memset(np, 0, sizeof(*np)); | ||
294 | np->full_name = ((char*)np) + sizeof(struct device_node); | ||
295 | if (new_format) { | ||
296 | char *p = np->full_name; | ||
297 | /* rebuild full path for new format */ | ||
298 | if (dad && dad->parent) { | ||
299 | strcpy(p, dad->full_name); | ||
300 | #ifdef DEBUG | ||
301 | if ((strlen(p) + l + 1) != allocl) { | ||
302 | DBG("%s: p: %d, l: %d, a: %d\n", | ||
303 | pathp, (int)strlen(p), l, allocl); | ||
304 | } | ||
305 | #endif | ||
306 | p += strlen(p); | ||
307 | } | ||
308 | *(p++) = '/'; | ||
309 | memcpy(p, pathp, l); | ||
310 | } else | ||
311 | memcpy(np->full_name, pathp, l); | ||
312 | prev_pp = &np->properties; | ||
313 | **allnextpp = np; | ||
314 | *allnextpp = &np->allnext; | ||
315 | if (dad != NULL) { | ||
316 | np->parent = dad; | ||
317 | /* we temporarily use the next field as `last_child'*/ | ||
318 | if (dad->next == 0) | ||
319 | dad->child = np; | ||
320 | else | ||
321 | dad->next->sibling = np; | ||
322 | dad->next = np; | ||
323 | } | ||
324 | kref_init(&np->kref); | ||
325 | } | ||
326 | while(1) { | ||
327 | u32 sz, noff; | ||
328 | char *pname; | ||
329 | |||
330 | tag = *((u32 *)(*p)); | ||
331 | if (tag == OF_DT_NOP) { | ||
332 | *p += 4; | ||
333 | continue; | ||
334 | } | ||
335 | if (tag != OF_DT_PROP) | ||
336 | break; | ||
337 | *p += 4; | ||
338 | sz = *((u32 *)(*p)); | ||
339 | noff = *((u32 *)((*p) + 4)); | ||
340 | *p += 8; | ||
341 | if (initial_boot_params->version < 0x10) | ||
342 | *p = _ALIGN(*p, sz >= 8 ? 8 : 4); | ||
343 | |||
344 | pname = find_flat_dt_string(noff); | ||
345 | if (pname == NULL) { | ||
346 | printk("Can't find property name in list !\n"); | ||
347 | break; | ||
348 | } | ||
349 | if (strcmp(pname, "name") == 0) | ||
350 | has_name = 1; | ||
351 | l = strlen(pname) + 1; | ||
352 | pp = unflatten_dt_alloc(&mem, sizeof(struct property), | ||
353 | __alignof__(struct property)); | ||
354 | if (allnextpp) { | ||
355 | if (strcmp(pname, "linux,phandle") == 0) { | ||
356 | np->node = *((u32 *)*p); | ||
357 | if (np->linux_phandle == 0) | ||
358 | np->linux_phandle = np->node; | ||
359 | } | ||
360 | if (strcmp(pname, "ibm,phandle") == 0) | ||
361 | np->linux_phandle = *((u32 *)*p); | ||
362 | pp->name = pname; | ||
363 | pp->length = sz; | ||
364 | pp->value = (void *)*p; | ||
365 | *prev_pp = pp; | ||
366 | prev_pp = &pp->next; | ||
367 | } | ||
368 | *p = _ALIGN((*p) + sz, 4); | ||
369 | } | ||
370 | /* with version 0x10 we may not have the name property, recreate | ||
371 | * it here from the unit name if absent | ||
372 | */ | ||
373 | if (!has_name) { | ||
374 | char *p = pathp, *ps = pathp, *pa = NULL; | ||
375 | int sz; | ||
376 | |||
377 | while (*p) { | ||
378 | if ((*p) == '@') | ||
379 | pa = p; | ||
380 | if ((*p) == '/') | ||
381 | ps = p + 1; | ||
382 | p++; | ||
383 | } | ||
384 | if (pa < ps) | ||
385 | pa = p; | ||
386 | sz = (pa - ps) + 1; | ||
387 | pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz, | ||
388 | __alignof__(struct property)); | ||
389 | if (allnextpp) { | ||
390 | pp->name = "name"; | ||
391 | pp->length = sz; | ||
392 | pp->value = pp + 1; | ||
393 | *prev_pp = pp; | ||
394 | prev_pp = &pp->next; | ||
395 | memcpy(pp->value, ps, sz - 1); | ||
396 | ((char *)pp->value)[sz - 1] = 0; | ||
397 | DBG("fixed up name for %s -> %s\n", pathp, | ||
398 | (char *)pp->value); | ||
399 | } | ||
400 | } | ||
401 | if (allnextpp) { | ||
402 | *prev_pp = NULL; | ||
403 | np->name = of_get_property(np, "name", NULL); | ||
404 | np->type = of_get_property(np, "device_type", NULL); | ||
405 | |||
406 | if (!np->name) | ||
407 | np->name = "<NULL>"; | ||
408 | if (!np->type) | ||
409 | np->type = "<NULL>"; | ||
410 | } | ||
411 | while (tag == OF_DT_BEGIN_NODE) { | ||
412 | mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize); | ||
413 | tag = *((u32 *)(*p)); | ||
414 | } | ||
415 | if (tag != OF_DT_END_NODE) { | ||
416 | printk("Weird tag at end of node: %x\n", tag); | ||
417 | return mem; | ||
418 | } | ||
419 | *p += 4; | ||
420 | return mem; | ||
421 | } | ||
422 | |||
423 | static int __init early_parse_mem(char *p) | 70 | static int __init early_parse_mem(char *p) |
424 | { | 71 | { |
425 | if (!p) | 72 | if (!p) |
@@ -446,7 +93,7 @@ static void __init move_device_tree(void) | |||
446 | DBG("-> move_device_tree\n"); | 93 | DBG("-> move_device_tree\n"); |
447 | 94 | ||
448 | start = __pa(initial_boot_params); | 95 | start = __pa(initial_boot_params); |
449 | size = initial_boot_params->totalsize; | 96 | size = be32_to_cpu(initial_boot_params->totalsize); |
450 | 97 | ||
451 | if ((memory_limit && (start + size) > memory_limit) || | 98 | if ((memory_limit && (start + size) > memory_limit) || |
452 | overlaps_crashkernel(start, size)) { | 99 | overlaps_crashkernel(start, size)) { |
@@ -459,54 +106,6 @@ static void __init move_device_tree(void) | |||
459 | DBG("<- move_device_tree\n"); | 106 | DBG("<- move_device_tree\n"); |
460 | } | 107 | } |
461 | 108 | ||
462 | /** | ||
463 | * unflattens the device-tree passed by the firmware, creating the | ||
464 | * tree of struct device_node. It also fills the "name" and "type" | ||
465 | * pointers of the nodes so the normal device-tree walking functions | ||
466 | * can be used (this used to be done by finish_device_tree) | ||
467 | */ | ||
468 | void __init unflatten_device_tree(void) | ||
469 | { | ||
470 | unsigned long start, mem, size; | ||
471 | struct device_node **allnextp = &allnodes; | ||
472 | |||
473 | DBG(" -> unflatten_device_tree()\n"); | ||
474 | |||
475 | /* First pass, scan for size */ | ||
476 | start = ((unsigned long)initial_boot_params) + | ||
477 | initial_boot_params->off_dt_struct; | ||
478 | size = unflatten_dt_node(0, &start, NULL, NULL, 0); | ||
479 | size = (size | 3) + 1; | ||
480 | |||
481 | DBG(" size is %lx, allocating...\n", size); | ||
482 | |||
483 | /* Allocate memory for the expanded device tree */ | ||
484 | mem = lmb_alloc(size + 4, __alignof__(struct device_node)); | ||
485 | mem = (unsigned long) __va(mem); | ||
486 | |||
487 | ((u32 *)mem)[size / 4] = 0xdeadbeef; | ||
488 | |||
489 | DBG(" unflattening %lx...\n", mem); | ||
490 | |||
491 | /* Second pass, do actual unflattening */ | ||
492 | start = ((unsigned long)initial_boot_params) + | ||
493 | initial_boot_params->off_dt_struct; | ||
494 | unflatten_dt_node(mem, &start, NULL, &allnextp, 0); | ||
495 | if (*((u32 *)start) != OF_DT_END) | ||
496 | printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start)); | ||
497 | if (((u32 *)mem)[size / 4] != 0xdeadbeef) | ||
498 | printk(KERN_WARNING "End of tree marker overwritten: %08x\n", | ||
499 | ((u32 *)mem)[size / 4] ); | ||
500 | *allnextp = NULL; | ||
501 | |||
502 | /* Get pointer to OF "/chosen" node for use everywhere */ | ||
503 | of_chosen = of_find_node_by_path("/chosen"); | ||
504 | if (of_chosen == NULL) | ||
505 | of_chosen = of_find_node_by_path("/chosen@0"); | ||
506 | |||
507 | DBG(" <- unflatten_device_tree()\n"); | ||
508 | } | ||
509 | |||
510 | /* | 109 | /* |
511 | * ibm,pa-features is a per-cpu property that contains a string of | 110 | * ibm,pa-features is a per-cpu property that contains a string of |
512 | * attribute descriptors, each of which has a 2 byte header plus up | 111 | * attribute descriptors, each of which has a 2 byte header plus up |
@@ -763,48 +362,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node, | |||
763 | return 0; | 362 | return 0; |
764 | } | 363 | } |
765 | 364 | ||
766 | #ifdef CONFIG_BLK_DEV_INITRD | 365 | void __init early_init_dt_scan_chosen_arch(unsigned long node) |
767 | static void __init early_init_dt_check_for_initrd(unsigned long node) | ||
768 | { | ||
769 | unsigned long l; | ||
770 | u32 *prop; | ||
771 | |||
772 | DBG("Looking for initrd properties... "); | ||
773 | |||
774 | prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l); | ||
775 | if (prop) { | ||
776 | initrd_start = (unsigned long)__va(of_read_ulong(prop, l/4)); | ||
777 | |||
778 | prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l); | ||
779 | if (prop) { | ||
780 | initrd_end = (unsigned long) | ||
781 | __va(of_read_ulong(prop, l/4)); | ||
782 | initrd_below_start_ok = 1; | ||
783 | } else { | ||
784 | initrd_start = 0; | ||
785 | } | ||
786 | } | ||
787 | |||
788 | DBG("initrd_start=0x%lx initrd_end=0x%lx\n", initrd_start, initrd_end); | ||
789 | } | ||
790 | #else | ||
791 | static inline void early_init_dt_check_for_initrd(unsigned long node) | ||
792 | { | ||
793 | } | ||
794 | #endif /* CONFIG_BLK_DEV_INITRD */ | ||
795 | |||
796 | static int __init early_init_dt_scan_chosen(unsigned long node, | ||
797 | const char *uname, int depth, void *data) | ||
798 | { | 366 | { |
799 | unsigned long *lprop; | 367 | unsigned long *lprop; |
800 | unsigned long l; | ||
801 | char *p; | ||
802 | |||
803 | DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname); | ||
804 | |||
805 | if (depth != 1 || | ||
806 | (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) | ||
807 | return 0; | ||
808 | 368 | ||
809 | #ifdef CONFIG_PPC64 | 369 | #ifdef CONFIG_PPC64 |
810 | /* check if iommu is forced on or off */ | 370 | /* check if iommu is forced on or off */ |
@@ -815,17 +375,17 @@ static int __init early_init_dt_scan_chosen(unsigned long node, | |||
815 | #endif | 375 | #endif |
816 | 376 | ||
817 | /* mem=x on the command line is the preferred mechanism */ | 377 | /* mem=x on the command line is the preferred mechanism */ |
818 | lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL); | 378 | lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL); |
819 | if (lprop) | 379 | if (lprop) |
820 | memory_limit = *lprop; | 380 | memory_limit = *lprop; |
821 | 381 | ||
822 | #ifdef CONFIG_PPC64 | 382 | #ifdef CONFIG_PPC64 |
823 | lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL); | 383 | lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL); |
824 | if (lprop) | 384 | if (lprop) |
825 | tce_alloc_start = *lprop; | 385 | tce_alloc_start = *lprop; |
826 | lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL); | 386 | lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL); |
827 | if (lprop) | 387 | if (lprop) |
828 | tce_alloc_end = *lprop; | 388 | tce_alloc_end = *lprop; |
829 | #endif | 389 | #endif |
830 | 390 | ||
831 | #ifdef CONFIG_KEXEC | 391 | #ifdef CONFIG_KEXEC |
@@ -837,51 +397,6 @@ static int __init early_init_dt_scan_chosen(unsigned long node, | |||
837 | if (lprop) | 397 | if (lprop) |
838 | crashk_res.end = crashk_res.start + *lprop - 1; | 398 | crashk_res.end = crashk_res.start + *lprop - 1; |
839 | #endif | 399 | #endif |
840 | |||
841 | early_init_dt_check_for_initrd(node); | ||
842 | |||
843 | /* Retreive command line */ | ||
844 | p = of_get_flat_dt_prop(node, "bootargs", &l); | ||
845 | if (p != NULL && l > 0) | ||
846 | strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE)); | ||
847 | |||
848 | #ifdef CONFIG_CMDLINE | ||
849 | if (p == NULL || l == 0 || (l == 1 && (*p) == 0)) | ||
850 | strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); | ||
851 | #endif /* CONFIG_CMDLINE */ | ||
852 | |||
853 | DBG("Command line is: %s\n", cmd_line); | ||
854 | |||
855 | /* break now */ | ||
856 | return 1; | ||
857 | } | ||
858 | |||
859 | static int __init early_init_dt_scan_root(unsigned long node, | ||
860 | const char *uname, int depth, void *data) | ||
861 | { | ||
862 | u32 *prop; | ||
863 | |||
864 | if (depth != 0) | ||
865 | return 0; | ||
866 | |||
867 | prop = of_get_flat_dt_prop(node, "#size-cells", NULL); | ||
868 | dt_root_size_cells = (prop == NULL) ? 1 : *prop; | ||
869 | DBG("dt_root_size_cells = %x\n", dt_root_size_cells); | ||
870 | |||
871 | prop = of_get_flat_dt_prop(node, "#address-cells", NULL); | ||
872 | dt_root_addr_cells = (prop == NULL) ? 2 : *prop; | ||
873 | DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells); | ||
874 | |||
875 | /* break now */ | ||
876 | return 1; | ||
877 | } | ||
878 | |||
879 | static u64 __init dt_mem_next_cell(int s, cell_t **cellp) | ||
880 | { | ||
881 | cell_t *p = *cellp; | ||
882 | |||
883 | *cellp = p + s; | ||
884 | return of_read_number(p, s); | ||
885 | } | 400 | } |
886 | 401 | ||
887 | #ifdef CONFIG_PPC_PSERIES | 402 | #ifdef CONFIG_PPC_PSERIES |
@@ -893,22 +408,22 @@ static u64 __init dt_mem_next_cell(int s, cell_t **cellp) | |||
893 | */ | 408 | */ |
894 | static int __init early_init_dt_scan_drconf_memory(unsigned long node) | 409 | static int __init early_init_dt_scan_drconf_memory(unsigned long node) |
895 | { | 410 | { |
896 | cell_t *dm, *ls, *usm; | 411 | __be32 *dm, *ls, *usm; |
897 | unsigned long l, n, flags; | 412 | unsigned long l, n, flags; |
898 | u64 base, size, lmb_size; | 413 | u64 base, size, lmb_size; |
899 | unsigned int is_kexec_kdump = 0, rngs; | 414 | unsigned int is_kexec_kdump = 0, rngs; |
900 | 415 | ||
901 | ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l); | 416 | ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l); |
902 | if (ls == NULL || l < dt_root_size_cells * sizeof(cell_t)) | 417 | if (ls == NULL || l < dt_root_size_cells * sizeof(__be32)) |
903 | return 0; | 418 | return 0; |
904 | lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls); | 419 | lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls); |
905 | 420 | ||
906 | dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l); | 421 | dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l); |
907 | if (dm == NULL || l < sizeof(cell_t)) | 422 | if (dm == NULL || l < sizeof(__be32)) |
908 | return 0; | 423 | return 0; |
909 | 424 | ||
910 | n = *dm++; /* number of entries */ | 425 | n = *dm++; /* number of entries */ |
911 | if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(cell_t)) | 426 | if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(__be32)) |
912 | return 0; | 427 | return 0; |
913 | 428 | ||
914 | /* check if this is a kexec/kdump kernel. */ | 429 | /* check if this is a kexec/kdump kernel. */ |
@@ -963,65 +478,47 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node) | |||
963 | #define early_init_dt_scan_drconf_memory(node) 0 | 478 | #define early_init_dt_scan_drconf_memory(node) 0 |
964 | #endif /* CONFIG_PPC_PSERIES */ | 479 | #endif /* CONFIG_PPC_PSERIES */ |
965 | 480 | ||
966 | static int __init early_init_dt_scan_memory(unsigned long node, | 481 | static int __init early_init_dt_scan_memory_ppc(unsigned long node, |
967 | const char *uname, int depth, void *data) | 482 | const char *uname, |
483 | int depth, void *data) | ||
968 | { | 484 | { |
969 | char *type = of_get_flat_dt_prop(node, "device_type", NULL); | ||
970 | cell_t *reg, *endp; | ||
971 | unsigned long l; | ||
972 | |||
973 | /* Look for the ibm,dynamic-reconfiguration-memory node */ | ||
974 | if (depth == 1 && | 485 | if (depth == 1 && |
975 | strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0) | 486 | strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0) |
976 | return early_init_dt_scan_drconf_memory(node); | 487 | return early_init_dt_scan_drconf_memory(node); |
488 | |||
489 | return early_init_dt_scan_memory(node, uname, depth, data); | ||
490 | } | ||
977 | 491 | ||
978 | /* We are scanning "memory" nodes only */ | 492 | void __init early_init_dt_add_memory_arch(u64 base, u64 size) |
979 | if (type == NULL) { | 493 | { |
980 | /* | 494 | #if defined(CONFIG_PPC64) |
981 | * The longtrail doesn't have a device_type on the | 495 | if (iommu_is_off) { |
982 | * /memory node, so look for the node called /memory@0. | 496 | if (base >= 0x80000000ul) |
983 | */ | 497 | return; |
984 | if (depth != 1 || strcmp(uname, "memory@0") != 0) | 498 | if ((base + size) > 0x80000000ul) |
985 | return 0; | 499 | size = 0x80000000ul - base; |
986 | } else if (strcmp(type, "memory") != 0) | 500 | } |
987 | return 0; | 501 | #endif |
988 | |||
989 | reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l); | ||
990 | if (reg == NULL) | ||
991 | reg = of_get_flat_dt_prop(node, "reg", &l); | ||
992 | if (reg == NULL) | ||
993 | return 0; | ||
994 | |||
995 | endp = reg + (l / sizeof(cell_t)); | ||
996 | |||
997 | DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n", | ||
998 | uname, l, reg[0], reg[1], reg[2], reg[3]); | ||
999 | |||
1000 | while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) { | ||
1001 | u64 base, size; | ||
1002 | 502 | ||
1003 | base = dt_mem_next_cell(dt_root_addr_cells, ®); | 503 | lmb_add(base, size); |
1004 | size = dt_mem_next_cell(dt_root_size_cells, ®); | ||
1005 | 504 | ||
1006 | if (size == 0) | 505 | memstart_addr = min((u64)memstart_addr, base); |
1007 | continue; | 506 | } |
1008 | DBG(" - %llx , %llx\n", (unsigned long long)base, | ||
1009 | (unsigned long long)size); | ||
1010 | #ifdef CONFIG_PPC64 | ||
1011 | if (iommu_is_off) { | ||
1012 | if (base >= 0x80000000ul) | ||
1013 | continue; | ||
1014 | if ((base + size) > 0x80000000ul) | ||
1015 | size = 0x80000000ul - base; | ||
1016 | } | ||
1017 | #endif | ||
1018 | lmb_add(base, size); | ||
1019 | 507 | ||
1020 | memstart_addr = min((u64)memstart_addr, base); | 508 | u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align) |
1021 | } | 509 | { |
510 | return lmb_alloc(size, align); | ||
511 | } | ||
1022 | 512 | ||
1023 | return 0; | 513 | #ifdef CONFIG_BLK_DEV_INITRD |
514 | void __init early_init_dt_setup_initrd_arch(unsigned long start, | ||
515 | unsigned long end) | ||
516 | { | ||
517 | initrd_start = (unsigned long)__va(start); | ||
518 | initrd_end = (unsigned long)__va(end); | ||
519 | initrd_below_start_ok = 1; | ||
1024 | } | 520 | } |
521 | #endif | ||
1025 | 522 | ||
1026 | static void __init early_reserve_mem(void) | 523 | static void __init early_reserve_mem(void) |
1027 | { | 524 | { |
@@ -1186,7 +683,7 @@ void __init early_init_devtree(void *params) | |||
1186 | /* Scan memory nodes and rebuild LMBs */ | 683 | /* Scan memory nodes and rebuild LMBs */ |
1187 | lmb_init(); | 684 | lmb_init(); |
1188 | of_scan_flat_dt(early_init_dt_scan_root, NULL); | 685 | of_scan_flat_dt(early_init_dt_scan_root, NULL); |
1189 | of_scan_flat_dt(early_init_dt_scan_memory, NULL); | 686 | of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL); |
1190 | 687 | ||
1191 | /* Save command line for /proc/cmdline and then parse parameters */ | 688 | /* Save command line for /proc/cmdline and then parse parameters */ |
1192 | strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); | 689 | strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); |
@@ -1234,25 +731,6 @@ void __init early_init_devtree(void *params) | |||
1234 | DBG(" <- early_init_devtree()\n"); | 731 | DBG(" <- early_init_devtree()\n"); |
1235 | } | 732 | } |
1236 | 733 | ||
1237 | |||
1238 | /** | ||
1239 | * Indicates whether the root node has a given value in its | ||
1240 | * compatible property. | ||
1241 | */ | ||
1242 | int machine_is_compatible(const char *compat) | ||
1243 | { | ||
1244 | struct device_node *root; | ||
1245 | int rc = 0; | ||
1246 | |||
1247 | root = of_find_node_by_path("/"); | ||
1248 | if (root) { | ||
1249 | rc = of_device_is_compatible(root, compat); | ||
1250 | of_node_put(root); | ||
1251 | } | ||
1252 | return rc; | ||
1253 | } | ||
1254 | EXPORT_SYMBOL(machine_is_compatible); | ||
1255 | |||
1256 | /******* | 734 | /******* |
1257 | * | 735 | * |
1258 | * New implementation of the OF "find" APIs, return a refcounted | 736 | * New implementation of the OF "find" APIs, return a refcounted |
@@ -1265,27 +743,6 @@ EXPORT_SYMBOL(machine_is_compatible); | |||
1265 | *******/ | 743 | *******/ |
1266 | 744 | ||
1267 | /** | 745 | /** |
1268 | * of_find_node_by_phandle - Find a node given a phandle | ||
1269 | * @handle: phandle of the node to find | ||
1270 | * | ||
1271 | * Returns a node pointer with refcount incremented, use | ||
1272 | * of_node_put() on it when done. | ||
1273 | */ | ||
1274 | struct device_node *of_find_node_by_phandle(phandle handle) | ||
1275 | { | ||
1276 | struct device_node *np; | ||
1277 | |||
1278 | read_lock(&devtree_lock); | ||
1279 | for (np = allnodes; np != 0; np = np->allnext) | ||
1280 | if (np->linux_phandle == handle) | ||
1281 | break; | ||
1282 | of_node_get(np); | ||
1283 | read_unlock(&devtree_lock); | ||
1284 | return np; | ||
1285 | } | ||
1286 | EXPORT_SYMBOL(of_find_node_by_phandle); | ||
1287 | |||
1288 | /** | ||
1289 | * of_find_next_cache_node - Find a node's subsidiary cache | 746 | * of_find_next_cache_node - Find a node's subsidiary cache |
1290 | * @np: node of type "cpu" or "cache" | 747 | * @np: node of type "cpu" or "cache" |
1291 | * | 748 | * |
@@ -1316,138 +773,6 @@ struct device_node *of_find_next_cache_node(struct device_node *np) | |||
1316 | return NULL; | 773 | return NULL; |
1317 | } | 774 | } |
1318 | 775 | ||
1319 | /** | ||
1320 | * of_node_get - Increment refcount of a node | ||
1321 | * @node: Node to inc refcount, NULL is supported to | ||
1322 | * simplify writing of callers | ||
1323 | * | ||
1324 | * Returns node. | ||
1325 | */ | ||
1326 | struct device_node *of_node_get(struct device_node *node) | ||
1327 | { | ||
1328 | if (node) | ||
1329 | kref_get(&node->kref); | ||
1330 | return node; | ||
1331 | } | ||
1332 | EXPORT_SYMBOL(of_node_get); | ||
1333 | |||
1334 | static inline struct device_node * kref_to_device_node(struct kref *kref) | ||
1335 | { | ||
1336 | return container_of(kref, struct device_node, kref); | ||
1337 | } | ||
1338 | |||
1339 | /** | ||
1340 | * of_node_release - release a dynamically allocated node | ||
1341 | * @kref: kref element of the node to be released | ||
1342 | * | ||
1343 | * In of_node_put() this function is passed to kref_put() | ||
1344 | * as the destructor. | ||
1345 | */ | ||
1346 | static void of_node_release(struct kref *kref) | ||
1347 | { | ||
1348 | struct device_node *node = kref_to_device_node(kref); | ||
1349 | struct property *prop = node->properties; | ||
1350 | |||
1351 | /* We should never be releasing nodes that haven't been detached. */ | ||
1352 | if (!of_node_check_flag(node, OF_DETACHED)) { | ||
1353 | printk("WARNING: Bad of_node_put() on %s\n", node->full_name); | ||
1354 | dump_stack(); | ||
1355 | kref_init(&node->kref); | ||
1356 | return; | ||
1357 | } | ||
1358 | |||
1359 | if (!of_node_check_flag(node, OF_DYNAMIC)) | ||
1360 | return; | ||
1361 | |||
1362 | while (prop) { | ||
1363 | struct property *next = prop->next; | ||
1364 | kfree(prop->name); | ||
1365 | kfree(prop->value); | ||
1366 | kfree(prop); | ||
1367 | prop = next; | ||
1368 | |||
1369 | if (!prop) { | ||
1370 | prop = node->deadprops; | ||
1371 | node->deadprops = NULL; | ||
1372 | } | ||
1373 | } | ||
1374 | kfree(node->full_name); | ||
1375 | kfree(node->data); | ||
1376 | kfree(node); | ||
1377 | } | ||
1378 | |||
1379 | /** | ||
1380 | * of_node_put - Decrement refcount of a node | ||
1381 | * @node: Node to dec refcount, NULL is supported to | ||
1382 | * simplify writing of callers | ||
1383 | * | ||
1384 | */ | ||
1385 | void of_node_put(struct device_node *node) | ||
1386 | { | ||
1387 | if (node) | ||
1388 | kref_put(&node->kref, of_node_release); | ||
1389 | } | ||
1390 | EXPORT_SYMBOL(of_node_put); | ||
1391 | |||
1392 | /* | ||
1393 | * Plug a device node into the tree and global list. | ||
1394 | */ | ||
1395 | void of_attach_node(struct device_node *np) | ||
1396 | { | ||
1397 | unsigned long flags; | ||
1398 | |||
1399 | write_lock_irqsave(&devtree_lock, flags); | ||
1400 | np->sibling = np->parent->child; | ||
1401 | np->allnext = allnodes; | ||
1402 | np->parent->child = np; | ||
1403 | allnodes = np; | ||
1404 | write_unlock_irqrestore(&devtree_lock, flags); | ||
1405 | } | ||
1406 | |||
1407 | /* | ||
1408 | * "Unplug" a node from the device tree. The caller must hold | ||
1409 | * a reference to the node. The memory associated with the node | ||
1410 | * is not freed until its refcount goes to zero. | ||
1411 | */ | ||
1412 | void of_detach_node(struct device_node *np) | ||
1413 | { | ||
1414 | struct device_node *parent; | ||
1415 | unsigned long flags; | ||
1416 | |||
1417 | write_lock_irqsave(&devtree_lock, flags); | ||
1418 | |||
1419 | parent = np->parent; | ||
1420 | if (!parent) | ||
1421 | goto out_unlock; | ||
1422 | |||
1423 | if (allnodes == np) | ||
1424 | allnodes = np->allnext; | ||
1425 | else { | ||
1426 | struct device_node *prev; | ||
1427 | for (prev = allnodes; | ||
1428 | prev->allnext != np; | ||
1429 | prev = prev->allnext) | ||
1430 | ; | ||
1431 | prev->allnext = np->allnext; | ||
1432 | } | ||
1433 | |||
1434 | if (parent->child == np) | ||
1435 | parent->child = np->sibling; | ||
1436 | else { | ||
1437 | struct device_node *prevsib; | ||
1438 | for (prevsib = np->parent->child; | ||
1439 | prevsib->sibling != np; | ||
1440 | prevsib = prevsib->sibling) | ||
1441 | ; | ||
1442 | prevsib->sibling = np->sibling; | ||
1443 | } | ||
1444 | |||
1445 | of_node_set_flag(np, OF_DETACHED); | ||
1446 | |||
1447 | out_unlock: | ||
1448 | write_unlock_irqrestore(&devtree_lock, flags); | ||
1449 | } | ||
1450 | |||
1451 | #ifdef CONFIG_PPC_PSERIES | 776 | #ifdef CONFIG_PPC_PSERIES |
1452 | /* | 777 | /* |
1453 | * Fix up the uninitialized fields in a new device node: | 778 | * Fix up the uninitialized fields in a new device node: |
@@ -1479,9 +804,9 @@ static int of_finish_dynamic_node(struct device_node *node) | |||
1479 | if (machine_is(powermac)) | 804 | if (machine_is(powermac)) |
1480 | return -ENODEV; | 805 | return -ENODEV; |
1481 | 806 | ||
1482 | /* fix up new node's linux_phandle field */ | 807 | /* fix up new node's phandle field */ |
1483 | if ((ibm_phandle = of_get_property(node, "ibm,phandle", NULL))) | 808 | if ((ibm_phandle = of_get_property(node, "ibm,phandle", NULL))) |
1484 | node->linux_phandle = *ibm_phandle; | 809 | node->phandle = *ibm_phandle; |
1485 | 810 | ||
1486 | out: | 811 | out: |
1487 | of_node_put(parent); | 812 | of_node_put(parent); |
@@ -1520,120 +845,6 @@ static int __init prom_reconfig_setup(void) | |||
1520 | __initcall(prom_reconfig_setup); | 845 | __initcall(prom_reconfig_setup); |
1521 | #endif | 846 | #endif |
1522 | 847 | ||
1523 | /* | ||
1524 | * Add a property to a node | ||
1525 | */ | ||
1526 | int prom_add_property(struct device_node* np, struct property* prop) | ||
1527 | { | ||
1528 | struct property **next; | ||
1529 | unsigned long flags; | ||
1530 | |||
1531 | prop->next = NULL; | ||
1532 | write_lock_irqsave(&devtree_lock, flags); | ||
1533 | next = &np->properties; | ||
1534 | while (*next) { | ||
1535 | if (strcmp(prop->name, (*next)->name) == 0) { | ||
1536 | /* duplicate ! don't insert it */ | ||
1537 | write_unlock_irqrestore(&devtree_lock, flags); | ||
1538 | return -1; | ||
1539 | } | ||
1540 | next = &(*next)->next; | ||
1541 | } | ||
1542 | *next = prop; | ||
1543 | write_unlock_irqrestore(&devtree_lock, flags); | ||
1544 | |||
1545 | #ifdef CONFIG_PROC_DEVICETREE | ||
1546 | /* try to add to proc as well if it was initialized */ | ||
1547 | if (np->pde) | ||
1548 | proc_device_tree_add_prop(np->pde, prop); | ||
1549 | #endif /* CONFIG_PROC_DEVICETREE */ | ||
1550 | |||
1551 | return 0; | ||
1552 | } | ||
1553 | |||
1554 | /* | ||
1555 | * Remove a property from a node. Note that we don't actually | ||
1556 | * remove it, since we have given out who-knows-how-many pointers | ||
1557 | * to the data using get-property. Instead we just move the property | ||
1558 | * to the "dead properties" list, so it won't be found any more. | ||
1559 | */ | ||
1560 | int prom_remove_property(struct device_node *np, struct property *prop) | ||
1561 | { | ||
1562 | struct property **next; | ||
1563 | unsigned long flags; | ||
1564 | int found = 0; | ||
1565 | |||
1566 | write_lock_irqsave(&devtree_lock, flags); | ||
1567 | next = &np->properties; | ||
1568 | while (*next) { | ||
1569 | if (*next == prop) { | ||
1570 | /* found the node */ | ||
1571 | *next = prop->next; | ||
1572 | prop->next = np->deadprops; | ||
1573 | np->deadprops = prop; | ||
1574 | found = 1; | ||
1575 | break; | ||
1576 | } | ||
1577 | next = &(*next)->next; | ||
1578 | } | ||
1579 | write_unlock_irqrestore(&devtree_lock, flags); | ||
1580 | |||
1581 | if (!found) | ||
1582 | return -ENODEV; | ||
1583 | |||
1584 | #ifdef CONFIG_PROC_DEVICETREE | ||
1585 | /* try to remove the proc node as well */ | ||
1586 | if (np->pde) | ||
1587 | proc_device_tree_remove_prop(np->pde, prop); | ||
1588 | #endif /* CONFIG_PROC_DEVICETREE */ | ||
1589 | |||
1590 | return 0; | ||
1591 | } | ||
1592 | |||
1593 | /* | ||
1594 | * Update a property in a node. Note that we don't actually | ||
1595 | * remove it, since we have given out who-knows-how-many pointers | ||
1596 | * to the data using get-property. Instead we just move the property | ||
1597 | * to the "dead properties" list, and add the new property to the | ||
1598 | * property list | ||
1599 | */ | ||
1600 | int prom_update_property(struct device_node *np, | ||
1601 | struct property *newprop, | ||
1602 | struct property *oldprop) | ||
1603 | { | ||
1604 | struct property **next; | ||
1605 | unsigned long flags; | ||
1606 | int found = 0; | ||
1607 | |||
1608 | write_lock_irqsave(&devtree_lock, flags); | ||
1609 | next = &np->properties; | ||
1610 | while (*next) { | ||
1611 | if (*next == oldprop) { | ||
1612 | /* found the node */ | ||
1613 | newprop->next = oldprop->next; | ||
1614 | *next = newprop; | ||
1615 | oldprop->next = np->deadprops; | ||
1616 | np->deadprops = oldprop; | ||
1617 | found = 1; | ||
1618 | break; | ||
1619 | } | ||
1620 | next = &(*next)->next; | ||
1621 | } | ||
1622 | write_unlock_irqrestore(&devtree_lock, flags); | ||
1623 | |||
1624 | if (!found) | ||
1625 | return -ENODEV; | ||
1626 | |||
1627 | #ifdef CONFIG_PROC_DEVICETREE | ||
1628 | /* try to add to proc as well if it was initialized */ | ||
1629 | if (np->pde) | ||
1630 | proc_device_tree_update_prop(np->pde, newprop, oldprop); | ||
1631 | #endif /* CONFIG_PROC_DEVICETREE */ | ||
1632 | |||
1633 | return 0; | ||
1634 | } | ||
1635 | |||
1636 | |||
1637 | /* Find the device node for a given logical cpu number, also returns the cpu | 848 | /* Find the device node for a given logical cpu number, also returns the cpu |
1638 | * local thread number (index in ibm,interrupt-server#s) if relevant and | 849 | * local thread number (index in ibm,interrupt-server#s) if relevant and |
1639 | * asked for (non NULL) | 850 | * asked for (non NULL) |
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index bafac2e41ae1..5f306c4946e5 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
@@ -654,6 +654,9 @@ static void __init early_cmdline_parse(void) | |||
654 | #define OV5_CMO 0x00 | 654 | #define OV5_CMO 0x00 |
655 | #endif | 655 | #endif |
656 | 656 | ||
657 | /* Option Vector 6: IBM PAPR hints */ | ||
658 | #define OV6_LINUX 0x02 /* Linux is our OS */ | ||
659 | |||
657 | /* | 660 | /* |
658 | * The architecture vector has an array of PVR mask/value pairs, | 661 | * The architecture vector has an array of PVR mask/value pairs, |
659 | * followed by # option vectors - 1, followed by the option vectors. | 662 | * followed by # option vectors - 1, followed by the option vectors. |
@@ -665,7 +668,7 @@ static unsigned char ibm_architecture_vec[] = { | |||
665 | W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */ | 668 | W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */ |
666 | W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */ | 669 | W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */ |
667 | W(0xfffffffe), W(0x0f000001), /* all 2.04-compliant and earlier */ | 670 | W(0xfffffffe), W(0x0f000001), /* all 2.04-compliant and earlier */ |
668 | 5 - 1, /* 5 option vectors */ | 671 | 6 - 1, /* 6 option vectors */ |
669 | 672 | ||
670 | /* option vector 1: processor architectures supported */ | 673 | /* option vector 1: processor architectures supported */ |
671 | 3 - 2, /* length */ | 674 | 3 - 2, /* length */ |
@@ -697,12 +700,29 @@ static unsigned char ibm_architecture_vec[] = { | |||
697 | 0, /* don't halt */ | 700 | 0, /* don't halt */ |
698 | 701 | ||
699 | /* option vector 5: PAPR/OF options */ | 702 | /* option vector 5: PAPR/OF options */ |
700 | 5 - 2, /* length */ | 703 | 13 - 2, /* length */ |
701 | 0, /* don't ignore, don't halt */ | 704 | 0, /* don't ignore, don't halt */ |
702 | OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY | | 705 | OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY | |
703 | OV5_DONATE_DEDICATE_CPU | OV5_MSI, | 706 | OV5_DONATE_DEDICATE_CPU | OV5_MSI, |
704 | 0, | 707 | 0, |
705 | OV5_CMO, | 708 | OV5_CMO, |
709 | 0, | ||
710 | 0, | ||
711 | 0, | ||
712 | 0, | ||
713 | /* WARNING: The offset of the "number of cores" field below | ||
714 | * must match by the macro below. Update the definition if | ||
715 | * the structure layout changes. | ||
716 | */ | ||
717 | #define IBM_ARCH_VEC_NRCORES_OFFSET 100 | ||
718 | W(NR_CPUS), /* number of cores supported */ | ||
719 | |||
720 | /* option vector 6: IBM PAPR hints */ | ||
721 | 4 - 2, /* length */ | ||
722 | 0, | ||
723 | 0, | ||
724 | OV6_LINUX, | ||
725 | |||
706 | }; | 726 | }; |
707 | 727 | ||
708 | /* Old method - ELF header with PT_NOTE sections */ | 728 | /* Old method - ELF header with PT_NOTE sections */ |
@@ -792,13 +812,70 @@ static struct fake_elf { | |||
792 | } | 812 | } |
793 | }; | 813 | }; |
794 | 814 | ||
815 | static int __init prom_count_smt_threads(void) | ||
816 | { | ||
817 | phandle node; | ||
818 | char type[64]; | ||
819 | unsigned int plen; | ||
820 | |||
821 | /* Pick up th first CPU node we can find */ | ||
822 | for (node = 0; prom_next_node(&node); ) { | ||
823 | type[0] = 0; | ||
824 | prom_getprop(node, "device_type", type, sizeof(type)); | ||
825 | |||
826 | if (strcmp(type, RELOC("cpu"))) | ||
827 | continue; | ||
828 | /* | ||
829 | * There is an entry for each smt thread, each entry being | ||
830 | * 4 bytes long. All cpus should have the same number of | ||
831 | * smt threads, so return after finding the first. | ||
832 | */ | ||
833 | plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s"); | ||
834 | if (plen == PROM_ERROR) | ||
835 | break; | ||
836 | plen >>= 2; | ||
837 | prom_debug("Found 0x%x smt threads per core\n", (unsigned long)plen); | ||
838 | |||
839 | /* Sanity check */ | ||
840 | if (plen < 1 || plen > 64) { | ||
841 | prom_printf("Threads per core 0x%x out of bounds, assuming 1\n", | ||
842 | (unsigned long)plen); | ||
843 | return 1; | ||
844 | } | ||
845 | return plen; | ||
846 | } | ||
847 | prom_debug("No threads found, assuming 1 per core\n"); | ||
848 | |||
849 | return 1; | ||
850 | |||
851 | } | ||
852 | |||
853 | |||
795 | static void __init prom_send_capabilities(void) | 854 | static void __init prom_send_capabilities(void) |
796 | { | 855 | { |
797 | ihandle elfloader, root; | 856 | ihandle elfloader, root; |
798 | prom_arg_t ret; | 857 | prom_arg_t ret; |
858 | u32 *cores; | ||
799 | 859 | ||
800 | root = call_prom("open", 1, 1, ADDR("/")); | 860 | root = call_prom("open", 1, 1, ADDR("/")); |
801 | if (root != 0) { | 861 | if (root != 0) { |
862 | /* We need to tell the FW about the number of cores we support. | ||
863 | * | ||
864 | * To do that, we count the number of threads on the first core | ||
865 | * (we assume this is the same for all cores) and use it to | ||
866 | * divide NR_CPUS. | ||
867 | */ | ||
868 | cores = (u32 *)PTRRELOC(&ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET]); | ||
869 | if (*cores != NR_CPUS) { | ||
870 | prom_printf("WARNING ! " | ||
871 | "ibm_architecture_vec structure inconsistent: 0x%x !\n", | ||
872 | *cores); | ||
873 | } else { | ||
874 | *cores = NR_CPUS / prom_count_smt_threads(); | ||
875 | prom_printf("Max number of cores passed to firmware: 0x%x\n", | ||
876 | (unsigned long)*cores); | ||
877 | } | ||
878 | |||
802 | /* try calling the ibm,client-architecture-support method */ | 879 | /* try calling the ibm,client-architecture-support method */ |
803 | prom_printf("Calling ibm,client-architecture-support..."); | 880 | prom_printf("Calling ibm,client-architecture-support..."); |
804 | if (call_prom_ret("call-method", 3, 2, &ret, | 881 | if (call_prom_ret("call-method", 3, 2, &ret, |
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index ef149880c145..d9b05866615f 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
@@ -46,7 +46,7 @@ | |||
46 | /* | 46 | /* |
47 | * Set of msr bits that gdb can change on behalf of a process. | 47 | * Set of msr bits that gdb can change on behalf of a process. |
48 | */ | 48 | */ |
49 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | 49 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
50 | #define MSR_DEBUGCHANGE 0 | 50 | #define MSR_DEBUGCHANGE 0 |
51 | #else | 51 | #else |
52 | #define MSR_DEBUGCHANGE (MSR_SE | MSR_BE) | 52 | #define MSR_DEBUGCHANGE (MSR_SE | MSR_BE) |
@@ -703,7 +703,7 @@ void user_enable_single_step(struct task_struct *task) | |||
703 | struct pt_regs *regs = task->thread.regs; | 703 | struct pt_regs *regs = task->thread.regs; |
704 | 704 | ||
705 | if (regs != NULL) { | 705 | if (regs != NULL) { |
706 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | 706 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
707 | task->thread.dbcr0 &= ~DBCR0_BT; | 707 | task->thread.dbcr0 &= ~DBCR0_BT; |
708 | task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC; | 708 | task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC; |
709 | regs->msr |= MSR_DE; | 709 | regs->msr |= MSR_DE; |
@@ -720,7 +720,7 @@ void user_enable_block_step(struct task_struct *task) | |||
720 | struct pt_regs *regs = task->thread.regs; | 720 | struct pt_regs *regs = task->thread.regs; |
721 | 721 | ||
722 | if (regs != NULL) { | 722 | if (regs != NULL) { |
723 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | 723 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
724 | task->thread.dbcr0 &= ~DBCR0_IC; | 724 | task->thread.dbcr0 &= ~DBCR0_IC; |
725 | task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT; | 725 | task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT; |
726 | regs->msr |= MSR_DE; | 726 | regs->msr |= MSR_DE; |
@@ -737,17 +737,25 @@ void user_disable_single_step(struct task_struct *task) | |||
737 | struct pt_regs *regs = task->thread.regs; | 737 | struct pt_regs *regs = task->thread.regs; |
738 | 738 | ||
739 | if (regs != NULL) { | 739 | if (regs != NULL) { |
740 | #if defined(CONFIG_BOOKE) | 740 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
741 | /* If DAC don't clear DBCRO_IDM or MSR_DE */ | 741 | /* |
742 | if (task->thread.dabr) | 742 | * The logic to disable single stepping should be as |
743 | task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT); | 743 | * simple as turning off the Instruction Complete flag. |
744 | else { | 744 | * And, after doing so, if all debug flags are off, turn |
745 | task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT | DBCR0_IDM); | 745 | * off DBCR0(IDM) and MSR(DE) .... Torez |
746 | */ | ||
747 | task->thread.dbcr0 &= ~DBCR0_IC; | ||
748 | /* | ||
749 | * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set. | ||
750 | */ | ||
751 | if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0, | ||
752 | task->thread.dbcr1)) { | ||
753 | /* | ||
754 | * All debug events were off..... | ||
755 | */ | ||
756 | task->thread.dbcr0 &= ~DBCR0_IDM; | ||
746 | regs->msr &= ~MSR_DE; | 757 | regs->msr &= ~MSR_DE; |
747 | } | 758 | } |
748 | #elif defined(CONFIG_40x) | ||
749 | task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT | DBCR0_IDM); | ||
750 | regs->msr &= ~MSR_DE; | ||
751 | #else | 759 | #else |
752 | regs->msr &= ~(MSR_SE | MSR_BE); | 760 | regs->msr &= ~(MSR_SE | MSR_BE); |
753 | #endif | 761 | #endif |
@@ -769,8 +777,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, | |||
769 | if ((data & ~0x7UL) >= TASK_SIZE) | 777 | if ((data & ~0x7UL) >= TASK_SIZE) |
770 | return -EIO; | 778 | return -EIO; |
771 | 779 | ||
772 | #ifndef CONFIG_BOOKE | 780 | #ifndef CONFIG_PPC_ADV_DEBUG_REGS |
773 | |||
774 | /* For processors using DABR (i.e. 970), the bottom 3 bits are flags. | 781 | /* For processors using DABR (i.e. 970), the bottom 3 bits are flags. |
775 | * It was assumed, on previous implementations, that 3 bits were | 782 | * It was assumed, on previous implementations, that 3 bits were |
776 | * passed together with the data address, fitting the design of the | 783 | * passed together with the data address, fitting the design of the |
@@ -789,21 +796,22 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, | |||
789 | 796 | ||
790 | /* Move contents to the DABR register */ | 797 | /* Move contents to the DABR register */ |
791 | task->thread.dabr = data; | 798 | task->thread.dabr = data; |
792 | 799 | #else /* CONFIG_PPC_ADV_DEBUG_REGS */ | |
793 | #endif | ||
794 | #if defined(CONFIG_BOOKE) | ||
795 | |||
796 | /* As described above, it was assumed 3 bits were passed with the data | 800 | /* As described above, it was assumed 3 bits were passed with the data |
797 | * address, but we will assume only the mode bits will be passed | 801 | * address, but we will assume only the mode bits will be passed |
798 | * as to not cause alignment restrictions for DAC-based processors. | 802 | * as to not cause alignment restrictions for DAC-based processors. |
799 | */ | 803 | */ |
800 | 804 | ||
801 | /* DAC's hold the whole address without any mode flags */ | 805 | /* DAC's hold the whole address without any mode flags */ |
802 | task->thread.dabr = data & ~0x3UL; | 806 | task->thread.dac1 = data & ~0x3UL; |
803 | 807 | ||
804 | if (task->thread.dabr == 0) { | 808 | if (task->thread.dac1 == 0) { |
805 | task->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W | DBCR0_IDM); | 809 | dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W); |
806 | task->thread.regs->msr &= ~MSR_DE; | 810 | if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0, |
811 | task->thread.dbcr1)) { | ||
812 | task->thread.regs->msr &= ~MSR_DE; | ||
813 | task->thread.dbcr0 &= ~DBCR0_IDM; | ||
814 | } | ||
807 | return 0; | 815 | return 0; |
808 | } | 816 | } |
809 | 817 | ||
@@ -814,17 +822,17 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, | |||
814 | 822 | ||
815 | /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0 | 823 | /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0 |
816 | register */ | 824 | register */ |
817 | task->thread.dbcr0 = DBCR0_IDM; | 825 | task->thread.dbcr0 |= DBCR0_IDM; |
818 | 826 | ||
819 | /* Check for write and read flags and set DBCR0 | 827 | /* Check for write and read flags and set DBCR0 |
820 | accordingly */ | 828 | accordingly */ |
829 | dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W); | ||
821 | if (data & 0x1UL) | 830 | if (data & 0x1UL) |
822 | task->thread.dbcr0 |= DBSR_DAC1R; | 831 | dbcr_dac(task) |= DBCR_DAC1R; |
823 | if (data & 0x2UL) | 832 | if (data & 0x2UL) |
824 | task->thread.dbcr0 |= DBSR_DAC1W; | 833 | dbcr_dac(task) |= DBCR_DAC1W; |
825 | |||
826 | task->thread.regs->msr |= MSR_DE; | 834 | task->thread.regs->msr |= MSR_DE; |
827 | #endif | 835 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ |
828 | return 0; | 836 | return 0; |
829 | } | 837 | } |
830 | 838 | ||
@@ -839,6 +847,394 @@ void ptrace_disable(struct task_struct *child) | |||
839 | user_disable_single_step(child); | 847 | user_disable_single_step(child); |
840 | } | 848 | } |
841 | 849 | ||
850 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
851 | static long set_intruction_bp(struct task_struct *child, | ||
852 | struct ppc_hw_breakpoint *bp_info) | ||
853 | { | ||
854 | int slot; | ||
855 | int slot1_in_use = ((child->thread.dbcr0 & DBCR0_IAC1) != 0); | ||
856 | int slot2_in_use = ((child->thread.dbcr0 & DBCR0_IAC2) != 0); | ||
857 | int slot3_in_use = ((child->thread.dbcr0 & DBCR0_IAC3) != 0); | ||
858 | int slot4_in_use = ((child->thread.dbcr0 & DBCR0_IAC4) != 0); | ||
859 | |||
860 | if (dbcr_iac_range(child) & DBCR_IAC12MODE) | ||
861 | slot2_in_use = 1; | ||
862 | if (dbcr_iac_range(child) & DBCR_IAC34MODE) | ||
863 | slot4_in_use = 1; | ||
864 | |||
865 | if (bp_info->addr >= TASK_SIZE) | ||
866 | return -EIO; | ||
867 | |||
868 | if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) { | ||
869 | |||
870 | /* Make sure range is valid. */ | ||
871 | if (bp_info->addr2 >= TASK_SIZE) | ||
872 | return -EIO; | ||
873 | |||
874 | /* We need a pair of IAC registers */ | ||
875 | if ((!slot1_in_use) && (!slot2_in_use)) { | ||
876 | slot = 1; | ||
877 | child->thread.iac1 = bp_info->addr; | ||
878 | child->thread.iac2 = bp_info->addr2; | ||
879 | child->thread.dbcr0 |= DBCR0_IAC1; | ||
880 | if (bp_info->addr_mode == | ||
881 | PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE) | ||
882 | dbcr_iac_range(child) |= DBCR_IAC12X; | ||
883 | else | ||
884 | dbcr_iac_range(child) |= DBCR_IAC12I; | ||
885 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | ||
886 | } else if ((!slot3_in_use) && (!slot4_in_use)) { | ||
887 | slot = 3; | ||
888 | child->thread.iac3 = bp_info->addr; | ||
889 | child->thread.iac4 = bp_info->addr2; | ||
890 | child->thread.dbcr0 |= DBCR0_IAC3; | ||
891 | if (bp_info->addr_mode == | ||
892 | PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE) | ||
893 | dbcr_iac_range(child) |= DBCR_IAC34X; | ||
894 | else | ||
895 | dbcr_iac_range(child) |= DBCR_IAC34I; | ||
896 | #endif | ||
897 | } else | ||
898 | return -ENOSPC; | ||
899 | } else { | ||
900 | /* We only need one. If possible leave a pair free in | ||
901 | * case a range is needed later | ||
902 | */ | ||
903 | if (!slot1_in_use) { | ||
904 | /* | ||
905 | * Don't use iac1 if iac1-iac2 are free and either | ||
906 | * iac3 or iac4 (but not both) are free | ||
907 | */ | ||
908 | if (slot2_in_use || (slot3_in_use == slot4_in_use)) { | ||
909 | slot = 1; | ||
910 | child->thread.iac1 = bp_info->addr; | ||
911 | child->thread.dbcr0 |= DBCR0_IAC1; | ||
912 | goto out; | ||
913 | } | ||
914 | } | ||
915 | if (!slot2_in_use) { | ||
916 | slot = 2; | ||
917 | child->thread.iac2 = bp_info->addr; | ||
918 | child->thread.dbcr0 |= DBCR0_IAC2; | ||
919 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | ||
920 | } else if (!slot3_in_use) { | ||
921 | slot = 3; | ||
922 | child->thread.iac3 = bp_info->addr; | ||
923 | child->thread.dbcr0 |= DBCR0_IAC3; | ||
924 | } else if (!slot4_in_use) { | ||
925 | slot = 4; | ||
926 | child->thread.iac4 = bp_info->addr; | ||
927 | child->thread.dbcr0 |= DBCR0_IAC4; | ||
928 | #endif | ||
929 | } else | ||
930 | return -ENOSPC; | ||
931 | } | ||
932 | out: | ||
933 | child->thread.dbcr0 |= DBCR0_IDM; | ||
934 | child->thread.regs->msr |= MSR_DE; | ||
935 | |||
936 | return slot; | ||
937 | } | ||
938 | |||
939 | static int del_instruction_bp(struct task_struct *child, int slot) | ||
940 | { | ||
941 | switch (slot) { | ||
942 | case 1: | ||
943 | if (child->thread.iac1 == 0) | ||
944 | return -ENOENT; | ||
945 | |||
946 | if (dbcr_iac_range(child) & DBCR_IAC12MODE) { | ||
947 | /* address range - clear slots 1 & 2 */ | ||
948 | child->thread.iac2 = 0; | ||
949 | dbcr_iac_range(child) &= ~DBCR_IAC12MODE; | ||
950 | } | ||
951 | child->thread.iac1 = 0; | ||
952 | child->thread.dbcr0 &= ~DBCR0_IAC1; | ||
953 | break; | ||
954 | case 2: | ||
955 | if (child->thread.iac2 == 0) | ||
956 | return -ENOENT; | ||
957 | |||
958 | if (dbcr_iac_range(child) & DBCR_IAC12MODE) | ||
959 | /* used in a range */ | ||
960 | return -EINVAL; | ||
961 | child->thread.iac2 = 0; | ||
962 | child->thread.dbcr0 &= ~DBCR0_IAC2; | ||
963 | break; | ||
964 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | ||
965 | case 3: | ||
966 | if (child->thread.iac3 == 0) | ||
967 | return -ENOENT; | ||
968 | |||
969 | if (dbcr_iac_range(child) & DBCR_IAC34MODE) { | ||
970 | /* address range - clear slots 3 & 4 */ | ||
971 | child->thread.iac4 = 0; | ||
972 | dbcr_iac_range(child) &= ~DBCR_IAC34MODE; | ||
973 | } | ||
974 | child->thread.iac3 = 0; | ||
975 | child->thread.dbcr0 &= ~DBCR0_IAC3; | ||
976 | break; | ||
977 | case 4: | ||
978 | if (child->thread.iac4 == 0) | ||
979 | return -ENOENT; | ||
980 | |||
981 | if (dbcr_iac_range(child) & DBCR_IAC34MODE) | ||
982 | /* Used in a range */ | ||
983 | return -EINVAL; | ||
984 | child->thread.iac4 = 0; | ||
985 | child->thread.dbcr0 &= ~DBCR0_IAC4; | ||
986 | break; | ||
987 | #endif | ||
988 | default: | ||
989 | return -EINVAL; | ||
990 | } | ||
991 | return 0; | ||
992 | } | ||
993 | |||
994 | static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info) | ||
995 | { | ||
996 | int byte_enable = | ||
997 | (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT) | ||
998 | & 0xf; | ||
999 | int condition_mode = | ||
1000 | bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE; | ||
1001 | int slot; | ||
1002 | |||
1003 | if (byte_enable && (condition_mode == 0)) | ||
1004 | return -EINVAL; | ||
1005 | |||
1006 | if (bp_info->addr >= TASK_SIZE) | ||
1007 | return -EIO; | ||
1008 | |||
1009 | if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) { | ||
1010 | slot = 1; | ||
1011 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) | ||
1012 | dbcr_dac(child) |= DBCR_DAC1R; | ||
1013 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) | ||
1014 | dbcr_dac(child) |= DBCR_DAC1W; | ||
1015 | child->thread.dac1 = (unsigned long)bp_info->addr; | ||
1016 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | ||
1017 | if (byte_enable) { | ||
1018 | child->thread.dvc1 = | ||
1019 | (unsigned long)bp_info->condition_value; | ||
1020 | child->thread.dbcr2 |= | ||
1021 | ((byte_enable << DBCR2_DVC1BE_SHIFT) | | ||
1022 | (condition_mode << DBCR2_DVC1M_SHIFT)); | ||
1023 | } | ||
1024 | #endif | ||
1025 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE | ||
1026 | } else if (child->thread.dbcr2 & DBCR2_DAC12MODE) { | ||
1027 | /* Both dac1 and dac2 are part of a range */ | ||
1028 | return -ENOSPC; | ||
1029 | #endif | ||
1030 | } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) { | ||
1031 | slot = 2; | ||
1032 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) | ||
1033 | dbcr_dac(child) |= DBCR_DAC2R; | ||
1034 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) | ||
1035 | dbcr_dac(child) |= DBCR_DAC2W; | ||
1036 | child->thread.dac2 = (unsigned long)bp_info->addr; | ||
1037 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | ||
1038 | if (byte_enable) { | ||
1039 | child->thread.dvc2 = | ||
1040 | (unsigned long)bp_info->condition_value; | ||
1041 | child->thread.dbcr2 |= | ||
1042 | ((byte_enable << DBCR2_DVC2BE_SHIFT) | | ||
1043 | (condition_mode << DBCR2_DVC2M_SHIFT)); | ||
1044 | } | ||
1045 | #endif | ||
1046 | } else | ||
1047 | return -ENOSPC; | ||
1048 | child->thread.dbcr0 |= DBCR0_IDM; | ||
1049 | child->thread.regs->msr |= MSR_DE; | ||
1050 | |||
1051 | return slot + 4; | ||
1052 | } | ||
1053 | |||
1054 | static int del_dac(struct task_struct *child, int slot) | ||
1055 | { | ||
1056 | if (slot == 1) { | ||
1057 | if (child->thread.dac1 == 0) | ||
1058 | return -ENOENT; | ||
1059 | |||
1060 | child->thread.dac1 = 0; | ||
1061 | dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W); | ||
1062 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE | ||
1063 | if (child->thread.dbcr2 & DBCR2_DAC12MODE) { | ||
1064 | child->thread.dac2 = 0; | ||
1065 | child->thread.dbcr2 &= ~DBCR2_DAC12MODE; | ||
1066 | } | ||
1067 | child->thread.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE); | ||
1068 | #endif | ||
1069 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | ||
1070 | child->thread.dvc1 = 0; | ||
1071 | #endif | ||
1072 | } else if (slot == 2) { | ||
1073 | if (child->thread.dac1 == 0) | ||
1074 | return -ENOENT; | ||
1075 | |||
1076 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE | ||
1077 | if (child->thread.dbcr2 & DBCR2_DAC12MODE) | ||
1078 | /* Part of a range */ | ||
1079 | return -EINVAL; | ||
1080 | child->thread.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE); | ||
1081 | #endif | ||
1082 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | ||
1083 | child->thread.dvc2 = 0; | ||
1084 | #endif | ||
1085 | child->thread.dac2 = 0; | ||
1086 | dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W); | ||
1087 | } else | ||
1088 | return -EINVAL; | ||
1089 | |||
1090 | return 0; | ||
1091 | } | ||
1092 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | ||
1093 | |||
1094 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE | ||
1095 | static int set_dac_range(struct task_struct *child, | ||
1096 | struct ppc_hw_breakpoint *bp_info) | ||
1097 | { | ||
1098 | int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK; | ||
1099 | |||
1100 | /* We don't allow range watchpoints to be used with DVC */ | ||
1101 | if (bp_info->condition_mode) | ||
1102 | return -EINVAL; | ||
1103 | |||
1104 | /* | ||
1105 | * Best effort to verify the address range. The user/supervisor bits | ||
1106 | * prevent trapping in kernel space, but let's fail on an obvious bad | ||
1107 | * range. The simple test on the mask is not fool-proof, and any | ||
1108 | * exclusive range will spill over into kernel space. | ||
1109 | */ | ||
1110 | if (bp_info->addr >= TASK_SIZE) | ||
1111 | return -EIO; | ||
1112 | if (mode == PPC_BREAKPOINT_MODE_MASK) { | ||
1113 | /* | ||
1114 | * dac2 is a bitmask. Don't allow a mask that makes a | ||
1115 | * kernel space address from a valid dac1 value | ||
1116 | */ | ||
1117 | if (~((unsigned long)bp_info->addr2) >= TASK_SIZE) | ||
1118 | return -EIO; | ||
1119 | } else { | ||
1120 | /* | ||
1121 | * For range breakpoints, addr2 must also be a valid address | ||
1122 | */ | ||
1123 | if (bp_info->addr2 >= TASK_SIZE) | ||
1124 | return -EIO; | ||
1125 | } | ||
1126 | |||
1127 | if (child->thread.dbcr0 & | ||
1128 | (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W)) | ||
1129 | return -ENOSPC; | ||
1130 | |||
1131 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) | ||
1132 | child->thread.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM); | ||
1133 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) | ||
1134 | child->thread.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM); | ||
1135 | child->thread.dac1 = bp_info->addr; | ||
1136 | child->thread.dac2 = bp_info->addr2; | ||
1137 | if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE) | ||
1138 | child->thread.dbcr2 |= DBCR2_DAC12M; | ||
1139 | else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE) | ||
1140 | child->thread.dbcr2 |= DBCR2_DAC12MX; | ||
1141 | else /* PPC_BREAKPOINT_MODE_MASK */ | ||
1142 | child->thread.dbcr2 |= DBCR2_DAC12MM; | ||
1143 | child->thread.regs->msr |= MSR_DE; | ||
1144 | |||
1145 | return 5; | ||
1146 | } | ||
1147 | #endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */ | ||
1148 | |||
1149 | static long ppc_set_hwdebug(struct task_struct *child, | ||
1150 | struct ppc_hw_breakpoint *bp_info) | ||
1151 | { | ||
1152 | if (bp_info->version != 1) | ||
1153 | return -ENOTSUPP; | ||
1154 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
1155 | /* | ||
1156 | * Check for invalid flags and combinations | ||
1157 | */ | ||
1158 | if ((bp_info->trigger_type == 0) || | ||
1159 | (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE | | ||
1160 | PPC_BREAKPOINT_TRIGGER_RW)) || | ||
1161 | (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) || | ||
1162 | (bp_info->condition_mode & | ||
1163 | ~(PPC_BREAKPOINT_CONDITION_MODE | | ||
1164 | PPC_BREAKPOINT_CONDITION_BE_ALL))) | ||
1165 | return -EINVAL; | ||
1166 | #if CONFIG_PPC_ADV_DEBUG_DVCS == 0 | ||
1167 | if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE) | ||
1168 | return -EINVAL; | ||
1169 | #endif | ||
1170 | |||
1171 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) { | ||
1172 | if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) || | ||
1173 | (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)) | ||
1174 | return -EINVAL; | ||
1175 | return set_intruction_bp(child, bp_info); | ||
1176 | } | ||
1177 | if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT) | ||
1178 | return set_dac(child, bp_info); | ||
1179 | |||
1180 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE | ||
1181 | return set_dac_range(child, bp_info); | ||
1182 | #else | ||
1183 | return -EINVAL; | ||
1184 | #endif | ||
1185 | #else /* !CONFIG_PPC_ADV_DEBUG_DVCS */ | ||
1186 | /* | ||
1187 | * We only support one data breakpoint | ||
1188 | */ | ||
1189 | if (((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0) || | ||
1190 | ((bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0) || | ||
1191 | (bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_WRITE) || | ||
1192 | (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) || | ||
1193 | (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)) | ||
1194 | return -EINVAL; | ||
1195 | |||
1196 | if (child->thread.dabr) | ||
1197 | return -ENOSPC; | ||
1198 | |||
1199 | if ((unsigned long)bp_info->addr >= TASK_SIZE) | ||
1200 | return -EIO; | ||
1201 | |||
1202 | child->thread.dabr = (unsigned long)bp_info->addr; | ||
1203 | |||
1204 | return 1; | ||
1205 | #endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */ | ||
1206 | } | ||
1207 | |||
1208 | static long ppc_del_hwdebug(struct task_struct *child, long addr, long data) | ||
1209 | { | ||
1210 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
1211 | int rc; | ||
1212 | |||
1213 | if (data <= 4) | ||
1214 | rc = del_instruction_bp(child, (int)data); | ||
1215 | else | ||
1216 | rc = del_dac(child, (int)data - 4); | ||
1217 | |||
1218 | if (!rc) { | ||
1219 | if (!DBCR_ACTIVE_EVENTS(child->thread.dbcr0, | ||
1220 | child->thread.dbcr1)) { | ||
1221 | child->thread.dbcr0 &= ~DBCR0_IDM; | ||
1222 | child->thread.regs->msr &= ~MSR_DE; | ||
1223 | } | ||
1224 | } | ||
1225 | return rc; | ||
1226 | #else | ||
1227 | if (data != 1) | ||
1228 | return -EINVAL; | ||
1229 | if (child->thread.dabr == 0) | ||
1230 | return -ENOENT; | ||
1231 | |||
1232 | child->thread.dabr = 0; | ||
1233 | |||
1234 | return 0; | ||
1235 | #endif | ||
1236 | } | ||
1237 | |||
842 | /* | 1238 | /* |
843 | * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls, | 1239 | * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls, |
844 | * we mark them as obsolete now, they will be removed in a future version | 1240 | * we mark them as obsolete now, they will be removed in a future version |
@@ -932,13 +1328,77 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
932 | break; | 1328 | break; |
933 | } | 1329 | } |
934 | 1330 | ||
1331 | case PPC_PTRACE_GETHWDBGINFO: { | ||
1332 | struct ppc_debug_info dbginfo; | ||
1333 | |||
1334 | dbginfo.version = 1; | ||
1335 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
1336 | dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS; | ||
1337 | dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS; | ||
1338 | dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS; | ||
1339 | dbginfo.data_bp_alignment = 4; | ||
1340 | dbginfo.sizeof_condition = 4; | ||
1341 | dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE | | ||
1342 | PPC_DEBUG_FEATURE_INSN_BP_MASK; | ||
1343 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE | ||
1344 | dbginfo.features |= | ||
1345 | PPC_DEBUG_FEATURE_DATA_BP_RANGE | | ||
1346 | PPC_DEBUG_FEATURE_DATA_BP_MASK; | ||
1347 | #endif | ||
1348 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | ||
1349 | dbginfo.num_instruction_bps = 0; | ||
1350 | dbginfo.num_data_bps = 1; | ||
1351 | dbginfo.num_condition_regs = 0; | ||
1352 | #ifdef CONFIG_PPC64 | ||
1353 | dbginfo.data_bp_alignment = 8; | ||
1354 | #else | ||
1355 | dbginfo.data_bp_alignment = 4; | ||
1356 | #endif | ||
1357 | dbginfo.sizeof_condition = 0; | ||
1358 | dbginfo.features = 0; | ||
1359 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | ||
1360 | |||
1361 | if (!access_ok(VERIFY_WRITE, data, | ||
1362 | sizeof(struct ppc_debug_info))) | ||
1363 | return -EFAULT; | ||
1364 | ret = __copy_to_user((struct ppc_debug_info __user *)data, | ||
1365 | &dbginfo, sizeof(struct ppc_debug_info)) ? | ||
1366 | -EFAULT : 0; | ||
1367 | break; | ||
1368 | } | ||
1369 | |||
1370 | case PPC_PTRACE_SETHWDEBUG: { | ||
1371 | struct ppc_hw_breakpoint bp_info; | ||
1372 | |||
1373 | if (!access_ok(VERIFY_READ, data, | ||
1374 | sizeof(struct ppc_hw_breakpoint))) | ||
1375 | return -EFAULT; | ||
1376 | ret = __copy_from_user(&bp_info, | ||
1377 | (struct ppc_hw_breakpoint __user *)data, | ||
1378 | sizeof(struct ppc_hw_breakpoint)) ? | ||
1379 | -EFAULT : 0; | ||
1380 | if (!ret) | ||
1381 | ret = ppc_set_hwdebug(child, &bp_info); | ||
1382 | break; | ||
1383 | } | ||
1384 | |||
1385 | case PPC_PTRACE_DELHWDEBUG: { | ||
1386 | ret = ppc_del_hwdebug(child, addr, data); | ||
1387 | break; | ||
1388 | } | ||
1389 | |||
935 | case PTRACE_GET_DEBUGREG: { | 1390 | case PTRACE_GET_DEBUGREG: { |
936 | ret = -EINVAL; | 1391 | ret = -EINVAL; |
937 | /* We only support one DABR and no IABRS at the moment */ | 1392 | /* We only support one DABR and no IABRS at the moment */ |
938 | if (addr > 0) | 1393 | if (addr > 0) |
939 | break; | 1394 | break; |
1395 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
1396 | ret = put_user(child->thread.dac1, | ||
1397 | (unsigned long __user *)data); | ||
1398 | #else | ||
940 | ret = put_user(child->thread.dabr, | 1399 | ret = put_user(child->thread.dabr, |
941 | (unsigned long __user *)data); | 1400 | (unsigned long __user *)data); |
1401 | #endif | ||
942 | break; | 1402 | break; |
943 | } | 1403 | } |
944 | 1404 | ||
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index 00b5078da9a3..a0afb555a7c9 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c | |||
@@ -140,17 +140,15 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs) | |||
140 | return 0; /* no signals delivered */ | 140 | return 0; /* no signals delivered */ |
141 | } | 141 | } |
142 | 142 | ||
143 | #ifndef CONFIG_PPC_ADV_DEBUG_REGS | ||
143 | /* | 144 | /* |
144 | * Reenable the DABR before delivering the signal to | 145 | * Reenable the DABR before delivering the signal to |
145 | * user space. The DABR will have been cleared if it | 146 | * user space. The DABR will have been cleared if it |
146 | * triggered inside the kernel. | 147 | * triggered inside the kernel. |
147 | */ | 148 | */ |
148 | if (current->thread.dabr) { | 149 | if (current->thread.dabr) |
149 | set_dabr(current->thread.dabr); | 150 | set_dabr(current->thread.dabr); |
150 | #if defined(CONFIG_BOOKE) | ||
151 | mtspr(SPRN_DBCR0, current->thread.dbcr0); | ||
152 | #endif | 151 | #endif |
153 | } | ||
154 | 152 | ||
155 | if (is32) { | 153 | if (is32) { |
156 | if (ka.sa.sa_flags & SA_SIGINFO) | 154 | if (ka.sa.sa_flags & SA_SIGINFO) |
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index d670429a1608..266610119f66 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c | |||
@@ -1078,7 +1078,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx, | |||
1078 | int i; | 1078 | int i; |
1079 | unsigned char tmp; | 1079 | unsigned char tmp; |
1080 | unsigned long new_msr = regs->msr; | 1080 | unsigned long new_msr = regs->msr; |
1081 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 1081 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
1082 | unsigned long new_dbcr0 = current->thread.dbcr0; | 1082 | unsigned long new_dbcr0 = current->thread.dbcr0; |
1083 | #endif | 1083 | #endif |
1084 | 1084 | ||
@@ -1087,13 +1087,17 @@ int sys_debug_setcontext(struct ucontext __user *ctx, | |||
1087 | return -EFAULT; | 1087 | return -EFAULT; |
1088 | switch (op.dbg_type) { | 1088 | switch (op.dbg_type) { |
1089 | case SIG_DBG_SINGLE_STEPPING: | 1089 | case SIG_DBG_SINGLE_STEPPING: |
1090 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 1090 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
1091 | if (op.dbg_value) { | 1091 | if (op.dbg_value) { |
1092 | new_msr |= MSR_DE; | 1092 | new_msr |= MSR_DE; |
1093 | new_dbcr0 |= (DBCR0_IDM | DBCR0_IC); | 1093 | new_dbcr0 |= (DBCR0_IDM | DBCR0_IC); |
1094 | } else { | 1094 | } else { |
1095 | new_msr &= ~MSR_DE; | 1095 | new_dbcr0 &= ~DBCR0_IC; |
1096 | new_dbcr0 &= ~(DBCR0_IDM | DBCR0_IC); | 1096 | if (!DBCR_ACTIVE_EVENTS(new_dbcr0, |
1097 | current->thread.dbcr1)) { | ||
1098 | new_msr &= ~MSR_DE; | ||
1099 | new_dbcr0 &= ~DBCR0_IDM; | ||
1100 | } | ||
1097 | } | 1101 | } |
1098 | #else | 1102 | #else |
1099 | if (op.dbg_value) | 1103 | if (op.dbg_value) |
@@ -1103,7 +1107,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx, | |||
1103 | #endif | 1107 | #endif |
1104 | break; | 1108 | break; |
1105 | case SIG_DBG_BRANCH_TRACING: | 1109 | case SIG_DBG_BRANCH_TRACING: |
1106 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 1110 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
1107 | return -EINVAL; | 1111 | return -EINVAL; |
1108 | #else | 1112 | #else |
1109 | if (op.dbg_value) | 1113 | if (op.dbg_value) |
@@ -1124,7 +1128,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx, | |||
1124 | failure is a problem, anyway, and it's very unlikely unless | 1128 | failure is a problem, anyway, and it's very unlikely unless |
1125 | the user is really doing something wrong. */ | 1129 | the user is really doing something wrong. */ |
1126 | regs->msr = new_msr; | 1130 | regs->msr = new_msr; |
1127 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 1131 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
1128 | current->thread.dbcr0 = new_dbcr0; | 1132 | current->thread.dbcr0 = new_dbcr0; |
1129 | #endif | 1133 | #endif |
1130 | 1134 | ||
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 6c6093d67f30..1b16b9a3e49a 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -265,8 +265,8 @@ void account_system_vtime(struct task_struct *tsk) | |||
265 | account_system_time(tsk, 0, delta, deltascaled); | 265 | account_system_time(tsk, 0, delta, deltascaled); |
266 | else | 266 | else |
267 | account_idle_time(delta); | 267 | account_idle_time(delta); |
268 | per_cpu(cputime_last_delta, smp_processor_id()) = delta; | 268 | __get_cpu_var(cputime_last_delta) = delta; |
269 | per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled; | 269 | __get_cpu_var(cputime_scaled_last_delta) = deltascaled; |
270 | local_irq_restore(flags); | 270 | local_irq_restore(flags); |
271 | } | 271 | } |
272 | EXPORT_SYMBOL_GPL(account_system_vtime); | 272 | EXPORT_SYMBOL_GPL(account_system_vtime); |
@@ -575,6 +575,8 @@ void timer_interrupt(struct pt_regs * regs) | |||
575 | 575 | ||
576 | trace_timer_interrupt_entry(regs); | 576 | trace_timer_interrupt_entry(regs); |
577 | 577 | ||
578 | __get_cpu_var(irq_stat).timer_irqs++; | ||
579 | |||
578 | /* Ensure a positive value is written to the decrementer, or else | 580 | /* Ensure a positive value is written to the decrementer, or else |
579 | * some CPUs will continue to take decrementer exceptions */ | 581 |
580 | set_dec(DECREMENTER_MAX); | 582 | set_dec(DECREMENTER_MAX); |
@@ -935,8 +937,8 @@ static void register_decrementer_clockevent(int cpu) | |||
935 | *dec = decrementer_clockevent; | 937 | *dec = decrementer_clockevent; |
936 | dec->cpumask = cpumask_of(cpu); | 938 | dec->cpumask = cpumask_of(cpu); |
937 | 939 | ||
938 | printk(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n", | 940 | printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n", |
939 | dec->name, dec->mult, dec->shift, cpu); | 941 | dec->name, dec->mult, dec->shift, cpu); |
940 | 942 | ||
941 | clockevents_register_device(dec); | 943 | clockevents_register_device(dec); |
942 | } | 944 | } |
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index d069ff8a7e03..696626a2e835 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
@@ -60,13 +60,13 @@ | |||
60 | #endif | 60 | #endif |
61 | 61 | ||
62 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) | 62 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) |
63 | int (*__debugger)(struct pt_regs *regs); | 63 | int (*__debugger)(struct pt_regs *regs) __read_mostly; |
64 | int (*__debugger_ipi)(struct pt_regs *regs); | 64 | int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly; |
65 | int (*__debugger_bpt)(struct pt_regs *regs); | 65 | int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly; |
66 | int (*__debugger_sstep)(struct pt_regs *regs); | 66 | int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly; |
67 | int (*__debugger_iabr_match)(struct pt_regs *regs); | 67 | int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly; |
68 | int (*__debugger_dabr_match)(struct pt_regs *regs); | 68 | int (*__debugger_dabr_match)(struct pt_regs *regs) __read_mostly; |
69 | int (*__debugger_fault_handler)(struct pt_regs *regs); | 69 | int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly; |
70 | 70 | ||
71 | EXPORT_SYMBOL(__debugger); | 71 | EXPORT_SYMBOL(__debugger); |
72 | EXPORT_SYMBOL(__debugger_ipi); | 72 | EXPORT_SYMBOL(__debugger_ipi); |
@@ -102,11 +102,11 @@ static inline void pmac_backlight_unblank(void) { } | |||
102 | int die(const char *str, struct pt_regs *regs, long err) | 102 | int die(const char *str, struct pt_regs *regs, long err) |
103 | { | 103 | { |
104 | static struct { | 104 | static struct { |
105 | spinlock_t lock; | 105 | raw_spinlock_t lock; |
106 | u32 lock_owner; | 106 | u32 lock_owner; |
107 | int lock_owner_depth; | 107 | int lock_owner_depth; |
108 | } die = { | 108 | } die = { |
109 | .lock = __SPIN_LOCK_UNLOCKED(die.lock), | 109 | .lock = __RAW_SPIN_LOCK_UNLOCKED(die.lock), |
110 | .lock_owner = -1, | 110 | .lock_owner = -1, |
111 | .lock_owner_depth = 0 | 111 | .lock_owner_depth = 0 |
112 | }; | 112 | }; |
@@ -120,7 +120,7 @@ int die(const char *str, struct pt_regs *regs, long err) | |||
120 | 120 | ||
121 | if (die.lock_owner != raw_smp_processor_id()) { | 121 | if (die.lock_owner != raw_smp_processor_id()) { |
122 | console_verbose(); | 122 | console_verbose(); |
123 | spin_lock_irqsave(&die.lock, flags); | 123 | raw_spin_lock_irqsave(&die.lock, flags); |
124 | die.lock_owner = smp_processor_id(); | 124 | die.lock_owner = smp_processor_id(); |
125 | die.lock_owner_depth = 0; | 125 | die.lock_owner_depth = 0; |
126 | bust_spinlocks(1); | 126 | bust_spinlocks(1); |
@@ -146,6 +146,11 @@ int die(const char *str, struct pt_regs *regs, long err) | |||
146 | #endif | 146 | #endif |
147 | printk("%s\n", ppc_md.name ? ppc_md.name : ""); | 147 | printk("%s\n", ppc_md.name ? ppc_md.name : ""); |
148 | 148 | ||
149 | sysfs_printk_last_file(); | ||
150 | if (notify_die(DIE_OOPS, str, regs, err, 255, | ||
151 | SIGSEGV) == NOTIFY_STOP) | ||
152 | return 1; | ||
153 | |||
149 | print_modules(); | 154 | print_modules(); |
150 | show_regs(regs); | 155 | show_regs(regs); |
151 | } else { | 156 | } else { |
@@ -155,7 +160,7 @@ int die(const char *str, struct pt_regs *regs, long err) | |||
155 | bust_spinlocks(0); | 160 | bust_spinlocks(0); |
156 | die.lock_owner = -1; | 161 | die.lock_owner = -1; |
157 | add_taint(TAINT_DIE); | 162 | add_taint(TAINT_DIE); |
158 | spin_unlock_irqrestore(&die.lock, flags); | 163 | raw_spin_unlock_irqrestore(&die.lock, flags); |
159 | 164 | ||
160 | if (kexec_should_crash(current) || | 165 | if (kexec_should_crash(current) || |
161 | kexec_sr_activated(smp_processor_id())) | 166 | kexec_sr_activated(smp_processor_id())) |
@@ -294,7 +299,7 @@ static inline int check_io_access(struct pt_regs *regs) | |||
294 | return 0; | 299 | return 0; |
295 | } | 300 | } |
296 | 301 | ||
297 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 302 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
298 | /* On 4xx, the reason for the machine check or program exception | 303 | /* On 4xx, the reason for the machine check or program exception |
299 | is in the ESR. */ | 304 | is in the ESR. */ |
300 | #define get_reason(regs) ((regs)->dsisr) | 305 | #define get_reason(regs) ((regs)->dsisr) |
@@ -478,6 +483,8 @@ void machine_check_exception(struct pt_regs *regs) | |||
478 | { | 483 | { |
479 | int recover = 0; | 484 | int recover = 0; |
480 | 485 | ||
486 | __get_cpu_var(irq_stat).mce_exceptions++; | ||
487 | |||
481 | /* See if any machine dependent calls. In theory, we would want | 488 | /* See if any machine dependent calls. In theory, we would want |
482 | * to call the CPU first, and call the ppc_md. one if the CPU | 489 | * to call the CPU first, and call the ppc_md. one if the CPU |
483 | * one returns a positive number. However there is existing code | 490 | * one returns a positive number. However there is existing code |
@@ -960,6 +967,8 @@ void vsx_unavailable_exception(struct pt_regs *regs) | |||
960 | 967 | ||
961 | void performance_monitor_exception(struct pt_regs *regs) | 968 | void performance_monitor_exception(struct pt_regs *regs) |
962 | { | 969 | { |
970 | __get_cpu_var(irq_stat).pmu_irqs++; | ||
971 | |||
963 | perf_irq(regs); | 972 | perf_irq(regs); |
964 | } | 973 | } |
965 | 974 | ||
@@ -1024,10 +1033,69 @@ void SoftwareEmulation(struct pt_regs *regs) | |||
1024 | } | 1033 | } |
1025 | #endif /* CONFIG_8xx */ | 1034 | #endif /* CONFIG_8xx */ |
1026 | 1035 | ||
1027 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | 1036 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
1037 | static void handle_debug(struct pt_regs *regs, unsigned long debug_status) | ||
1038 | { | ||
1039 | int changed = 0; | ||
1040 | /* | ||
1041 | * Determine the cause of the debug event, clear the | ||
1042 | * event flags and send a trap to the handler. | ||
1043 | */ | ||
1044 | if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { | ||
1045 | dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W); | ||
1046 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE | ||
1047 | current->thread.dbcr2 &= ~DBCR2_DAC12MODE; | ||
1048 | #endif | ||
1049 | do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT, | ||
1050 | 5); | ||
1051 | changed |= 0x01; | ||
1052 | } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) { | ||
1053 | dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W); | ||
1054 | do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT, | ||
1055 | 6); | ||
1056 | changed |= 0x01; | ||
1057 | } else if (debug_status & DBSR_IAC1) { | ||
1058 | current->thread.dbcr0 &= ~DBCR0_IAC1; | ||
1059 | dbcr_iac_range(current) &= ~DBCR_IAC12MODE; | ||
1060 | do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT, | ||
1061 | 1); | ||
1062 | changed |= 0x01; | ||
1063 | } else if (debug_status & DBSR_IAC2) { | ||
1064 | current->thread.dbcr0 &= ~DBCR0_IAC2; | ||
1065 | do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT, | ||
1066 | 2); | ||
1067 | changed |= 0x01; | ||
1068 | } else if (debug_status & DBSR_IAC3) { | ||
1069 | current->thread.dbcr0 &= ~DBCR0_IAC3; | ||
1070 | dbcr_iac_range(current) &= ~DBCR_IAC34MODE; | ||
1071 | do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT, | ||
1072 | 3); | ||
1073 | changed |= 0x01; | ||
1074 | } else if (debug_status & DBSR_IAC4) { | ||
1075 | current->thread.dbcr0 &= ~DBCR0_IAC4; | ||
1076 | do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT, | ||
1077 | 4); | ||
1078 | changed |= 0x01; | ||
1079 | } | ||
1080 | /* | ||
1081 | * At the point this routine was called, the MSR(DE) was turned off. | ||
1082 | * Check all other debug flags and see if that bit needs to be turned | ||
1083 | * back on or not. | ||
1084 | */ | ||
1085 | if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1)) | ||
1086 | regs->msr |= MSR_DE; | ||
1087 | else | ||
1088 | /* Make sure the IDM flag is off */ | ||
1089 | current->thread.dbcr0 &= ~DBCR0_IDM; | ||
1090 | |||
1091 | if (changed & 0x01) | ||
1092 | mtspr(SPRN_DBCR0, current->thread.dbcr0); | ||
1093 | } | ||
1028 | 1094 | ||
1029 | void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) | 1095 | void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) |
1030 | { | 1096 | { |
1097 | current->thread.dbsr = debug_status; | ||
1098 | |||
1031 | /* Hack alert: On BookE, Branch Taken stops on the branch itself, while | 1099 | /* Hack alert: On BookE, Branch Taken stops on the branch itself, while |
1032 | * on server, it stops on the target of the branch. In order to simulate | 1100 | * on server, it stops on the target of the branch. In order to simulate |
1033 | * the server behaviour, we thus restart right away with a single step | 1101 | * the server behaviour, we thus restart right away with a single step |
@@ -1071,29 +1139,23 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) | |||
1071 | if (debugger_sstep(regs)) | 1139 | if (debugger_sstep(regs)) |
1072 | return; | 1140 | return; |
1073 | 1141 | ||
1074 | if (user_mode(regs)) | ||
1075 | current->thread.dbcr0 &= ~(DBCR0_IC); | ||
1076 | |||
1077 | _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); | ||
1078 | } else if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { | ||
1079 | regs->msr &= ~MSR_DE; | ||
1080 | |||
1081 | if (user_mode(regs)) { | 1142 | if (user_mode(regs)) { |
1082 | current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W | | 1143 | current->thread.dbcr0 &= ~DBCR0_IC; |
1083 | DBCR0_IDM); | 1144 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
1084 | } else { | 1145 | if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, |
1085 | /* Disable DAC interrupts */ | 1146 | if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0,
1086 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | | 1147 | regs->msr |= MSR_DE; |
1087 | DBSR_DAC1W | DBCR0_IDM)); | 1148 | else |
1088 | 1149 | /* Make sure the IDM bit is off */ | |
1089 | /* Clear the DAC event */ | 1150 | current->thread.dbcr0 &= ~DBCR0_IDM; |
1090 | mtspr(SPRN_DBSR, (DBSR_DAC1R | DBSR_DAC1W)); | 1151 | #endif |
1091 | } | 1152 | } |
1092 | /* Setup and send the trap to the handler */ | 1153 | |
1093 | do_dabr(regs, mfspr(SPRN_DAC1), debug_status); | 1154 | _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); |
1094 | } | 1155 | } else |
1156 | handle_debug(regs, debug_status); | ||
1095 | } | 1157 | } |
1096 | #endif /* CONFIG_4xx || CONFIG_BOOKE */ | 1158 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ |
1097 | 1159 | ||
1098 | #if !defined(CONFIG_TAU_INT) | 1160 | #if !defined(CONFIG_TAU_INT) |
1099 | void TAUException(struct pt_regs *regs) | 1161 | void TAUException(struct pt_regs *regs) |