Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/entry_64.S          3
-rw-r--r--  arch/powerpc/kernel/firmware.c          2
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S    4
-rw-r--r--  arch/powerpc/kernel/irq.c             140
-rw-r--r--  arch/powerpc/kernel/kgdb.c              2
-rw-r--r--  arch/powerpc/kernel/kprobes.c           4
-rw-r--r--  arch/powerpc/kernel/lparcfg.c          10
-rw-r--r--  arch/powerpc/kernel/nvram_64.c          6
-rw-r--r--  arch/powerpc/kernel/pci_of_scan.c       2
-rw-r--r--  arch/powerpc/kernel/pmc.c              10
-rw-r--r--  arch/powerpc/kernel/process.c         116
-rw-r--r--  arch/powerpc/kernel/prom_init.c        81
-rw-r--r--  arch/powerpc/kernel/ptrace.c          516
-rw-r--r--  arch/powerpc/kernel/signal.c            6
-rw-r--r--  arch/powerpc/kernel/signal_32.c        16
-rw-r--r--  arch/powerpc/kernel/time.c             10
-rw-r--r--  arch/powerpc/kernel/traps.c           128
17 files changed, 890 insertions, 166 deletions
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index bdcb557d470a..07109d843787 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
| @@ -791,9 +791,8 @@ _GLOBAL(enter_rtas) | |||
| 791 | 791 | ||
| 792 | li r9,1 | 792 | li r9,1 |
| 793 | rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG) | 793 | rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG) |
| 794 | ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP | 794 | ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI |
| 795 | andc r6,r0,r9 | 795 | andc r6,r0,r9 |
| 796 | ori r6,r6,MSR_RI | ||
| 797 | sync /* disable interrupts so SRR0/1 */ | 796 | sync /* disable interrupts so SRR0/1 */ |
| 798 | mtmsrd r0 /* don't get trashed */ | 797 | mtmsrd r0 /* don't get trashed */ |
| 799 | 798 | ||
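
The entry_64.S hunk folds MSR_RI into the mask that is cleared before entering RTAS, instead of OR-ing MSR_RI back in afterwards, so RTAS now runs with the recoverable-interrupt bit off rather than forced on. A small standalone C model (not part of the patch, using illustrative rather than authoritative bit positions for the MSR flags) shows the net effect of the two computations:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the MSR bits named in the hunk; the real
 * definitions live in arch/powerpc/include/asm/reg.h. */
#define MSR_SF  (1ULL << 63)
#define MSR_FP  (1ULL << 13)
#define MSR_FE0 (1ULL << 11)
#define MSR_FE1 (1ULL << 8)
#define MSR_IR  (1ULL << 5)
#define MSR_DR  (1ULL << 4)
#define MSR_RI  (1ULL << 1)

int main(void)
{
	uint64_t r0   = MSR_SF | MSR_IR | MSR_DR | MSR_FP | MSR_RI;  /* sample MSR */
	uint64_t mask = MSR_SF | MSR_IR | MSR_DR | MSR_FE0 | MSR_FE1 | MSR_FP;

	uint64_t old_r6 = (r0 & ~mask) | MSR_RI;      /* old: andc, then ori MSR_RI */
	uint64_t new_r6 = r0 & ~(mask | MSR_RI);      /* new: MSR_RI is in the mask */

	printf("RI in RTAS MSR before the patch: %d\n", !!(old_r6 & MSR_RI)); /* 1 */
	printf("RI in RTAS MSR after the patch:  %d\n", !!(new_r6 & MSR_RI)); /* 0 */
	return 0;
}
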
diff --git a/arch/powerpc/kernel/firmware.c b/arch/powerpc/kernel/firmware.c
index 1679a70bbcad..6b1f4271eb53 100644
--- a/arch/powerpc/kernel/firmware.c
+++ b/arch/powerpc/kernel/firmware.c
| @@ -17,5 +17,5 @@ | |||
| 17 | 17 | ||
| 18 | #include <asm/firmware.h> | 18 | #include <asm/firmware.h> |
| 19 | 19 | ||
| 20 | unsigned long powerpc_firmware_features; | 20 | unsigned long powerpc_firmware_features __read_mostly; |
| 21 | EXPORT_SYMBOL_GPL(powerpc_firmware_features); | 21 | EXPORT_SYMBOL_GPL(powerpc_firmware_features); |
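
The firmware.c change only adds the __read_mostly annotation, which groups powerpc_firmware_features with other boot-time-written, hot-path-read globals so it does not share a cache line with frequently modified data. The same pattern, shown on a hypothetical variable:

#include <linux/cache.h>	/* provides __read_mostly */

/* Hypothetical flag word: written once during boot, read on hot paths
 * afterwards, so it benefits from the dedicated read-mostly data section. */
unsigned long example_feature_flags __read_mostly;
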
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 7f4bd7f3b6af..25793bb0e782 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
| @@ -214,11 +214,11 @@ skpinv: addi r6,r6,1 /* Increment */ | |||
| 214 | bl 1f /* Find our address */ | 214 | bl 1f /* Find our address */ |
| 215 | 1: mflr r9 | 215 | 1: mflr r9 |
| 216 | rlwimi r7,r9,0,20,31 | 216 | rlwimi r7,r9,0,20,31 |
| 217 | addi r7,r7,24 | 217 | addi r7,r7,(2f - 1b) |
| 218 | mtspr SPRN_SRR0,r7 | 218 | mtspr SPRN_SRR0,r7 |
| 219 | mtspr SPRN_SRR1,r6 | 219 | mtspr SPRN_SRR1,r6 |
| 220 | rfi | 220 | rfi |
| 221 | 221 | 2: | |
| 222 | /* 4. Clear out PIDs & Search info */ | 222 | /* 4. Clear out PIDs & Search info */ |
| 223 | li r6,0 | 223 | li r6,0 |
| 224 | mtspr SPRN_MAS6,r6 | 224 | mtspr SPRN_MAS6,r6 |
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 9040330b0530..64f6f2031c22 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
| @@ -73,8 +73,10 @@ | |||
| 73 | #define CREATE_TRACE_POINTS | 73 | #define CREATE_TRACE_POINTS |
| 74 | #include <asm/trace.h> | 74 | #include <asm/trace.h> |
| 75 | 75 | ||
| 76 | DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); | ||
| 77 | EXPORT_PER_CPU_SYMBOL(irq_stat); | ||
| 78 | |||
| 76 | int __irq_offset_value; | 79 | int __irq_offset_value; |
| 77 | static int ppc_spurious_interrupts; | ||
| 78 | 80 | ||
| 79 | #ifdef CONFIG_PPC32 | 81 | #ifdef CONFIG_PPC32 |
| 80 | EXPORT_SYMBOL(__irq_offset_value); | 82 | EXPORT_SYMBOL(__irq_offset_value); |
| @@ -180,30 +182,64 @@ notrace void raw_local_irq_restore(unsigned long en) | |||
| 180 | EXPORT_SYMBOL(raw_local_irq_restore); | 182 | EXPORT_SYMBOL(raw_local_irq_restore); |
| 181 | #endif /* CONFIG_PPC64 */ | 183 | #endif /* CONFIG_PPC64 */ |
| 182 | 184 | ||
| 185 | static int show_other_interrupts(struct seq_file *p, int prec) | ||
| 186 | { | ||
| 187 | int j; | ||
| 188 | |||
| 189 | #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT) | ||
| 190 | if (tau_initialized) { | ||
| 191 | seq_printf(p, "%*s: ", prec, "TAU"); | ||
| 192 | for_each_online_cpu(j) | ||
| 193 | seq_printf(p, "%10u ", tau_interrupts(j)); | ||
| 194 | seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); | ||
| 195 | } | ||
| 196 | #endif /* CONFIG_PPC32 && CONFIG_TAU_INT */ | ||
| 197 | |||
| 198 | seq_printf(p, "%*s: ", prec, "LOC"); | ||
| 199 | for_each_online_cpu(j) | ||
| 200 | seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs); | ||
| 201 | seq_printf(p, " Local timer interrupts\n"); | ||
| 202 | |||
| 203 | seq_printf(p, "%*s: ", prec, "SPU"); | ||
| 204 | for_each_online_cpu(j) | ||
| 205 | seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs); | ||
| 206 | seq_printf(p, " Spurious interrupts\n"); | ||
| 207 | |||
| 208 | seq_printf(p, "%*s: ", prec, "CNT"); | ||
| 209 | for_each_online_cpu(j) | ||
| 210 | seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs); | ||
| 211 | seq_printf(p, " Performance monitoring interrupts\n"); | ||
| 212 | |||
| 213 | seq_printf(p, "%*s: ", prec, "MCE"); | ||
| 214 | for_each_online_cpu(j) | ||
| 215 | seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions); | ||
| 216 | seq_printf(p, " Machine check exceptions\n"); | ||
| 217 | |||
| 218 | return 0; | ||
| 219 | } | ||
| 220 | |||
| 183 | int show_interrupts(struct seq_file *p, void *v) | 221 | int show_interrupts(struct seq_file *p, void *v) |
| 184 | { | 222 | { |
| 185 | int i = *(loff_t *)v, j; | 223 | unsigned long flags, any_count = 0; |
| 224 | int i = *(loff_t *) v, j, prec; | ||
| 186 | struct irqaction *action; | 225 | struct irqaction *action; |
| 187 | struct irq_desc *desc; | 226 | struct irq_desc *desc; |
| 188 | unsigned long flags; | ||
| 189 | 227 | ||
| 228 | if (i > nr_irqs) | ||
| 229 | return 0; | ||
| 230 | |||
| 231 | for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec) | ||
| 232 | j *= 10; | ||
| 233 | |||
| 234 | if (i == nr_irqs) | ||
| 235 | return show_other_interrupts(p, prec); | ||
| 236 | |||
| 237 | /* print header */ | ||
| 190 | if (i == 0) { | 238 | if (i == 0) { |
| 191 | seq_puts(p, " "); | 239 | seq_printf(p, "%*s", prec + 8, ""); |
| 192 | for_each_online_cpu(j) | 240 | for_each_online_cpu(j) |
| 193 | seq_printf(p, "CPU%d ", j); | 241 | seq_printf(p, "CPU%-8d", j); |
| 194 | seq_putc(p, '\n'); | 242 | seq_putc(p, '\n'); |
| 195 | } else if (i == nr_irqs) { | ||
| 196 | #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT) | ||
| 197 | if (tau_initialized){ | ||
| 198 | seq_puts(p, "TAU: "); | ||
| 199 | for_each_online_cpu(j) | ||
| 200 | seq_printf(p, "%10u ", tau_interrupts(j)); | ||
| 201 | seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); | ||
| 202 | } | ||
| 203 | #endif /* CONFIG_PPC32 && CONFIG_TAU_INT*/ | ||
| 204 | seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts); | ||
| 205 | |||
| 206 | return 0; | ||
| 207 | } | 243 | } |
| 208 | 244 | ||
| 209 | desc = irq_to_desc(i); | 245 | desc = irq_to_desc(i); |
| @@ -211,37 +247,48 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 211 | return 0; | 247 | return 0; |
| 212 | 248 | ||
| 213 | raw_spin_lock_irqsave(&desc->lock, flags); | 249 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 214 | 250 | for_each_online_cpu(j) | |
| 251 | any_count |= kstat_irqs_cpu(i, j); | ||
| 215 | action = desc->action; | 252 | action = desc->action; |
| 216 | if (!action || !action->handler) | 253 | if (!action && !any_count) |
| 217 | goto skip; | 254 | goto out; |
| 218 | 255 | ||
| 219 | seq_printf(p, "%3d: ", i); | 256 | seq_printf(p, "%*d: ", prec, i); |
| 220 | #ifdef CONFIG_SMP | ||
| 221 | for_each_online_cpu(j) | 257 | for_each_online_cpu(j) |
| 222 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | 258 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); |
| 223 | #else | ||
| 224 | seq_printf(p, "%10u ", kstat_irqs(i)); | ||
| 225 | #endif /* CONFIG_SMP */ | ||
| 226 | 259 | ||
| 227 | if (desc->chip) | 260 | if (desc->chip) |
| 228 | seq_printf(p, " %s ", desc->chip->name); | 261 | seq_printf(p, " %-16s", desc->chip->name); |
| 229 | else | 262 | else |
| 230 | seq_puts(p, " None "); | 263 | seq_printf(p, " %-16s", "None"); |
| 264 | seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge"); | ||
| 231 | 265 | ||
| 232 | seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge "); | 266 | if (action) { |
| 233 | seq_printf(p, " %s", action->name); | 267 | seq_printf(p, " %s", action->name); |
| 268 | while ((action = action->next) != NULL) | ||
| 269 | seq_printf(p, ", %s", action->name); | ||
| 270 | } | ||
| 234 | 271 | ||
| 235 | for (action = action->next; action; action = action->next) | ||
| 236 | seq_printf(p, ", %s", action->name); | ||
| 237 | seq_putc(p, '\n'); | 272 | seq_putc(p, '\n'); |
| 238 | 273 | out: | |
| 239 | skip: | ||
| 240 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 274 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 241 | |||
| 242 | return 0; | 275 | return 0; |
| 243 | } | 276 | } |
| 244 | 277 | ||
| 278 | /* | ||
| 279 | * /proc/stat helpers | ||
| 280 | */ | ||
| 281 | u64 arch_irq_stat_cpu(unsigned int cpu) | ||
| 282 | { | ||
| 283 | u64 sum = per_cpu(irq_stat, cpu).timer_irqs; | ||
| 284 | |||
| 285 | sum += per_cpu(irq_stat, cpu).pmu_irqs; | ||
| 286 | sum += per_cpu(irq_stat, cpu).mce_exceptions; | ||
| 287 | sum += per_cpu(irq_stat, cpu).spurious_irqs; | ||
| 288 | |||
| 289 | return sum; | ||
| 290 | } | ||
| 291 | |||
| 245 | #ifdef CONFIG_HOTPLUG_CPU | 292 | #ifdef CONFIG_HOTPLUG_CPU |
| 246 | void fixup_irqs(cpumask_t map) | 293 | void fixup_irqs(cpumask_t map) |
| 247 | { | 294 | { |
| @@ -353,8 +400,7 @@ void do_IRQ(struct pt_regs *regs) | |||
| 353 | if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) | 400 | if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) |
| 354 | handle_one_irq(irq); | 401 | handle_one_irq(irq); |
| 355 | else if (irq != NO_IRQ_IGNORE) | 402 | else if (irq != NO_IRQ_IGNORE) |
| 356 | /* That's not SMP safe ... but who cares ? */ | 403 | __get_cpu_var(irq_stat).spurious_irqs++; |
| 357 | ppc_spurious_interrupts++; | ||
| 358 | 404 | ||
| 359 | irq_exit(); | 405 | irq_exit(); |
| 360 | set_irq_regs(old_regs); | 406 | set_irq_regs(old_regs); |
| @@ -474,7 +520,7 @@ void do_softirq(void) | |||
| 474 | */ | 520 | */ |
| 475 | 521 | ||
| 476 | static LIST_HEAD(irq_hosts); | 522 | static LIST_HEAD(irq_hosts); |
| 477 | static DEFINE_SPINLOCK(irq_big_lock); | 523 | static DEFINE_RAW_SPINLOCK(irq_big_lock); |
| 478 | static unsigned int revmap_trees_allocated; | 524 | static unsigned int revmap_trees_allocated; |
| 479 | static DEFINE_MUTEX(revmap_trees_mutex); | 525 | static DEFINE_MUTEX(revmap_trees_mutex); |
| 480 | struct irq_map_entry irq_map[NR_IRQS]; | 526 | struct irq_map_entry irq_map[NR_IRQS]; |
| @@ -520,14 +566,14 @@ struct irq_host *irq_alloc_host(struct device_node *of_node, | |||
| 520 | if (host->ops->match == NULL) | 566 | if (host->ops->match == NULL) |
| 521 | host->ops->match = default_irq_host_match; | 567 | host->ops->match = default_irq_host_match; |
| 522 | 568 | ||
| 523 | spin_lock_irqsave(&irq_big_lock, flags); | 569 | raw_spin_lock_irqsave(&irq_big_lock, flags); |
| 524 | 570 | ||
| 525 | /* If it's a legacy controller, check for duplicates and | 571 | /* If it's a legacy controller, check for duplicates and |
| 526 | * mark it as allocated (we use irq 0 host pointer for that | 572 | * mark it as allocated (we use irq 0 host pointer for that |
| 527 | */ | 573 | */ |
| 528 | if (revmap_type == IRQ_HOST_MAP_LEGACY) { | 574 | if (revmap_type == IRQ_HOST_MAP_LEGACY) { |
| 529 | if (irq_map[0].host != NULL) { | 575 | if (irq_map[0].host != NULL) { |
| 530 | spin_unlock_irqrestore(&irq_big_lock, flags); | 576 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); |
| 531 | /* If we are early boot, we can't free the structure, | 577 | /* If we are early boot, we can't free the structure, |
| 532 | * too bad... | 578 | * too bad... |
| 533 | * this will be fixed once slab is made available early | 579 | * this will be fixed once slab is made available early |
| @@ -541,7 +587,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node, | |||
| 541 | } | 587 | } |
| 542 | 588 | ||
| 543 | list_add(&host->link, &irq_hosts); | 589 | list_add(&host->link, &irq_hosts); |
| 544 | spin_unlock_irqrestore(&irq_big_lock, flags); | 590 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); |
| 545 | 591 | ||
| 546 | /* Additional setups per revmap type */ | 592 | /* Additional setups per revmap type */ |
| 547 | switch(revmap_type) { | 593 | switch(revmap_type) { |
| @@ -592,13 +638,13 @@ struct irq_host *irq_find_host(struct device_node *node) | |||
| 592 | * the absence of a device node. This isn't a problem so far | 638 | * the absence of a device node. This isn't a problem so far |
| 593 | * yet though... | 639 | * yet though... |
| 594 | */ | 640 | */ |
| 595 | spin_lock_irqsave(&irq_big_lock, flags); | 641 | raw_spin_lock_irqsave(&irq_big_lock, flags); |
| 596 | list_for_each_entry(h, &irq_hosts, link) | 642 | list_for_each_entry(h, &irq_hosts, link) |
| 597 | if (h->ops->match(h, node)) { | 643 | if (h->ops->match(h, node)) { |
| 598 | found = h; | 644 | found = h; |
| 599 | break; | 645 | break; |
| 600 | } | 646 | } |
| 601 | spin_unlock_irqrestore(&irq_big_lock, flags); | 647 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); |
| 602 | return found; | 648 | return found; |
| 603 | } | 649 | } |
| 604 | EXPORT_SYMBOL_GPL(irq_find_host); | 650 | EXPORT_SYMBOL_GPL(irq_find_host); |
| @@ -967,7 +1013,7 @@ unsigned int irq_alloc_virt(struct irq_host *host, | |||
| 967 | if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS)) | 1013 | if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS)) |
| 968 | return NO_IRQ; | 1014 | return NO_IRQ; |
| 969 | 1015 | ||
| 970 | spin_lock_irqsave(&irq_big_lock, flags); | 1016 | raw_spin_lock_irqsave(&irq_big_lock, flags); |
| 971 | 1017 | ||
| 972 | /* Use hint for 1 interrupt if any */ | 1018 | /* Use hint for 1 interrupt if any */ |
| 973 | if (count == 1 && hint >= NUM_ISA_INTERRUPTS && | 1019 | if (count == 1 && hint >= NUM_ISA_INTERRUPTS && |
| @@ -991,7 +1037,7 @@ unsigned int irq_alloc_virt(struct irq_host *host, | |||
| 991 | } | 1037 | } |
| 992 | } | 1038 | } |
| 993 | if (found == NO_IRQ) { | 1039 | if (found == NO_IRQ) { |
| 994 | spin_unlock_irqrestore(&irq_big_lock, flags); | 1040 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); |
| 995 | return NO_IRQ; | 1041 | return NO_IRQ; |
| 996 | } | 1042 | } |
| 997 | hint_found: | 1043 | hint_found: |
| @@ -1000,7 +1046,7 @@ unsigned int irq_alloc_virt(struct irq_host *host, | |||
| 1000 | smp_wmb(); | 1046 | smp_wmb(); |
| 1001 | irq_map[i].host = host; | 1047 | irq_map[i].host = host; |
| 1002 | } | 1048 | } |
| 1003 | spin_unlock_irqrestore(&irq_big_lock, flags); | 1049 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); |
| 1004 | return found; | 1050 | return found; |
| 1005 | } | 1051 | } |
| 1006 | 1052 | ||
| @@ -1012,7 +1058,7 @@ void irq_free_virt(unsigned int virq, unsigned int count) | |||
| 1012 | WARN_ON (virq < NUM_ISA_INTERRUPTS); | 1058 | WARN_ON (virq < NUM_ISA_INTERRUPTS); |
| 1013 | WARN_ON (count == 0 || (virq + count) > irq_virq_count); | 1059 | WARN_ON (count == 0 || (virq + count) > irq_virq_count); |
| 1014 | 1060 | ||
| 1015 | spin_lock_irqsave(&irq_big_lock, flags); | 1061 | raw_spin_lock_irqsave(&irq_big_lock, flags); |
| 1016 | for (i = virq; i < (virq + count); i++) { | 1062 | for (i = virq; i < (virq + count); i++) { |
| 1017 | struct irq_host *host; | 1063 | struct irq_host *host; |
| 1018 | 1064 | ||
| @@ -1025,7 +1071,7 @@ void irq_free_virt(unsigned int virq, unsigned int count) | |||
| 1025 | smp_wmb(); | 1071 | smp_wmb(); |
| 1026 | irq_map[i].host = NULL; | 1072 | irq_map[i].host = NULL; |
| 1027 | } | 1073 | } |
| 1028 | spin_unlock_irqrestore(&irq_big_lock, flags); | 1074 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); |
| 1029 | } | 1075 | } |
| 1030 | 1076 | ||
| 1031 | int arch_early_irq_init(void) | 1077 | int arch_early_irq_init(void) |
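
The irq.c changes retire the racy global ppc_spurious_interrupts counter in favour of per-CPU counters in irq_stat, which the rewritten show_interrupts() and the new arch_irq_stat_cpu() then report per CPU. A small userspace model (not kernel code; it assumes only the four fields visible in this diff) of how those counters accumulate and are summed:

#include <stdio.h>

#define NR_CPUS 4

/* Userspace stand-in for the per-CPU irq_cpustat_t introduced here. */
struct irq_cpustat {
	unsigned int timer_irqs;
	unsigned int spurious_irqs;
	unsigned int pmu_irqs;
	unsigned int mce_exceptions;
};

static struct irq_cpustat irq_stat[NR_CPUS];

/* Mirrors arch_irq_stat_cpu(): architecture-level interrupt total per CPU,
 * as consumed by /proc/stat. */
static unsigned long long arch_irq_stat_cpu(unsigned int cpu)
{
	unsigned long long sum = irq_stat[cpu].timer_irqs;

	sum += irq_stat[cpu].pmu_irqs;
	sum += irq_stat[cpu].mce_exceptions;
	sum += irq_stat[cpu].spurious_irqs;
	return sum;
}

int main(void)
{
	irq_stat[0].timer_irqs    = 1000;	/* e.g. decrementer ticks on CPU 0 */
	irq_stat[0].spurious_irqs = 2;		/* was the single global "BAD" count */

	printf("CPU0 arch irqs: %llu\n", arch_irq_stat_cpu(0));	/* prints 1002 */
	return 0;
}
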
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index b6bd1eaa1c24..41bada0298c8 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
| @@ -333,7 +333,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code, | |||
| 333 | atomic_set(&kgdb_cpu_doing_single_step, -1); | 333 | atomic_set(&kgdb_cpu_doing_single_step, -1); |
| 334 | /* set the trace bit if we're stepping */ | 334 | /* set the trace bit if we're stepping */ |
| 335 | if (remcom_in_buffer[0] == 's') { | 335 | if (remcom_in_buffer[0] == 's') { |
| 336 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | 336 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
| 337 | mtspr(SPRN_DBCR0, | 337 | mtspr(SPRN_DBCR0, |
| 338 | mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); | 338 | mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); |
| 339 | linux_regs->msr |= MSR_DE; | 339 | linux_regs->msr |= MSR_DE; |
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index c9329786073b..3fd1af902112 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
| @@ -36,7 +36,7 @@ | |||
| 36 | #include <asm/uaccess.h> | 36 | #include <asm/uaccess.h> |
| 37 | #include <asm/system.h> | 37 | #include <asm/system.h> |
| 38 | 38 | ||
| 39 | #ifdef CONFIG_BOOKE | 39 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
| 40 | #define MSR_SINGLESTEP (MSR_DE) | 40 | #define MSR_SINGLESTEP (MSR_DE) |
| 41 | #else | 41 | #else |
| 42 | #define MSR_SINGLESTEP (MSR_SE) | 42 | #define MSR_SINGLESTEP (MSR_SE) |
| @@ -110,7 +110,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) | |||
| 110 | * like Decrementer or External Interrupt */ | 110 | * like Decrementer or External Interrupt */ |
| 111 | regs->msr &= ~MSR_EE; | 111 | regs->msr &= ~MSR_EE; |
| 112 | regs->msr |= MSR_SINGLESTEP; | 112 | regs->msr |= MSR_SINGLESTEP; |
| 113 | #ifdef CONFIG_BOOKE | 113 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
| 114 | regs->msr &= ~MSR_CE; | 114 | regs->msr &= ~MSR_CE; |
| 115 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); | 115 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); |
| 116 | #endif | 116 | #endif |
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 79a00bb9c64c..d09d1c615150 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
| @@ -359,7 +359,7 @@ static void parse_system_parameter_string(struct seq_file *m) | |||
| 359 | 359 | ||
| 360 | unsigned char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL); | 360 | unsigned char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL); |
| 361 | if (!local_buffer) { | 361 | if (!local_buffer) { |
| 362 | printk(KERN_ERR "%s %s kmalloc failure at line %d \n", | 362 | printk(KERN_ERR "%s %s kmalloc failure at line %d\n", |
| 363 | __FILE__, __func__, __LINE__); | 363 | __FILE__, __func__, __LINE__); |
| 364 | return; | 364 | return; |
| 365 | } | 365 | } |
| @@ -383,13 +383,13 @@ static void parse_system_parameter_string(struct seq_file *m) | |||
| 383 | int idx, w_idx; | 383 | int idx, w_idx; |
| 384 | char *workbuffer = kzalloc(SPLPAR_MAXLENGTH, GFP_KERNEL); | 384 | char *workbuffer = kzalloc(SPLPAR_MAXLENGTH, GFP_KERNEL); |
| 385 | if (!workbuffer) { | 385 | if (!workbuffer) { |
| 386 | printk(KERN_ERR "%s %s kmalloc failure at line %d \n", | 386 | printk(KERN_ERR "%s %s kmalloc failure at line %d\n", |
| 387 | __FILE__, __func__, __LINE__); | 387 | __FILE__, __func__, __LINE__); |
| 388 | kfree(local_buffer); | 388 | kfree(local_buffer); |
| 389 | return; | 389 | return; |
| 390 | } | 390 | } |
| 391 | #ifdef LPARCFG_DEBUG | 391 | #ifdef LPARCFG_DEBUG |
| 392 | printk(KERN_INFO "success calling get-system-parameter \n"); | 392 | printk(KERN_INFO "success calling get-system-parameter\n"); |
| 393 | #endif | 393 | #endif |
| 394 | splpar_strlen = local_buffer[0] * 256 + local_buffer[1]; | 394 | splpar_strlen = local_buffer[0] * 256 + local_buffer[1]; |
| 395 | local_buffer += 2; /* step over strlen value */ | 395 | local_buffer += 2; /* step over strlen value */ |
| @@ -440,7 +440,7 @@ static int lparcfg_count_active_processors(void) | |||
| 440 | 440 | ||
| 441 | while ((cpus_dn = of_find_node_by_type(cpus_dn, "cpu"))) { | 441 | while ((cpus_dn = of_find_node_by_type(cpus_dn, "cpu"))) { |
| 442 | #ifdef LPARCFG_DEBUG | 442 | #ifdef LPARCFG_DEBUG |
| 443 | printk(KERN_ERR "cpus_dn %p \n", cpus_dn); | 443 | printk(KERN_ERR "cpus_dn %p\n", cpus_dn); |
| 444 | #endif | 444 | #endif |
| 445 | count++; | 445 | count++; |
| 446 | } | 446 | } |
| @@ -725,7 +725,7 @@ static int lparcfg_data(struct seq_file *m, void *v) | |||
| 725 | const unsigned int *lp_index_ptr; | 725 | const unsigned int *lp_index_ptr; |
| 726 | unsigned int lp_index = 0; | 726 | unsigned int lp_index = 0; |
| 727 | 727 | ||
| 728 | seq_printf(m, "%s %s \n", MODULE_NAME, MODULE_VERS); | 728 | seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS); |
| 729 | 729 | ||
| 730 | rootdn = of_find_node_by_path("/"); | 730 | rootdn = of_find_node_by_path("/"); |
| 731 | if (rootdn) { | 731 | if (rootdn) { |
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index ad461e735aec..9cf197f01e94 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
| @@ -338,8 +338,8 @@ static int __init nvram_create_os_partition(void) | |||
| 338 | 338 | ||
| 339 | rc = nvram_write_header(new_part); | 339 | rc = nvram_write_header(new_part); |
| 340 | if (rc <= 0) { | 340 | if (rc <= 0) { |
| 341 | printk(KERN_ERR "nvram_create_os_partition: nvram_write_header \ | 341 | printk(KERN_ERR "nvram_create_os_partition: nvram_write_header " |
| 342 | failed (%d)\n", rc); | 342 | "failed (%d)\n", rc); |
| 343 | return rc; | 343 | return rc; |
| 344 | } | 344 | } |
| 345 | 345 | ||
| @@ -349,7 +349,7 @@ static int __init nvram_create_os_partition(void) | |||
| 349 | rc = ppc_md.nvram_write((char *)&seq_init, sizeof(seq_init), &tmp_index); | 349 | rc = ppc_md.nvram_write((char *)&seq_init, sizeof(seq_init), &tmp_index); |
| 350 | if (rc <= 0) { | 350 | if (rc <= 0) { |
| 351 | printk(KERN_ERR "nvram_create_os_partition: nvram_write " | 351 | printk(KERN_ERR "nvram_create_os_partition: nvram_write " |
| 352 | "failed (%d)\n", rc); | 352 | "failed (%d)\n", rc); |
| 353 | return rc; | 353 | return rc; |
| 354 | } | 354 | } |
| 355 | 355 | ||
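
The nvram_64.c hunk replaces a backslash-continued string literal with two adjacent literals. The difference matters: a backslash-newline inside a string keeps the next line's leading indentation as part of the message, while adjacent literals concatenate with no stray whitespace. A standalone illustration (the message text here is made up):

#include <stdio.h>

int main(void)
{
	/* Backslash continuation: the tabs indenting the second source line
	 * become part of the string itself. */
	const char *continued = "nvram_write_header \
		failed";

	/* Adjacent string literals are concatenated cleanly at compile time. */
	const char *adjacent = "nvram_write_header "
		"failed";

	printf("[%s]\n", continued);	/* [nvram_write_header <tab><tab>failed] */
	printf("[%s]\n", adjacent);	/* [nvram_write_header failed] */
	return 0;
}
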
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 4aa17401657b..cd11d5ca80df 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
| @@ -304,7 +304,7 @@ static void __devinit __of_scan_bus(struct device_node *node, | |||
| 304 | int reglen, devfn; | 304 | int reglen, devfn; |
| 305 | struct pci_dev *dev; | 305 | struct pci_dev *dev; |
| 306 | 306 | ||
| 307 | pr_debug("of_scan_bus(%s) bus no %d... \n", | 307 | pr_debug("of_scan_bus(%s) bus no %d...\n", |
| 308 | node->full_name, bus->number); | 308 | node->full_name, bus->number); |
| 309 | 309 | ||
| 310 | /* Scan direct children */ | 310 | /* Scan direct children */ |
diff --git a/arch/powerpc/kernel/pmc.c b/arch/powerpc/kernel/pmc.c
index 0516e2d3e02e..461499b43cff 100644
--- a/arch/powerpc/kernel/pmc.c
+++ b/arch/powerpc/kernel/pmc.c
| @@ -37,7 +37,7 @@ static void dummy_perf(struct pt_regs *regs) | |||
| 37 | } | 37 | } |
| 38 | 38 | ||
| 39 | 39 | ||
| 40 | static DEFINE_SPINLOCK(pmc_owner_lock); | 40 | static DEFINE_RAW_SPINLOCK(pmc_owner_lock); |
| 41 | static void *pmc_owner_caller; /* mostly for debugging */ | 41 | static void *pmc_owner_caller; /* mostly for debugging */ |
| 42 | perf_irq_t perf_irq = dummy_perf; | 42 | perf_irq_t perf_irq = dummy_perf; |
| 43 | 43 | ||
| @@ -45,7 +45,7 @@ int reserve_pmc_hardware(perf_irq_t new_perf_irq) | |||
| 45 | { | 45 | { |
| 46 | int err = 0; | 46 | int err = 0; |
| 47 | 47 | ||
| 48 | spin_lock(&pmc_owner_lock); | 48 | raw_spin_lock(&pmc_owner_lock); |
| 49 | 49 | ||
| 50 | if (pmc_owner_caller) { | 50 | if (pmc_owner_caller) { |
| 51 | printk(KERN_WARNING "reserve_pmc_hardware: " | 51 | printk(KERN_WARNING "reserve_pmc_hardware: " |
| @@ -59,21 +59,21 @@ int reserve_pmc_hardware(perf_irq_t new_perf_irq) | |||
| 59 | perf_irq = new_perf_irq ? new_perf_irq : dummy_perf; | 59 | perf_irq = new_perf_irq ? new_perf_irq : dummy_perf; |
| 60 | 60 | ||
| 61 | out: | 61 | out: |
| 62 | spin_unlock(&pmc_owner_lock); | 62 | raw_spin_unlock(&pmc_owner_lock); |
| 63 | return err; | 63 | return err; |
| 64 | } | 64 | } |
| 65 | EXPORT_SYMBOL_GPL(reserve_pmc_hardware); | 65 | EXPORT_SYMBOL_GPL(reserve_pmc_hardware); |
| 66 | 66 | ||
| 67 | void release_pmc_hardware(void) | 67 | void release_pmc_hardware(void) |
| 68 | { | 68 | { |
| 69 | spin_lock(&pmc_owner_lock); | 69 | raw_spin_lock(&pmc_owner_lock); |
| 70 | 70 | ||
| 71 | WARN_ON(! pmc_owner_caller); | 71 | WARN_ON(! pmc_owner_caller); |
| 72 | 72 | ||
| 73 | pmc_owner_caller = NULL; | 73 | pmc_owner_caller = NULL; |
| 74 | perf_irq = dummy_perf; | 74 | perf_irq = dummy_perf; |
| 75 | 75 | ||
| 76 | spin_unlock(&pmc_owner_lock); | 76 | raw_spin_unlock(&pmc_owner_lock); |
| 77 | } | 77 | } |
| 78 | EXPORT_SYMBOL_GPL(release_pmc_hardware); | 78 | EXPORT_SYMBOL_GPL(release_pmc_hardware); |
| 79 | 79 | ||
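
pmc.c (like irq_big_lock in irq.c above) converts a plain spinlock to a raw spinlock. The distinction only bites on preempt-RT style kernels, where ordinary spinlocks may sleep; a raw spinlock keeps the classic always-spinning behaviour that a low-level lock taken from PMU/interrupt context needs. A kernel-style sketch of the same pattern, using hypothetical names rather than the real pmc_owner_lock code:

#include <linux/spinlock.h>
#include <linux/errno.h>

/* Hypothetical single-owner resource guarded the same way as the PMC hardware. */
static DEFINE_RAW_SPINLOCK(example_owner_lock);
static void *example_owner;

int reserve_example(void *caller)
{
	int err = 0;

	raw_spin_lock(&example_owner_lock);	/* never sleeps, even on RT */
	if (example_owner)
		err = -EBUSY;
	else
		example_owner = caller;
	raw_spin_unlock(&example_owner_lock);

	return err;
}
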
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 7b816daf3eba..e4d71ced97ef 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
| @@ -245,6 +245,24 @@ void discard_lazy_cpu_state(void) | |||
| 245 | } | 245 | } |
| 246 | #endif /* CONFIG_SMP */ | 246 | #endif /* CONFIG_SMP */ |
| 247 | 247 | ||
| 248 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
| 249 | void do_send_trap(struct pt_regs *regs, unsigned long address, | ||
| 250 | unsigned long error_code, int signal_code, int breakpt) | ||
| 251 | { | ||
| 252 | siginfo_t info; | ||
| 253 | |||
| 254 | if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, | ||
| 255 | 11, SIGSEGV) == NOTIFY_STOP) | ||
| 256 | return; | ||
| 257 | |||
| 258 | /* Deliver the signal to userspace */ | ||
| 259 | info.si_signo = SIGTRAP; | ||
| 260 | info.si_errno = breakpt; /* breakpoint or watchpoint id */ | ||
| 261 | info.si_code = signal_code; | ||
| 262 | info.si_addr = (void __user *)address; | ||
| 263 | force_sig_info(SIGTRAP, &info, current); | ||
| 264 | } | ||
| 265 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | ||
| 248 | void do_dabr(struct pt_regs *regs, unsigned long address, | 266 | void do_dabr(struct pt_regs *regs, unsigned long address, |
| 249 | unsigned long error_code) | 267 | unsigned long error_code) |
| 250 | { | 268 | { |
| @@ -257,12 +275,6 @@ void do_dabr(struct pt_regs *regs, unsigned long address, | |||
| 257 | if (debugger_dabr_match(regs)) | 275 | if (debugger_dabr_match(regs)) |
| 258 | return; | 276 | return; |
| 259 | 277 | ||
| 260 | /* Clear the DAC and struct entries. One shot trigger */ | ||
| 261 | #if defined(CONFIG_BOOKE) | ||
| 262 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | DBSR_DAC1W | ||
| 263 | | DBCR0_IDM)); | ||
| 264 | #endif | ||
| 265 | |||
| 266 | /* Clear the DABR */ | 278 | /* Clear the DABR */ |
| 267 | set_dabr(0); | 279 | set_dabr(0); |
| 268 | 280 | ||
| @@ -273,9 +285,82 @@ void do_dabr(struct pt_regs *regs, unsigned long address, | |||
| 273 | info.si_addr = (void __user *)address; | 285 | info.si_addr = (void __user *)address; |
| 274 | force_sig_info(SIGTRAP, &info, current); | 286 | force_sig_info(SIGTRAP, &info, current); |
| 275 | } | 287 | } |
| 288 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | ||
| 276 | 289 | ||
| 277 | static DEFINE_PER_CPU(unsigned long, current_dabr); | 290 | static DEFINE_PER_CPU(unsigned long, current_dabr); |
| 278 | 291 | ||
| 292 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
| 293 | /* | ||
| 294 | * Set the debug registers back to their default "safe" values. | ||
| 295 | */ | ||
| 296 | static void set_debug_reg_defaults(struct thread_struct *thread) | ||
| 297 | { | ||
| 298 | thread->iac1 = thread->iac2 = 0; | ||
| 299 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | ||
| 300 | thread->iac3 = thread->iac4 = 0; | ||
| 301 | #endif | ||
| 302 | thread->dac1 = thread->dac2 = 0; | ||
| 303 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | ||
| 304 | thread->dvc1 = thread->dvc2 = 0; | ||
| 305 | #endif | ||
| 306 | thread->dbcr0 = 0; | ||
| 307 | #ifdef CONFIG_BOOKE | ||
| 308 | /* | ||
| 309 | * Force User/Supervisor bits to b11 (user-only MSR[PR]=1) | ||
| 310 | */ | ||
| 311 | thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | \ | ||
| 312 | DBCR1_IAC3US | DBCR1_IAC4US; | ||
| 313 | /* | ||
| 314 | * Force Data Address Compare User/Supervisor bits to be User-only | ||
| 315 | * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0. | ||
| 316 | */ | ||
| 317 | thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US; | ||
| 318 | #else | ||
| 319 | thread->dbcr1 = 0; | ||
| 320 | #endif | ||
| 321 | } | ||
| 322 | |||
| 323 | static void prime_debug_regs(struct thread_struct *thread) | ||
| 324 | { | ||
| 325 | mtspr(SPRN_IAC1, thread->iac1); | ||
| 326 | mtspr(SPRN_IAC2, thread->iac2); | ||
| 327 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | ||
| 328 | mtspr(SPRN_IAC3, thread->iac3); | ||
| 329 | mtspr(SPRN_IAC4, thread->iac4); | ||
| 330 | #endif | ||
| 331 | mtspr(SPRN_DAC1, thread->dac1); | ||
| 332 | mtspr(SPRN_DAC2, thread->dac2); | ||
| 333 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | ||
| 334 | mtspr(SPRN_DVC1, thread->dvc1); | ||
| 335 | mtspr(SPRN_DVC2, thread->dvc2); | ||
| 336 | #endif | ||
| 337 | mtspr(SPRN_DBCR0, thread->dbcr0); | ||
| 338 | mtspr(SPRN_DBCR1, thread->dbcr1); | ||
| 339 | #ifdef CONFIG_BOOKE | ||
| 340 | mtspr(SPRN_DBCR2, thread->dbcr2); | ||
| 341 | #endif | ||
| 342 | } | ||
| 343 | /* | ||
| 344 | * Unless neither the old or new thread are making use of the | ||
| 345 | * debug registers, set the debug registers from the values | ||
| 346 | * stored in the new thread. | ||
| 347 | */ | ||
| 348 | static void switch_booke_debug_regs(struct thread_struct *new_thread) | ||
| 349 | { | ||
| 350 | if ((current->thread.dbcr0 & DBCR0_IDM) | ||
| 351 | || (new_thread->dbcr0 & DBCR0_IDM)) | ||
| 352 | prime_debug_regs(new_thread); | ||
| 353 | } | ||
| 354 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | ||
| 355 | static void set_debug_reg_defaults(struct thread_struct *thread) | ||
| 356 | { | ||
| 357 | if (thread->dabr) { | ||
| 358 | thread->dabr = 0; | ||
| 359 | set_dabr(0); | ||
| 360 | } | ||
| 361 | } | ||
| 362 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | ||
| 363 | |||
| 279 | int set_dabr(unsigned long dabr) | 364 | int set_dabr(unsigned long dabr) |
| 280 | { | 365 | { |
| 281 | __get_cpu_var(current_dabr) = dabr; | 366 | __get_cpu_var(current_dabr) = dabr; |
| @@ -284,7 +369,7 @@ int set_dabr(unsigned long dabr) | |||
| 284 | return ppc_md.set_dabr(dabr); | 369 | return ppc_md.set_dabr(dabr); |
| 285 | 370 | ||
| 286 | /* XXX should we have a CPU_FTR_HAS_DABR ? */ | 371 | /* XXX should we have a CPU_FTR_HAS_DABR ? */ |
| 287 | #if defined(CONFIG_BOOKE) | 372 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
| 288 | mtspr(SPRN_DAC1, dabr); | 373 | mtspr(SPRN_DAC1, dabr); |
| 289 | #elif defined(CONFIG_PPC_BOOK3S) | 374 | #elif defined(CONFIG_PPC_BOOK3S) |
| 290 | mtspr(SPRN_DABR, dabr); | 375 | mtspr(SPRN_DABR, dabr); |
| @@ -371,10 +456,8 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
| 371 | 456 | ||
| 372 | #endif /* CONFIG_SMP */ | 457 | #endif /* CONFIG_SMP */ |
| 373 | 458 | ||
| 374 | #if defined(CONFIG_BOOKE) | 459 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
| 375 | /* If new thread DAC (HW breakpoint) is the same then leave it */ | 460 | switch_booke_debug_regs(&new->thread); |
| 376 | if (new->thread.dabr) | ||
| 377 | set_dabr(new->thread.dabr); | ||
| 378 | #else | 461 | #else |
| 379 | if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) | 462 | if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) |
| 380 | set_dabr(new->thread.dabr); | 463 | set_dabr(new->thread.dabr); |
| @@ -514,7 +597,7 @@ void show_regs(struct pt_regs * regs) | |||
| 514 | printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); | 597 | printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); |
| 515 | trap = TRAP(regs); | 598 | trap = TRAP(regs); |
| 516 | if (trap == 0x300 || trap == 0x600) | 599 | if (trap == 0x300 || trap == 0x600) |
| 517 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 600 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
| 518 | printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr); | 601 | printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr); |
| 519 | #else | 602 | #else |
| 520 | printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr); | 603 | printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr); |
| @@ -556,14 +639,7 @@ void flush_thread(void) | |||
| 556 | { | 639 | { |
| 557 | discard_lazy_cpu_state(); | 640 | discard_lazy_cpu_state(); |
| 558 | 641 | ||
| 559 | if (current->thread.dabr) { | 642 | set_debug_reg_defaults(¤t->thread); |
| 560 | current->thread.dabr = 0; | ||
| 561 | set_dabr(0); | ||
| 562 | |||
| 563 | #if defined(CONFIG_BOOKE) | ||
| 564 | current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W); | ||
| 565 | #endif | ||
| 566 | } | ||
| 567 | } | 643 | } |
| 568 | 644 | ||
| 569 | void | 645 | void |
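
process.c consolidates the BookE/4xx debug handling behind CONFIG_PPC_ADV_DEBUG_REGS: flush_thread() now resets all debug state through set_debug_reg_defaults(), __switch_to() primes the IAC/DAC/DBCR registers whenever either the old or the new thread has DBCR0_IDM set, and do_send_trap() delivers SIGTRAP with the breakpoint slot in si_errno and the reason in si_code. From the debugger's side those two fields identify which hardware breakpoint fired; a hedged userspace sketch (report_trap is a hypothetical helper and assumes the traced child is already stopped):

#include <stdio.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/ptrace.h>

/* Hypothetical debugger-side helper: recover the information that
 * do_send_trap() packed into the SIGTRAP siginfo. */
void report_trap(pid_t child)
{
	siginfo_t info;

	if (ptrace(PTRACE_GETSIGINFO, child, 0, &info) == 0 &&
	    info.si_signo == SIGTRAP)
		printf("breakpoint slot %d, si_code %d, address %p\n",
		       info.si_errno, info.si_code, info.si_addr);
}
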
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index bafac2e41ae1..5f306c4946e5 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
| @@ -654,6 +654,9 @@ static void __init early_cmdline_parse(void) | |||
| 654 | #define OV5_CMO 0x00 | 654 | #define OV5_CMO 0x00 |
| 655 | #endif | 655 | #endif |
| 656 | 656 | ||
| 657 | /* Option Vector 6: IBM PAPR hints */ | ||
| 658 | #define OV6_LINUX 0x02 /* Linux is our OS */ | ||
| 659 | |||
| 657 | /* | 660 | /* |
| 658 | * The architecture vector has an array of PVR mask/value pairs, | 661 | * The architecture vector has an array of PVR mask/value pairs, |
| 659 | * followed by # option vectors - 1, followed by the option vectors. | 662 | * followed by # option vectors - 1, followed by the option vectors. |
| @@ -665,7 +668,7 @@ static unsigned char ibm_architecture_vec[] = { | |||
| 665 | W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */ | 668 | W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */ |
| 666 | W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */ | 669 | W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */ |
| 667 | W(0xfffffffe), W(0x0f000001), /* all 2.04-compliant and earlier */ | 670 | W(0xfffffffe), W(0x0f000001), /* all 2.04-compliant and earlier */ |
| 668 | 5 - 1, /* 5 option vectors */ | 671 | 6 - 1, /* 6 option vectors */ |
| 669 | 672 | ||
| 670 | /* option vector 1: processor architectures supported */ | 673 | /* option vector 1: processor architectures supported */ |
| 671 | 3 - 2, /* length */ | 674 | 3 - 2, /* length */ |
| @@ -697,12 +700,29 @@ static unsigned char ibm_architecture_vec[] = { | |||
| 697 | 0, /* don't halt */ | 700 | 0, /* don't halt */ |
| 698 | 701 | ||
| 699 | /* option vector 5: PAPR/OF options */ | 702 | /* option vector 5: PAPR/OF options */ |
| 700 | 5 - 2, /* length */ | 703 | 13 - 2, /* length */ |
| 701 | 0, /* don't ignore, don't halt */ | 704 | 0, /* don't ignore, don't halt */ |
| 702 | OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY | | 705 | OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY | |
| 703 | OV5_DONATE_DEDICATE_CPU | OV5_MSI, | 706 | OV5_DONATE_DEDICATE_CPU | OV5_MSI, |
| 704 | 0, | 707 | 0, |
| 705 | OV5_CMO, | 708 | OV5_CMO, |
| 709 | 0, | ||
| 710 | 0, | ||
| 711 | 0, | ||
| 712 | 0, | ||
| 713 | /* WARNING: The offset of the "number of cores" field below | ||
| 714 | * must match by the macro below. Update the definition if | ||
| 715 | * the structure layout changes. | ||
| 716 | */ | ||
| 717 | #define IBM_ARCH_VEC_NRCORES_OFFSET 100 | ||
| 718 | W(NR_CPUS), /* number of cores supported */ | ||
| 719 | |||
| 720 | /* option vector 6: IBM PAPR hints */ | ||
| 721 | 4 - 2, /* length */ | ||
| 722 | 0, | ||
| 723 | 0, | ||
| 724 | OV6_LINUX, | ||
| 725 | |||
| 706 | }; | 726 | }; |
| 707 | 727 | ||
| 708 | /* Old method - ELF header with PT_NOTE sections */ | 728 | /* Old method - ELF header with PT_NOTE sections */ |
| @@ -792,13 +812,70 @@ static struct fake_elf { | |||
| 792 | } | 812 | } |
| 793 | }; | 813 | }; |
| 794 | 814 | ||
| 815 | static int __init prom_count_smt_threads(void) | ||
| 816 | { | ||
| 817 | phandle node; | ||
| 818 | char type[64]; | ||
| 819 | unsigned int plen; | ||
| 820 | |||
| 821 | /* Pick up th first CPU node we can find */ | ||
| 822 | for (node = 0; prom_next_node(&node); ) { | ||
| 823 | type[0] = 0; | ||
| 824 | prom_getprop(node, "device_type", type, sizeof(type)); | ||
| 825 | |||
| 826 | if (strcmp(type, RELOC("cpu"))) | ||
| 827 | continue; | ||
| 828 | /* | ||
| 829 | * There is an entry for each smt thread, each entry being | ||
| 830 | * 4 bytes long. All cpus should have the same number of | ||
| 831 | * smt threads, so return after finding the first. | ||
| 832 | */ | ||
| 833 | plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s"); | ||
| 834 | if (plen == PROM_ERROR) | ||
| 835 | break; | ||
| 836 | plen >>= 2; | ||
| 837 | prom_debug("Found 0x%x smt threads per core\n", (unsigned long)plen); | ||
| 838 | |||
| 839 | /* Sanity check */ | ||
| 840 | if (plen < 1 || plen > 64) { | ||
| 841 | prom_printf("Threads per core 0x%x out of bounds, assuming 1\n", | ||
| 842 | (unsigned long)plen); | ||
| 843 | return 1; | ||
| 844 | } | ||
| 845 | return plen; | ||
| 846 | } | ||
| 847 | prom_debug("No threads found, assuming 1 per core\n"); | ||
| 848 | |||
| 849 | return 1; | ||
| 850 | |||
| 851 | } | ||
| 852 | |||
| 853 | |||
| 795 | static void __init prom_send_capabilities(void) | 854 | static void __init prom_send_capabilities(void) |
| 796 | { | 855 | { |
| 797 | ihandle elfloader, root; | 856 | ihandle elfloader, root; |
| 798 | prom_arg_t ret; | 857 | prom_arg_t ret; |
| 858 | u32 *cores; | ||
| 799 | 859 | ||
| 800 | root = call_prom("open", 1, 1, ADDR("/")); | 860 | root = call_prom("open", 1, 1, ADDR("/")); |
| 801 | if (root != 0) { | 861 | if (root != 0) { |
| 862 | /* We need to tell the FW about the number of cores we support. | ||
| 863 | * | ||
| 864 | * To do that, we count the number of threads on the first core | ||
| 865 | * (we assume this is the same for all cores) and use it to | ||
| 866 | * divide NR_CPUS. | ||
| 867 | */ | ||
| 868 | cores = (u32 *)PTRRELOC(&ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET]); | ||
| 869 | if (*cores != NR_CPUS) { | ||
| 870 | prom_printf("WARNING ! " | ||
| 871 | "ibm_architecture_vec structure inconsistent: 0x%x !\n", | ||
| 872 | *cores); | ||
| 873 | } else { | ||
| 874 | *cores = NR_CPUS / prom_count_smt_threads(); | ||
| 875 | prom_printf("Max number of cores passed to firmware: 0x%x\n", | ||
| 876 | (unsigned long)*cores); | ||
| 877 | } | ||
| 878 | |||
| 802 | /* try calling the ibm,client-architecture-support method */ | 879 | /* try calling the ibm,client-architecture-support method */ |
| 803 | prom_printf("Calling ibm,client-architecture-support..."); | 880 | prom_printf("Calling ibm,client-architecture-support..."); |
| 804 | if (call_prom_ret("call-method", 3, 2, &ret, | 881 | if (call_prom_ret("call-method", 3, 2, &ret, |
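
prom_init.c now tells firmware how many cores (not threads) the kernel can support: prom_count_smt_threads() reads the length of the first CPU node's "ibm,ppc-interrupt-server#s" property (one 4-byte entry per SMT thread) and prom_send_capabilities() divides NR_CPUS by that count before patching the result into the architecture vector at IBM_ARCH_VEC_NRCORES_OFFSET. For instance, a kernel built with NR_CPUS=128 on hardware listing four threads per core would report 128 / 4 = 32 cores. A standalone model of that arithmetic (the property length is a made-up sample):

#include <stdio.h>

#define NR_CPUS 128	/* illustrative build-time limit */

int main(void)
{
	unsigned int proplen_bytes = 16;		/* e.g. 4 threads * 4 bytes each */
	unsigned int threads = proplen_bytes >> 2;	/* prom_count_smt_threads() */

	if (threads < 1 || threads > 64)		/* same sanity bounds as the patch */
		threads = 1;

	printf("cores passed to firmware: %u\n", NR_CPUS / threads);	/* 32 */
	return 0;
}
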
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index ef149880c145..d9b05866615f 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
| @@ -46,7 +46,7 @@ | |||
| 46 | /* | 46 | /* |
| 47 | * Set of msr bits that gdb can change on behalf of a process. | 47 | * Set of msr bits that gdb can change on behalf of a process. |
| 48 | */ | 48 | */ |
| 49 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | 49 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
| 50 | #define MSR_DEBUGCHANGE 0 | 50 | #define MSR_DEBUGCHANGE 0 |
| 51 | #else | 51 | #else |
| 52 | #define MSR_DEBUGCHANGE (MSR_SE | MSR_BE) | 52 | #define MSR_DEBUGCHANGE (MSR_SE | MSR_BE) |
| @@ -703,7 +703,7 @@ void user_enable_single_step(struct task_struct *task) | |||
| 703 | struct pt_regs *regs = task->thread.regs; | 703 | struct pt_regs *regs = task->thread.regs; |
| 704 | 704 | ||
| 705 | if (regs != NULL) { | 705 | if (regs != NULL) { |
| 706 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | 706 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
| 707 | task->thread.dbcr0 &= ~DBCR0_BT; | 707 | task->thread.dbcr0 &= ~DBCR0_BT; |
| 708 | task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC; | 708 | task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC; |
| 709 | regs->msr |= MSR_DE; | 709 | regs->msr |= MSR_DE; |
| @@ -720,7 +720,7 @@ void user_enable_block_step(struct task_struct *task) | |||
| 720 | struct pt_regs *regs = task->thread.regs; | 720 | struct pt_regs *regs = task->thread.regs; |
| 721 | 721 | ||
| 722 | if (regs != NULL) { | 722 | if (regs != NULL) { |
| 723 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | 723 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
| 724 | task->thread.dbcr0 &= ~DBCR0_IC; | 724 | task->thread.dbcr0 &= ~DBCR0_IC; |
| 725 | task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT; | 725 | task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT; |
| 726 | regs->msr |= MSR_DE; | 726 | regs->msr |= MSR_DE; |
| @@ -737,17 +737,25 @@ void user_disable_single_step(struct task_struct *task) | |||
| 737 | struct pt_regs *regs = task->thread.regs; | 737 | struct pt_regs *regs = task->thread.regs; |
| 738 | 738 | ||
| 739 | if (regs != NULL) { | 739 | if (regs != NULL) { |
| 740 | #if defined(CONFIG_BOOKE) | 740 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
| 741 | /* If DAC don't clear DBCRO_IDM or MSR_DE */ | 741 | /* |
| 742 | if (task->thread.dabr) | 742 | * The logic to disable single stepping should be as |
| 743 | task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT); | 743 | * simple as turning off the Instruction Complete flag. |
| 744 | else { | 744 | * And, after doing so, if all debug flags are off, turn |
| 745 | task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT | DBCR0_IDM); | 745 | * off DBCR0(IDM) and MSR(DE) .... Torez |
| 746 | */ | ||
| 747 | task->thread.dbcr0 &= ~DBCR0_IC; | ||
| 748 | /* | ||
| 749 | * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set. | ||
| 750 | */ | ||
| 751 | if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0, | ||
| 752 | task->thread.dbcr1)) { | ||
| 753 | /* | ||
| 754 | * All debug events were off..... | ||
| 755 | */ | ||
| 756 | task->thread.dbcr0 &= ~DBCR0_IDM; | ||
| 746 | regs->msr &= ~MSR_DE; | 757 | regs->msr &= ~MSR_DE; |
| 747 | } | 758 | } |
| 748 | #elif defined(CONFIG_40x) | ||
| 749 | task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT | DBCR0_IDM); | ||
| 750 | regs->msr &= ~MSR_DE; | ||
| 751 | #else | 759 | #else |
| 752 | regs->msr &= ~(MSR_SE | MSR_BE); | 760 | regs->msr &= ~(MSR_SE | MSR_BE); |
| 753 | #endif | 761 | #endif |
| @@ -769,8 +777,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, | |||
| 769 | if ((data & ~0x7UL) >= TASK_SIZE) | 777 | if ((data & ~0x7UL) >= TASK_SIZE) |
| 770 | return -EIO; | 778 | return -EIO; |
| 771 | 779 | ||
| 772 | #ifndef CONFIG_BOOKE | 780 | #ifndef CONFIG_PPC_ADV_DEBUG_REGS |
| 773 | |||
| 774 | /* For processors using DABR (i.e. 970), the bottom 3 bits are flags. | 781 | /* For processors using DABR (i.e. 970), the bottom 3 bits are flags. |
| 775 | * It was assumed, on previous implementations, that 3 bits were | 782 | * It was assumed, on previous implementations, that 3 bits were |
| 776 | * passed together with the data address, fitting the design of the | 783 | * passed together with the data address, fitting the design of the |
| @@ -789,21 +796,22 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, | |||
| 789 | 796 | ||
| 790 | /* Move contents to the DABR register */ | 797 | /* Move contents to the DABR register */ |
| 791 | task->thread.dabr = data; | 798 | task->thread.dabr = data; |
| 792 | 799 | #else /* CONFIG_PPC_ADV_DEBUG_REGS */ | |
| 793 | #endif | ||
| 794 | #if defined(CONFIG_BOOKE) | ||
| 795 | |||
| 796 | /* As described above, it was assumed 3 bits were passed with the data | 800 | /* As described above, it was assumed 3 bits were passed with the data |
| 797 | * address, but we will assume only the mode bits will be passed | 801 | * address, but we will assume only the mode bits will be passed |
| 798 | * as to not cause alignment restrictions for DAC-based processors. | 802 | * as to not cause alignment restrictions for DAC-based processors. |
| 799 | */ | 803 | */ |
| 800 | 804 | ||
| 801 | /* DAC's hold the whole address without any mode flags */ | 805 | /* DAC's hold the whole address without any mode flags */ |
| 802 | task->thread.dabr = data & ~0x3UL; | 806 | task->thread.dac1 = data & ~0x3UL; |
| 803 | 807 | ||
| 804 | if (task->thread.dabr == 0) { | 808 | if (task->thread.dac1 == 0) { |
| 805 | task->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W | DBCR0_IDM); | 809 | dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W); |
| 806 | task->thread.regs->msr &= ~MSR_DE; | 810 | if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0, |
| 811 | task->thread.dbcr1)) { | ||
| 812 | task->thread.regs->msr &= ~MSR_DE; | ||
| 813 | task->thread.dbcr0 &= ~DBCR0_IDM; | ||
| 814 | } | ||
| 807 | return 0; | 815 | return 0; |
| 808 | } | 816 | } |
| 809 | 817 | ||
| @@ -814,17 +822,17 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, | |||
| 814 | 822 | ||
| 815 | /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0 | 823 | /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0 |
| 816 | register */ | 824 | register */ |
| 817 | task->thread.dbcr0 = DBCR0_IDM; | 825 | task->thread.dbcr0 |= DBCR0_IDM; |
| 818 | 826 | ||
| 819 | /* Check for write and read flags and set DBCR0 | 827 | /* Check for write and read flags and set DBCR0 |
| 820 | accordingly */ | 828 | accordingly */ |
| 829 | dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W); | ||
| 821 | if (data & 0x1UL) | 830 | if (data & 0x1UL) |
| 822 | task->thread.dbcr0 |= DBSR_DAC1R; | 831 | dbcr_dac(task) |= DBCR_DAC1R; |
| 823 | if (data & 0x2UL) | 832 | if (data & 0x2UL) |
| 824 | task->thread.dbcr0 |= DBSR_DAC1W; | 833 | dbcr_dac(task) |= DBCR_DAC1W; |
| 825 | |||
| 826 | task->thread.regs->msr |= MSR_DE; | 834 | task->thread.regs->msr |= MSR_DE; |
| 827 | #endif | 835 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ |
| 828 | return 0; | 836 | return 0; |
| 829 | } | 837 | } |
| 830 | 838 | ||
| @@ -839,6 +847,394 @@ void ptrace_disable(struct task_struct *child) | |||
| 839 | user_disable_single_step(child); | 847 | user_disable_single_step(child); |
| 840 | } | 848 | } |
| 841 | 849 | ||
| 850 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
| 851 | static long set_intruction_bp(struct task_struct *child, | ||
| 852 | struct ppc_hw_breakpoint *bp_info) | ||
| 853 | { | ||
| 854 | int slot; | ||
| 855 | int slot1_in_use = ((child->thread.dbcr0 & DBCR0_IAC1) != 0); | ||
| 856 | int slot2_in_use = ((child->thread.dbcr0 & DBCR0_IAC2) != 0); | ||
| 857 | int slot3_in_use = ((child->thread.dbcr0 & DBCR0_IAC3) != 0); | ||
| 858 | int slot4_in_use = ((child->thread.dbcr0 & DBCR0_IAC4) != 0); | ||
| 859 | |||
| 860 | if (dbcr_iac_range(child) & DBCR_IAC12MODE) | ||
| 861 | slot2_in_use = 1; | ||
| 862 | if (dbcr_iac_range(child) & DBCR_IAC34MODE) | ||
| 863 | slot4_in_use = 1; | ||
| 864 | |||
| 865 | if (bp_info->addr >= TASK_SIZE) | ||
| 866 | return -EIO; | ||
| 867 | |||
| 868 | if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) { | ||
| 869 | |||
| 870 | /* Make sure range is valid. */ | ||
| 871 | if (bp_info->addr2 >= TASK_SIZE) | ||
| 872 | return -EIO; | ||
| 873 | |||
| 874 | /* We need a pair of IAC regsisters */ | ||
| 875 | if ((!slot1_in_use) && (!slot2_in_use)) { | ||
| 876 | slot = 1; | ||
| 877 | child->thread.iac1 = bp_info->addr; | ||
| 878 | child->thread.iac2 = bp_info->addr2; | ||
| 879 | child->thread.dbcr0 |= DBCR0_IAC1; | ||
| 880 | if (bp_info->addr_mode == | ||
| 881 | PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE) | ||
| 882 | dbcr_iac_range(child) |= DBCR_IAC12X; | ||
| 883 | else | ||
| 884 | dbcr_iac_range(child) |= DBCR_IAC12I; | ||
| 885 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | ||
| 886 | } else if ((!slot3_in_use) && (!slot4_in_use)) { | ||
| 887 | slot = 3; | ||
| 888 | child->thread.iac3 = bp_info->addr; | ||
| 889 | child->thread.iac4 = bp_info->addr2; | ||
| 890 | child->thread.dbcr0 |= DBCR0_IAC3; | ||
| 891 | if (bp_info->addr_mode == | ||
| 892 | PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE) | ||
| 893 | dbcr_iac_range(child) |= DBCR_IAC34X; | ||
| 894 | else | ||
| 895 | dbcr_iac_range(child) |= DBCR_IAC34I; | ||
| 896 | #endif | ||
| 897 | } else | ||
| 898 | return -ENOSPC; | ||
| 899 | } else { | ||
| 900 | /* We only need one. If possible leave a pair free in | ||
| 901 | * case a range is needed later | ||
| 902 | */ | ||
| 903 | if (!slot1_in_use) { | ||
| 904 | /* | ||
| 905 | * Don't use iac1 if iac1-iac2 are free and either | ||
| 906 | * iac3 or iac4 (but not both) are free | ||
| 907 | */ | ||
| 908 | if (slot2_in_use || (slot3_in_use == slot4_in_use)) { | ||
| 909 | slot = 1; | ||
| 910 | child->thread.iac1 = bp_info->addr; | ||
| 911 | child->thread.dbcr0 |= DBCR0_IAC1; | ||
| 912 | goto out; | ||
| 913 | } | ||
| 914 | } | ||
| 915 | if (!slot2_in_use) { | ||
| 916 | slot = 2; | ||
| 917 | child->thread.iac2 = bp_info->addr; | ||
| 918 | child->thread.dbcr0 |= DBCR0_IAC2; | ||
| 919 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | ||
| 920 | } else if (!slot3_in_use) { | ||
| 921 | slot = 3; | ||
| 922 | child->thread.iac3 = bp_info->addr; | ||
| 923 | child->thread.dbcr0 |= DBCR0_IAC3; | ||
| 924 | } else if (!slot4_in_use) { | ||
| 925 | slot = 4; | ||
| 926 | child->thread.iac4 = bp_info->addr; | ||
| 927 | child->thread.dbcr0 |= DBCR0_IAC4; | ||
| 928 | #endif | ||
| 929 | } else | ||
| 930 | return -ENOSPC; | ||
| 931 | } | ||
| 932 | out: | ||
| 933 | child->thread.dbcr0 |= DBCR0_IDM; | ||
| 934 | child->thread.regs->msr |= MSR_DE; | ||
| 935 | |||
| 936 | return slot; | ||
| 937 | } | ||
| 938 | |||
| 939 | static int del_instruction_bp(struct task_struct *child, int slot) | ||
| 940 | { | ||
| 941 | switch (slot) { | ||
| 942 | case 1: | ||
| 943 | if (child->thread.iac1 == 0) | ||
| 944 | return -ENOENT; | ||
| 945 | |||
| 946 | if (dbcr_iac_range(child) & DBCR_IAC12MODE) { | ||
| 947 | /* address range - clear slots 1 & 2 */ | ||
| 948 | child->thread.iac2 = 0; | ||
| 949 | dbcr_iac_range(child) &= ~DBCR_IAC12MODE; | ||
| 950 | } | ||
| 951 | child->thread.iac1 = 0; | ||
| 952 | child->thread.dbcr0 &= ~DBCR0_IAC1; | ||
| 953 | break; | ||
| 954 | case 2: | ||
| 955 | if (child->thread.iac2 == 0) | ||
| 956 | return -ENOENT; | ||
| 957 | |||
| 958 | if (dbcr_iac_range(child) & DBCR_IAC12MODE) | ||
| 959 | /* used in a range */ | ||
| 960 | return -EINVAL; | ||
| 961 | child->thread.iac2 = 0; | ||
| 962 | child->thread.dbcr0 &= ~DBCR0_IAC2; | ||
| 963 | break; | ||
| 964 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | ||
| 965 | case 3: | ||
| 966 | if (child->thread.iac3 == 0) | ||
| 967 | return -ENOENT; | ||
| 968 | |||
| 969 | if (dbcr_iac_range(child) & DBCR_IAC34MODE) { | ||
| 970 | /* address range - clear slots 3 & 4 */ | ||
| 971 | child->thread.iac4 = 0; | ||
| 972 | dbcr_iac_range(child) &= ~DBCR_IAC34MODE; | ||
| 973 | } | ||
| 974 | child->thread.iac3 = 0; | ||
| 975 | child->thread.dbcr0 &= ~DBCR0_IAC3; | ||
| 976 | break; | ||
| 977 | case 4: | ||
| 978 | if (child->thread.iac4 == 0) | ||
| 979 | return -ENOENT; | ||
| 980 | |||
| 981 | if (dbcr_iac_range(child) & DBCR_IAC34MODE) | ||
| 982 | /* Used in a range */ | ||
| 983 | return -EINVAL; | ||
| 984 | child->thread.iac4 = 0; | ||
| 985 | child->thread.dbcr0 &= ~DBCR0_IAC4; | ||
| 986 | break; | ||
| 987 | #endif | ||
| 988 | default: | ||
| 989 | return -EINVAL; | ||
| 990 | } | ||
| 991 | return 0; | ||
| 992 | } | ||
| 993 | |||
| 994 | static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info) | ||
| 995 | { | ||
| 996 | int byte_enable = | ||
| 997 | (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT) | ||
| 998 | & 0xf; | ||
| 999 | int condition_mode = | ||
| 1000 | bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE; | ||
| 1001 | int slot; | ||
| 1002 | |||
| 1003 | if (byte_enable && (condition_mode == 0)) | ||
| 1004 | return -EINVAL; | ||
| 1005 | |||
| 1006 | if (bp_info->addr >= TASK_SIZE) | ||
| 1007 | return -EIO; | ||
| 1008 | |||
| 1009 | if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) { | ||
| 1010 | slot = 1; | ||
| 1011 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) | ||
| 1012 | dbcr_dac(child) |= DBCR_DAC1R; | ||
| 1013 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) | ||
| 1014 | dbcr_dac(child) |= DBCR_DAC1W; | ||
| 1015 | child->thread.dac1 = (unsigned long)bp_info->addr; | ||
| 1016 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | ||
| 1017 | if (byte_enable) { | ||
| 1018 | child->thread.dvc1 = | ||
| 1019 | (unsigned long)bp_info->condition_value; | ||
| 1020 | child->thread.dbcr2 |= | ||
| 1021 | ((byte_enable << DBCR2_DVC1BE_SHIFT) | | ||
| 1022 | (condition_mode << DBCR2_DVC1M_SHIFT)); | ||
| 1023 | } | ||
| 1024 | #endif | ||
| 1025 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE | ||
| 1026 | } else if (child->thread.dbcr2 & DBCR2_DAC12MODE) { | ||
| 1027 | /* Both dac1 and dac2 are part of a range */ | ||
| 1028 | return -ENOSPC; | ||
| 1029 | #endif | ||
| 1030 | } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) { | ||
| 1031 | slot = 2; | ||
| 1032 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) | ||
| 1033 | dbcr_dac(child) |= DBCR_DAC2R; | ||
| 1034 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) | ||
| 1035 | dbcr_dac(child) |= DBCR_DAC2W; | ||
| 1036 | child->thread.dac2 = (unsigned long)bp_info->addr; | ||
| 1037 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | ||
| 1038 | if (byte_enable) { | ||
| 1039 | child->thread.dvc2 = | ||
| 1040 | (unsigned long)bp_info->condition_value; | ||
| 1041 | child->thread.dbcr2 |= | ||
| 1042 | ((byte_enable << DBCR2_DVC2BE_SHIFT) | | ||
| 1043 | (condition_mode << DBCR2_DVC2M_SHIFT)); | ||
| 1044 | } | ||
| 1045 | #endif | ||
| 1046 | } else | ||
| 1047 | return -ENOSPC; | ||
| 1048 | child->thread.dbcr0 |= DBCR0_IDM; | ||
| 1049 | child->thread.regs->msr |= MSR_DE; | ||
| 1050 | |||
| 1051 | return slot + 4; | ||
| 1052 | } | ||
| 1053 | |||
| 1054 | static int del_dac(struct task_struct *child, int slot) | ||
| 1055 | { | ||
| 1056 | if (slot == 1) { | ||
| 1057 | if (child->thread.dac1 == 0) | ||
| 1058 | return -ENOENT; | ||
| 1059 | |||
| 1060 | child->thread.dac1 = 0; | ||
| 1061 | dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W); | ||
| 1062 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE | ||
| 1063 | if (child->thread.dbcr2 & DBCR2_DAC12MODE) { | ||
| 1064 | child->thread.dac2 = 0; | ||
| 1065 | child->thread.dbcr2 &= ~DBCR2_DAC12MODE; | ||
| 1066 | } | ||
| 1067 | child->thread.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE); | ||
| 1068 | #endif | ||
| 1069 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | ||
| 1070 | child->thread.dvc1 = 0; | ||
| 1071 | #endif | ||
| 1072 | } else if (slot == 2) { | ||
| 1073 | if (child->thread.dac1 == 0) | ||
| 1074 | return -ENOENT; | ||
| 1075 | |||
| 1076 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE | ||
| 1077 | if (child->thread.dbcr2 & DBCR2_DAC12MODE) | ||
| 1078 | /* Part of a range */ | ||
| 1079 | return -EINVAL; | ||
| 1080 | child->thread.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE); | ||
| 1081 | #endif | ||
| 1082 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | ||
| 1083 | child->thread.dvc2 = 0; | ||
| 1084 | #endif | ||
| 1085 | child->thread.dac2 = 0; | ||
| 1086 | dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W); | ||
| 1087 | } else | ||
| 1088 | return -EINVAL; | ||
| 1089 | |||
| 1090 | return 0; | ||
| 1091 | } | ||
| 1092 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | ||
| 1093 | |||
| 1094 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE | ||
| 1095 | static int set_dac_range(struct task_struct *child, | ||
| 1096 | struct ppc_hw_breakpoint *bp_info) | ||
| 1097 | { | ||
| 1098 | int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK; | ||
| 1099 | |||
| 1100 | /* We don't allow range watchpoints to be used with DVC */ | ||
| 1101 | if (bp_info->condition_mode) | ||
| 1102 | return -EINVAL; | ||
| 1103 | |||
| 1104 | /* | ||
| 1105 | * Best effort to verify the address range. The user/supervisor bits | ||
| 1106 | * prevent trapping in kernel space, but let's fail on an obvious bad | ||
| 1107 | * range. The simple test on the mask is not fool-proof, and any | ||
| 1108 | * exclusive range will spill over into kernel space. | ||
| 1109 | */ | ||
| 1110 | if (bp_info->addr >= TASK_SIZE) | ||
| 1111 | return -EIO; | ||
| 1112 | if (mode == PPC_BREAKPOINT_MODE_MASK) { | ||
| 1113 | /* | ||
| 1114 | * dac2 is a bitmask. Don't allow a mask under which a valid | ||
| 1115 | * dac1 value could also match a kernel-space address. | ||
| 1116 | */ | ||
| 1117 | if (~((unsigned long)bp_info->addr2) >= TASK_SIZE) | ||
| 1118 | return -EIO; | ||
| 1119 | } else { | ||
| 1120 | /* | ||
| 1121 | * For range breakpoints, addr2 must also be a valid address | ||
| 1122 | */ | ||
| 1123 | if (bp_info->addr2 >= TASK_SIZE) | ||
| 1124 | return -EIO; | ||
| 1125 | } | ||
| 1126 | |||
| 1127 | if (child->thread.dbcr0 & | ||
| 1128 | (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W)) | ||
| 1129 | return -ENOSPC; | ||
| 1130 | |||
| 1131 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) | ||
| 1132 | child->thread.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM); | ||
| 1133 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) | ||
| 1134 | child->thread.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM); | ||
| 1135 | child->thread.dac1 = bp_info->addr; | ||
| 1136 | child->thread.dac2 = bp_info->addr2; | ||
| 1137 | if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE) | ||
| 1138 | child->thread.dbcr2 |= DBCR2_DAC12M; | ||
| 1139 | else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE) | ||
| 1140 | child->thread.dbcr2 |= DBCR2_DAC12MX; | ||
| 1141 | else /* PPC_BREAKPOINT_MODE_MASK */ | ||
| 1142 | child->thread.dbcr2 |= DBCR2_DAC12MM; | ||
| 1143 | child->thread.regs->msr |= MSR_DE; | ||
| 1144 | |||
| 1145 | return 5; | ||
| 1146 | } | ||
| 1147 | #endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */ | ||
| 1148 | |||
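The mask-mode sanity check above is easier to follow with numbers. Roughly, Book-E mask mode compares only the address bits selected by the dac2 mask against dac1, so the cleared mask bits (~mask) are don't-cares; if ~mask by itself reaches into kernel space, an address above TASK_SIZE could satisfy the comparison, and the request is refused. A stand-alone illustration, assuming an example 32-bit TASK_SIZE of 0x80000000 (the real value is platform dependent):

    /* Illustration only; EX_TASK_SIZE is an assumed example value. */
    #define EX_TASK_SIZE 0x80000000UL

    static int mask_mode_ok(unsigned long addr, unsigned long mask)
    {
            if (addr >= EX_TASK_SIZE)    /* the compare value must be a user address */
                    return 0;
            if (~mask >= EX_TASK_SIZE)   /* don't-care bits alone must not form a kernel address */
                    return 0;
            return 1;    /* e.g. mask 0xffff0000 is accepted, mask 0x0000ffff is not */
    }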
| 1149 | static long ppc_set_hwdebug(struct task_struct *child, | ||
| 1150 | struct ppc_hw_breakpoint *bp_info) | ||
| 1151 | { | ||
| 1152 | if (bp_info->version != 1) | ||
| 1153 | return -ENOTSUPP; | ||
| 1154 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
| 1155 | /* | ||
| 1156 | * Check for invalid flags and combinations | ||
| 1157 | */ | ||
| 1158 | if ((bp_info->trigger_type == 0) || | ||
| 1159 | (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE | | ||
| 1160 | PPC_BREAKPOINT_TRIGGER_RW)) || | ||
| 1161 | (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) || | ||
| 1162 | (bp_info->condition_mode & | ||
| 1163 | ~(PPC_BREAKPOINT_CONDITION_MODE | | ||
| 1164 | PPC_BREAKPOINT_CONDITION_BE_ALL))) | ||
| 1165 | return -EINVAL; | ||
| 1166 | #if CONFIG_PPC_ADV_DEBUG_DVCS == 0 | ||
| 1167 | if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE) | ||
| 1168 | return -EINVAL; | ||
| 1169 | #endif | ||
| 1170 | |||
| 1171 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) { | ||
| 1172 | if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) || | ||
| 1173 | (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)) | ||
| 1174 | return -EINVAL; | ||
| 1175 | return set_instruction_bp(child, bp_info); | ||
| 1176 | } | ||
| 1177 | if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT) | ||
| 1178 | return set_dac(child, bp_info); | ||
| 1179 | |||
| 1180 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE | ||
| 1181 | return set_dac_range(child, bp_info); | ||
| 1182 | #else | ||
| 1183 | return -EINVAL; | ||
| 1184 | #endif | ||
| 1185 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | ||
| 1186 | /* | ||
| 1187 | * We only support one data breakpoint | ||
| 1188 | */ | ||
| 1189 | if (((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0) || | ||
| 1190 | ((bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0) || | ||
| 1191 | (bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_WRITE) || | ||
| 1192 | (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) || | ||
| 1193 | (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)) | ||
| 1194 | return -EINVAL; | ||
| 1195 | |||
| 1196 | if (child->thread.dabr) | ||
| 1197 | return -ENOSPC; | ||
| 1198 | |||
| 1199 | if ((unsigned long)bp_info->addr >= TASK_SIZE) | ||
| 1200 | return -EIO; | ||
| 1201 | |||
| 1202 | child->thread.dabr = (unsigned long)bp_info->addr; | ||
| 1203 | |||
| 1204 | return 1; | ||
| 1205 | #endif /* !CONFIG_PPC_ADV_DEBUG_REGS */ | ||
| 1206 | } | ||
| 1207 | |||
| 1208 | static long ppc_del_hwdebug(struct task_struct *child, long addr, long data) | ||
| 1209 | { | ||
| 1210 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
| 1211 | int rc; | ||
| 1212 | |||
| 1213 | if (data <= 4) | ||
| 1214 | rc = del_instruction_bp(child, (int)data); | ||
| 1215 | else | ||
| 1216 | rc = del_dac(child, (int)data - 4); | ||
| 1217 | |||
| 1218 | if (!rc) { | ||
| 1219 | if (!DBCR_ACTIVE_EVENTS(child->thread.dbcr0, | ||
| 1220 | child->thread.dbcr1)) { | ||
| 1221 | child->thread.dbcr0 &= ~DBCR0_IDM; | ||
| 1222 | child->thread.regs->msr &= ~MSR_DE; | ||
| 1223 | } | ||
| 1224 | } | ||
| 1225 | return rc; | ||
| 1226 | #else | ||
| 1227 | if (data != 1) | ||
| 1228 | return -EINVAL; | ||
| 1229 | if (child->thread.dabr == 0) | ||
| 1230 | return -ENOENT; | ||
| 1231 | |||
| 1232 | child->thread.dabr = 0; | ||
| 1233 | |||
| 1234 | return 0; | ||
| 1235 | #endif | ||
| 1236 | } | ||
| 1237 | |||
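Reading ppc_set_hwdebug() and ppc_del_hwdebug() together, the handle space works out to 1-4 for instruction breakpoints (IAC1-IAC4), 5 and 6 for exact data breakpoints (set_dac() returns slot + 4), 5 again for a DAC range or mask breakpoint, and a fixed 1 for the single DABR on non-Book-E hardware. A small tracer-side helper, sketched from that mapping rather than taken from the patch, for the CONFIG_PPC_ADV_DEBUG_REGS case:

    /*
     * Sketch: classify a PPC_PTRACE_SETHWDEBUG handle the way ppc_del_hwdebug()
     * above dispatches it on CONFIG_PPC_ADV_DEBUG_REGS hardware.
     */
    static const char *hwdebug_handle_kind(long handle)
    {
            if (handle >= 1 && handle <= 4)
                    return "instruction breakpoint (IAC1..IAC4)";
            if (handle == 5 || handle == 6)
                    return "data breakpoint (DAC1/DAC2), or a DAC range when 5";
            return "invalid handle";
    }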
| 842 | /* | 1238 | /* |
| 843 | * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls, | 1239 | * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls, |
| 844 | * we mark them as obsolete now, they will be removed in a future version | 1240 | * we mark them as obsolete now, they will be removed in a future version |
| @@ -932,13 +1328,77 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
| 932 | break; | 1328 | break; |
| 933 | } | 1329 | } |
| 934 | 1330 | ||
| 1331 | case PPC_PTRACE_GETHWDBGINFO: { | ||
| 1332 | struct ppc_debug_info dbginfo; | ||
| 1333 | |||
| 1334 | dbginfo.version = 1; | ||
| 1335 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
| 1336 | dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS; | ||
| 1337 | dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS; | ||
| 1338 | dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS; | ||
| 1339 | dbginfo.data_bp_alignment = 4; | ||
| 1340 | dbginfo.sizeof_condition = 4; | ||
| 1341 | dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE | | ||
| 1342 | PPC_DEBUG_FEATURE_INSN_BP_MASK; | ||
| 1343 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE | ||
| 1344 | dbginfo.features |= | ||
| 1345 | PPC_DEBUG_FEATURE_DATA_BP_RANGE | | ||
| 1346 | PPC_DEBUG_FEATURE_DATA_BP_MASK; | ||
| 1347 | #endif | ||
| 1348 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | ||
| 1349 | dbginfo.num_instruction_bps = 0; | ||
| 1350 | dbginfo.num_data_bps = 1; | ||
| 1351 | dbginfo.num_condition_regs = 0; | ||
| 1352 | #ifdef CONFIG_PPC64 | ||
| 1353 | dbginfo.data_bp_alignment = 8; | ||
| 1354 | #else | ||
| 1355 | dbginfo.data_bp_alignment = 4; | ||
| 1356 | #endif | ||
| 1357 | dbginfo.sizeof_condition = 0; | ||
| 1358 | dbginfo.features = 0; | ||
| 1359 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | ||
| 1360 | |||
| 1361 | if (!access_ok(VERIFY_WRITE, data, | ||
| 1362 | sizeof(struct ppc_debug_info))) | ||
| 1363 | return -EFAULT; | ||
| 1364 | ret = __copy_to_user((struct ppc_debug_info __user *)data, | ||
| 1365 | &dbginfo, sizeof(struct ppc_debug_info)) ? | ||
| 1366 | -EFAULT : 0; | ||
| 1367 | break; | ||
| 1368 | } | ||
| 1369 | |||
| 1370 | case PPC_PTRACE_SETHWDEBUG: { | ||
| 1371 | struct ppc_hw_breakpoint bp_info; | ||
| 1372 | |||
| 1373 | if (!access_ok(VERIFY_READ, data, | ||
| 1374 | sizeof(struct ppc_hw_breakpoint))) | ||
| 1375 | return -EFAULT; | ||
| 1376 | ret = __copy_from_user(&bp_info, | ||
| 1377 | (struct ppc_hw_breakpoint __user *)data, | ||
| 1378 | sizeof(struct ppc_hw_breakpoint)) ? | ||
| 1379 | -EFAULT : 0; | ||
| 1380 | if (!ret) | ||
| 1381 | ret = ppc_set_hwdebug(child, &bp_info); | ||
| 1382 | break; | ||
| 1383 | } | ||
| 1384 | |||
| 1385 | case PPC_PTRACE_DELHWDEBUG: { | ||
| 1386 | ret = ppc_del_hwdebug(child, addr, data); | ||
| 1387 | break; | ||
| 1388 | } | ||
| 1389 | |||
| 935 | case PTRACE_GET_DEBUGREG: { | 1390 | case PTRACE_GET_DEBUGREG: { |
| 936 | ret = -EINVAL; | 1391 | ret = -EINVAL; |
| 937 | /* We only support one DABR and no IABRS at the moment */ | 1392 | /* We only support one DABR and no IABRS at the moment */ |
| 938 | if (addr > 0) | 1393 | if (addr > 0) |
| 939 | break; | 1394 | break; |
| 1395 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
| 1396 | ret = put_user(child->thread.dac1, | ||
| 1397 | (unsigned long __user *)data); | ||
| 1398 | #else | ||
| 940 | ret = put_user(child->thread.dabr, | 1399 | ret = put_user(child->thread.dabr, |
| 941 | (unsigned long __user *)data); | 1400 | (unsigned long __user *)data); |
| 1401 | #endif | ||
| 942 | break; | 1402 | break; |
| 943 | } | 1403 | } |
| 944 | 1404 | ||
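End to end, the three new requests wired into arch_ptrace() above are meant to be driven from userspace roughly as below. This is a sketch rather than part of the patch: it assumes the structures and PPC_PTRACE_* numbers are exported through asm/ptrace.h, that the tracee is already stopped under ptrace, and it elides error handling; set_write_watchpoint and watch_addr are made-up names.

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <asm/ptrace.h>   /* ppc_debug_info, ppc_hw_breakpoint, PPC_PTRACE_* (assumed export) */

    /* Sketch only: place a write watchpoint in an already-stopped tracee. */
    static long set_write_watchpoint(pid_t pid, void *watch_addr)
    {
            struct ppc_debug_info dbginfo;
            struct ppc_hw_breakpoint bp = {
                    .version        = 1,
                    .trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE,
                    .addr_mode      = PPC_BREAKPOINT_MODE_EXACT,
                    .condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
                    .addr           = (uintptr_t)watch_addr,
            };
            long handle;

            /* The ptrace 'addr' argument is unused here; 'data' carries the pointer. */
            ptrace(PPC_PTRACE_GETHWDBGINFO, pid, NULL, &dbginfo);
            printf("%u data breakpoint(s), alignment %u\n",
                   dbginfo.num_data_bps, dbginfo.data_bp_alignment);

            handle = ptrace(PPC_PTRACE_SETHWDEBUG, pid, NULL, &bp);

            /* Later: ptrace(PPC_PTRACE_DELHWDEBUG, pid, NULL, handle); */
            return handle;
    }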
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index 00b5078da9a3..a0afb555a7c9 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c | |||
| @@ -140,17 +140,15 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs) | |||
| 140 | return 0; /* no signals delivered */ | 140 | return 0; /* no signals delivered */ |
| 141 | } | 141 | } |
| 142 | 142 | ||
| 143 | #ifndef CONFIG_PPC_ADV_DEBUG_REGS | ||
| 143 | /* | 144 | /* |
| 144 | * Reenable the DABR before delivering the signal to | 145 | * Reenable the DABR before delivering the signal to |
| 145 | * user space. The DABR will have been cleared if it | 146 | * user space. The DABR will have been cleared if it |
| 146 | * triggered inside the kernel. | 147 | * triggered inside the kernel. |
| 147 | */ | 148 | */ |
| 148 | if (current->thread.dabr) { | 149 | if (current->thread.dabr) |
| 149 | set_dabr(current->thread.dabr); | 150 | set_dabr(current->thread.dabr); |
| 150 | #if defined(CONFIG_BOOKE) | ||
| 151 | mtspr(SPRN_DBCR0, current->thread.dbcr0); | ||
| 152 | #endif | 151 | #endif |
| 153 | } | ||
| 154 | 152 | ||
| 155 | if (is32) { | 153 | if (is32) { |
| 156 | if (ka.sa.sa_flags & SA_SIGINFO) | 154 | if (ka.sa.sa_flags & SA_SIGINFO) |
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index d670429a1608..266610119f66 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c | |||
| @@ -1078,7 +1078,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx, | |||
| 1078 | int i; | 1078 | int i; |
| 1079 | unsigned char tmp; | 1079 | unsigned char tmp; |
| 1080 | unsigned long new_msr = regs->msr; | 1080 | unsigned long new_msr = regs->msr; |
| 1081 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 1081 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
| 1082 | unsigned long new_dbcr0 = current->thread.dbcr0; | 1082 | unsigned long new_dbcr0 = current->thread.dbcr0; |
| 1083 | #endif | 1083 | #endif |
| 1084 | 1084 | ||
| @@ -1087,13 +1087,17 @@ int sys_debug_setcontext(struct ucontext __user *ctx, | |||
| 1087 | return -EFAULT; | 1087 | return -EFAULT; |
| 1088 | switch (op.dbg_type) { | 1088 | switch (op.dbg_type) { |
| 1089 | case SIG_DBG_SINGLE_STEPPING: | 1089 | case SIG_DBG_SINGLE_STEPPING: |
| 1090 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 1090 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
| 1091 | if (op.dbg_value) { | 1091 | if (op.dbg_value) { |
| 1092 | new_msr |= MSR_DE; | 1092 | new_msr |= MSR_DE; |
| 1093 | new_dbcr0 |= (DBCR0_IDM | DBCR0_IC); | 1093 | new_dbcr0 |= (DBCR0_IDM | DBCR0_IC); |
| 1094 | } else { | 1094 | } else { |
| 1095 | new_msr &= ~MSR_DE; | 1095 | new_dbcr0 &= ~DBCR0_IC; |
| 1096 | new_dbcr0 &= ~(DBCR0_IDM | DBCR0_IC); | 1096 | if (!DBCR_ACTIVE_EVENTS(new_dbcr0, |
| 1097 | current->thread.dbcr1)) { | ||
| 1098 | new_msr &= ~MSR_DE; | ||
| 1099 | new_dbcr0 &= ~DBCR0_IDM; | ||
| 1100 | } | ||
| 1097 | } | 1101 | } |
| 1098 | #else | 1102 | #else |
| 1099 | if (op.dbg_value) | 1103 | if (op.dbg_value) |
| @@ -1103,7 +1107,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx, | |||
| 1103 | #endif | 1107 | #endif |
| 1104 | break; | 1108 | break; |
| 1105 | case SIG_DBG_BRANCH_TRACING: | 1109 | case SIG_DBG_BRANCH_TRACING: |
| 1106 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 1110 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
| 1107 | return -EINVAL; | 1111 | return -EINVAL; |
| 1108 | #else | 1112 | #else |
| 1109 | if (op.dbg_value) | 1113 | if (op.dbg_value) |
| @@ -1124,7 +1128,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx, | |||
| 1124 | failure is a problem, anyway, and it's very unlikely unless | 1128 | failure is a problem, anyway, and it's very unlikely unless |
| 1125 | the user is really doing something wrong. */ | 1129 | the user is really doing something wrong. */ |
| 1126 | regs->msr = new_msr; | 1130 | regs->msr = new_msr; |
| 1127 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 1131 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
| 1128 | current->thread.dbcr0 = new_dbcr0; | 1132 | current->thread.dbcr0 = new_dbcr0; |
| 1129 | #endif | 1133 | #endif |
| 1130 | 1134 | ||
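The sys_debug_setcontext() change above follows the same teardown pattern used throughout this patch: retire the one event being cleared, then drop DBCR0_IDM and MSR_DE only when DBCR_ACTIVE_EVENTS() reports that no other debug event is still armed. A minimal kernel-context sketch of that pattern (the pointer parameters stand in for the thread and regs fields, and the header location is assumed):

    #include <asm/reg_booke.h>   /* DBCR0_*, MSR_DE, DBCR_ACTIVE_EVENTS (header location assumed) */

    /*
     * Sketch of the shared teardown pattern: retire one DBCR0 event bit, then
     * disable the global debug machinery only if nothing else is still armed.
     * The pointers stand in for current->thread.dbcr0 / regs->msr.
     */
    static void retire_debug_event(unsigned long *dbcr0, unsigned long dbcr1,
                                   unsigned long *msr, unsigned long event)
    {
            *dbcr0 &= ~event;                         /* e.g. DBCR0_IC */
            if (!DBCR_ACTIVE_EVENTS(*dbcr0, dbcr1)) {
                    *dbcr0 &= ~DBCR0_IDM;             /* no debug events left */
                    *msr &= ~MSR_DE;                  /* stop taking debug interrupts */
            }
    }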
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 6c6093d67f30..1b16b9a3e49a 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
| @@ -265,8 +265,8 @@ void account_system_vtime(struct task_struct *tsk) | |||
| 265 | account_system_time(tsk, 0, delta, deltascaled); | 265 | account_system_time(tsk, 0, delta, deltascaled); |
| 266 | else | 266 | else |
| 267 | account_idle_time(delta); | 267 | account_idle_time(delta); |
| 268 | per_cpu(cputime_last_delta, smp_processor_id()) = delta; | 268 | __get_cpu_var(cputime_last_delta) = delta; |
| 269 | per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled; | 269 | __get_cpu_var(cputime_scaled_last_delta) = deltascaled; |
| 270 | local_irq_restore(flags); | 270 | local_irq_restore(flags); |
| 271 | } | 271 | } |
| 272 | EXPORT_SYMBOL_GPL(account_system_vtime); | 272 | EXPORT_SYMBOL_GPL(account_system_vtime); |
| @@ -575,6 +575,8 @@ void timer_interrupt(struct pt_regs * regs) | |||
| 575 | 575 | ||
| 576 | trace_timer_interrupt_entry(regs); | 576 | trace_timer_interrupt_entry(regs); |
| 577 | 577 | ||
| 578 | __get_cpu_var(irq_stat).timer_irqs++; | ||
| 579 | |||
| 578 | /* Ensure a positive value is written to the decrementer, or else | 580 | /* Ensure a positive value is written to the decrementer, or else |
| 579 | * some CPUs will continue to take decrementer exceptions */ | 581 | * some CPUs will continue to take decrementer exceptions */
| 580 | set_dec(DECREMENTER_MAX); | 582 | set_dec(DECREMENTER_MAX); |
| @@ -935,8 +937,8 @@ static void register_decrementer_clockevent(int cpu) | |||
| 935 | *dec = decrementer_clockevent; | 937 | *dec = decrementer_clockevent; |
| 936 | dec->cpumask = cpumask_of(cpu); | 938 | dec->cpumask = cpumask_of(cpu); |
| 937 | 939 | ||
| 938 | printk(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n", | 940 | printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n", |
| 939 | dec->name, dec->mult, dec->shift, cpu); | 941 | dec->name, dec->mult, dec->shift, cpu); |
| 940 | 942 | ||
| 941 | clockevents_register_device(dec); | 943 | clockevents_register_device(dec); |
| 942 | } | 944 | } |
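The time.c hunk above swaps per_cpu(var, smp_processor_id()) for __get_cpu_var(var), which in this kernel generation resolves the local CPU's copy directly instead of first computing the CPU number and then indexing the per-CPU area. A minimal sketch with a made-up per-CPU variable, valid only in non-preemptible context such as the timer path shown above:

    #include <linux/types.h>
    #include <linux/percpu.h>
    #include <linux/smp.h>

    /* 'example_delta' is a made-up per-CPU variable for illustration. */
    static DEFINE_PER_CPU(u64, example_delta);

    static void record_delta(u64 delta)
    {
            /* old form: look up the CPU number, then index the per-CPU area */
            per_cpu(example_delta, smp_processor_id()) = delta;

            /* new form: access this CPU's copy directly */
            __get_cpu_var(example_delta) = delta;
    }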
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index d069ff8a7e03..696626a2e835 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
| @@ -60,13 +60,13 @@ | |||
| 60 | #endif | 60 | #endif |
| 61 | 61 | ||
| 62 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) | 62 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) |
| 63 | int (*__debugger)(struct pt_regs *regs); | 63 | int (*__debugger)(struct pt_regs *regs) __read_mostly; |
| 64 | int (*__debugger_ipi)(struct pt_regs *regs); | 64 | int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly; |
| 65 | int (*__debugger_bpt)(struct pt_regs *regs); | 65 | int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly; |
| 66 | int (*__debugger_sstep)(struct pt_regs *regs); | 66 | int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly; |
| 67 | int (*__debugger_iabr_match)(struct pt_regs *regs); | 67 | int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly; |
| 68 | int (*__debugger_dabr_match)(struct pt_regs *regs); | 68 | int (*__debugger_dabr_match)(struct pt_regs *regs) __read_mostly; |
| 69 | int (*__debugger_fault_handler)(struct pt_regs *regs); | 69 | int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly; |
| 70 | 70 | ||
| 71 | EXPORT_SYMBOL(__debugger); | 71 | EXPORT_SYMBOL(__debugger); |
| 72 | EXPORT_SYMBOL(__debugger_ipi); | 72 | EXPORT_SYMBOL(__debugger_ipi); |
| @@ -102,11 +102,11 @@ static inline void pmac_backlight_unblank(void) { } | |||
| 102 | int die(const char *str, struct pt_regs *regs, long err) | 102 | int die(const char *str, struct pt_regs *regs, long err) |
| 103 | { | 103 | { |
| 104 | static struct { | 104 | static struct { |
| 105 | spinlock_t lock; | 105 | raw_spinlock_t lock; |
| 106 | u32 lock_owner; | 106 | u32 lock_owner; |
| 107 | int lock_owner_depth; | 107 | int lock_owner_depth; |
| 108 | } die = { | 108 | } die = { |
| 109 | .lock = __SPIN_LOCK_UNLOCKED(die.lock), | 109 | .lock = __RAW_SPIN_LOCK_UNLOCKED(die.lock), |
| 110 | .lock_owner = -1, | 110 | .lock_owner = -1, |
| 111 | .lock_owner_depth = 0 | 111 | .lock_owner_depth = 0 |
| 112 | }; | 112 | }; |
| @@ -120,7 +120,7 @@ int die(const char *str, struct pt_regs *regs, long err) | |||
| 120 | 120 | ||
| 121 | if (die.lock_owner != raw_smp_processor_id()) { | 121 | if (die.lock_owner != raw_smp_processor_id()) { |
| 122 | console_verbose(); | 122 | console_verbose(); |
| 123 | spin_lock_irqsave(&die.lock, flags); | 123 | raw_spin_lock_irqsave(&die.lock, flags); |
| 124 | die.lock_owner = smp_processor_id(); | 124 | die.lock_owner = smp_processor_id(); |
| 125 | die.lock_owner_depth = 0; | 125 | die.lock_owner_depth = 0; |
| 126 | bust_spinlocks(1); | 126 | bust_spinlocks(1); |
| @@ -146,6 +146,11 @@ int die(const char *str, struct pt_regs *regs, long err) | |||
| 146 | #endif | 146 | #endif |
| 147 | printk("%s\n", ppc_md.name ? ppc_md.name : ""); | 147 | printk("%s\n", ppc_md.name ? ppc_md.name : ""); |
| 148 | 148 | ||
| 149 | sysfs_printk_last_file(); | ||
| 150 | if (notify_die(DIE_OOPS, str, regs, err, 255, | ||
| 151 | SIGSEGV) == NOTIFY_STOP) | ||
| 152 | return 1; | ||
| 153 | |||
| 149 | print_modules(); | 154 | print_modules(); |
| 150 | show_regs(regs); | 155 | show_regs(regs); |
| 151 | } else { | 156 | } else { |
| @@ -155,7 +160,7 @@ int die(const char *str, struct pt_regs *regs, long err) | |||
| 155 | bust_spinlocks(0); | 160 | bust_spinlocks(0); |
| 156 | die.lock_owner = -1; | 161 | die.lock_owner = -1; |
| 157 | add_taint(TAINT_DIE); | 162 | add_taint(TAINT_DIE); |
| 158 | spin_unlock_irqrestore(&die.lock, flags); | 163 | raw_spin_unlock_irqrestore(&die.lock, flags); |
| 159 | 164 | ||
| 160 | if (kexec_should_crash(current) || | 165 | if (kexec_should_crash(current) || |
| 161 | kexec_sr_activated(smp_processor_id())) | 166 | kexec_sr_activated(smp_processor_id())) |
| @@ -294,7 +299,7 @@ static inline int check_io_access(struct pt_regs *regs) | |||
| 294 | return 0; | 299 | return 0; |
| 295 | } | 300 | } |
| 296 | 301 | ||
| 297 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 302 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
| 298 | /* On 4xx, the reason for the machine check or program exception | 303 | /* On 4xx, the reason for the machine check or program exception |
| 299 | is in the ESR. */ | 304 | is in the ESR. */ |
| 300 | #define get_reason(regs) ((regs)->dsisr) | 305 | #define get_reason(regs) ((regs)->dsisr) |
| @@ -478,6 +483,8 @@ void machine_check_exception(struct pt_regs *regs) | |||
| 478 | { | 483 | { |
| 479 | int recover = 0; | 484 | int recover = 0; |
| 480 | 485 | ||
| 486 | __get_cpu_var(irq_stat).mce_exceptions++; | ||
| 487 | |||
| 481 | /* See if any machine dependent calls. In theory, we would want | 488 | /* See if any machine dependent calls. In theory, we would want |
| 482 | * to call the CPU first, and call the ppc_md. one if the CPU | 489 | * to call the CPU first, and call the ppc_md. one if the CPU |
| 483 | * one returns a positive number. However there is existing code | 490 | * one returns a positive number. However there is existing code |
| @@ -960,6 +967,8 @@ void vsx_unavailable_exception(struct pt_regs *regs) | |||
| 960 | 967 | ||
| 961 | void performance_monitor_exception(struct pt_regs *regs) | 968 | void performance_monitor_exception(struct pt_regs *regs) |
| 962 | { | 969 | { |
| 970 | __get_cpu_var(irq_stat).pmu_irqs++; | ||
| 971 | |||
| 963 | perf_irq(regs); | 972 | perf_irq(regs); |
| 964 | } | 973 | } |
| 965 | 974 | ||
| @@ -1024,10 +1033,69 @@ void SoftwareEmulation(struct pt_regs *regs) | |||
| 1024 | } | 1033 | } |
| 1025 | #endif /* CONFIG_8xx */ | 1034 | #endif /* CONFIG_8xx */ |
| 1026 | 1035 | ||
| 1027 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | 1036 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
| 1037 | static void handle_debug(struct pt_regs *regs, unsigned long debug_status) | ||
| 1038 | { | ||
| 1039 | int changed = 0; | ||
| 1040 | /* | ||
| 1041 | * Determine the cause of the debug event, clear the | ||
| 1042 | * event flags and send a trap to the handler. | ||
| 1043 | */ | ||
| 1044 | if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { | ||
| 1045 | dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W); | ||
| 1046 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE | ||
| 1047 | current->thread.dbcr2 &= ~DBCR2_DAC12MODE; | ||
| 1048 | #endif | ||
| 1049 | do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT, | ||
| 1050 | 5); | ||
| 1051 | changed |= 0x01; | ||
| 1052 | } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) { | ||
| 1053 | dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W); | ||
| 1054 | do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT, | ||
| 1055 | 6); | ||
| 1056 | changed |= 0x01; | ||
| 1057 | } else if (debug_status & DBSR_IAC1) { | ||
| 1058 | current->thread.dbcr0 &= ~DBCR0_IAC1; | ||
| 1059 | dbcr_iac_range(current) &= ~DBCR_IAC12MODE; | ||
| 1060 | do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT, | ||
| 1061 | 1); | ||
| 1062 | changed |= 0x01; | ||
| 1063 | } else if (debug_status & DBSR_IAC2) { | ||
| 1064 | current->thread.dbcr0 &= ~DBCR0_IAC2; | ||
| 1065 | do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT, | ||
| 1066 | 2); | ||
| 1067 | changed |= 0x01; | ||
| 1068 | } else if (debug_status & DBSR_IAC3) { | ||
| 1069 | current->thread.dbcr0 &= ~DBCR0_IAC3; | ||
| 1070 | dbcr_iac_range(current) &= ~DBCR_IAC34MODE; | ||
| 1071 | do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT, | ||
| 1072 | 3); | ||
| 1073 | changed |= 0x01; | ||
| 1074 | } else if (debug_status & DBSR_IAC4) { | ||
| 1075 | current->thread.dbcr0 &= ~DBCR0_IAC4; | ||
| 1076 | do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT, | ||
| 1077 | 4); | ||
| 1078 | changed |= 0x01; | ||
| 1079 | } | ||
| 1080 | /* | ||
| 1081 | * By the time this routine is called, MSR(DE) has already been | ||
| 1082 | * cleared. Check the remaining debug flags to decide whether it | ||
| 1083 | * needs to be turned back on. | ||
| 1084 | */ | ||
| 1085 | if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1)) | ||
| 1086 | regs->msr |= MSR_DE; | ||
| 1087 | else | ||
| 1088 | /* Make sure the IDM flag is off */ | ||
| 1089 | current->thread.dbcr0 &= ~DBCR0_IDM; | ||
| 1090 | |||
| 1091 | if (changed & 0x01) | ||
| 1092 | mtspr(SPRN_DBCR0, current->thread.dbcr0); | ||
| 1093 | } | ||
| 1028 | 1094 | ||
| 1029 | void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) | 1095 | void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) |
| 1030 | { | 1096 | { |
| 1097 | current->thread.dbsr = debug_status; | ||
| 1098 | |||
| 1031 | /* Hack alert: On BookE, Branch Taken stops on the branch itself, while | 1099 | /* Hack alert: On BookE, Branch Taken stops on the branch itself, while |
| 1032 | * on server, it stops on the target of the branch. In order to simulate | 1100 | * on server, it stops on the target of the branch. In order to simulate |
| 1033 | * the server behaviour, we thus restart right away with a single step | 1101 | * the server behaviour, we thus restart right away with a single step |
| @@ -1071,29 +1139,23 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) | |||
| 1071 | if (debugger_sstep(regs)) | 1139 | if (debugger_sstep(regs)) |
| 1072 | return; | 1140 | return; |
| 1073 | 1141 | ||
| 1074 | if (user_mode(regs)) | ||
| 1075 | current->thread.dbcr0 &= ~(DBCR0_IC); | ||
| 1076 | |||
| 1077 | _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); | ||
| 1078 | } else if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { | ||
| 1079 | regs->msr &= ~MSR_DE; | ||
| 1080 | |||
| 1081 | if (user_mode(regs)) { | 1142 | if (user_mode(regs)) { |
| 1082 | current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W | | 1143 | current->thread.dbcr0 &= ~DBCR0_IC; |
| 1083 | DBCR0_IDM); | 1144 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
| 1084 | } else { | 1145 | if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, |
| 1085 | /* Disable DAC interupts */ | 1146 | current->thread.dbcr1)) |
| 1086 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | | 1147 | regs->msr |= MSR_DE; |
| 1087 | DBSR_DAC1W | DBCR0_IDM)); | 1148 | else |
| 1088 | 1149 | /* Make sure the IDM bit is off */ | |
| 1089 | /* Clear the DAC event */ | 1150 | current->thread.dbcr0 &= ~DBCR0_IDM; |
| 1090 | mtspr(SPRN_DBSR, (DBSR_DAC1R | DBSR_DAC1W)); | 1151 | #endif |
| 1091 | } | 1152 | } |
| 1092 | /* Setup and send the trap to the handler */ | 1153 | |
| 1093 | do_dabr(regs, mfspr(SPRN_DAC1), debug_status); | 1154 | _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); |
| 1094 | } | 1155 | } else |
| 1156 | handle_debug(regs, debug_status); | ||
| 1095 | } | 1157 | } |
| 1096 | #endif /* CONFIG_4xx || CONFIG_BOOKE */ | 1158 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ |
| 1097 | 1159 | ||
| 1098 | #if !defined(CONFIG_TAU_INT) | 1160 | #if !defined(CONFIG_TAU_INT) |
| 1099 | void TAUException(struct pt_regs *regs) | 1161 | void TAUException(struct pt_regs *regs) |
