author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2008-04-17 00:35:01 -0400
committer	Paul Mackerras <paulus@samba.org>	2008-04-18 01:38:47 -0400
commit		945feb174b14e7098cc7ecf0cf4768d35bc52f9c (patch)
tree		9810b2ff0efe8edbfb1506f65834ea0d553e2848 /arch
parent		fd3e0bbc6052ca9747a5332b382584ece83aab6d (diff)
[POWERPC] irqtrace support for 64-bit powerpc
This adds the low level irq tracing hooks to the powerpc architecture
needed to enable full lockdep functionality.
This is partly based on Johannes Berg's initial version. I removed
the asm trampoline that isn't needed (thus improving performance) and
modified all sorts of bits and pieces, reworking most of the assembly,
etc...
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/Kconfig		|  9
-rw-r--r--	arch/powerpc/kernel/entry_64.S	| 27
-rw-r--r--	arch/powerpc/kernel/head_64.S	| 47
-rw-r--r--	arch/powerpc/kernel/irq.c	|  3
-rw-r--r--	arch/powerpc/kernel/ppc_ksyms.c	|  4
-rw-r--r--	arch/powerpc/kernel/setup_64.c	|  4
6 files changed, 72 insertions, 22 deletions
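Note: the diffstat above is limited to 'arch', so the include/asm-powerpc/irqflags.h additions that define the TRACE_AND_RESTORE_IRQ and TRACE_AND_RESTORE_IRQ_PARTIAL helpers used in the hunks below are not shown. The following is only a sketch, reconstructed from how the diff uses the macros (labels and exact layout are assumptions): restore paca->soft_enabled and tell lockdep about the transition via trace_hardirqs_on()/trace_hardirqs_off().

/* Sketch (assumed, not part of the 'arch' diff): helpers from asm/irqflags.h */
#ifdef CONFIG_TRACE_IRQFLAGS
/* Call lockdep's C hooks from asm, then update the soft-enable state.
 * The PARTIAL variant leaves the final store/branch to the caller so
 * head_64.S can skip .raw_local_irq_restore when staying soft-disabled.
 */
#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip)	\
	cmpdi	en,0;				\
	bne	95f;				\
	stb	en,PACASOFTIRQEN(r13);		\
	bl	.trace_hardirqs_off;		\
	b	skip;				\
95:	bl	.trace_hardirqs_on;		\
	li	en,1;
#define TRACE_AND_RESTORE_IRQ(en)		\
	TRACE_AND_RESTORE_IRQ_PARTIAL(en,96f);	\
	stb	en,PACASOFTIRQEN(r13);		\
96:
#else
#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip)
#define TRACE_AND_RESTORE_IRQ(en)	stb	en,PACASOFTIRQEN(r13)
#endif

With irq tracing disabled, TRACE_AND_RESTORE_IRQ() collapses to the plain store of paca->soft_enabled that it replaces in entry_64.S and head_64.S.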
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ecca20d17a7b..4bb2e9310a56 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -53,6 +53,15 @@ config STACKTRACE_SUPPORT
 	bool
 	default y
 
+config TRACE_IRQFLAGS_SUPPORT
+	bool
+	depends on PPC64
+	default y
+
+config LOCKDEP_SUPPORT
+	bool
+	default y
+
 config RWSEM_GENERIC_SPINLOCK
 	bool
 
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 13019845536b..c0db5b769e55 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -30,6 +30,7 @@
 #include <asm/firmware.h>
 #include <asm/bug.h>
 #include <asm/ptrace.h>
+#include <asm/irqflags.h>
 
 /*
  * System calls.
@@ -89,6 +90,14 @@ system_call_common:
 	addi	r9,r1,STACK_FRAME_OVERHEAD
 	ld	r11,exception_marker@toc(r2)
 	std	r11,-16(r9)		/* "regshere" marker */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	.trace_hardirqs_on
+	REST_GPR(0,r1)
+	REST_4GPRS(3,r1)
+	REST_2GPRS(7,r1)
+	addi	r9,r1,STACK_FRAME_OVERHEAD
+	ld	r12,_MSR(r1)
+#endif /* CONFIG_TRACE_IRQFLAGS */
 	li	r10,1
 	stb	r10,PACASOFTIRQEN(r13)
 	stb	r10,PACAHARDIRQEN(r13)
@@ -103,7 +112,7 @@ BEGIN_FW_FTR_SECTION
 	b	hardware_interrupt_entry
 2:
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
+#endif /* CONFIG_PPC_ISERIES */
 	mfmsr	r11
 	ori	r11,r11,MSR_EE
 	mtmsrd	r11,1
@@ -505,6 +514,10 @@ BEGIN_FW_FTR_SECTION
 
 	li	r3,0
 	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	.trace_hardirqs_off
+	mfmsr	r10
+#endif
 	ori	r10,r10,MSR_EE
 	mtmsrd	r10			/* hard-enable again */
 	addi	r3,r1,STACK_FRAME_OVERHEAD
@@ -513,7 +526,7 @@ BEGIN_FW_FTR_SECTION
 4:
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif
-	stb	r5,PACASOFTIRQEN(r13)
+	TRACE_AND_RESTORE_IRQ(r5);
 
 	/* extract EE bit and use it to restore paca->hard_enabled */
 	ld	r3,_MSR(r1)
@@ -581,6 +594,16 @@ do_work:
 	bne	restore
 	/* here we are preempting the current task */
 1:
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	.trace_hardirqs_on
+	/* Note: we just clobbered r10 which used to contain the previous
+	 * MSR before the hard-disabling done by the caller of do_work.
+	 * We don't have that value anymore, but it doesn't matter as
+	 * we will hard-enable unconditionally, we can just reload the
+	 * current MSR into r10
+	 */
+	mfmsr	r10
+#endif /* CONFIG_TRACE_IRQFLAGS */
 	li	r0,1
 	stb	r0,PACASOFTIRQEN(r13)
 	stb	r0,PACAHARDIRQEN(r13)
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 44229c3749ac..215973a2c8d5 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -36,8 +36,7 @@
 #include <asm/firmware.h>
 #include <asm/page_64.h>
 #include <asm/exception.h>
-
-#define DO_SOFT_DISABLE
+#include <asm/irqflags.h>
 
 /*
  * We layout physical memory as follows:
@@ -450,8 +449,8 @@ bad_stack:
  */
 fast_exc_return_irq:			/* restores irq state too */
 	ld	r3,SOFTE(r1)
+	TRACE_AND_RESTORE_IRQ(r3);
 	ld	r12,_MSR(r1)
-	stb	r3,PACASOFTIRQEN(r13)	/* restore paca->soft_enabled */
 	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
 	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
 	b	1f
@@ -824,7 +823,7 @@ _STATIC(load_up_altivec)
 * Hash table stuff
 */
	.align	7
-_GLOBAL(do_hash_page)
+_STATIC(do_hash_page)
 	std	r3,_DAR(r1)
 	std	r4,_DSISR(r1)
 
@@ -836,6 +835,27 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 
 	/*
+	 * On iSeries, we soft-disable interrupts here, then
+	 * hard-enable interrupts so that the hash_page code can spin on
+	 * the hash_table_lock without problems on a shared processor.
+	 */
+	DISABLE_INTS
+
+	/*
+	 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
+	 * and will clobber volatile registers when irq tracing is enabled
+	 * so we need to reload them. It may be possible to be smarter here
+	 * and move the irq tracing elsewhere but let's keep it simple for
+	 * now
+	 */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	ld	r3,_DAR(r1)
+	ld	r4,_DSISR(r1)
+	ld	r5,_TRAP(r1)
+	ld	r12,_MSR(r1)
+	clrrdi	r5,r5,4
+#endif /* CONFIG_TRACE_IRQFLAGS */
+	/*
 	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
 	 * accessing a userspace segment (even from the kernel). We assume
 	 * kernel addresses always have the high bit set.
@@ -848,13 +868,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
 
 	/*
-	 * On iSeries, we soft-disable interrupts here, then
-	 * hard-enable interrupts so that the hash_page code can spin on
-	 * the hash_table_lock without problems on a shared processor.
-	 */
-	DISABLE_INTS
-
-	/*
 	 * r3 contains the faulting address
 	 * r4 contains the required access permissions
 	 * r5 contains the trap number
@@ -864,7 +877,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 	bl	.hash_page		/* build HPTE if possible */
 	cmpdi	r3,0			/* see if hash_page succeeded */
 
-#ifdef DO_SOFT_DISABLE
 BEGIN_FW_FTR_SECTION
 	/*
 	 * If we had interrupts soft-enabled at the point where the
@@ -876,7 +888,7 @@ BEGIN_FW_FTR_SECTION
 	 */
 	beq	13f
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
+
 BEGIN_FW_FTR_SECTION
 	/*
 	 * Here we have interrupts hard-disabled, so it is sufficient
@@ -890,11 +902,12 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
 
 	/*
 	 * hash_page couldn't handle it, set soft interrupt enable back
-	 * to what it was before the trap. Note that .local_irq_restore
+	 * to what it was before the trap. Note that .raw_local_irq_restore
 	 * handles any interrupts pending at this point.
 	 */
 	ld	r3,SOFTE(r1)
-	bl	.local_irq_restore
+	TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
+	bl	.raw_local_irq_restore
 	b	11f
 
 /* Here we have a page fault that hash_page can't handle. */
@@ -1493,6 +1506,10 @@ _INIT_STATIC(start_here_multiplatform)
 	addi	r2,r2,0x4000
 	add	r2,r2,r26
 
+	/* Set initial ptr to current */
+	LOAD_REG_IMMEDIATE(r4, init_task)
+	std	r4,PACACURRENT(r13)
+
 	/* Do very early kernel initializations, including initial hash table,
 	 * stab and slb setup before we turn on relocation. */
 
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 4617b65d464d..425616f92d18 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -114,7 +114,7 @@ static inline void set_soft_enabled(unsigned long enable)
 	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
 }
 
-void local_irq_restore(unsigned long en)
+void raw_local_irq_restore(unsigned long en)
 {
 	/*
 	 * get_paca()->soft_enabled = en;
@@ -174,6 +174,7 @@ void local_irq_restore(unsigned long en)
 
 	__hard_irq_enable();
 }
+EXPORT_SYMBOL(raw_local_irq_restore);
 #endif /* CONFIG_PPC64 */
 
 int show_interrupts(struct seq_file *p, void *v)
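For context (an assumption about the matching generic code, not part of this patch): with CONFIG_TRACE_IRQFLAGS enabled, include/linux/irqflags.h of this era provides local_irq_restore() as a macro that wraps the architecture's raw_ variant with the lockdep trace hooks, roughly along these lines:

/* Rough sketch of the assumed generic wrapper in include/linux/irqflags.h */
#define local_irq_restore(flags)				\
	do {							\
		if (raw_irqs_disabled_flags(flags)) {		\
			raw_local_irq_restore(flags);		\
			trace_hardirqs_off();			\
		} else {					\
			trace_hardirqs_on();			\
			raw_local_irq_restore(flags);		\
		}						\
	} while (0)

This is why the ppc64 implementation is renamed to raw_local_irq_restore() and its export moves here next to the definition; the old export in ppc_ksyms.c is removed in the following hunk.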
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 5a4c76eada48..b9b765c7d1a7 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -45,10 +45,6 @@
 #include <asm/signal.h>
 #include <asm/dcr.h>
 
-#ifdef CONFIG_PPC64
-EXPORT_SYMBOL(local_irq_restore);
-#endif
-
 #ifdef CONFIG_PPC32
 extern void transfer_to_handler(void);
 extern void do_IRQ(struct pt_regs *regs);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 0205d408d2ed..31ada9fdfc5c 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -33,6 +33,7 @@
 #include <linux/serial_8250.h>
 #include <linux/bootmem.h>
 #include <linux/pci.h>
+#include <linux/lockdep.h>
 #include <linux/lmb.h>
 #include <asm/io.h>
 #include <asm/kdump.h>
@@ -178,6 +179,9 @@ void __init early_setup(unsigned long dt_ptr)
 	/* Enable early debugging if any specified (see udbg.h) */
 	udbg_early_init();
 
+	/* Initialize lockdep early or else spinlocks will blow */
+	lockdep_init();
+
 	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);
 
 	/*