author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2008-04-17 00:35:01 -0400
committer  Paul Mackerras <paulus@samba.org>                   2008-04-18 01:38:47 -0400
commit     945feb174b14e7098cc7ecf0cf4768d35bc52f9c
tree       9810b2ff0efe8edbfb1506f65834ea0d553e2848
parent     fd3e0bbc6052ca9747a5332b382584ece83aab6d
[POWERPC] irqtrace support for 64-bit powerpc
This adds the low-level irq tracing hooks to the powerpc architecture
needed to enable full lockdep functionality.

This is partly based on Johannes Berg's initial version. I removed
the asm trampoline that isn't needed (thus improving performance) and
modified all sorts of bits and pieces, reworking most of the assembly,
etc.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
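
For context: these arch hooks are not normally called directly by C code. When
CONFIG_TRACE_IRQFLAGS is enabled, the generic include/linux/irqflags.h wraps the
architecture's raw_local_irq_*() primitives and raw_irqs_disabled_flags() (both
provided by this patch in hw_irq.h) with calls into lockdep's trace_hardirqs_on()
and trace_hardirqs_off(). Roughly, and simplified for illustration only (this
sketch is not part of the patch):

/*
 * Simplified sketch of the generic CONFIG_TRACE_IRQFLAGS wrappers in
 * include/linux/irqflags.h (illustration only; the real header also
 * covers the save-flags and safe-halt variants).
 */
#define local_irq_enable()					\
	do {							\
		trace_hardirqs_on();	/* tell lockdep first */ \
		raw_local_irq_enable();				\
	} while (0)

#define local_irq_disable()					\
	do {							\
		raw_local_irq_disable();			\
		trace_hardirqs_off();	/* then tell lockdep */	\
	} while (0)

#define local_irq_restore(flags)				\
	do {							\
		if (raw_irqs_disabled_flags(flags)) {		\
			raw_local_irq_restore(flags);		\
			trace_hardirqs_off();			\
		} else {					\
			trace_hardirqs_on();			\
			raw_local_irq_restore(flags);		\
		}						\
	} while (0)

The assembly paths touched below (system-call entry, exception return, do_work,
the hash-fault path) cannot go through these C wrappers, which is why the patch
calls .trace_hardirqs_on/.trace_hardirqs_off directly from assembly and keeps
paca->soft_enabled in sync by hand.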
 -rw-r--r--  arch/powerpc/Kconfig             |  9
 -rw-r--r--  arch/powerpc/kernel/entry_64.S   | 27
 -rw-r--r--  arch/powerpc/kernel/head_64.S    | 47
 -rw-r--r--  arch/powerpc/kernel/irq.c        |  3
 -rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c  |  4
 -rw-r--r--  arch/powerpc/kernel/setup_64.c   |  4
 -rw-r--r--  include/asm-powerpc/exception.h  |  6
 -rw-r--r--  include/asm-powerpc/hw_irq.h     | 13
 -rw-r--r--  include/asm-powerpc/irqflags.h   | 37
 -rw-r--r--  include/asm-powerpc/rwsem.h      | 35
 -rw-r--r--  include/asm-powerpc/spinlock.h   |  1
 11 files changed, 134 insertions, 52 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ecca20d17a7b..4bb2e9310a56 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -53,6 +53,15 @@ config STACKTRACE_SUPPORT
 	bool
 	default y
 
+config TRACE_IRQFLAGS_SUPPORT
+	bool
+	depends on PPC64
+	default y
+
+config LOCKDEP_SUPPORT
+	bool
+	default y
+
 config RWSEM_GENERIC_SPINLOCK
 	bool
 
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 13019845536b..c0db5b769e55 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -30,6 +30,7 @@
 #include <asm/firmware.h>
 #include <asm/bug.h>
 #include <asm/ptrace.h>
+#include <asm/irqflags.h>
 
 /*
  * System calls.
@@ -89,6 +90,14 @@ system_call_common:
 	addi	r9,r1,STACK_FRAME_OVERHEAD
 	ld	r11,exception_marker@toc(r2)
 	std	r11,-16(r9)		/* "regshere" marker */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	.trace_hardirqs_on
+	REST_GPR(0,r1)
+	REST_4GPRS(3,r1)
+	REST_2GPRS(7,r1)
+	addi	r9,r1,STACK_FRAME_OVERHEAD
+	ld	r12,_MSR(r1)
+#endif /* CONFIG_TRACE_IRQFLAGS */
 	li	r10,1
 	stb	r10,PACASOFTIRQEN(r13)
 	stb	r10,PACAHARDIRQEN(r13)
@@ -103,7 +112,7 @@ BEGIN_FW_FTR_SECTION
 	b	hardware_interrupt_entry
 2:
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
+#endif /* CONFIG_PPC_ISERIES */
 	mfmsr	r11
 	ori	r11,r11,MSR_EE
 	mtmsrd	r11,1
@@ -505,6 +514,10 @@ BEGIN_FW_FTR_SECTION
 
 	li	r3,0
 	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	.trace_hardirqs_off
+	mfmsr	r10
+#endif
 	ori	r10,r10,MSR_EE
 	mtmsrd	r10			/* hard-enable again */
 	addi	r3,r1,STACK_FRAME_OVERHEAD
@@ -513,7 +526,7 @@ BEGIN_FW_FTR_SECTION
 4:
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif
-	stb	r5,PACASOFTIRQEN(r13)
+	TRACE_AND_RESTORE_IRQ(r5);
 
 	/* extract EE bit and use it to restore paca->hard_enabled */
 	ld	r3,_MSR(r1)
@@ -581,6 +594,16 @@ do_work:
 	bne	restore
 	/* here we are preempting the current task */
 1:
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	.trace_hardirqs_on
+	/* Note: we just clobbered r10 which used to contain the previous
+	 * MSR before the hard-disabling done by the caller of do_work.
+	 * We don't have that value anymore, but it doesn't matter as
+	 * we will hard-enable unconditionally, we can just reload the
+	 * current MSR into r10
+	 */
+	mfmsr	r10
+#endif /* CONFIG_TRACE_IRQFLAGS */
 	li	r0,1
 	stb	r0,PACASOFTIRQEN(r13)
 	stb	r0,PACAHARDIRQEN(r13)
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 44229c3749ac..215973a2c8d5 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -36,8 +36,7 @@
 #include <asm/firmware.h>
 #include <asm/page_64.h>
 #include <asm/exception.h>
-
-#define DO_SOFT_DISABLE
+#include <asm/irqflags.h>
 
 /*
  * We layout physical memory as follows:
@@ -450,8 +449,8 @@ bad_stack:
 	 */
 fast_exc_return_irq:			/* restores irq state too */
 	ld	r3,SOFTE(r1)
+	TRACE_AND_RESTORE_IRQ(r3);
 	ld	r12,_MSR(r1)
-	stb	r3,PACASOFTIRQEN(r13)	/* restore paca->soft_enabled */
 	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
 	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
 	b	1f
@@ -824,7 +823,7 @@ _STATIC(load_up_altivec)
 * Hash table stuff
 */
	.align	7
-_GLOBAL(do_hash_page)
+_STATIC(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
 
@@ -836,6 +835,27 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 
 	/*
+	 * On iSeries, we soft-disable interrupts here, then
+	 * hard-enable interrupts so that the hash_page code can spin on
+	 * the hash_table_lock without problems on a shared processor.
+	 */
+	DISABLE_INTS
+
+	/*
+	 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
+	 * and will clobber volatile registers when irq tracing is enabled
+	 * so we need to reload them. It may be possible to be smarter here
+	 * and move the irq tracing elsewhere but let's keep it simple for
+	 * now
+	 */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	ld	r3,_DAR(r1)
+	ld	r4,_DSISR(r1)
+	ld	r5,_TRAP(r1)
+	ld	r12,_MSR(r1)
+	clrrdi	r5,r5,4
+#endif /* CONFIG_TRACE_IRQFLAGS */
+	/*
 	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
 	 * accessing a userspace segment (even from the kernel). We assume
 	 * kernel addresses always have the high bit set.
@@ -848,13 +868,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
 
 	/*
-	 * On iSeries, we soft-disable interrupts here, then
-	 * hard-enable interrupts so that the hash_page code can spin on
-	 * the hash_table_lock without problems on a shared processor.
-	 */
-	DISABLE_INTS
-
-	/*
 	 * r3 contains the faulting address
 	 * r4 contains the required access permissions
 	 * r5 contains the trap number
@@ -864,7 +877,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 	bl	.hash_page		/* build HPTE if possible */
 	cmpdi	r3,0			/* see if hash_page succeeded */
 
-#ifdef DO_SOFT_DISABLE
 BEGIN_FW_FTR_SECTION
 	/*
 	 * If we had interrupts soft-enabled at the point where the
@@ -876,7 +888,7 @@ BEGIN_FW_FTR_SECTION
 	 */
 	beq	13f
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
+
 BEGIN_FW_FTR_SECTION
 	/*
 	 * Here we have interrupts hard-disabled, so it is sufficient
@@ -890,11 +902,12 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
 
 	/*
 	 * hash_page couldn't handle it, set soft interrupt enable back
-	 * to what it was before the trap. Note that .local_irq_restore
+	 * to what it was before the trap. Note that .raw_local_irq_restore
 	 * handles any interrupts pending at this point.
 	 */
 	ld	r3,SOFTE(r1)
-	bl	.local_irq_restore
+	TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
+	bl	.raw_local_irq_restore
 	b	11f
 
 /* Here we have a page fault that hash_page can't handle. */
@@ -1493,6 +1506,10 @@ _INIT_STATIC(start_here_multiplatform)
 	addi	r2,r2,0x4000
 	add	r2,r2,r26
 
+	/* Set initial ptr to current */
+	LOAD_REG_IMMEDIATE(r4, init_task)
+	std	r4,PACACURRENT(r13)
+
 	/* Do very early kernel initializations, including initial hash table,
 	 * stab and slb setup before we turn on relocation. */
 
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 4617b65d464d..425616f92d18 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -114,7 +114,7 @@ static inline void set_soft_enabled(unsigned long enable)
 	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
 }
 
-void local_irq_restore(unsigned long en)
+void raw_local_irq_restore(unsigned long en)
 {
 	/*
 	 * get_paca()->soft_enabled = en;
@@ -174,6 +174,7 @@ void local_irq_restore(unsigned long en)
 
 	__hard_irq_enable();
 }
+EXPORT_SYMBOL(raw_local_irq_restore);
 #endif /* CONFIG_PPC64 */
 
 int show_interrupts(struct seq_file *p, void *v)
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 5a4c76eada48..b9b765c7d1a7 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -45,10 +45,6 @@
 #include <asm/signal.h>
 #include <asm/dcr.h>
 
-#ifdef CONFIG_PPC64
-EXPORT_SYMBOL(local_irq_restore);
-#endif
-
 #ifdef CONFIG_PPC32
 extern void transfer_to_handler(void);
 extern void do_IRQ(struct pt_regs *regs);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 0205d408d2ed..31ada9fdfc5c 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -33,6 +33,7 @@
 #include <linux/serial_8250.h>
 #include <linux/bootmem.h>
 #include <linux/pci.h>
+#include <linux/lockdep.h>
 #include <linux/lmb.h>
 #include <asm/io.h>
 #include <asm/kdump.h>
@@ -178,6 +179,9 @@ void __init early_setup(unsigned long dt_ptr)
 	/* Enable early debugging if any specified (see udbg.h) */
 	udbg_early_init();
 
+	/* Initialize lockdep early or else spinlocks will blow */
+	lockdep_init();
+
 	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);
 
 	/*
diff --git a/include/asm-powerpc/exception.h b/include/asm-powerpc/exception.h
index 39abdb02fdef..329148b5acc6 100644
--- a/include/asm-powerpc/exception.h
+++ b/include/asm-powerpc/exception.h
@@ -228,18 +228,18 @@ label##_pSeries:	\
 BEGIN_FW_FTR_SECTION;				\
 	stb	r11,PACAHARDIRQEN(r13);		\
 END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES);	\
+	TRACE_DISABLE_INTS;			\
 BEGIN_FW_FTR_SECTION;				\
 	mfmsr	r10;				\
 	ori	r10,r10,MSR_EE;			\
 	mtmsrd	r10,1;				\
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-
 #else
 #define DISABLE_INTS				\
 	li	r11,0;				\
 	stb	r11,PACASOFTIRQEN(r13);		\
-	stb	r11,PACAHARDIRQEN(r13)
-
+	stb	r11,PACAHARDIRQEN(r13);		\
+	TRACE_DISABLE_INTS
 #endif /* CONFIG_PPC_ISERIES */
 
 #define ENABLE_INTS				\
diff --git a/include/asm-powerpc/hw_irq.h b/include/asm-powerpc/hw_irq.h
index a7b60bf639e0..ad8c9f7fd0e3 100644
--- a/include/asm-powerpc/hw_irq.h
+++ b/include/asm-powerpc/hw_irq.h
@@ -27,7 +27,7 @@ static inline unsigned long local_get_flags(void)
 	return flags;
 }
 
-static inline unsigned long local_irq_disable(void)
+static inline unsigned long raw_local_irq_disable(void)
 {
 	unsigned long flags, zero;
 
@@ -39,14 +39,15 @@ static inline unsigned long local_irq_disable(void)
 	return flags;
 }
 
-extern void local_irq_restore(unsigned long);
+extern void raw_local_irq_restore(unsigned long);
 extern void iseries_handle_interrupts(void);
 
-#define local_irq_enable()	local_irq_restore(1)
-#define local_save_flags(flags)	((flags) = local_get_flags())
-#define local_irq_save(flags)	((flags) = local_irq_disable())
+#define raw_local_irq_enable()		raw_local_irq_restore(1)
+#define raw_local_save_flags(flags)	((flags) = local_get_flags())
+#define raw_local_irq_save(flags)	((flags) = raw_local_irq_disable())
 
-#define irqs_disabled()		(local_get_flags() == 0)
+#define raw_irqs_disabled()		(local_get_flags() == 0)
+#define raw_irqs_disabled_flags(flags)	((flags) == 0)
 
 #define __hard_irq_enable()	__mtmsrd(mfmsr() | MSR_EE, 1)
 #define __hard_irq_disable()	__mtmsrd(mfmsr() & ~MSR_EE, 1)
diff --git a/include/asm-powerpc/irqflags.h b/include/asm-powerpc/irqflags.h
index 7970cbaeaa54..cc6fdba33660 100644
--- a/include/asm-powerpc/irqflags.h
+++ b/include/asm-powerpc/irqflags.h
@@ -2,30 +2,43 @@
  * include/asm-powerpc/irqflags.h
  *
  * IRQ flags handling
- *
- * This file gets included from lowlevel asm headers too, to provide
- * wrapped versions of the local_irq_*() APIs, based on the
- * raw_local_irq_*() macros from the lowlevel headers.
  */
 #ifndef _ASM_IRQFLAGS_H
 #define _ASM_IRQFLAGS_H
 
+#ifndef __ASSEMBLY__
 /*
  * Get definitions for raw_local_save_flags(x), etc.
  */
 #include <asm-powerpc/hw_irq.h>
 
+#else
+#ifdef CONFIG_TRACE_IRQFLAGS
 /*
- * Do the CPU's IRQ-state tracing from assembly code. We call a
- * C function, so save all the C-clobbered registers:
+ * Most of the CPU's IRQ-state tracing is done from assembly code; we
+ * have to call a C function so call a wrapper that saves all the
+ * C-clobbered registers.
  */
-#ifdef CONFIG_TRACE_IRQFLAGS
-
-#error No support on PowerPC yet for CONFIG_TRACE_IRQFLAGS
-
+#define TRACE_ENABLE_INTS	bl .trace_hardirqs_on
+#define TRACE_DISABLE_INTS	bl .trace_hardirqs_off
+#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip)	\
+	cmpdi	en, 0;			\
+	bne	95f;			\
+	stb	en,PACASOFTIRQEN(r13);	\
+	bl	.trace_hardirqs_off;	\
+	b	skip;			\
+95:	bl	.trace_hardirqs_on;	\
+	li	en,1;
+#define TRACE_AND_RESTORE_IRQ(en)		\
+	TRACE_AND_RESTORE_IRQ_PARTIAL(en,96f);	\
+96:	stb	en,PACASOFTIRQEN(r13)
 #else
-# define TRACE_IRQS_ON
-# define TRACE_IRQS_OFF
+#define TRACE_ENABLE_INTS
+#define TRACE_DISABLE_INTS
+#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip)
+#define TRACE_AND_RESTORE_IRQ(en)		\
+	stb	en,PACASOFTIRQEN(r13)
+#endif
 #endif
 
 #endif
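
The control flow of the new TRACE_AND_RESTORE_IRQ(en) macro above, expressed as
a rough C equivalent (illustration only; the real implementation is the assembly
macro itself):

/*
 * Rough C equivalent of TRACE_AND_RESTORE_IRQ(en): restore the
 * soft-enable state in the PACA and tell lockdep about the change.
 */
if (en == 0) {
	get_paca()->soft_enabled = 0;	/* mark soft-disabled first... */
	trace_hardirqs_off();		/* ...then inform lockdep */
} else {
	trace_hardirqs_on();		/* inform lockdep first... */
	get_paca()->soft_enabled = 1;	/* ...then mark soft-enabled */
}

The _PARTIAL variant handles the soft-disabled case completely (store 0, call
trace_hardirqs_off, branch to the caller-supplied label), but in the enabled
case it only loads 1 into `en` and falls through, leaving the final step to the
caller: either a plain store (as TRACE_AND_RESTORE_IRQ does) or a call to
.raw_local_irq_restore (as the hash-fault path in head_64.S does, so that any
pending interrupts are replayed).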
diff --git a/include/asm-powerpc/rwsem.h b/include/asm-powerpc/rwsem.h
index cefc14728cc5..a6cc93b78b98 100644
--- a/include/asm-powerpc/rwsem.h
+++ b/include/asm-powerpc/rwsem.h
@@ -32,11 +32,20 @@ struct rw_semaphore {
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map	dep_map;
+#endif
 };
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
 #define __RWSEM_INITIALIZER(name) \
-	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
-	  LIST_HEAD_INIT((name).wait_list) }
+	{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+	  LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name)		\
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -46,12 +55,15 @@ extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
 
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
-	sem->count = RWSEM_UNLOCKED_VALUE;
-	spin_lock_init(&sem->wait_lock);
-	INIT_LIST_HEAD(&sem->wait_list);
-}
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+			 struct lock_class_key *key);
+
+#define init_rwsem(sem)					\
+	do {						\
+		static struct lock_class_key __key;	\
+							\
+		__init_rwsem((sem), #sem, &__key);	\
+	} while (0)
 
 /*
  * lock for reading
@@ -78,7 +90,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 /*
  * lock for writing
  */
-static inline void __down_write(struct rw_semaphore *sem)
+static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
 	int tmp;
 
@@ -88,6 +100,11 @@ static inline void __down_write(struct rw_semaphore *sem)
 		rwsem_down_write_failed(sem);
 }
 
+static inline void __down_write(struct rw_semaphore *sem)
+{
+	__down_write_nested(sem, 0);
+}
+
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
 	int tmp;
diff --git a/include/asm-powerpc/spinlock.h b/include/asm-powerpc/spinlock.h
index cc4cfceac67c..258c93993190 100644
--- a/include/asm-powerpc/spinlock.h
+++ b/include/asm-powerpc/spinlock.h
@@ -19,6 +19,7 @@
  *
  * (the type definitions are in asm/spinlock_types.h)
  */
+#include <linux/irqflags.h>
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
 #include <asm/hvcall.h>
