 69 files changed, 1591 insertions(+), 2871 deletions(-)
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index fc5ef90c4fc9..24ae9a366073 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -185,15 +185,6 @@ EXPORT_SYMBOL(smp_num_cpus);
 EXPORT_SYMBOL(smp_call_function);
 EXPORT_SYMBOL(smp_call_function_on_cpu);
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-#ifdef CONFIG_DEBUG_SPINLOCK
-EXPORT_SYMBOL(_raw_spin_unlock);
-EXPORT_SYMBOL(debug_spin_lock);
-EXPORT_SYMBOL(debug_spin_trylock);
-#endif
-#ifdef CONFIG_DEBUG_RWLOCK
-EXPORT_SYMBOL(_raw_write_lock);
-EXPORT_SYMBOL(_raw_read_lock);
-#endif
 EXPORT_SYMBOL(cpu_present_mask);
 #endif /* CONFIG_SMP */
 
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index e211aa7404e6..da0be3465791 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -989,175 +989,3 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 
 	preempt_enable();
 }
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-void
-_raw_spin_unlock(spinlock_t * lock)
-{
-	mb();
-	lock->lock = 0;
-
-	lock->on_cpu = -1;
-	lock->previous = NULL;
-	lock->task = NULL;
-	lock->base_file = "none";
-	lock->line_no = 0;
-}
-
-void
-debug_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
-{
-	long tmp;
-	long stuck;
-	void *inline_pc = __builtin_return_address(0);
-	unsigned long started = jiffies;
-	int printed = 0;
-	int cpu = smp_processor_id();
-
-	stuck = 1L << 30;
- try_again:
-
-	/* Use sub-sections to put the actual loop at the end
-	   of this object file's text section so as to perfect
-	   branch prediction.  */
-	__asm__ __volatile__(
-	"1:	ldl_l	%0,%1\n"
-	"	subq	%2,1,%2\n"
-	"	blbs	%0,2f\n"
-	"	or	%0,1,%0\n"
-	"	stl_c	%0,%1\n"
-	"	beq	%0,3f\n"
-	"4:	mb\n"
-	".subsection 2\n"
-	"2:	ldl	%0,%1\n"
-	"	subq	%2,1,%2\n"
-	"3:	blt	%2,4b\n"
-	"	blbs	%0,2b\n"
-	"	br	1b\n"
-	".previous"
-	: "=r" (tmp), "=m" (lock->lock), "=r" (stuck)
-	: "m" (lock->lock), "2" (stuck) : "memory");
-
-	if (stuck < 0) {
-		printk(KERN_WARNING
-		       "%s:%d spinlock stuck in %s at %p(%d)"
-		       " owner %s at %p(%d) %s:%d\n",
-		       base_file, line_no,
-		       current->comm, inline_pc, cpu,
-		       lock->task->comm, lock->previous,
-		       lock->on_cpu, lock->base_file, lock->line_no);
-		stuck = 1L << 36;
-		printed = 1;
-		goto try_again;
-	}
-
-	/* Exiting.  Got the lock.  */
-	lock->on_cpu = cpu;
-	lock->previous = inline_pc;
-	lock->task = current;
-	lock->base_file = base_file;
-	lock->line_no = line_no;
-
-	if (printed) {
-		printk(KERN_WARNING
-		       "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n",
-		       base_file, line_no, current->comm, inline_pc,
-		       cpu, jiffies - started);
-	}
-}
-
-int
-debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
-{
-	int ret;
-	if ((ret = !test_and_set_bit(0, lock))) {
-		lock->on_cpu = smp_processor_id();
-		lock->previous = __builtin_return_address(0);
-		lock->task = current;
-	} else {
-		lock->base_file = base_file;
-		lock->line_no = line_no;
-	}
-	return ret;
-}
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
-#ifdef CONFIG_DEBUG_RWLOCK
-void _raw_write_lock(rwlock_t * lock)
-{
-	long regx, regy;
-	int stuck_lock, stuck_reader;
-	void *inline_pc = __builtin_return_address(0);
-
- try_again:
-
-	stuck_lock = 1<<30;
-	stuck_reader = 1<<30;
-
-	__asm__ __volatile__(
-	"1:	ldl_l	%1,%0\n"
-	"	blbs	%1,6f\n"
-	"	blt	%1,8f\n"
-	"	mov	1,%1\n"
-	"	stl_c	%1,%0\n"
-	"	beq	%1,6f\n"
-	"4:	mb\n"
-	".subsection 2\n"
-	"6:	blt	%3,4b	# debug\n"
-	"	subl	%3,1,%3	# debug\n"
-	"	ldl	%1,%0\n"
-	"	blbs	%1,6b\n"
-	"8:	blt	%4,4b	# debug\n"
-	"	subl	%4,1,%4	# debug\n"
-	"	ldl	%1,%0\n"
-	"	blt	%1,8b\n"
-	"	br	1b\n"
-	".previous"
-	: "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (regy),
-	  "=&r" (stuck_lock), "=&r" (stuck_reader)
-	: "m" (*(volatile int *)lock), "3" (stuck_lock), "4" (stuck_reader) : "memory");
-
-	if (stuck_lock < 0) {
-		printk(KERN_WARNING "write_lock stuck at %p\n", inline_pc);
-		goto try_again;
-	}
-	if (stuck_reader < 0) {
-		printk(KERN_WARNING "write_lock stuck on readers at %p\n",
-		       inline_pc);
-		goto try_again;
-	}
-}
-
-void _raw_read_lock(rwlock_t * lock)
-{
-	long regx;
-	int stuck_lock;
-	void *inline_pc = __builtin_return_address(0);
-
- try_again:
-
-	stuck_lock = 1<<30;
-
-	__asm__ __volatile__(
-	"1:	ldl_l	%1,%0;"
-	"	blbs	%1,6f;"
-	"	subl	%1,2,%1;"
-	"	stl_c	%1,%0;"
-	"	beq	%1,6f;"
-	"4:	mb\n"
-	".subsection 2\n"
-	"6:	ldl	%1,%0;"
-	"	blt	%2,4b	# debug\n"
-	"	subl	%2,1,%2	# debug\n"
-	"	blbs	%1,6b;"
-	"	br	1b\n"
-	".previous"
-	: "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (stuck_lock)
-	: "m" (*(volatile int *)lock), "2" (stuck_lock) : "memory");
-
-	if (stuck_lock < 0) {
-		printk(KERN_WARNING "read_lock stuck at %p\n", inline_pc);
-		goto try_again;
-	}
-}
-#endif /* CONFIG_DEBUG_RWLOCK */
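
Everything deleted above follows one watchdog discipline: spin on the lock while a
large countdown drains, print the recorded owner when it runs out, then keep
spinning with a fresh budget. A minimal user-space model of that discipline in C11
atomics; the names and budget mirror the alpha code, but a portable test-and-set
stands in for the ldl_l/stl_c acquire sequence (a sketch, not the kernel code):

#include <stdatomic.h>
#include <stdio.h>

struct dbg_lock {
	atomic_flag lock;	/* initialize with ATOMIC_FLAG_INIT */
	int on_cpu;		/* debug bookkeeping, like lock->on_cpu */
};

static void dbg_spin_lock(struct dbg_lock *l, int cpu)
{
	long stuck = 1L << 30;	/* same watchdog budget as the alpha code */

	while (atomic_flag_test_and_set_explicit(&l->lock, memory_order_acquire)) {
		if (--stuck < 0) {
			/* the kernel version printed owner PC/CPU/file:line here */
			fprintf(stderr, "spinlock stuck, last owner cpu %d\n",
				l->on_cpu);
			stuck = 1L << 30;
		}
	}
	l->on_cpu = cpu;	/* record ownership only after acquiring */
}
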
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 4ebbf3974381..8d484204a3ff 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -491,12 +491,7 @@ init_handler_platform (pal_min_state_area_t *ms,
 	unw_init_from_interruption(&info, current, pt, sw);
 	ia64_do_show_stack(&info, NULL);
 
-#ifdef CONFIG_SMP
-	/* read_trylock() would be handy... */
-	if (!tasklist_lock.write_lock)
-		read_lock(&tasklist_lock);
-#endif
-	{
+	if (read_trylock(&tasklist_lock)) {
 		struct task_struct *g, *t;
 		do_each_thread (g, t) {
 			if (t == current)
@@ -506,10 +501,6 @@ init_handler_platform (pal_min_state_area_t *ms,
 			show_stack(t, NULL);
 		} while_each_thread (g, t);
 	}
-#ifdef CONFIG_SMP
-	if (!tasklist_lock.write_lock)
-		read_unlock(&tasklist_lock);
-#endif
 
 	printk("\nINIT dump complete.  Please reboot now.\n");
 	while (1);			/* hang city if no debugger */
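
The replacement is possible because read_trylock() now exists in the generic API;
the deleted lines predate it and had to peek at the rwlock's write_lock field by
hand. The rule they encode, that a fatal-error handler must never block on a lock
the interrupted context may hold, can be modeled in user space (pthread rwlocks
standing in for tasklist_lock; names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t tasklist = PTHREAD_RWLOCK_INITIALIZER;

void crash_dump_tasks(void)
{
	/* tryrdlock fails instead of deadlocking if a writer holds it */
	if (pthread_rwlock_tryrdlock(&tasklist) == 0) {
		puts("walking the task list ...");
		pthread_rwlock_unlock(&tasklist);
	} else {
		puts("task list busy; skipping the dump");
	}
}
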
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index 48b187f2d2b3..a4576ac7e870 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -892,7 +892,6 @@ unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
 	int try)
 {
 	spinlock_t *ipilock;
-	unsigned long flags = 0;
 	volatile unsigned long *ipicr_addr;
 	unsigned long ipicr_val;
 	unsigned long my_physid_mask;
@@ -916,50 +915,27 @@ unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
 	 * write IPICRi (send IPIi)
 	 * unlock ipi_lock[i]
 	 */
+	spin_lock(ipilock);
 	__asm__ __volatile__ (
-		";; LOCK ipi_lock[i]		\n\t"
+		";; CHECK IPICRi == 0		\n\t"
 		".fillinsn			\n"
 		"1:				\n\t"
-		"mvfc	%1, psw			\n\t"
-		"clrpsw	#0x40 -> nop		\n\t"
-		DCACHE_CLEAR("r4", "r5", "%2")
-		"lock	r4, @%2			\n\t"
-		"addi	r4, #-1			\n\t"
-		"unlock	r4, @%2			\n\t"
-		"mvtc	%1, psw			\n\t"
-		"bnez	r4, 2f			\n\t"
-		LOCK_SECTION_START(".balign 4 \n\t")
-		".fillinsn			\n"
-		"2:				\n\t"
-		"ld	r4, @%2			\n\t"
-		"blez	r4, 2b			\n\t"
+		"ld	%0, @%1			\n\t"
+		"and	%0, %4			\n\t"
+		"beqz	%0, 2f			\n\t"
+		"bnez	%3, 3f			\n\t"
 		"bra	1b			\n\t"
-		LOCK_SECTION_END
-		";; CHECK IPICRi == 0		\n\t"
-		".fillinsn			\n"
-		"3:				\n\t"
-		"ld	%0, @%3			\n\t"
-		"and	%0, %6			\n\t"
-		"beqz	%0, 4f			\n\t"
-		"bnez	%5, 5f			\n\t"
-		"bra	3b			\n\t"
 		";; WRITE IPICRi (send IPIi)	\n\t"
 		".fillinsn			\n"
-		"4:				\n\t"
-		"st	%4, @%3			\n\t"
-		";; UNLOCK ipi_lock[i]		\n\t"
+		"2:				\n\t"
+		"st	%2, @%1			\n\t"
 		".fillinsn			\n"
-		"5:				\n\t"
-		"ldi	r4, #1			\n\t"
-		"st	r4, @%2			\n\t"
+		"3:				\n\t"
 		: "=&r"(ipicr_val)
-		: "r"(flags), "r"(&ipilock->slock), "r"(ipicr_addr),
-		  "r"(mask), "r"(try), "r"(my_physid_mask)
-		: "memory", "r4"
-#ifdef CONFIG_CHIP_M32700_TS1
-		, "r5"
-#endif	/* CONFIG_CHIP_M32700_TS1 */
+		: "r"(ipicr_addr), "r"(mask), "r"(try), "r"(my_physid_mask)
+		: "memory"
 	);
+	spin_unlock(ipilock);
 
 	return ipicr_val;
 }
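
The effect of the change is easiest to read in C: the hand-rolled ipi_lock
acquisition leaves the inline assembly entirely, and only the IPICR poll and store
remain. A hypothetical condensation of the new control flow; the loop below is what
the ld/and/beqz/bnez/st instructions implement:

	spin_lock(ipilock);		/* was: open-coded lock/unlock asm */
	for (;;) {
		ipicr_val = *ipicr_addr & my_physid_mask;
		if (ipicr_val == 0) {		/* previous IPI was consumed */
			*ipicr_addr = mask;	/* send the new one */
			break;
		}
		if (try)			/* caller asked not to wait */
			break;
	}
	spin_unlock(ipilock);
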
diff --git a/arch/mips/lib/dec_and_lock.c b/arch/mips/lib/dec_and_lock.c
index e44e9579bd36..fd82c84a93b7 100644
--- a/arch/mips/lib/dec_and_lock.c
+++ b/arch/mips/lib/dec_and_lock.c
@@ -20,14 +20,7 @@
  * has a cmpxchg, and where atomic->value is an int holding
  * the value of the atomic (i.e. the high bits aren't used
  * for a lock or anything like that).
- *
- * N.B. ATOMIC_DEC_AND_LOCK gets defined in include/linux/spinlock.h
- * if spinlocks are empty and thus atomic_dec_and_lock is defined
- * to be atomic_dec_and_test - in that case we don't need it
- * defined here as well.
  */
-
-#ifndef ATOMIC_DEC_AND_LOCK
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
 	int counter;
@@ -52,4 +45,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif /* ATOMIC_DEC_AND_LOCK */
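
For reference, _atomic_dec_and_lock() has the contract this file implements:
decrement the counter, and return true with the spinlock held exactly when the
count reaches zero. A user-space model of the cmpxchg fast path described in the
surviving comment (C11 atomics and a pthread mutex standing in for atomic_t and
spinlock_t):

#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>

bool atomic_dec_and_lock(atomic_int *counter, pthread_mutex_t *lock)
{
	int v = atomic_load(counter);

	/* fast path: while the result stays nonzero, no lock is needed */
	while (v > 1) {
		if (atomic_compare_exchange_weak(counter, &v, v - 1))
			return false;	/* counter still nonzero, lock not taken */
	}
	/* slow path: the 1 -> 0 transition happens under the lock */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(counter, 1) == 1)
		return true;		/* hit zero: caller now holds the lock */
	pthread_mutex_unlock(lock);
	return false;
}
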
diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile
index 7bf705676297..5f2e6904d14a 100644
--- a/arch/parisc/lib/Makefile
+++ b/arch/parisc/lib/Makefile
@@ -5,5 +5,3 @@
 lib-y	:= lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o
 
 obj-y	:= iomap.o
-
-lib-$(CONFIG_SMP) += debuglocks.o
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index 2de182f6fe8a..90f400b10282 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -13,8 +13,8 @@
 #include <asm/atomic.h>
 
 #ifdef CONFIG_SMP
-spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
-	[0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
+raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
+	[0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED
 };
 #endif
 
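
__atomic_hash exists because ldcw is the only atomic primitive on parisc: atomic_t
operations take one of these locks, chosen by hashing the variable's address, and
the patch only changes the element type. The hashing idea in user-space form (array
size and shift are illustrative):

#include <pthread.h>
#include <stdint.h>

#define ATOMIC_HASH_SIZE 4

static pthread_mutex_t atomic_hash[ATOMIC_HASH_SIZE] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static pthread_mutex_t *lock_for(volatile void *addr)
{
	/* drop the low bits, which are zero for aligned words */
	return &atomic_hash[((uintptr_t)addr >> 4) % ATOMIC_HASH_SIZE];
}

long atomic_add_return(long i, volatile long *v)
{
	pthread_mutex_t *m = lock_for(v);
	long ret;

	pthread_mutex_lock(m);
	ret = (*v += i);
	pthread_mutex_unlock(m);
	return ret;
}
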
diff --git a/arch/parisc/lib/debuglocks.c b/arch/parisc/lib/debuglocks.c
deleted file mode 100644
index 1b33fe6e5b7a..000000000000
--- a/arch/parisc/lib/debuglocks.c
+++ /dev/null
@@ -1,277 +0,0 @@
-/*
- *    Debugging versions of SMP locking primitives.
- *
- *    Copyright (C) 2004 Thibaut VARENE <varenet@parisc-linux.org>
- *
- *    Some code stollen from alpha & sparc64 ;)
- *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; either version 2 of the License, or
- *    (at your option) any later version.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *    GNU General Public License for more details.
- *
- *    You should have received a copy of the GNU General Public License
- *    along with this program; if not, write to the Free Software
- *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *    We use pdc_printf() throughout the file for all output messages, to avoid
- *    losing messages because of disabled interrupts. Since we're using these
- *    messages for debugging purposes, it makes sense not to send them to the
- *    linux console.
- */
-
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/hardirq.h>	/* in_interrupt() */
-#include <asm/system.h>
-#include <asm/hardirq.h>	/* in_interrupt() */
-#include <asm/pdc.h>
-
-#undef INIT_STUCK
-#define INIT_STUCK 1L << 30
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-
-
-void _dbg_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
-{
-	volatile unsigned int *a;
-	long stuck = INIT_STUCK;
-	void *inline_pc = __builtin_return_address(0);
-	unsigned long started = jiffies;
-	int printed = 0;
-	int cpu = smp_processor_id();
-
-try_again:
-
-	/* Do the actual locking */
-	/* <T-Bone> ggg: we can't get stuck on the outter loop?
-	 * <ggg> T-Bone: We can hit the outer loop
-	 *	alot if multiple CPUs are constantly racing for a lock
-	 *	and the backplane is NOT fair about which CPU sees
-	 *	the update first. But it won't hang since every failed
-	 *	attempt will drop us back into the inner loop and
-	 *	decrement `stuck'.
-	 * <ggg> K-class and some of the others are NOT fair in the HW
-	 *	implementation so we could see false positives.
-	 *	But fixing the lock contention is easier than
-	 *	fixing the HW to be fair.
-	 * <tausq> __ldcw() returns 1 if we get the lock; otherwise we
-	 *	spin until the value of the lock changes, or we time out.
-	 */
-	mb();
-	a = __ldcw_align(lock);
-	while (stuck && (__ldcw(a) == 0))
-		while ((*a == 0) && --stuck);
-	mb();
-
-	if (unlikely(stuck <= 0)) {
-		pdc_printf(
-			"%s:%d: spin_lock(%s/%p) stuck in %s at %p(%d)"
-			" owned by %s:%d in %s at %p(%d)\n",
-			base_file, line_no, lock->module, lock,
-			current->comm, inline_pc, cpu,
-			lock->bfile, lock->bline, lock->task->comm,
-			lock->previous, lock->oncpu);
-		stuck = INIT_STUCK;
-		printed = 1;
-		goto try_again;
-	}
-
-	/* Exiting.  Got the lock.  */
-	lock->oncpu = cpu;
-	lock->previous = inline_pc;
-	lock->task = current;
-	lock->bfile = (char *)base_file;
-	lock->bline = line_no;
-
-	if (unlikely(printed)) {
-		pdc_printf(
-			"%s:%d: spin_lock grabbed in %s at %p(%d) %ld ticks\n",
-			base_file, line_no, current->comm, inline_pc,
-			cpu, jiffies - started);
-	}
-}
-
-void _dbg_spin_unlock(spinlock_t * lock, const char *base_file, int line_no)
-{
-	CHECK_LOCK(lock);
-	volatile unsigned int *a;
-	mb();
-	a = __ldcw_align(lock);
-	if (unlikely((*a != 0) && lock->babble)) {
-		lock->babble--;
-		pdc_printf(
-			"%s:%d: spin_unlock(%s:%p) not locked\n",
-			base_file, line_no, lock->module, lock);
-	}
-	*a = 1;
-	mb();
-}
-
-int _dbg_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
-{
-	int ret;
-	volatile unsigned int *a;
-	mb();
-	a = __ldcw_align(lock);
-	ret = (__ldcw(a) != 0);
-	mb();
-	if (ret) {
-		lock->oncpu = smp_processor_id();
-		lock->previous = __builtin_return_address(0);
-		lock->task = current;
-	} else {
-		lock->bfile = (char *)base_file;
-		lock->bline = line_no;
-	}
-	return ret;
-}
-
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
-#ifdef CONFIG_DEBUG_RWLOCK
-
-/* Interrupts trouble detailed explanation, thx Grant:
- *
- *	o writer (wants to modify data) attempts to acquire the rwlock
- *	o He gets the write lock.
- *	o Interupts are still enabled, we take an interrupt with the
- *	  write still holding the lock.
- *	o interrupt handler tries to acquire the rwlock for read.
- *	o deadlock since the writer can't release it at this point.
- *
- * In general, any use of spinlocks that competes between "base"
- * level and interrupt level code will risk deadlock. Interrupts
- * need to be disabled in the base level routines to avoid it.
- * Or more precisely, only the IRQ the base level routine
- * is competing with for the lock.  But it's more efficient/faster
- * to just disable all interrupts on that CPU to guarantee
- * once it gets the lock it can release it quickly too.
- */
-
-void _dbg_write_lock(rwlock_t *rw, const char *bfile, int bline)
-{
-	void *inline_pc = __builtin_return_address(0);
-	unsigned long started = jiffies;
-	long stuck = INIT_STUCK;
-	int printed = 0;
-	int cpu = smp_processor_id();
-
-	if(unlikely(in_interrupt())) {	/* acquiring write lock in interrupt context, bad idea */
-		pdc_printf("write_lock caller: %s:%d, IRQs enabled,\n", bfile, bline);
-		BUG();
-	}
-
-	/* Note: if interrupts are disabled (which is most likely), the printk
-	will never show on the console. We might need a polling method to flush
-	the dmesg buffer anyhow. */
-
-retry:
-	_raw_spin_lock(&rw->lock);
-
-	if(rw->counter != 0) {
-		/* this basically never happens */
-		_raw_spin_unlock(&rw->lock);
-
-		stuck--;
-		if ((unlikely(stuck <= 0)) && (rw->counter < 0)) {
-			pdc_printf(
-				"%s:%d: write_lock stuck on writer"
-				" in %s at %p(%d) %ld ticks\n",
-				bfile, bline, current->comm, inline_pc,
-				cpu, jiffies - started);
-			stuck = INIT_STUCK;
-			printed = 1;
-		}
-		else if (unlikely(stuck <= 0)) {
-			pdc_printf(
-				"%s:%d: write_lock stuck on reader"
-				" in %s at %p(%d) %ld ticks\n",
-				bfile, bline, current->comm, inline_pc,
-				cpu, jiffies - started);
-			stuck = INIT_STUCK;
-			printed = 1;
-		}
-
-		while(rw->counter != 0);
-
-		goto retry;
-	}
-
-	/* got it.  now leave without unlocking */
-	rw->counter = -1; /* remember we are locked */
-
-	if (unlikely(printed)) {
-		pdc_printf(
-			"%s:%d: write_lock grabbed in %s at %p(%d) %ld ticks\n",
-			bfile, bline, current->comm, inline_pc,
-			cpu, jiffies - started);
-	}
-}
-
-int _dbg_write_trylock(rwlock_t *rw, const char *bfile, int bline)
-{
-#if 0
-	void *inline_pc = __builtin_return_address(0);
-	int cpu = smp_processor_id();
-#endif
-
-	if(unlikely(in_interrupt())) {	/* acquiring write lock in interrupt context, bad idea */
-		pdc_printf("write_lock caller: %s:%d, IRQs enabled,\n", bfile, bline);
-		BUG();
-	}
-
-	/* Note: if interrupts are disabled (which is most likely), the printk
-	will never show on the console. We might need a polling method to flush
-	the dmesg buffer anyhow. */
-
-	_raw_spin_lock(&rw->lock);
-
-	if(rw->counter != 0) {
-		/* this basically never happens */
-		_raw_spin_unlock(&rw->lock);
-		return 0;
-	}
-
-	/* got it.  now leave without unlocking */
-	rw->counter = -1; /* remember we are locked */
-#if 0
-	pdc_printf("%s:%d: try write_lock grabbed in %s at %p(%d)\n",
-		   bfile, bline, current->comm, inline_pc, cpu);
-#endif
-	return 1;
-}
-
-void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline)
-{
-#if 0
-	void *inline_pc = __builtin_return_address(0);
-	unsigned long started = jiffies;
-	int cpu = smp_processor_id();
-#endif
-	unsigned long flags;
-
-	local_irq_save(flags);
-	_raw_spin_lock(&rw->lock);
-
-	rw->counter++;
-#if 0
-	pdc_printf(
-		"%s:%d: read_lock grabbed in %s at %p(%d) %ld ticks\n",
-		bfile, bline, current->comm, inline_pc,
-		cpu, jiffies - started);
-#endif
-	_raw_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
-}
-
-#endif /* CONFIG_DEBUG_RWLOCK */
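
The "interrupts trouble" comment in the deleted file is worth restating with code.
A kernel-style sketch of the deadlock and its cure; the rwlock calls are the
standard API, the two function names are hypothetical:

void base_level_path(rwlock_t *rw)
{
	unsigned long flags;

	/* plain write_lock(rw) with IRQs enabled can deadlock against
	 * irq_path() on this CPU, the exact scenario the comment describes */
	write_lock_irqsave(rw, flags);
	/* ... modify the shared data ... */
	write_unlock_irqrestore(rw, flags);
}

void irq_path(rwlock_t *rw)	/* runs in interrupt context */
{
	read_lock(rw);		/* spins forever if it interrupted a writer
				 * that took rw without disabling IRQs */
	/* ... read the shared data ... */
	read_unlock(rw);
}
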
diff --git a/arch/ppc/lib/Makefile b/arch/ppc/lib/Makefile
index 1c380e67d435..f1e1fb4144f0 100644
--- a/arch/ppc/lib/Makefile
+++ b/arch/ppc/lib/Makefile
@@ -4,6 +4,5 @@
 
 obj-y	:= checksum.o string.o strcase.o dec_and_lock.o div64.o
 
-obj-$(CONFIG_SMP)	+= locks.o
 obj-$(CONFIG_8xx)	+= rheap.o
 obj-$(CONFIG_CPM2)	+= rheap.o
diff --git a/arch/ppc/lib/dec_and_lock.c b/arch/ppc/lib/dec_and_lock.c
index 4ee888070d91..b18f0d9a00fc 100644
--- a/arch/ppc/lib/dec_and_lock.c
+++ b/arch/ppc/lib/dec_and_lock.c
@@ -11,14 +11,7 @@
  * has a cmpxchg, and where atomic->value is an int holding
  * the value of the atomic (i.e. the high bits aren't used
  * for a lock or anything like that).
- *
- * N.B. ATOMIC_DEC_AND_LOCK gets defined in include/linux/spinlock.h
- * if spinlocks are empty and thus atomic_dec_and_lock is defined
- * to be atomic_dec_and_test - in that case we don't need it
- * defined here as well.
  */
-
-#ifndef ATOMIC_DEC_AND_LOCK
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
 	int counter;
@@ -43,4 +36,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif /* ATOMIC_DEC_AND_LOCK */
diff --git a/arch/ppc64/lib/dec_and_lock.c b/arch/ppc64/lib/dec_and_lock.c
index 6e8d8591708c..7b9d4da5cf92 100644
--- a/arch/ppc64/lib/dec_and_lock.c
+++ b/arch/ppc64/lib/dec_and_lock.c
@@ -20,14 +20,7 @@
 * has a cmpxchg, and where atomic->value is an int holding
 * the value of the atomic (i.e. the high bits aren't used
 * for a lock or anything like that).
- *
- * N.B. ATOMIC_DEC_AND_LOCK gets defined in include/linux/spinlock.h
- * if spinlocks are empty and thus atomic_dec_and_lock is defined
- * to be atomic_dec_and_test - in that case we don't need it
- * defined here as well.
 */
-
-#ifndef ATOMIC_DEC_AND_LOCK
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
 	int counter;
@@ -52,4 +45,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif /* ATOMIC_DEC_AND_LOCK */
diff --git a/arch/ppc64/lib/locks.c b/arch/ppc64/lib/locks.c
index ef70ef91abe2..033643ab69e0 100644
--- a/arch/ppc64/lib/locks.c
+++ b/arch/ppc64/lib/locks.c
@@ -23,12 +23,12 @@
 /* waiting for a spinlock... */
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 
-void __spin_yield(spinlock_t *lock)
+void __spin_yield(raw_spinlock_t *lock)
 {
 	unsigned int lock_value, holder_cpu, yield_count;
 	struct paca_struct *holder_paca;
 
-	lock_value = lock->lock;
+	lock_value = lock->slock;
 	if (lock_value == 0)
 		return;
 	holder_cpu = lock_value & 0xffff;
@@ -38,7 +38,7 @@ void __spin_yield(spinlock_t *lock)
 	if ((yield_count & 1) == 0)
 		return;		/* virtual cpu is currently running */
 	rmb();
-	if (lock->lock != lock_value)
+	if (lock->slock != lock_value)
 		return;		/* something has changed */
 #ifdef CONFIG_PPC_ISERIES
 	HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
@@ -54,7 +54,7 @@ void __spin_yield(spinlock_t *lock)
 * This turns out to be the same for read and write locks, since
 * we only know the holder if it is write-locked.
 */
-void __rw_yield(rwlock_t *rw)
+void __rw_yield(raw_rwlock_t *rw)
 {
 	int lock_value;
 	unsigned int holder_cpu, yield_count;
@@ -82,9 +82,9 @@ void __rw_yield(rwlock_t *rw)
 }
 #endif
 
-void spin_unlock_wait(spinlock_t *lock)
+void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
-	while (lock->lock) {
+	while (lock->slock) {
 		HMT_low();
 		if (SHARED_PROCESSOR)
 			__spin_yield(lock);
@@ -92,4 +92,4 @@ void spin_unlock_wait(spinlock_t *lock)
 	HMT_medium();
 }
 
-EXPORT_SYMBOL(spin_unlock_wait);
+EXPORT_SYMBOL(__raw_spin_unlock_wait);
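
__spin_yield() works because the ppc64 lock word stores the holder's CPU id, so a
waiter on a shared-processor partition can donate its timeslice to a preempted
holder instead of burning it. A compilable sketch of that decision logic, with a
stub in place of the HvCall2()/hypervisor yield (types and helpers are illustrative
approximations):

typedef struct { volatile unsigned int slock; } raw_spinlock_t;
struct vcpu_state { unsigned int yield_count; };	/* odd => vcpu preempted */

/* stands in for HvCall2(HvCallBaseYieldProcessor, ...) in the real code */
static void hypervisor_yield_to(unsigned int cpu, unsigned int yield_count)
{
	(void)cpu; (void)yield_count;
}

void spin_yield(raw_spinlock_t *lock, struct vcpu_state *paca)
{
	unsigned int lock_value = lock->slock;
	unsigned int holder_cpu, yield_count;

	if (lock_value == 0)
		return;				/* lock already free */
	holder_cpu = lock_value & 0xffff;	/* holder id lives in the word */
	yield_count = paca[holder_cpu].yield_count;
	if ((yield_count & 1) == 0)
		return;				/* holder is running; keep spinning */
	if (lock->slock != lock_value)
		return;				/* lock changed hands meanwhile */
	hypervisor_yield_to(holder_cpu, yield_count);
}
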
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 888b5596c195..2dc14e9c8327 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -36,7 +36,7 @@ _diag44(void)
 }
 
 void
-_raw_spin_lock_wait(spinlock_t *lp, unsigned int pc)
+_raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
 {
 	int count = spin_retry;
 
@@ -53,7 +53,7 @@ _raw_spin_lock_wait(spinlock_t *lp, unsigned int pc)
 EXPORT_SYMBOL(_raw_spin_lock_wait);
 
 int
-_raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc)
+_raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
 {
 	int count = spin_retry;
 
@@ -67,7 +67,7 @@ _raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc)
 EXPORT_SYMBOL(_raw_spin_trylock_retry);
 
 void
-_raw_read_lock_wait(rwlock_t *rw)
+_raw_read_lock_wait(raw_rwlock_t *rw)
 {
 	unsigned int old;
 	int count = spin_retry;
@@ -86,7 +86,7 @@ _raw_read_lock_wait(rwlock_t *rw)
 EXPORT_SYMBOL(_raw_read_lock_wait);
 
 int
-_raw_read_trylock_retry(rwlock_t *rw)
+_raw_read_trylock_retry(raw_rwlock_t *rw)
 {
 	unsigned int old;
 	int count = spin_retry;
@@ -102,7 +102,7 @@ _raw_read_trylock_retry(rwlock_t *rw)
 EXPORT_SYMBOL(_raw_read_trylock_retry);
 
 void
-_raw_write_lock_wait(rwlock_t *rw)
+_raw_write_lock_wait(raw_rwlock_t *rw)
 {
 	int count = spin_retry;
 
@@ -119,7 +119,7 @@ _raw_write_lock_wait(rwlock_t *rw)
 EXPORT_SYMBOL(_raw_write_lock_wait);
 
 int
-_raw_write_trylock_retry(rwlock_t *rw)
+_raw_write_trylock_retry(raw_rwlock_t *rw)
 {
 	int count = spin_retry;
 
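
All six helpers share one retry discipline: spin a bounded number of times
(spin_retry), then yield via the diagnose 0x44 instruction and start over. A
user-space model with sched_yield() in place of _diag44() (names and budget are
illustrative):

#include <stdatomic.h>
#include <sched.h>

void spin_lock_wait(atomic_flag *lock)
{
	int count = 1000;	/* stands in for the spin_retry tunable */

	for (;;) {
		if (!atomic_flag_test_and_set_explicit(lock, memory_order_acquire))
			return;		/* acquired */
		if (--count == 0) {
			sched_yield();	/* _diag44(): give the CPU back */
			count = 1000;
		}
	}
}
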
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index 5d974a2b735a..f84809333624 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -114,17 +114,7 @@ DOT_ALIAS2(unsigned, urem, unsigned, unsigned)
 /* used by various drivers */
 EXPORT_SYMBOL(sparc_cpu_model);
 EXPORT_SYMBOL(kernel_thread);
-#ifdef CONFIG_DEBUG_SPINLOCK
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(_do_spin_lock);
-EXPORT_SYMBOL(_do_spin_unlock);
-EXPORT_SYMBOL(_spin_trylock);
-EXPORT_SYMBOL(_do_read_lock);
-EXPORT_SYMBOL(_do_read_unlock);
-EXPORT_SYMBOL(_do_write_lock);
-EXPORT_SYMBOL(_do_write_unlock);
-#endif
-#else
 // XXX find what uses (or used) these.
 EXPORT_SYMBOL(___rw_read_enter);
 EXPORT_SYMBOL(___rw_read_exit);
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 2296ff9dc47a..fa5006946062 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -9,5 +9,3 @@ lib-y := mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \
 	 strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \
 	 copy_user.o locks.o atomic.o atomic32.o bitops.o \
 	 lshrdi3.o ashldi3.o rwsem.o muldi3.o bitext.o
-
-lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o
diff --git a/arch/sparc/lib/debuglocks.c b/arch/sparc/lib/debuglocks.c
deleted file mode 100644
index fb182352782c..000000000000
--- a/arch/sparc/lib/debuglocks.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/* $Id: debuglocks.c,v 1.11 2001/09/20 00:35:31 davem Exp $
- * debuglocks.c: Debugging versions of SMP locking primitives.
- *
- * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1998-99 Anton Blanchard (anton@progsoc.uts.edu.au)
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/threads.h>	/* For NR_CPUS */
-#include <linux/spinlock.h>
-#include <asm/psr.h>
-#include <asm/system.h>
-
-#ifdef CONFIG_SMP
-
-/* Some notes on how these debugging routines work.  When a lock is acquired
- * an extra debugging member lock->owner_pc is set to the caller of the lock
- * acquisition routine.  Right before releasing a lock, the debugging program
- * counter is cleared to zero.
- *
- * Furthermore, since PC's are 4 byte aligned on Sparc, we stuff the CPU
- * number of the owner in the lowest two bits.
- */
-
-#define STORE_CALLER(A) __asm__ __volatile__("mov %%i7, %0" : "=r" (A));
-
-static inline void show(char *str, spinlock_t *lock, unsigned long caller)
-{
-	int cpu = smp_processor_id();
-
-	printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)\n",str,
-		lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3);
-}
-
-static inline void show_read(char *str, rwlock_t *lock, unsigned long caller)
-{
-	int cpu = smp_processor_id();
-
-	printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)\n", str,
-		lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3);
-}
-
-static inline void show_write(char *str, rwlock_t *lock, unsigned long caller)
-{
-	int cpu = smp_processor_id();
-	int i;
-
-	printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)", str,
-		lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3);
-
-	for(i = 0; i < NR_CPUS; i++)
-		printk(" reader[%d]=%08lx", i, lock->reader_pc[i]);
-
-	printk("\n");
-}
-
-#undef INIT_STUCK
-#define INIT_STUCK 100000000
-
-void _do_spin_lock(spinlock_t *lock, char *str)
-{
-	unsigned long caller;
-	unsigned long val;
-	int cpu = smp_processor_id();
-	int stuck = INIT_STUCK;
-
-	STORE_CALLER(caller);
-
-again:
-	__asm__ __volatile__("ldstub [%1], %0" : "=r" (val) : "r" (&(lock->lock)));
-	if(val) {
-		while(lock->lock) {
-			if (!--stuck) {
-				show(str, lock, caller);
-				stuck = INIT_STUCK;
-			}
-			barrier();
-		}
-		goto again;
-	}
-	lock->owner_pc = (cpu & 3) | (caller & ~3);
-}
-
-int _spin_trylock(spinlock_t *lock)
-{
-	unsigned long val;
-	unsigned long caller;
-	int cpu = smp_processor_id();
-
-	STORE_CALLER(caller);
-
-	__asm__ __volatile__("ldstub [%1], %0" : "=r" (val) : "r" (&(lock->lock)));
-	if(!val) {
-		/* We got it, record our identity for debugging. */
-		lock->owner_pc = (cpu & 3) | (caller & ~3);
-	}
-	return val == 0;
-}
-
-void _do_spin_unlock(spinlock_t *lock)
-{
-	lock->owner_pc = 0;
-	barrier();
-	lock->lock = 0;
-}
-
-void _do_read_lock(rwlock_t *rw, char *str)
-{
-	unsigned long caller;
-	unsigned long val;
-	int cpu = smp_processor_id();
-	int stuck = INIT_STUCK;
-
-	STORE_CALLER(caller);
-
-wlock_again:
-	__asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock)));
-	if(val) {
-		while(rw->lock & 0xff) {
-			if (!--stuck) {
-				show_read(str, rw, caller);
-				stuck = INIT_STUCK;
-			}
-			barrier();
-		}
-		goto wlock_again;
-	}
-
-	rw->reader_pc[cpu] = caller;
-	barrier();
-	rw->lock++;
-}
-
-void _do_read_unlock(rwlock_t *rw, char *str)
-{
-	unsigned long caller;
-	unsigned long val;
-	int cpu = smp_processor_id();
-	int stuck = INIT_STUCK;
-
-	STORE_CALLER(caller);
-
-wlock_again:
-	__asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock)));
-	if(val) {
-		while(rw->lock & 0xff) {
-			if (!--stuck) {
-				show_read(str, rw, caller);
-				stuck = INIT_STUCK;
-			}
-			barrier();
-		}
-		goto wlock_again;
-	}
-
-	rw->reader_pc[cpu] = 0;
-	barrier();
-	rw->lock -= 0x1ff;
-}
-
-void _do_write_lock(rwlock_t *rw, char *str)
-{
-	unsigned long caller;
-	unsigned long val;
-	int cpu = smp_processor_id();
-	int stuck = INIT_STUCK;
-
-	STORE_CALLER(caller);
-
-wlock_again:
-	__asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock)));
-	if(val) {
-wlock_wait:
-		while(rw->lock) {
-			if (!--stuck) {
-				show_write(str, rw, caller);
-				stuck = INIT_STUCK;
-			}
-			barrier();
-		}
-		goto wlock_again;
-	}
-
-	if (rw->lock & ~0xff) {
-		*(((unsigned char *)&rw->lock)+3) = 0;
-		barrier();
-		goto wlock_wait;
-	}
-
-	barrier();
-	rw->owner_pc = (cpu & 3) | (caller & ~3);
-}
-
-void _do_write_unlock(rwlock_t *rw)
-{
-	rw->owner_pc = 0;
-	barrier();
-	rw->lock = 0;
-}
-
-#endif /* SMP */
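
The header comment of the deleted file describes a packing trick worth noting:
sparc PCs are 4-byte aligned, so the owner's CPU number travels in the two low bits
of owner_pc. A stand-alone illustration (the address is made up):

#include <stdint.h>
#include <stdio.h>

static uintptr_t pack_owner(uintptr_t caller_pc, unsigned int cpu)
{
	/* the low two bits of an aligned PC are free; stash the CPU there */
	return (caller_pc & ~(uintptr_t)3) | (cpu & 3);
}

int main(void)
{
	uintptr_t owner = pack_owner(0x10203044, 2);

	printf("owner PC %#lx on CPU %lu\n",
	       (unsigned long)(owner & ~3UL), (unsigned long)(owner & 3));
	return 0;
}
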
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 66255434128a..7d10b0397091 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -607,11 +607,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 	struct thread_info *t = p->thread_info;
 	char *child_trap_frame;
 
-#ifdef CONFIG_DEBUG_SPINLOCK
-	p->thread.smp_lock_count = 0;
-	p->thread.smp_lock_pc = 0;
-#endif
-
 	/* Calculate offset to stack_frame & pt_regs */
 	child_trap_frame = ((char *)t) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ));
 	memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ));
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 7d9a0f6c437d..cbb5e59824e5 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -115,17 +115,12 @@ EXPORT_PER_CPU_SYMBOL(__cpu_data);
 
 /* used by various drivers */
 #ifdef CONFIG_SMP
-#ifndef CONFIG_DEBUG_SPINLOCK
 /* Out of line rw-locking implementation. */
 EXPORT_SYMBOL(__read_lock);
 EXPORT_SYMBOL(__read_unlock);
 EXPORT_SYMBOL(__write_lock);
 EXPORT_SYMBOL(__write_unlock);
 EXPORT_SYMBOL(__write_trylock);
-/* Out of line spin-locking implementation. */
-EXPORT_SYMBOL(_raw_spin_lock);
-EXPORT_SYMBOL(_raw_spin_lock_flags);
-#endif
 
 /* Hard IRQ locking */
 EXPORT_SYMBOL(synchronize_irq);
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index 40dbeec7e5d6..d968aebe83b2 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -14,7 +14,6 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
 	 copy_in_user.o user_fixup.o memmove.o \
 	 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
 
-lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o
 lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
 
 obj-y += iomap.o
diff --git a/arch/sparc64/lib/debuglocks.c b/arch/sparc64/lib/debuglocks.c
deleted file mode 100644
index f5f0b5586f01..000000000000
--- a/arch/sparc64/lib/debuglocks.c
+++ /dev/null
@@ -1,366 +0,0 @@
-/* $Id: debuglocks.c,v 1.9 2001/11/17 00:10:48 davem Exp $
- * debuglocks.c: Debugging versions of SMP locking primitives.
- *
- * Copyright (C) 1998 David S. Miller (davem@redhat.com)
- */
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <asm/system.h>
-
-#ifdef CONFIG_SMP
-
-static inline void show (char *str, spinlock_t *lock, unsigned long caller)
-{
-	int cpu = smp_processor_id();
-
-	printk("%s(%p) CPU#%d stuck at %08x, owner PC(%08x):CPU(%x)\n",
-	       str, lock, cpu, (unsigned int) caller,
-	       lock->owner_pc, lock->owner_cpu);
-}
-
-static inline void show_read (char *str, rwlock_t *lock, unsigned long caller)
-{
-	int cpu = smp_processor_id();
-
-	printk("%s(%p) CPU#%d stuck at %08x, writer PC(%08x):CPU(%x)\n",
-	       str, lock, cpu, (unsigned int) caller,
-	       lock->writer_pc, lock->writer_cpu);
-}
-
-static inline void show_write (char *str, rwlock_t *lock, unsigned long caller)
-{
-	int cpu = smp_processor_id();
-	int i;
-
-	printk("%s(%p) CPU#%d stuck at %08x\n",
-	       str, lock, cpu, (unsigned int) caller);
-	printk("Writer: PC(%08x):CPU(%x)\n",
-	       lock->writer_pc, lock->writer_cpu);
-	printk("Readers:");
-	for (i = 0; i < NR_CPUS; i++)
-		if (lock->reader_pc[i])
-			printk(" %d[%08x]", i, lock->reader_pc[i]);
-	printk("\n");
-}
-
-#undef INIT_STUCK
-#define INIT_STUCK 100000000
-
-void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller)
-{
-	unsigned long val;
-	int stuck = INIT_STUCK;
-	int cpu = get_cpu();
-	int shown = 0;
-
-again:
-	__asm__ __volatile__("ldstub [%1], %0"
-			     : "=r" (val)
-			     : "r" (&(lock->lock))
-			     : "memory");
-	membar_storeload_storestore();
-	if (val) {
-		while (lock->lock) {
-			if (!--stuck) {
-				if (shown++ <= 2)
-					show(str, lock, caller);
-				stuck = INIT_STUCK;
-			}
-			rmb();
-		}
-		goto again;
-	}
-	lock->owner_pc = ((unsigned int)caller);
-	lock->owner_cpu = cpu;
-	current->thread.smp_lock_count++;
-	current->thread.smp_lock_pc = ((unsigned int)caller);
-
-	put_cpu();
-}
-
-int _do_spin_trylock(spinlock_t *lock, unsigned long caller)
-{
-	unsigned long val;
-	int cpu = get_cpu();
-
-	__asm__ __volatile__("ldstub [%1], %0"
-			     : "=r" (val)
-			     : "r" (&(lock->lock))
-			     : "memory");
-	membar_storeload_storestore();
-	if (!val) {
-		lock->owner_pc = ((unsigned int)caller);
-		lock->owner_cpu = cpu;
-		current->thread.smp_lock_count++;
-		current->thread.smp_lock_pc = ((unsigned int)caller);
-	}
-
-	put_cpu();
-
-	return val == 0;
-}
-
-void _do_spin_unlock(spinlock_t *lock)
-{
-	lock->owner_pc = 0;
-	lock->owner_cpu = NO_PROC_ID;
-	membar_storestore_loadstore();
-	lock->lock = 0;
-	current->thread.smp_lock_count--;
-}
-
-/* Keep INIT_STUCK the same... */
-
-void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller)
-{
-	unsigned long val;
-	int stuck = INIT_STUCK;
-	int cpu = get_cpu();
-	int shown = 0;
-
-wlock_again:
-	/* Wait for any writer to go away.  */
-	while (((long)(rw->lock)) < 0) {
-		if (!--stuck) {
-			if (shown++ <= 2)
-				show_read(str, rw, caller);
-			stuck = INIT_STUCK;
-		}
-		rmb();
-	}
-	/* Try once to increment the counter.  */
-	__asm__ __volatile__(
-"	ldx		[%0], %%g1\n"
-"	brlz,a,pn	%%g1, 2f\n"
-"	 mov		1, %0\n"
-"	add		%%g1, 1, %%g7\n"
-"	casx		[%0], %%g1, %%g7\n"
-"	sub		%%g1, %%g7, %0\n"
-"2:"	: "=r" (val)
-	: "0" (&(rw->lock))
-	: "g1", "g7", "memory");
-	membar_storeload_storestore();
-	if (val)
-		goto wlock_again;
-	rw->reader_pc[cpu] = ((unsigned int)caller);
-	current->thread.smp_lock_count++;
-	current->thread.smp_lock_pc = ((unsigned int)caller);
-
-	put_cpu();
-}
-
-void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller)
-{
-	unsigned long val;
-	int stuck = INIT_STUCK;
-	int cpu = get_cpu();
-	int shown = 0;
-
-	/* Drop our identity _first_. */
-	rw->reader_pc[cpu] = 0;
-	current->thread.smp_lock_count--;
-runlock_again:
-	/* Spin trying to decrement the counter using casx.  */
-	__asm__ __volatile__(
-"	membar	#StoreLoad | #LoadLoad\n"
-"	ldx	[%0], %%g1\n"
-"	sub	%%g1, 1, %%g7\n"
-"	casx	[%0], %%g1, %%g7\n"
-"	membar	#StoreLoad | #StoreStore\n"
-"	sub	%%g1, %%g7, %0\n"
-	: "=r" (val)
-	: "0" (&(rw->lock))
-	: "g1", "g7", "memory");
-	if (val) {
-		if (!--stuck) {
-			if (shown++ <= 2)
-				show_read(str, rw, caller);
-			stuck = INIT_STUCK;
-		}
-		goto runlock_again;
-	}
-
-	put_cpu();
-}
-
-void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller)
-{
-	unsigned long val;
-	int stuck = INIT_STUCK;
-	int cpu = get_cpu();
-	int shown = 0;
-
-wlock_again:
-	/* Spin while there is another writer. */
-	while (((long)rw->lock) < 0) {
-		if (!--stuck) {
-			if (shown++ <= 2)
-				show_write(str, rw, caller);
-			stuck = INIT_STUCK;
-		}
-		rmb();
-	}
-
-	/* Try to acuire the write bit.  */
-	__asm__ __volatile__(
-"	mov	1, %%g3\n"
-"	sllx	%%g3, 63, %%g3\n"
-"	ldx	[%0], %%g1\n"
-"	brlz,pn	%%g1, 1f\n"
-"	 or	%%g1, %%g3, %%g7\n"
-"	casx	[%0], %%g1, %%g7\n"
-"	membar	#StoreLoad | #StoreStore\n"
-"	ba,pt	%%xcc, 2f\n"
-"	 sub	%%g1, %%g7, %0\n"
-"1:	mov	1, %0\n"
-"2:"	: "=r" (val)
-	: "0" (&(rw->lock))
-	: "g3", "g1", "g7", "memory");
-	if (val) {
-		/* We couldn't get the write bit. */
-		if (!--stuck) {
-			if (shown++ <= 2)
-				show_write(str, rw, caller);
-			stuck = INIT_STUCK;
-		}
-		goto wlock_again;
-	}
-	if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) {
-		/* Readers still around, drop the write
-		 * lock, spin, and try again.
-		 */
-		if (!--stuck) {
-			if (shown++ <= 2)
-				show_write(str, rw, caller);
-			stuck = INIT_STUCK;
-		}
-		__asm__ __volatile__(
-"		mov	1, %%g3\n"
-"		sllx	%%g3, 63, %%g3\n"
-"1:		ldx	[%0], %%g1\n"
-"		andn	%%g1, %%g3, %%g7\n"
-"		casx	[%0], %%g1, %%g7\n"
-"		cmp	%%g1, %%g7\n"
-"		membar	#StoreLoad | #StoreStore\n"
-"		bne,pn	%%xcc, 1b\n"
-"		 nop"
-		: /* no outputs */
-		: "r" (&(rw->lock))
-		: "g3", "g1", "g7", "cc", "memory");
-		while(rw->lock != 0) {
-			if (!--stuck) {
-				if (shown++ <= 2)
-					show_write(str, rw, caller);
-				stuck = INIT_STUCK;
-			}
-			rmb();
-		}
-		goto wlock_again;
-	}
-
-	/* We have it, say who we are. */
-	rw->writer_pc = ((unsigned int)caller);
-	rw->writer_cpu = cpu;
-	current->thread.smp_lock_count++;
-	current->thread.smp_lock_pc = ((unsigned int)caller);
-
-	put_cpu();
-}
-
-void _do_write_unlock(rwlock_t *rw, unsigned long caller)
-{
-	unsigned long val;
-	int stuck = INIT_STUCK;
-	int shown = 0;
-
-	/* Drop our identity _first_ */
-	rw->writer_pc = 0;
-	rw->writer_cpu = NO_PROC_ID;
-	current->thread.smp_lock_count--;
-wlock_again:
-	__asm__ __volatile__(
-"	membar	#StoreLoad | #LoadLoad\n"
-"	mov	1, %%g3\n"
-"	sllx	%%g3, 63, %%g3\n"
-"	ldx	[%0], %%g1\n"
-"	andn	%%g1, %%g3, %%g7\n"
-"	casx	[%0], %%g1, %%g7\n"
-"	membar	#StoreLoad | #StoreStore\n"
-"	sub	%%g1, %%g7, %0\n"
-	: "=r" (val)
-	: "0" (&(rw->lock))
-	: "g3", "g1", "g7", "memory");
-	if (val) {
-		if (!--stuck) {
-			if (shown++ <= 2)
-				show_write("write_unlock", rw, caller);
-			stuck = INIT_STUCK;
-		}
-		goto wlock_again;
-	}
-}
-
-int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller)
-{
-	unsigned long val;
-	int cpu = get_cpu();
-
-	/* Try to acuire the write bit.  */
-	__asm__ __volatile__(
-"	mov	1, %%g3\n"
-"	sllx	%%g3, 63, %%g3\n"
-"	ldx	[%0], %%g1\n"
-"	brlz,pn	%%g1, 1f\n"
-"	 or	%%g1, %%g3, %%g7\n"
-"	casx	[%0], %%g1, %%g7\n"
-"	membar	#StoreLoad | #StoreStore\n"
-"	ba,pt	%%xcc, 2f\n"
-"	 sub	%%g1, %%g7, %0\n"
-"1:	mov	1, %0\n"
-"2:"	: "=r" (val)
-	: "0" (&(rw->lock))
-	: "g3", "g1", "g7", "memory");
-
-	if (val) {
-		put_cpu();
-		return 0;
-	}
-
-	if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) {
-		/* Readers still around, drop the write
-		 * lock, return failure.
-		 */
-		__asm__ __volatile__(
-"		mov	1, %%g3\n"
-"		sllx	%%g3, 63, %%g3\n"
-"1:		ldx	[%0], %%g1\n"
-"		andn	%%g1, %%g3, %%g7\n"
-"		casx	[%0], %%g1, %%g7\n"
-"		cmp	%%g1, %%g7\n"
-"		membar	#StoreLoad | #StoreStore\n"
-"		bne,pn	%%xcc, 1b\n"
-"		 nop"
-		: /* no outputs */
-		: "r" (&(rw->lock))
-		: "g3", "g1", "g7", "cc", "memory");
-
-		put_cpu();
-
-		return 0;
-	}
-
-	/* We have it, say who we are. */
-	rw->writer_pc = ((unsigned int)caller);
-	rw->writer_cpu = cpu;
-	current->thread.smp_lock_count++;
-	current->thread.smp_lock_pc = ((unsigned int)caller);
-
-	put_cpu();
-
-	return 1;
-}
-
-#endif /* CONFIG_SMP */
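
The deleted sparc64 rwlock keeps all of its state in one 64-bit word: bit 63 is the
write bit, the remaining bits count readers, and every transition is a casx retry
loop. A C11 model of the _do_write_trylock() logic above (a sketch of the layout,
not the kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define WRITE_BIT (UINT64_C(1) << 63)

bool write_trylock(_Atomic uint64_t *lock)
{
	uint64_t old = atomic_load(lock);

	if (old & WRITE_BIT)
		return false;			/* another writer owns it */
	if (!atomic_compare_exchange_strong(lock, &old, old | WRITE_BIT))
		return false;			/* lost the race, fail fast */
	if (old != 0) {				/* readers were still present */
		atomic_fetch_and(lock, ~WRITE_BIT);	/* back out */
		return false;
	}
	return true;				/* sole owner */
}
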
diff --git a/fs/buffer.c b/fs/buffer.c
index 1c62203a4906..6cbfceabd95d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -40,6 +40,7 @@
 #include <linux/cpu.h>
 #include <linux/bitops.h>
 #include <linux/mpage.h>
+#include <linux/bit_spinlock.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 static void invalidate_bh_lrus(void);
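
buffer.c needs the new include because it locks buffers through a bit spinlock (bit
BH_Lock inside buffer_head::b_state), and those helpers now live in
<linux/bit_spinlock.h> instead of spinlock.h. What a bit spinlock does, in C11 form
(illustrative, not the kernel implementation):

#include <stdatomic.h>

void bit_spin_lock(int bitnum, _Atomic unsigned long *word)
{
	unsigned long bit = 1UL << bitnum;

	/* test-and-set of a single bit; the other bits keep their meaning */
	while (atomic_fetch_or_explicit(word, bit, memory_order_acquire) & bit)
		;	/* spin until the previous owner clears the bit */
}

void bit_spin_unlock(int bitnum, _Atomic unsigned long *word)
{
	atomic_fetch_and_explicit(word, ~(1UL << bitnum), memory_order_release);
}
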
diff --git a/include/asm-alpha/spinlock.h b/include/asm-alpha/spinlock.h
index 80780dba9986..8197c69eff44 100644
--- a/include/asm-alpha/spinlock.h
+++ b/include/asm-alpha/spinlock.h
@@ -6,7 +6,6 @@
 #include <linux/kernel.h>
 #include <asm/current.h>
 
-
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
  * on the local processor, one does not.
@@ -14,43 +13,18 @@
  * We make no fairness assumptions. They have a cost.
  */
 
-typedef struct {
-	volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-	int on_cpu;
-	int line_no;
-	void *previous;
-	struct task_struct * task;
-	const char *base_file;
-#endif
-} spinlock_t;
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPIN_LOCK_UNLOCKED	(spinlock_t){ 0, -1, 0, NULL, NULL, NULL }
-#else
-#define SPIN_LOCK_UNLOCKED	(spinlock_t){ 0 }
-#endif
-
-#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-#define spin_is_locked(x)	((x)->lock != 0)
-#define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-extern void _raw_spin_unlock(spinlock_t * lock);
-extern void debug_spin_lock(spinlock_t * lock, const char *, int);
-extern int debug_spin_trylock(spinlock_t * lock, const char *, int);
-#define _raw_spin_lock(LOCK) \
-	debug_spin_lock(LOCK, __BASE_FILE__, __LINE__)
-#define _raw_spin_trylock(LOCK) \
-	debug_spin_trylock(LOCK, __BASE_FILE__, __LINE__)
-#else
-static inline void _raw_spin_unlock(spinlock_t * lock)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_is_locked(x)	((x)->lock != 0)
+#define __raw_spin_unlock_wait(x) \
+		do { cpu_relax(); } while ((x)->lock)
+
+static inline void __raw_spin_unlock(raw_spinlock_t * lock)
 {
 	mb();
 	lock->lock = 0;
 }
 
-static inline void _raw_spin_lock(spinlock_t * lock)
+static inline void __raw_spin_lock(raw_spinlock_t * lock)
 {
 	long tmp;
 
@@ -70,80 +44,64 @@ static inline void _raw_spin_lock(spinlock_t * lock)
 	: "m"(lock->lock) : "memory");
 }
 
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	return !test_and_set_bit(0, &lock->lock);
 }
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
 
 /***********************************************************/
 
-typedef struct {
-	volatile unsigned int lock;
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED	(rwlock_t){ 0 }
-
-#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-
-static inline int read_can_lock(rwlock_t *lock)
+static inline int __raw_read_can_lock(raw_rwlock_t *lock)
 {
 	return (lock->lock & 1) == 0;
 }
 
-static inline int write_can_lock(rwlock_t *lock)
+static inline int __raw_write_can_lock(raw_rwlock_t *lock)
 {
 	return lock->lock == 0;
 }
100 | 63 | ||
101 | #ifdef CONFIG_DEBUG_RWLOCK | 64 | static inline void __raw_read_lock(raw_rwlock_t *lock) |
102 | extern void _raw_write_lock(rwlock_t * lock); | ||
103 | extern void _raw_read_lock(rwlock_t * lock); | ||
104 | #else | ||
105 | static inline void _raw_write_lock(rwlock_t * lock) | ||
106 | { | 65 | { |
107 | long regx; | 66 | long regx; |
108 | 67 | ||
109 | __asm__ __volatile__( | 68 | __asm__ __volatile__( |
110 | "1: ldl_l %1,%0\n" | 69 | "1: ldl_l %1,%0\n" |
111 | " bne %1,6f\n" | 70 | " blbs %1,6f\n" |
112 | " lda %1,1\n" | 71 | " subl %1,2,%1\n" |
113 | " stl_c %1,%0\n" | 72 | " stl_c %1,%0\n" |
114 | " beq %1,6f\n" | 73 | " beq %1,6f\n" |
115 | " mb\n" | 74 | " mb\n" |
116 | ".subsection 2\n" | 75 | ".subsection 2\n" |
117 | "6: ldl %1,%0\n" | 76 | "6: ldl %1,%0\n" |
118 | " bne %1,6b\n" | 77 | " blbs %1,6b\n" |
119 | " br 1b\n" | 78 | " br 1b\n" |
120 | ".previous" | 79 | ".previous" |
121 | : "=m" (*lock), "=&r" (regx) | 80 | : "=m" (*lock), "=&r" (regx) |
122 | : "m" (*lock) : "memory"); | 81 | : "m" (*lock) : "memory"); |
123 | } | 82 | } |
124 | 83 | ||
125 | static inline void _raw_read_lock(rwlock_t * lock) | 84 | static inline void __raw_write_lock(raw_rwlock_t *lock) |
126 | { | 85 | { |
127 | long regx; | 86 | long regx; |
128 | 87 | ||
129 | __asm__ __volatile__( | 88 | __asm__ __volatile__( |
130 | "1: ldl_l %1,%0\n" | 89 | "1: ldl_l %1,%0\n" |
131 | " blbs %1,6f\n" | 90 | " bne %1,6f\n" |
132 | " subl %1,2,%1\n" | 91 | " lda %1,1\n" |
133 | " stl_c %1,%0\n" | 92 | " stl_c %1,%0\n" |
134 | " beq %1,6f\n" | 93 | " beq %1,6f\n" |
135 | " mb\n" | 94 | " mb\n" |
136 | ".subsection 2\n" | 95 | ".subsection 2\n" |
137 | "6: ldl %1,%0\n" | 96 | "6: ldl %1,%0\n" |
138 | " blbs %1,6b\n" | 97 | " bne %1,6b\n" |
139 | " br 1b\n" | 98 | " br 1b\n" |
140 | ".previous" | 99 | ".previous" |
141 | : "=m" (*lock), "=&r" (regx) | 100 | : "=m" (*lock), "=&r" (regx) |
142 | : "m" (*lock) : "memory"); | 101 | : "m" (*lock) : "memory"); |
143 | } | 102 | } |
144 | #endif /* CONFIG_DEBUG_RWLOCK */ | ||
145 | 103 | ||
146 | static inline int _raw_read_trylock(rwlock_t * lock) | 104 | static inline int __raw_read_trylock(raw_rwlock_t * lock) |
147 | { | 105 | { |
148 | long regx; | 106 | long regx; |
149 | int success; | 107 | int success; |
@@ -165,7 +123,7 @@ static inline int _raw_read_trylock(rwlock_t * lock) | |||
165 | return success; | 123 | return success; |
166 | } | 124 | } |
167 | 125 | ||
168 | static inline int _raw_write_trylock(rwlock_t * lock) | 126 | static inline int __raw_write_trylock(raw_rwlock_t * lock) |
169 | { | 127 | { |
170 | long regx; | 128 | long regx; |
171 | int success; | 129 | int success; |
@@ -187,13 +145,7 @@ static inline int _raw_write_trylock(rwlock_t * lock) | |||
187 | return success; | 145 | return success; |
188 | } | 146 | } |
189 | 147 | ||
190 | static inline void _raw_write_unlock(rwlock_t * lock) | 148 | static inline void __raw_read_unlock(raw_rwlock_t * lock) |
191 | { | ||
192 | mb(); | ||
193 | lock->lock = 0; | ||
194 | } | ||
195 | |||
196 | static inline void _raw_read_unlock(rwlock_t * lock) | ||
197 | { | 149 | { |
198 | long regx; | 150 | long regx; |
199 | __asm__ __volatile__( | 151 | __asm__ __volatile__( |
@@ -209,4 +161,10 @@ static inline void _raw_read_unlock(rwlock_t * lock) | |||
209 | : "m" (*lock) : "memory"); | 161 | : "m" (*lock) : "memory"); |
210 | } | 162 | } |
211 | 163 | ||
164 | static inline void __raw_write_unlock(raw_rwlock_t * lock) | ||
165 | { | ||
166 | mb(); | ||
167 | lock->lock = 0; | ||
168 | } | ||
169 | |||
212 | #endif /* _ALPHA_SPINLOCK_H */ | 170 | #endif /* _ALPHA_SPINLOCK_H */ |
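The rewritten Alpha rwlock keeps the writer in bit 0 and counts readers in steps of two, which is why the read path tests blbs (branch on low bit set) and does subl %1,2,%1: the reader count can never alias the writer bit. A rough C rendering of the read-lock fast path under that encoding (a sketch, not the kernel's ldl_l/stl_c sequence):

    #include <stdbool.h>

    static void read_lock_sketch(volatile unsigned int *lock)
    {
            unsigned int old;

            for (;;) {
                    old = __atomic_load_n(lock, __ATOMIC_RELAXED);
                    if (old & 1)            /* writer holds bit 0 */
                            continue;
                    /* each reader subtracts 2, mirroring subl %1,2,%1 */
                    if (__atomic_compare_exchange_n(lock, &old, old - 2,
                                                    false, __ATOMIC_ACQUIRE,
                                                    __ATOMIC_RELAXED))
                            return;
            }
    }

Read-unlock adds the 2 back, so the word returns to 0 only once the last reader leaves.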
diff --git a/include/asm-alpha/spinlock_types.h b/include/asm-alpha/spinlock_types.h new file mode 100644 index 000000000000..8141eb5ebf0d --- /dev/null +++ b/include/asm-alpha/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef _ALPHA_SPINLOCK_TYPES_H | ||
2 | #define _ALPHA_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int lock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int lock; | ||
16 | } raw_rwlock_t; | ||
17 | |||
18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
19 | |||
20 | #endif | ||
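The #error guard ensures these raw types are only ever reached through linux/spinlock_types.h, which layers the common debug state on top of the per-arch word. A hedged sketch of that layering (the field names follow the generic header's pattern but are illustrative here, not quoted from it):

    /* linux/spinlock_types.h, schematically: */
    #define __LINUX_SPINLOCK_TYPES_H
    #include <asm/spinlock_types.h>        /* the #error above now passes */

    typedef struct {
            raw_spinlock_t raw_lock;       /* the arch-specific word */
    #ifdef CONFIG_DEBUG_SPINLOCK
            unsigned int magic;            /* debug fields live here, */
            unsigned int owner_cpu;        /* once, for every arch */
            void *owner;
    #endif
    } spinlock_t;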
diff --git a/include/asm-arm/spinlock.h b/include/asm-arm/spinlock.h index 1f906d09b688..cb4906b45555 100644 --- a/include/asm-arm/spinlock.h +++ b/include/asm-arm/spinlock.h | |||
@@ -16,21 +16,14 @@ | |||
16 | * Unlocked value: 0 | 16 | * Unlocked value: 0 |
17 | * Locked value: 1 | 17 | * Locked value: 1 |
18 | */ | 18 | */ |
19 | typedef struct { | ||
20 | volatile unsigned int lock; | ||
21 | #ifdef CONFIG_PREEMPT | ||
22 | unsigned int break_lock; | ||
23 | #endif | ||
24 | } spinlock_t; | ||
25 | 19 | ||
26 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | 20 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
21 | #define __raw_spin_unlock_wait(lock) \ | ||
22 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) | ||
27 | 23 | ||
28 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while (0) | 24 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
29 | #define spin_is_locked(x) ((x)->lock != 0) | ||
30 | #define spin_unlock_wait(x) do { barrier(); } while (spin_is_locked(x)) | ||
31 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
32 | 25 | ||
33 | static inline void _raw_spin_lock(spinlock_t *lock) | 26 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
34 | { | 27 | { |
35 | unsigned long tmp; | 28 | unsigned long tmp; |
36 | 29 | ||
@@ -47,7 +40,7 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
47 | smp_mb(); | 40 | smp_mb(); |
48 | } | 41 | } |
49 | 42 | ||
50 | static inline int _raw_spin_trylock(spinlock_t *lock) | 43 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
51 | { | 44 | { |
52 | unsigned long tmp; | 45 | unsigned long tmp; |
53 | 46 | ||
@@ -67,7 +60,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock) | |||
67 | } | 60 | } |
68 | } | 61 | } |
69 | 62 | ||
70 | static inline void _raw_spin_unlock(spinlock_t *lock) | 63 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
71 | { | 64 | { |
72 | smp_mb(); | 65 | smp_mb(); |
73 | 66 | ||
@@ -80,23 +73,14 @@ static inline void _raw_spin_unlock(spinlock_t *lock) | |||
80 | 73 | ||
81 | /* | 74 | /* |
82 | * RWLOCKS | 75 | * RWLOCKS |
83 | */ | 76 | * |
84 | typedef struct { | 77 | * |
85 | volatile unsigned int lock; | ||
86 | #ifdef CONFIG_PREEMPT | ||
87 | unsigned int break_lock; | ||
88 | #endif | ||
89 | } rwlock_t; | ||
90 | |||
91 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | ||
92 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while (0) | ||
93 | #define rwlock_is_locked(x) (*((volatile unsigned int *)(x)) != 0) | ||
94 | |||
95 | /* | ||
96 | * Write locks are easy - we just set bit 31. When unlocking, we can | 78 | * Write locks are easy - we just set bit 31. When unlocking, we can |
97 | * just write zero since the lock is exclusively held. | 79 | * just write zero since the lock is exclusively held. |
98 | */ | 80 | */ |
99 | static inline void _raw_write_lock(rwlock_t *rw) | 81 | #define rwlock_is_locked(x) (*((volatile unsigned int *)(x)) != 0) |
82 | |||
83 | static inline void __raw_write_lock(raw_rwlock_t *rw) | ||
100 | { | 84 | { |
101 | unsigned long tmp; | 85 | unsigned long tmp; |
102 | 86 | ||
@@ -113,7 +97,7 @@ static inline void _raw_write_lock(rwlock_t *rw) | |||
113 | smp_mb(); | 97 | smp_mb(); |
114 | } | 98 | } |
115 | 99 | ||
116 | static inline int _raw_write_trylock(rwlock_t *rw) | 100 | static inline int __raw_write_trylock(raw_rwlock_t *rw) |
117 | { | 101 | { |
118 | unsigned long tmp; | 102 | unsigned long tmp; |
119 | 103 | ||
@@ -133,7 +117,7 @@ static inline int _raw_write_trylock(rwlock_t *rw) | |||
133 | } | 117 | } |
134 | } | 118 | } |
135 | 119 | ||
136 | static inline void _raw_write_unlock(rwlock_t *rw) | 120 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
137 | { | 121 | { |
138 | smp_mb(); | 122 | smp_mb(); |
139 | 123 | ||
@@ -156,7 +140,7 @@ static inline void _raw_write_unlock(rwlock_t *rw) | |||
156 | * currently active. However, we know we won't have any write | 140 | * currently active. However, we know we won't have any write |
157 | * locks. | 141 | * locks. |
158 | */ | 142 | */ |
159 | static inline void _raw_read_lock(rwlock_t *rw) | 143 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
160 | { | 144 | { |
161 | unsigned long tmp, tmp2; | 145 | unsigned long tmp, tmp2; |
162 | 146 | ||
@@ -173,7 +157,7 @@ static inline void _raw_read_lock(rwlock_t *rw) | |||
173 | smp_mb(); | 157 | smp_mb(); |
174 | } | 158 | } |
175 | 159 | ||
176 | static inline void _raw_read_unlock(rwlock_t *rw) | 160 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
177 | { | 161 | { |
178 | unsigned long tmp, tmp2; | 162 | unsigned long tmp, tmp2; |
179 | 163 | ||
@@ -190,6 +174,6 @@ static inline void _raw_read_unlock(rwlock_t *rw) | |||
190 | : "cc"); | 174 | : "cc"); |
191 | } | 175 | } |
192 | 176 | ||
193 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 177 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
194 | 178 | ||
195 | #endif /* __ASM_SPINLOCK_H */ | 179 | #endif /* __ASM_SPINLOCK_H */ |
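With bit 31 as the write bit, a writer owns the word outright, so unlock is a plain store of zero while readers spin whenever the counter is negative. A C sketch of the two write-side operations (illustrative only, standing in for the ARM ldrex/strex sequences):

    #define WRITE_BIT 0x80000000u

    static int write_trylock_sketch(volatile unsigned int *lock)
    {
            unsigned int old = 0;
            /* only succeeds from the fully unlocked state, like the
               strex loop that insists the old value was zero */
            return __atomic_compare_exchange_n(lock, &old, WRITE_BIT, 0,
                                               __ATOMIC_ACQUIRE,
                                               __ATOMIC_RELAXED);
    }

    static void write_unlock_sketch(volatile unsigned int *lock)
    {
            /* exclusively held: a release store of zero is enough */
            __atomic_store_n(lock, 0, __ATOMIC_RELEASE);
    }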
diff --git a/include/asm-arm/spinlock_types.h b/include/asm-arm/spinlock_types.h new file mode 100644 index 000000000000..43e83f6d2ee5 --- /dev/null +++ b/include/asm-arm/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
2 | #define __ASM_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int lock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int lock; | ||
16 | } raw_rwlock_t; | ||
17 | |||
18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
19 | |||
20 | #endif | ||
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h index f9ff31f40036..23604350cdf4 100644 --- a/include/asm-i386/spinlock.h +++ b/include/asm-i386/spinlock.h | |||
@@ -7,46 +7,21 @@ | |||
7 | #include <linux/config.h> | 7 | #include <linux/config.h> |
8 | #include <linux/compiler.h> | 8 | #include <linux/compiler.h> |
9 | 9 | ||
10 | asmlinkage int printk(const char * fmt, ...) | ||
11 | __attribute__ ((format (printf, 1, 2))); | ||
12 | |||
13 | /* | 10 | /* |
14 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 11 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
15 | */ | 12 | * |
16 | |||
17 | typedef struct { | ||
18 | volatile unsigned int slock; | ||
19 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
20 | unsigned magic; | ||
21 | #endif | ||
22 | #ifdef CONFIG_PREEMPT | ||
23 | unsigned int break_lock; | ||
24 | #endif | ||
25 | } spinlock_t; | ||
26 | |||
27 | #define SPINLOCK_MAGIC 0xdead4ead | ||
28 | |||
29 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
30 | #define SPINLOCK_MAGIC_INIT , SPINLOCK_MAGIC | ||
31 | #else | ||
32 | #define SPINLOCK_MAGIC_INIT /* */ | ||
33 | #endif | ||
34 | |||
35 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT } | ||
36 | |||
37 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
38 | |||
39 | /* | ||
40 | * Simple spin lock operations. There are two variants, one clears IRQ's | 13 | * Simple spin lock operations. There are two variants, one clears IRQ's |
41 | * on the local processor, one does not. | 14 | * on the local processor, one does not. |
42 | * | 15 | * |
43 | * We make no fairness assumptions. They have a cost. | 16 | * We make no fairness assumptions. They have a cost. |
17 | * | ||
18 | * (the type definitions are in asm/spinlock_types.h) | ||
44 | */ | 19 | */ |
45 | 20 | ||
46 | #define spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) <= 0) | 21 | #define __raw_spin_is_locked(x) \ |
47 | #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) | 22 | (*(volatile signed char *)(&(x)->slock) <= 0) |
48 | 23 | ||
49 | #define spin_lock_string \ | 24 | #define __raw_spin_lock_string \ |
50 | "\n1:\t" \ | 25 | "\n1:\t" \ |
51 | "lock ; decb %0\n\t" \ | 26 | "lock ; decb %0\n\t" \ |
52 | "jns 3f\n" \ | 27 | "jns 3f\n" \ |
@@ -57,7 +32,7 @@ typedef struct { | |||
57 | "jmp 1b\n" \ | 32 | "jmp 1b\n" \ |
58 | "3:\n\t" | 33 | "3:\n\t" |
59 | 34 | ||
60 | #define spin_lock_string_flags \ | 35 | #define __raw_spin_lock_string_flags \ |
61 | "\n1:\t" \ | 36 | "\n1:\t" \ |
62 | "lock ; decb %0\n\t" \ | 37 | "lock ; decb %0\n\t" \ |
63 | "jns 4f\n\t" \ | 38 | "jns 4f\n\t" \ |
@@ -73,86 +48,71 @@ typedef struct { | |||
73 | "jmp 1b\n" \ | 48 | "jmp 1b\n" \ |
74 | "4:\n\t" | 49 | "4:\n\t" |
75 | 50 | ||
51 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | ||
52 | { | ||
53 | __asm__ __volatile__( | ||
54 | __raw_spin_lock_string | ||
55 | :"=m" (lock->slock) : : "memory"); | ||
56 | } | ||
57 | |||
58 | static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | ||
59 | { | ||
60 | __asm__ __volatile__( | ||
61 | __raw_spin_lock_string_flags | ||
62 | :"=m" (lock->slock) : "r" (flags) : "memory"); | ||
63 | } | ||
64 | |||
65 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | ||
66 | { | ||
67 | char oldval; | ||
68 | __asm__ __volatile__( | ||
69 | "xchgb %b0,%1" | ||
70 | :"=q" (oldval), "=m" (lock->slock) | ||
71 | :"0" (0) : "memory"); | ||
72 | return oldval > 0; | ||
73 | } | ||
74 | |||
76 | /* | 75 | /* |
77 | * This works. Despite all the confusion. | 76 | * __raw_spin_unlock based on writing $1 to the low byte. |
78 | * (except on PPro SMP or if we are using OOSTORE) | 77 | * This method works. Despite all the confusion. |
78 | * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there) | ||
79 | * (PPro errata 66, 92) | 79 | * (PPro errata 66, 92) |
80 | */ | 80 | */ |
81 | 81 | ||
82 | #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) | 82 | #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) |
83 | 83 | ||
84 | #define spin_unlock_string \ | 84 | #define __raw_spin_unlock_string \ |
85 | "movb $1,%0" \ | 85 | "movb $1,%0" \ |
86 | :"=m" (lock->slock) : : "memory" | 86 | :"=m" (lock->slock) : : "memory" |
87 | 87 | ||
88 | 88 | ||
89 | static inline void _raw_spin_unlock(spinlock_t *lock) | 89 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
90 | { | 90 | { |
91 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
92 | BUG_ON(lock->magic != SPINLOCK_MAGIC); | ||
93 | BUG_ON(!spin_is_locked(lock)); | ||
94 | #endif | ||
95 | __asm__ __volatile__( | 91 | __asm__ __volatile__( |
96 | spin_unlock_string | 92 | __raw_spin_unlock_string |
97 | ); | 93 | ); |
98 | } | 94 | } |
99 | 95 | ||
100 | #else | 96 | #else |
101 | 97 | ||
102 | #define spin_unlock_string \ | 98 | #define __raw_spin_unlock_string \ |
103 | "xchgb %b0, %1" \ | 99 | "xchgb %b0, %1" \ |
104 | :"=q" (oldval), "=m" (lock->slock) \ | 100 | :"=q" (oldval), "=m" (lock->slock) \ |
105 | :"0" (oldval) : "memory" | 101 | :"0" (oldval) : "memory" |
106 | 102 | ||
107 | static inline void _raw_spin_unlock(spinlock_t *lock) | 103 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
108 | { | 104 | { |
109 | char oldval = 1; | 105 | char oldval = 1; |
110 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
111 | BUG_ON(lock->magic != SPINLOCK_MAGIC); | ||
112 | BUG_ON(!spin_is_locked(lock)); | ||
113 | #endif | ||
114 | __asm__ __volatile__( | ||
115 | spin_unlock_string | ||
116 | ); | ||
117 | } | ||
118 | 106 | ||
119 | #endif | ||
120 | |||
121 | static inline int _raw_spin_trylock(spinlock_t *lock) | ||
122 | { | ||
123 | char oldval; | ||
124 | __asm__ __volatile__( | 107 | __asm__ __volatile__( |
125 | "xchgb %b0,%1" | 108 | __raw_spin_unlock_string |
126 | :"=q" (oldval), "=m" (lock->slock) | 109 | ); |
127 | :"0" (0) : "memory"); | ||
128 | return oldval > 0; | ||
129 | } | 110 | } |
130 | 111 | ||
131 | static inline void _raw_spin_lock(spinlock_t *lock) | ||
132 | { | ||
133 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
134 | if (unlikely(lock->magic != SPINLOCK_MAGIC)) { | ||
135 | printk("eip: %p\n", __builtin_return_address(0)); | ||
136 | BUG(); | ||
137 | } | ||
138 | #endif | 112 | #endif |
139 | __asm__ __volatile__( | ||
140 | spin_lock_string | ||
141 | :"=m" (lock->slock) : : "memory"); | ||
142 | } | ||
143 | 113 | ||
144 | static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) | 114 | #define __raw_spin_unlock_wait(lock) \ |
145 | { | 115 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) |
146 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
147 | if (unlikely(lock->magic != SPINLOCK_MAGIC)) { | ||
148 | printk("eip: %p\n", __builtin_return_address(0)); | ||
149 | BUG(); | ||
150 | } | ||
151 | #endif | ||
152 | __asm__ __volatile__( | ||
153 | spin_lock_string_flags | ||
154 | :"=m" (lock->slock) : "r" (flags) : "memory"); | ||
155 | } | ||
156 | 116 | ||
157 | /* | 117 | /* |
158 | * Read-write spinlocks, allowing multiple readers | 118 | * Read-write spinlocks, allowing multiple readers |
@@ -163,72 +123,41 @@ static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) | |||
163 | * can "mix" irq-safe locks - any writer needs to get an | 123 | * can "mix" irq-safe locks - any writer needs to get an |
164 | * irq-safe write-lock, but readers can get non-irqsafe | 124 | * irq-safe write-lock, but readers can get non-irqsafe |
165 | * read-locks. | 125 | * read-locks. |
126 | * | ||
127 | * On x86, we implement read-write locks as a 32-bit counter | ||
128 | * with the high bit (sign) being the "contended" bit. | ||
129 | * | ||
130 | * The inline assembly is non-obvious. Think about it. | ||
131 | * | ||
132 | * Changed to use the same technique as rw semaphores. See | ||
133 | * semaphore.h for details. -ben | ||
134 | * | ||
135 | * the helpers are in arch/i386/kernel/semaphore.c | ||
166 | */ | 136 | */ |
167 | typedef struct { | ||
168 | volatile unsigned int lock; | ||
169 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
170 | unsigned magic; | ||
171 | #endif | ||
172 | #ifdef CONFIG_PREEMPT | ||
173 | unsigned int break_lock; | ||
174 | #endif | ||
175 | } rwlock_t; | ||
176 | |||
177 | #define RWLOCK_MAGIC 0xdeaf1eed | ||
178 | |||
179 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
180 | #define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC | ||
181 | #else | ||
182 | #define RWLOCK_MAGIC_INIT /* */ | ||
183 | #endif | ||
184 | |||
185 | #define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT } | ||
186 | |||
187 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
188 | 137 | ||
189 | /** | 138 | /** |
190 | * read_can_lock - would read_trylock() succeed? | 139 | * read_can_lock - would read_trylock() succeed? |
191 | * @lock: the rwlock in question. | 140 | * @lock: the rwlock in question. |
192 | */ | 141 | */ |
193 | #define read_can_lock(x) ((int)(x)->lock > 0) | 142 | #define __raw_read_can_lock(x) ((int)(x)->lock > 0) |
194 | 143 | ||
195 | /** | 144 | /** |
196 | * write_can_lock - would write_trylock() succeed? | 145 | * write_can_lock - would write_trylock() succeed? |
197 | * @lock: the rwlock in question. | 146 | * @lock: the rwlock in question. |
198 | */ | 147 | */ |
199 | #define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | 148 | #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) |
200 | 149 | ||
201 | /* | 150 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
202 | * On x86, we implement read-write locks as a 32-bit counter | ||
203 | * with the high bit (sign) being the "contended" bit. | ||
204 | * | ||
205 | * The inline assembly is non-obvious. Think about it. | ||
206 | * | ||
207 | * Changed to use the same technique as rw semaphores. See | ||
208 | * semaphore.h for details. -ben | ||
209 | */ | ||
210 | /* the spinlock helpers are in arch/i386/kernel/semaphore.c */ | ||
211 | |||
212 | static inline void _raw_read_lock(rwlock_t *rw) | ||
213 | { | 151 | { |
214 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
215 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
216 | #endif | ||
217 | __build_read_lock(rw, "__read_lock_failed"); | 152 | __build_read_lock(rw, "__read_lock_failed"); |
218 | } | 153 | } |
219 | 154 | ||
220 | static inline void _raw_write_lock(rwlock_t *rw) | 155 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
221 | { | 156 | { |
222 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
223 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
224 | #endif | ||
225 | __build_write_lock(rw, "__write_lock_failed"); | 157 | __build_write_lock(rw, "__write_lock_failed"); |
226 | } | 158 | } |
227 | 159 | ||
228 | #define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory") | 160 | static inline int __raw_read_trylock(raw_rwlock_t *lock) |
229 | #define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory") | ||
230 | |||
231 | static inline int _raw_read_trylock(rwlock_t *lock) | ||
232 | { | 161 | { |
233 | atomic_t *count = (atomic_t *)lock; | 162 | atomic_t *count = (atomic_t *)lock; |
234 | atomic_dec(count); | 163 | atomic_dec(count); |
@@ -238,7 +167,7 @@ static inline int _raw_read_trylock(rwlock_t *lock) | |||
238 | return 0; | 167 | return 0; |
239 | } | 168 | } |
240 | 169 | ||
241 | static inline int _raw_write_trylock(rwlock_t *lock) | 170 | static inline int __raw_write_trylock(raw_rwlock_t *lock) |
242 | { | 171 | { |
243 | atomic_t *count = (atomic_t *)lock; | 172 | atomic_t *count = (atomic_t *)lock; |
244 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) | 173 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) |
@@ -247,4 +176,15 @@ static inline int _raw_write_trylock(rwlock_t *lock) | |||
247 | return 0; | 176 | return 0; |
248 | } | 177 | } |
249 | 178 | ||
179 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | ||
180 | { | ||
181 | asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory"); | ||
182 | } | ||
183 | |||
184 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | ||
185 | { | ||
186 | asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0" | ||
187 | : "=m" (rw->lock) : : "memory"); | ||
188 | } | ||
189 | |||
250 | #endif /* __ASM_SPINLOCK_H */ | 190 | #endif /* __ASM_SPINLOCK_H */ |
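The bias scheme: the counter starts at RW_LOCK_BIAS (0x01000000); each reader takes 1 and a writer takes the entire bias, so the value stays non-negative exactly while no writer is involved. A C11 sketch of the two trylock paths above (illustrative names, assuming the counter was initialised to the bias):

    #include <stdatomic.h>

    #define BIAS 0x01000000

    static int read_trylock_sketch(atomic_int *count)
    {
            if (atomic_fetch_sub(count, 1) - 1 >= 0)
                    return 1;               /* no writer: read lock held */
            atomic_fetch_add(count, 1);     /* writer present: undo */
            return 0;
    }

    static int write_trylock_sketch(atomic_int *count)
    {
            if (atomic_fetch_sub(count, BIAS) - BIAS == 0)
                    return 1;               /* took the whole bias: exclusive */
            atomic_fetch_add(count, BIAS);  /* readers or writer: undo */
            return 0;
    }

The undo step is why the failed paths above do atomic_dec then atomic_inc: a trylock must leave the counter exactly as it found it.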
diff --git a/include/asm-i386/spinlock_types.h b/include/asm-i386/spinlock_types.h new file mode 100644 index 000000000000..59efe849f351 --- /dev/null +++ b/include/asm-i386/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
2 | #define __ASM_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int slock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int lock; | ||
16 | } raw_rwlock_t; | ||
17 | |||
18 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | ||
19 | |||
20 | #endif | ||
diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h index d2430aa0d49d..5b78611411c3 100644 --- a/include/asm-ia64/spinlock.h +++ b/include/asm-ia64/spinlock.h | |||
@@ -17,28 +17,20 @@ | |||
17 | #include <asm/intrinsics.h> | 17 | #include <asm/intrinsics.h> |
18 | #include <asm/system.h> | 18 | #include <asm/system.h> |
19 | 19 | ||
20 | typedef struct { | 20 | #define __raw_spin_lock_init(x) ((x)->lock = 0) |
21 | volatile unsigned int lock; | ||
22 | #ifdef CONFIG_PREEMPT | ||
23 | unsigned int break_lock; | ||
24 | #endif | ||
25 | } spinlock_t; | ||
26 | |||
27 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | ||
28 | #define spin_lock_init(x) ((x)->lock = 0) | ||
29 | 21 | ||
30 | #ifdef ASM_SUPPORTED | 22 | #ifdef ASM_SUPPORTED |
31 | /* | 23 | /* |
32 | * Try to get the lock. If we fail to get the lock, make a non-standard call to | 24 | * Try to get the lock. If we fail to get the lock, make a non-standard call to |
33 | * ia64_spinlock_contention(). We do not use a normal call because that would force all | 25 | * ia64_spinlock_contention(). We do not use a normal call because that would force all |
34 | * callers of spin_lock() to be non-leaf routines. Instead, ia64_spinlock_contention() is | 26 | * callers of __raw_spin_lock() to be non-leaf routines. Instead, ia64_spinlock_contention() is |
35 | * carefully coded to touch only those registers that spin_lock() marks "clobbered". | 27 | * carefully coded to touch only those registers that __raw_spin_lock() marks "clobbered". |
36 | */ | 28 | */ |
37 | 29 | ||
38 | #define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory" | 30 | #define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory" |
39 | 31 | ||
40 | static inline void | 32 | static inline void |
41 | _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) | 33 | __raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags) |
42 | { | 34 | { |
43 | register volatile unsigned int *ptr asm ("r31") = &lock->lock; | 35 | register volatile unsigned int *ptr asm ("r31") = &lock->lock; |
44 | 36 | ||
@@ -94,17 +86,17 @@ _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) | |||
94 | #endif | 86 | #endif |
95 | } | 87 | } |
96 | 88 | ||
97 | #define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0) | 89 | #define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0) |
98 | 90 | ||
99 | /* Unlock by doing an ordered store and releasing the cacheline with nta */ | 91 | /* Unlock by doing an ordered store and releasing the cacheline with nta */ |
100 | static inline void _raw_spin_unlock(spinlock_t *x) { | 92 | static inline void __raw_spin_unlock(raw_spinlock_t *x) { |
101 | barrier(); | 93 | barrier(); |
102 | asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x)); | 94 | asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x)); |
103 | } | 95 | } |
104 | 96 | ||
105 | #else /* !ASM_SUPPORTED */ | 97 | #else /* !ASM_SUPPORTED */ |
106 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | 98 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
107 | # define _raw_spin_lock(x) \ | 99 | # define __raw_spin_lock(x) \ |
108 | do { \ | 100 | do { \ |
109 | __u32 *ia64_spinlock_ptr = (__u32 *) (x); \ | 101 | __u32 *ia64_spinlock_ptr = (__u32 *) (x); \ |
110 | __u64 ia64_spinlock_val; \ | 102 | __u64 ia64_spinlock_val; \ |
@@ -117,29 +109,20 @@ do { \ | |||
117 | } while (ia64_spinlock_val); \ | 109 | } while (ia64_spinlock_val); \ |
118 | } \ | 110 | } \ |
119 | } while (0) | 111 | } while (0) |
120 | #define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0) | 112 | #define __raw_spin_unlock(x) do { barrier(); ((raw_spinlock_t *) x)->lock = 0; } while (0) |
121 | #endif /* !ASM_SUPPORTED */ | 113 | #endif /* !ASM_SUPPORTED */ |
122 | 114 | ||
123 | #define spin_is_locked(x) ((x)->lock != 0) | 115 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
124 | #define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0) | 116 | #define __raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0) |
125 | #define spin_unlock_wait(x) do { barrier(); } while ((x)->lock) | 117 | #define __raw_spin_unlock_wait(lock) \ |
126 | 118 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) | |
127 | typedef struct { | ||
128 | volatile unsigned int read_counter : 24; | ||
129 | volatile unsigned int write_lock : 8; | ||
130 | #ifdef CONFIG_PREEMPT | ||
131 | unsigned int break_lock; | ||
132 | #endif | ||
133 | } rwlock_t; | ||
134 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 } | ||
135 | 119 | ||
136 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | 120 | #define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) |
137 | #define read_can_lock(rw) (*(volatile int *)(rw) >= 0) | 121 | #define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0) |
138 | #define write_can_lock(rw) (*(volatile int *)(rw) == 0) | ||
139 | 122 | ||
140 | #define _raw_read_lock(rw) \ | 123 | #define __raw_read_lock(rw) \ |
141 | do { \ | 124 | do { \ |
142 | rwlock_t *__read_lock_ptr = (rw); \ | 125 | raw_rwlock_t *__read_lock_ptr = (rw); \ |
143 | \ | 126 | \ |
144 | while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ | 127 | while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ |
145 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ | 128 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ |
@@ -148,14 +131,14 @@ do { \ | |||
148 | } \ | 131 | } \ |
149 | } while (0) | 132 | } while (0) |
150 | 133 | ||
151 | #define _raw_read_unlock(rw) \ | 134 | #define __raw_read_unlock(rw) \ |
152 | do { \ | 135 | do { \ |
153 | rwlock_t *__read_lock_ptr = (rw); \ | 136 | raw_rwlock_t *__read_lock_ptr = (rw); \ |
154 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ | 137 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ |
155 | } while (0) | 138 | } while (0) |
156 | 139 | ||
157 | #ifdef ASM_SUPPORTED | 140 | #ifdef ASM_SUPPORTED |
158 | #define _raw_write_lock(rw) \ | 141 | #define __raw_write_lock(rw) \ |
159 | do { \ | 142 | do { \ |
160 | __asm__ __volatile__ ( \ | 143 | __asm__ __volatile__ ( \ |
161 | "mov ar.ccv = r0\n" \ | 144 | "mov ar.ccv = r0\n" \ |
@@ -170,7 +153,7 @@ do { \ | |||
170 | :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \ | 153 | :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \ |
171 | } while(0) | 154 | } while(0) |
172 | 155 | ||
173 | #define _raw_write_trylock(rw) \ | 156 | #define __raw_write_trylock(rw) \ |
174 | ({ \ | 157 | ({ \ |
175 | register long result; \ | 158 | register long result; \ |
176 | \ | 159 | \ |
@@ -182,7 +165,7 @@ do { \ | |||
182 | (result == 0); \ | 165 | (result == 0); \ |
183 | }) | 166 | }) |
184 | 167 | ||
185 | static inline void _raw_write_unlock(rwlock_t *x) | 168 | static inline void __raw_write_unlock(raw_rwlock_t *x) |
186 | { | 169 | { |
187 | u8 *y = (u8 *)x; | 170 | u8 *y = (u8 *)x; |
188 | barrier(); | 171 | barrier(); |
@@ -191,7 +174,7 @@ static inline void _raw_write_unlock(rwlock_t *x) | |||
191 | 174 | ||
192 | #else /* !ASM_SUPPORTED */ | 175 | #else /* !ASM_SUPPORTED */ |
193 | 176 | ||
194 | #define _raw_write_lock(l) \ | 177 | #define __raw_write_lock(l) \ |
195 | ({ \ | 178 | ({ \ |
196 | __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ | 179 | __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ |
197 | __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ | 180 | __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ |
@@ -202,7 +185,7 @@ static inline void _raw_write_unlock(rwlock_t *x) | |||
202 | } while (ia64_val); \ | 185 | } while (ia64_val); \ |
203 | }) | 186 | }) |
204 | 187 | ||
205 | #define _raw_write_trylock(rw) \ | 188 | #define __raw_write_trylock(rw) \ |
206 | ({ \ | 189 | ({ \ |
207 | __u64 ia64_val; \ | 190 | __u64 ia64_val; \ |
208 | __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ | 191 | __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ |
@@ -210,7 +193,7 @@ static inline void _raw_write_unlock(rwlock_t *x) | |||
210 | (ia64_val == 0); \ | 193 | (ia64_val == 0); \ |
211 | }) | 194 | }) |
212 | 195 | ||
213 | static inline void _raw_write_unlock(rwlock_t *x) | 196 | static inline void __raw_write_unlock(raw_rwlock_t *x) |
214 | { | 197 | { |
215 | barrier(); | 198 | barrier(); |
216 | x->write_lock = 0; | 199 | x->write_lock = 0; |
@@ -218,6 +201,6 @@ static inline void _raw_write_unlock(rwlock_t *x) | |||
218 | 201 | ||
219 | #endif /* !ASM_SUPPORTED */ | 202 | #endif /* !ASM_SUPPORTED */ |
220 | 203 | ||
221 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 204 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
222 | 205 | ||
223 | #endif /* _ASM_IA64_SPINLOCK_H */ | 206 | #endif /* _ASM_IA64_SPINLOCK_H */ |
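The ia64 read path is optimistic: fetchadd +1 with acquire semantics, and if the sign bit reveals a writer, fetchadd -1 with release semantics and spin before retrying. Roughly, in C (a sketch of the macro's control flow, not the kernel macro itself):

    static void read_lock_sketch(volatile int *lock)
    {
            while (__atomic_fetch_add(lock, 1, __ATOMIC_ACQUIRE) < 0) {
                    /* writer holds bit 31: undo the optimistic add */
                    __atomic_fetch_sub(lock, 1, __ATOMIC_RELEASE);
                    while (__atomic_load_n(lock, __ATOMIC_RELAXED) < 0)
                            ;               /* wait out the writer */
            }
    }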
diff --git a/include/asm-ia64/spinlock_types.h b/include/asm-ia64/spinlock_types.h new file mode 100644 index 000000000000..474e46f1ab4a --- /dev/null +++ b/include/asm-ia64/spinlock_types.h | |||
@@ -0,0 +1,21 @@ | |||
1 | #ifndef _ASM_IA64_SPINLOCK_TYPES_H | ||
2 | #define _ASM_IA64_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int lock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int read_counter : 31; | ||
16 | volatile unsigned int write_lock : 1; | ||
17 | } raw_rwlock_t; | ||
18 | |||
19 | #define __RAW_RW_LOCK_UNLOCKED { 0, 0 } | ||
20 | |||
21 | #endif | ||
diff --git a/include/asm-m32r/spinlock.h b/include/asm-m32r/spinlock.h index 6608d8371c50..7de7def28da9 100644 --- a/include/asm-m32r/spinlock.h +++ b/include/asm-m32r/spinlock.h | |||
@@ -14,57 +14,30 @@ | |||
14 | #include <asm/atomic.h> | 14 | #include <asm/atomic.h> |
15 | #include <asm/page.h> | 15 | #include <asm/page.h> |
16 | 16 | ||
17 | extern int printk(const char * fmt, ...) | ||
18 | __attribute__ ((format (printf, 1, 2))); | ||
19 | |||
20 | #define RW_LOCK_BIAS 0x01000000 | ||
21 | #define RW_LOCK_BIAS_STR "0x01000000" | ||
22 | |||
23 | /* | 17 | /* |
24 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 18 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
25 | */ | 19 | * |
26 | 20 | * (the type definitions are in asm/spinlock_types.h) | |
27 | typedef struct { | 21 | * |
28 | volatile int slock; | ||
29 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
30 | unsigned magic; | ||
31 | #endif | ||
32 | #ifdef CONFIG_PREEMPT | ||
33 | unsigned int break_lock; | ||
34 | #endif | ||
35 | } spinlock_t; | ||
36 | |||
37 | #define SPINLOCK_MAGIC 0xdead4ead | ||
38 | |||
39 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
40 | #define SPINLOCK_MAGIC_INIT , SPINLOCK_MAGIC | ||
41 | #else | ||
42 | #define SPINLOCK_MAGIC_INIT /* */ | ||
43 | #endif | ||
44 | |||
45 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT } | ||
46 | |||
47 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
48 | |||
49 | /* | ||
50 | * Simple spin lock operations. There are two variants, one clears IRQ's | 22 | * Simple spin lock operations. There are two variants, one clears IRQ's |
51 | * on the local processor, one does not. | 23 | * on the local processor, one does not. |
52 | * | 24 | * |
53 | * We make no fairness assumptions. They have a cost. | 25 | * We make no fairness assumptions. They have a cost. |
54 | */ | 26 | */ |
55 | 27 | ||
56 | #define spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) | 28 | #define __raw_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) |
57 | #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) | 29 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
58 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | 30 | #define __raw_spin_unlock_wait(x) \ |
31 | do { cpu_relax(); } while (__raw_spin_is_locked(x)) | ||
59 | 32 | ||
60 | /** | 33 | /** |
61 | * _raw_spin_trylock - Try spin lock and return a result | 34 | * __raw_spin_trylock - Try spin lock and return a result |
62 | * @lock: Pointer to the lock variable | 35 | * @lock: Pointer to the lock variable |
63 | * | 36 | * |
64 | * _raw_spin_trylock() tries to get the lock and returns a result. | 37 | * __raw_spin_trylock() tries to get the lock and returns a result. |
65 | * On the m32r, the result value is 1 (= Success) or 0 (= Failure). | 38 | * On the m32r, the result value is 1 (= Success) or 0 (= Failure). |
66 | */ | 39 | */ |
67 | static inline int _raw_spin_trylock(spinlock_t *lock) | 40 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
68 | { | 41 | { |
69 | int oldval; | 42 | int oldval; |
70 | unsigned long tmp1, tmp2; | 43 | unsigned long tmp1, tmp2; |
@@ -78,7 +51,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock) | |||
78 | * } | 51 | * } |
79 | */ | 52 | */ |
80 | __asm__ __volatile__ ( | 53 | __asm__ __volatile__ ( |
81 | "# spin_trylock \n\t" | 54 | "# __raw_spin_trylock \n\t" |
82 | "ldi %1, #0; \n\t" | 55 | "ldi %1, #0; \n\t" |
83 | "mvfc %2, psw; \n\t" | 56 | "mvfc %2, psw; \n\t" |
84 | "clrpsw #0x40 -> nop; \n\t" | 57 | "clrpsw #0x40 -> nop; \n\t" |
@@ -97,16 +70,10 @@ static inline int _raw_spin_trylock(spinlock_t *lock) | |||
97 | return (oldval > 0); | 70 | return (oldval > 0); |
98 | } | 71 | } |
99 | 72 | ||
100 | static inline void _raw_spin_lock(spinlock_t *lock) | 73 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
101 | { | 74 | { |
102 | unsigned long tmp0, tmp1; | 75 | unsigned long tmp0, tmp1; |
103 | 76 | ||
104 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
105 | if (unlikely(lock->magic != SPINLOCK_MAGIC)) { | ||
106 | printk("pc: %p\n", __builtin_return_address(0)); | ||
107 | BUG(); | ||
108 | } | ||
109 | #endif | ||
110 | /* | 77 | /* |
111 | * lock->slock : =1 : unlock | 78 | * lock->slock : =1 : unlock |
112 | * : <=0 : lock | 79 | * : <=0 : lock |
@@ -118,7 +85,7 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
118 | * } | 85 | * } |
119 | */ | 86 | */ |
120 | __asm__ __volatile__ ( | 87 | __asm__ __volatile__ ( |
121 | "# spin_lock \n\t" | 88 | "# __raw_spin_lock \n\t" |
122 | ".fillinsn \n" | 89 | ".fillinsn \n" |
123 | "1: \n\t" | 90 | "1: \n\t" |
124 | "mvfc %1, psw; \n\t" | 91 | "mvfc %1, psw; \n\t" |
@@ -145,12 +112,8 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
145 | ); | 112 | ); |
146 | } | 113 | } |
147 | 114 | ||
148 | static inline void _raw_spin_unlock(spinlock_t *lock) | 115 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
149 | { | 116 | { |
150 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
151 | BUG_ON(lock->magic != SPINLOCK_MAGIC); | ||
152 | BUG_ON(!spin_is_locked(lock)); | ||
153 | #endif | ||
154 | mb(); | 117 | mb(); |
155 | lock->slock = 1; | 118 | lock->slock = 1; |
156 | } | 119 | } |
@@ -164,59 +127,32 @@ static inline void _raw_spin_unlock(spinlock_t *lock) | |||
164 | * can "mix" irq-safe locks - any writer needs to get an | 127 | * can "mix" irq-safe locks - any writer needs to get an |
165 | * irq-safe write-lock, but readers can get non-irqsafe | 128 | * irq-safe write-lock, but readers can get non-irqsafe |
166 | * read-locks. | 129 | * read-locks. |
130 | * | ||
131 | * On x86, we implement read-write locks as a 32-bit counter | ||
132 | * with the high bit (sign) being the "contended" bit. | ||
133 | * | ||
134 | * The inline assembly is non-obvious. Think about it. | ||
135 | * | ||
136 | * Changed to use the same technique as rw semaphores. See | ||
137 | * semaphore.h for details. -ben | ||
167 | */ | 138 | */ |
168 | typedef struct { | ||
169 | volatile int lock; | ||
170 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
171 | unsigned magic; | ||
172 | #endif | ||
173 | #ifdef CONFIG_PREEMPT | ||
174 | unsigned int break_lock; | ||
175 | #endif | ||
176 | } rwlock_t; | ||
177 | |||
178 | #define RWLOCK_MAGIC 0xdeaf1eed | ||
179 | |||
180 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
181 | #define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC | ||
182 | #else | ||
183 | #define RWLOCK_MAGIC_INIT /* */ | ||
184 | #endif | ||
185 | |||
186 | #define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT } | ||
187 | |||
188 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
189 | 139 | ||
190 | /** | 140 | /** |
191 | * read_can_lock - would read_trylock() succeed? | 141 | * read_can_lock - would read_trylock() succeed? |
192 | * @lock: the rwlock in question. | 142 | * @lock: the rwlock in question. |
193 | */ | 143 | */ |
194 | #define read_can_lock(x) ((int)(x)->lock > 0) | 144 | #define __raw_read_can_lock(x) ((int)(x)->lock > 0) |
195 | 145 | ||
196 | /** | 146 | /** |
197 | * write_can_lock - would write_trylock() succeed? | 147 | * write_can_lock - would write_trylock() succeed? |
198 | * @lock: the rwlock in question. | 148 | * @lock: the rwlock in question. |
199 | */ | 149 | */ |
200 | #define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | 150 | #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) |
201 | |||
202 | /* | ||
203 | * On x86, we implement read-write locks as a 32-bit counter | ||
204 | * with the high bit (sign) being the "contended" bit. | ||
205 | * | ||
206 | * The inline assembly is non-obvious. Think about it. | ||
207 | * | ||
208 | * Changed to use the same technique as rw semaphores. See | ||
209 | * semaphore.h for details. -ben | ||
210 | */ | ||
211 | /* the spinlock helpers are in arch/i386/kernel/semaphore.c */ | ||
212 | 151 | ||
213 | static inline void _raw_read_lock(rwlock_t *rw) | 152 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
214 | { | 153 | { |
215 | unsigned long tmp0, tmp1; | 154 | unsigned long tmp0, tmp1; |
216 | 155 | ||
217 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
218 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
219 | #endif | ||
220 | /* | 156 | /* |
221 | * rw->lock : >0 : unlock | 157 | * rw->lock : >0 : unlock |
222 | * : <=0 : lock | 158 | * : <=0 : lock |
@@ -264,13 +200,10 @@ static inline void _raw_read_lock(rwlock_t *rw) | |||
264 | ); | 200 | ); |
265 | } | 201 | } |
266 | 202 | ||
267 | static inline void _raw_write_lock(rwlock_t *rw) | 203 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
268 | { | 204 | { |
269 | unsigned long tmp0, tmp1, tmp2; | 205 | unsigned long tmp0, tmp1, tmp2; |
270 | 206 | ||
271 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
272 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
273 | #endif | ||
274 | /* | 207 | /* |
275 | * rw->lock : =RW_LOCK_BIAS_STR : unlock | 208 | * rw->lock : =RW_LOCK_BIAS_STR : unlock |
276 | * : !=RW_LOCK_BIAS_STR : lock | 209 | * : !=RW_LOCK_BIAS_STR : lock |
@@ -320,7 +253,7 @@ static inline void _raw_write_lock(rwlock_t *rw) | |||
320 | ); | 253 | ); |
321 | } | 254 | } |
322 | 255 | ||
323 | static inline void _raw_read_unlock(rwlock_t *rw) | 256 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
324 | { | 257 | { |
325 | unsigned long tmp0, tmp1; | 258 | unsigned long tmp0, tmp1; |
326 | 259 | ||
@@ -342,7 +275,7 @@ static inline void _raw_read_unlock(rwlock_t *rw) | |||
342 | ); | 275 | ); |
343 | } | 276 | } |
344 | 277 | ||
345 | static inline void _raw_write_unlock(rwlock_t *rw) | 278 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
346 | { | 279 | { |
347 | unsigned long tmp0, tmp1, tmp2; | 280 | unsigned long tmp0, tmp1, tmp2; |
348 | 281 | ||
@@ -366,9 +299,9 @@ static inline void _raw_write_unlock(rwlock_t *rw) | |||
366 | ); | 299 | ); |
367 | } | 300 | } |
368 | 301 | ||
369 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 302 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
370 | 303 | ||
371 | static inline int _raw_write_trylock(rwlock_t *lock) | 304 | static inline int __raw_write_trylock(raw_rwlock_t *lock) |
372 | { | 305 | { |
373 | atomic_t *count = (atomic_t *)lock; | 306 | atomic_t *count = (atomic_t *)lock; |
374 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) | 307 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) |
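m32r encodes unlocked as 1 and locked as anything <= 0, so trylock simply swaps in 0 and checks whether the old value was positive, exactly the contract the "oldval = lock->slock; lock->slock = 0" comment spells out. A sketch of that contract (illustrative, standing in for the psw-twiddling assembly above):

    static int spin_trylock_sketch(volatile int *slock)    /* 1 = unlocked */
    {
            /* swap in 0 ("locked"); success iff it was free */
            return __atomic_exchange_n(slock, 0, __ATOMIC_ACQUIRE) > 0;
    }

    static void spin_unlock_sketch(volatile int *slock)
    {
            __atomic_store_n(slock, 1, __ATOMIC_RELEASE);
    }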
diff --git a/include/asm-m32r/spinlock_types.h b/include/asm-m32r/spinlock_types.h new file mode 100644 index 000000000000..7e9941c45f40 --- /dev/null +++ b/include/asm-m32r/spinlock_types.h | |||
@@ -0,0 +1,23 @@ | |||
1 | #ifndef _ASM_M32R_SPINLOCK_TYPES_H | ||
2 | #define _ASM_M32R_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile int slock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile int lock; | ||
16 | } raw_rwlock_t; | ||
17 | |||
18 | #define RW_LOCK_BIAS 0x01000000 | ||
19 | #define RW_LOCK_BIAS_STR "0x01000000" | ||
20 | |||
21 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | ||
22 | |||
23 | #endif | ||
diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h index 114d3eb98a6a..4d0135b11156 100644 --- a/include/asm-mips/spinlock.h +++ b/include/asm-mips/spinlock.h | |||
@@ -16,20 +16,10 @@ | |||
16 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 16 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
17 | */ | 17 | */ |
18 | 18 | ||
19 | typedef struct { | 19 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
20 | volatile unsigned int lock; | 20 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
21 | #ifdef CONFIG_PREEMPT | 21 | #define __raw_spin_unlock_wait(x) \ |
22 | unsigned int break_lock; | 22 | do { cpu_relax(); } while ((x)->lock) |
23 | #endif | ||
24 | } spinlock_t; | ||
25 | |||
26 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | ||
27 | |||
28 | #define spin_lock_init(x) do { (x)->lock = 0; } while(0) | ||
29 | |||
30 | #define spin_is_locked(x) ((x)->lock != 0) | ||
31 | #define spin_unlock_wait(x) do { barrier(); } while ((x)->lock) | ||
32 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
33 | 23 | ||
34 | /* | 24 | /* |
35 | * Simple spin lock operations. There are two variants, one clears IRQ's | 25 | * Simple spin lock operations. There are two variants, one clears IRQ's |
@@ -38,13 +28,13 @@ typedef struct { | |||
38 | * We make no fairness assumptions. They have a cost. | 28 | * We make no fairness assumptions. They have a cost. |
39 | */ | 29 | */ |
40 | 30 | ||
41 | static inline void _raw_spin_lock(spinlock_t *lock) | 31 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
42 | { | 32 | { |
43 | unsigned int tmp; | 33 | unsigned int tmp; |
44 | 34 | ||
45 | if (R10000_LLSC_WAR) { | 35 | if (R10000_LLSC_WAR) { |
46 | __asm__ __volatile__( | 36 | __asm__ __volatile__( |
47 | " .set noreorder # _raw_spin_lock \n" | 37 | " .set noreorder # __raw_spin_lock \n" |
48 | "1: ll %1, %2 \n" | 38 | "1: ll %1, %2 \n" |
49 | " bnez %1, 1b \n" | 39 | " bnez %1, 1b \n" |
50 | " li %1, 1 \n" | 40 | " li %1, 1 \n" |
@@ -58,7 +48,7 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
58 | : "memory"); | 48 | : "memory"); |
59 | } else { | 49 | } else { |
60 | __asm__ __volatile__( | 50 | __asm__ __volatile__( |
61 | " .set noreorder # _raw_spin_lock \n" | 51 | " .set noreorder # __raw_spin_lock \n" |
62 | "1: ll %1, %2 \n" | 52 | "1: ll %1, %2 \n" |
63 | " bnez %1, 1b \n" | 53 | " bnez %1, 1b \n" |
64 | " li %1, 1 \n" | 54 | " li %1, 1 \n" |
@@ -72,10 +62,10 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
72 | } | 62 | } |
73 | } | 63 | } |
74 | 64 | ||
75 | static inline void _raw_spin_unlock(spinlock_t *lock) | 65 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
76 | { | 66 | { |
77 | __asm__ __volatile__( | 67 | __asm__ __volatile__( |
78 | " .set noreorder # _raw_spin_unlock \n" | 68 | " .set noreorder # __raw_spin_unlock \n" |
79 | " sync \n" | 69 | " sync \n" |
80 | " sw $0, %0 \n" | 70 | " sw $0, %0 \n" |
81 | " .set\treorder \n" | 71 | " .set\treorder \n" |
@@ -84,13 +74,13 @@ static inline void _raw_spin_unlock(spinlock_t *lock) | |||
84 | : "memory"); | 74 | : "memory"); |
85 | } | 75 | } |
86 | 76 | ||
87 | static inline unsigned int _raw_spin_trylock(spinlock_t *lock) | 77 | static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) |
88 | { | 78 | { |
89 | unsigned int temp, res; | 79 | unsigned int temp, res; |
90 | 80 | ||
91 | if (R10000_LLSC_WAR) { | 81 | if (R10000_LLSC_WAR) { |
92 | __asm__ __volatile__( | 82 | __asm__ __volatile__( |
93 | " .set noreorder # _raw_spin_trylock \n" | 83 | " .set noreorder # __raw_spin_trylock \n" |
94 | "1: ll %0, %3 \n" | 84 | "1: ll %0, %3 \n" |
95 | " ori %2, %0, 1 \n" | 85 | " ori %2, %0, 1 \n" |
96 | " sc %2, %1 \n" | 86 | " sc %2, %1 \n" |
@@ -104,7 +94,7 @@ static inline unsigned int _raw_spin_trylock(spinlock_t *lock) | |||
104 | : "memory"); | 94 | : "memory"); |
105 | } else { | 95 | } else { |
106 | __asm__ __volatile__( | 96 | __asm__ __volatile__( |
107 | " .set noreorder # _raw_spin_trylock \n" | 97 | " .set noreorder # __raw_spin_trylock \n" |
108 | "1: ll %0, %3 \n" | 98 | "1: ll %0, %3 \n" |
109 | " ori %2, %0, 1 \n" | 99 | " ori %2, %0, 1 \n" |
110 | " sc %2, %1 \n" | 100 | " sc %2, %1 \n" |
@@ -129,24 +119,13 @@ static inline unsigned int _raw_spin_trylock(spinlock_t *lock) | |||
129 | * read-locks. | 119 | * read-locks. |
130 | */ | 120 | */ |
131 | 121 | ||
132 | typedef struct { | 122 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
133 | volatile unsigned int lock; | ||
134 | #ifdef CONFIG_PREEMPT | ||
135 | unsigned int break_lock; | ||
136 | #endif | ||
137 | } rwlock_t; | ||
138 | |||
139 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | ||
140 | |||
141 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
142 | |||
143 | static inline void _raw_read_lock(rwlock_t *rw) | ||
144 | { | 123 | { |
145 | unsigned int tmp; | 124 | unsigned int tmp; |
146 | 125 | ||
147 | if (R10000_LLSC_WAR) { | 126 | if (R10000_LLSC_WAR) { |
148 | __asm__ __volatile__( | 127 | __asm__ __volatile__( |
149 | " .set noreorder # _raw_read_lock \n" | 128 | " .set noreorder # __raw_read_lock \n" |
150 | "1: ll %1, %2 \n" | 129 | "1: ll %1, %2 \n" |
151 | " bltz %1, 1b \n" | 130 | " bltz %1, 1b \n" |
152 | " addu %1, 1 \n" | 131 | " addu %1, 1 \n" |
@@ -160,7 +139,7 @@ static inline void _raw_read_lock(rwlock_t *rw) | |||
160 | : "memory"); | 139 | : "memory"); |
161 | } else { | 140 | } else { |
162 | __asm__ __volatile__( | 141 | __asm__ __volatile__( |
163 | " .set noreorder # _raw_read_lock \n" | 142 | " .set noreorder # __raw_read_lock \n" |
164 | "1: ll %1, %2 \n" | 143 | "1: ll %1, %2 \n" |
165 | " bltz %1, 1b \n" | 144 | " bltz %1, 1b \n" |
166 | " addu %1, 1 \n" | 145 | " addu %1, 1 \n" |
@@ -177,13 +156,13 @@ static inline void _raw_read_lock(rwlock_t *rw) | |||
177 | /* Note the use of sub, not subu which will make the kernel die with an | 156 | /* Note the use of sub, not subu which will make the kernel die with an |
178 | overflow exception if we ever try to unlock an rwlock that is already | 157 | overflow exception if we ever try to unlock an rwlock that is already |
179 | unlocked or is being held by a writer. */ | 158 | unlocked or is being held by a writer. */ |
180 | static inline void _raw_read_unlock(rwlock_t *rw) | 159 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
181 | { | 160 | { |
182 | unsigned int tmp; | 161 | unsigned int tmp; |
183 | 162 | ||
184 | if (R10000_LLSC_WAR) { | 163 | if (R10000_LLSC_WAR) { |
185 | __asm__ __volatile__( | 164 | __asm__ __volatile__( |
186 | "1: ll %1, %2 # _raw_read_unlock \n" | 165 | "1: ll %1, %2 # __raw_read_unlock \n" |
187 | " sub %1, 1 \n" | 166 | " sub %1, 1 \n" |
188 | " sc %1, %0 \n" | 167 | " sc %1, %0 \n" |
189 | " beqzl %1, 1b \n" | 168 | " beqzl %1, 1b \n" |
@@ -193,7 +172,7 @@ static inline void _raw_read_unlock(rwlock_t *rw) | |||
193 | : "memory"); | 172 | : "memory"); |
194 | } else { | 173 | } else { |
195 | __asm__ __volatile__( | 174 | __asm__ __volatile__( |
196 | " .set noreorder # _raw_read_unlock \n" | 175 | " .set noreorder # __raw_read_unlock \n" |
197 | "1: ll %1, %2 \n" | 176 | "1: ll %1, %2 \n" |
198 | " sub %1, 1 \n" | 177 | " sub %1, 1 \n" |
199 | " sc %1, %0 \n" | 178 | " sc %1, %0 \n" |
@@ -206,13 +185,13 @@ static inline void _raw_read_unlock(rwlock_t *rw) | |||
206 | } | 185 | } |
207 | } | 186 | } |
208 | 187 | ||
209 | static inline void _raw_write_lock(rwlock_t *rw) | 188 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
210 | { | 189 | { |
211 | unsigned int tmp; | 190 | unsigned int tmp; |
212 | 191 | ||
213 | if (R10000_LLSC_WAR) { | 192 | if (R10000_LLSC_WAR) { |
214 | __asm__ __volatile__( | 193 | __asm__ __volatile__( |
215 | " .set noreorder # _raw_write_lock \n" | 194 | " .set noreorder # __raw_write_lock \n" |
216 | "1: ll %1, %2 \n" | 195 | "1: ll %1, %2 \n" |
217 | " bnez %1, 1b \n" | 196 | " bnez %1, 1b \n" |
218 | " lui %1, 0x8000 \n" | 197 | " lui %1, 0x8000 \n" |
@@ -226,7 +205,7 @@ static inline void _raw_write_lock(rwlock_t *rw) | |||
226 | : "memory"); | 205 | : "memory"); |
227 | } else { | 206 | } else { |
228 | __asm__ __volatile__( | 207 | __asm__ __volatile__( |
229 | " .set noreorder # _raw_write_lock \n" | 208 | " .set noreorder # __raw_write_lock \n" |
230 | "1: ll %1, %2 \n" | 209 | "1: ll %1, %2 \n" |
231 | " bnez %1, 1b \n" | 210 | " bnez %1, 1b \n" |
232 | " lui %1, 0x8000 \n" | 211 | " lui %1, 0x8000 \n" |
@@ -241,26 +220,26 @@ static inline void _raw_write_lock(rwlock_t *rw) | |||
241 | } | 220 | } |
242 | } | 221 | } |
243 | 222 | ||
244 | static inline void _raw_write_unlock(rwlock_t *rw) | 223 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
245 | { | 224 | { |
246 | __asm__ __volatile__( | 225 | __asm__ __volatile__( |
247 | " sync # _raw_write_unlock \n" | 226 | " sync # __raw_write_unlock \n" |
248 | " sw $0, %0 \n" | 227 | " sw $0, %0 \n" |
249 | : "=m" (rw->lock) | 228 | : "=m" (rw->lock) |
250 | : "m" (rw->lock) | 229 | : "m" (rw->lock) |
251 | : "memory"); | 230 | : "memory"); |
252 | } | 231 | } |
253 | 232 | ||
254 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 233 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
255 | 234 | ||
256 | static inline int _raw_write_trylock(rwlock_t *rw) | 235 | static inline int __raw_write_trylock(raw_rwlock_t *rw) |
257 | { | 236 | { |
258 | unsigned int tmp; | 237 | unsigned int tmp; |
259 | int ret; | 238 | int ret; |
260 | 239 | ||
261 | if (R10000_LLSC_WAR) { | 240 | if (R10000_LLSC_WAR) { |
262 | __asm__ __volatile__( | 241 | __asm__ __volatile__( |
263 | " .set noreorder # _raw_write_trylock \n" | 242 | " .set noreorder # __raw_write_trylock \n" |
264 | " li %2, 0 \n" | 243 | " li %2, 0 \n" |
265 | "1: ll %1, %3 \n" | 244 | "1: ll %1, %3 \n" |
266 | " bnez %1, 2f \n" | 245 | " bnez %1, 2f \n" |
@@ -277,7 +256,7 @@ static inline int _raw_write_trylock(rwlock_t *rw) | |||
277 | : "memory"); | 256 | : "memory"); |
278 | } else { | 257 | } else { |
279 | __asm__ __volatile__( | 258 | __asm__ __volatile__( |
280 | " .set noreorder # _raw_write_trylock \n" | 259 | " .set noreorder # __raw_write_trylock \n" |
281 | " li %2, 0 \n" | 260 | " li %2, 0 \n" |
282 | "1: ll %1, %3 \n" | 261 | "1: ll %1, %3 \n" |
283 | " bnez %1, 2f \n" | 262 | " bnez %1, 2f \n" |
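For readers not fluent in MIPS assembly: the ll (load-linked) / sc (store-conditional) pair above implements the atomic read-modify-write, and the rwlock word goes negative (top bit set via lui %1, 0x8000) while a writer holds it, which is what the bltz test spins on. A rough C equivalent of the read-lock loop, with a GCC __sync builtin standing in for the real ll/sc pair (a sketch, not the kernel's code):

	/* Spin while a writer holds the lock, then atomically bump the
	 * reader count; CAS failure means the word changed under us. */
	static inline void read_lock_sketch(volatile int *lock)
	{
		int old;
		do {
			do {
				old = *lock;
			} while (old < 0);	/* negative: writer present */
		} while (!__sync_bool_compare_and_swap(lock, old, old + 1));
	}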
diff --git a/include/asm-mips/spinlock_types.h b/include/asm-mips/spinlock_types.h new file mode 100644 index 000000000000..ce26c5048b15 --- /dev/null +++ b/include/asm-mips/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef _ASM_SPINLOCK_TYPES_H | ||
2 | #define _ASM_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int lock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int lock; | ||
16 | } raw_rwlock_t; | ||
17 | |||
18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
19 | |||
20 | #endif | ||
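Both the MIPS code above and the SH code further down punt __raw_read_trylock() to generic__raw_read_trylock(). That helper is not part of this patch; assuming the 2.6.14-era include/linux/spinlock.h, it reads roughly as follows:

	/* Generic fallback for architectures without a cheap native
	 * read_trylock: take the read lock (possibly spinning) and
	 * report success. */
	static inline int generic__raw_read_trylock(raw_rwlock_t *lock)
	{
		__raw_read_lock(lock);
		return 1;
	}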
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h index e24f7579adb0..048a2c7fd0c0 100644 --- a/include/asm-parisc/atomic.h +++ b/include/asm-parisc/atomic.h | |||
@@ -24,19 +24,19 @@ | |||
24 | # define ATOMIC_HASH_SIZE 4 | 24 | # define ATOMIC_HASH_SIZE 4 |
25 | # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) | 25 | # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) |
26 | 26 | ||
27 | extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; | 27 | extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; |
28 | 28 | ||
29 | /* Can't use _raw_spin_lock_irq because of #include problems, so | 29 | /* Can't use raw_spin_lock_irq because of #include problems, so |
30 | * this is the substitute */ | 30 | * this is the substitute */ |
31 | #define _atomic_spin_lock_irqsave(l,f) do { \ | 31 | #define _atomic_spin_lock_irqsave(l,f) do { \ |
32 | spinlock_t *s = ATOMIC_HASH(l); \ | 32 | raw_spinlock_t *s = ATOMIC_HASH(l); \ |
33 | local_irq_save(f); \ | 33 | local_irq_save(f); \ |
34 | _raw_spin_lock(s); \ | 34 | __raw_spin_lock(s); \ |
35 | } while(0) | 35 | } while(0) |
36 | 36 | ||
37 | #define _atomic_spin_unlock_irqrestore(l,f) do { \ | 37 | #define _atomic_spin_unlock_irqrestore(l,f) do { \ |
38 | spinlock_t *s = ATOMIC_HASH(l); \ | 38 | raw_spinlock_t *s = ATOMIC_HASH(l); \ |
39 | _raw_spin_unlock(s); \ | 39 | __raw_spin_unlock(s); \ |
40 | local_irq_restore(f); \ | 40 | local_irq_restore(f); \ |
41 | } while(0) | 41 | } while(0) |
42 | 42 | ||
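For context on why this matters: parisc has no atomic arithmetic instructions, so every atomic_t operation in this header takes one of the hashed locks around a plain read-modify-write. The callers (unchanged by this patch) follow this pattern, roughly:

	static __inline__ int __atomic_add_return(int i, atomic_t *v)
	{
		int ret;
		unsigned long flags;

		/* ATOMIC_HASH(v) picks one of the ATOMIC_HASH_SIZE global
		 * locks based on the atomic_t's address */
		_atomic_spin_lock_irqsave(v, flags);
		ret = (v->counter += i);
		_atomic_spin_unlock_irqrestore(v, flags);
		return ret;
	}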
diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h index 928e5ef850bd..af7db694b22d 100644 --- a/include/asm-parisc/bitops.h +++ b/include/asm-parisc/bitops.h | |||
@@ -2,7 +2,7 @@ | |||
2 | #define _PARISC_BITOPS_H | 2 | #define _PARISC_BITOPS_H |
3 | 3 | ||
4 | #include <linux/compiler.h> | 4 | #include <linux/compiler.h> |
5 | #include <asm/system.h> | 5 | #include <asm/spinlock.h> |
6 | #include <asm/byteorder.h> | 6 | #include <asm/byteorder.h> |
7 | #include <asm/atomic.h> | 7 | #include <asm/atomic.h> |
8 | 8 | ||
diff --git a/include/asm-parisc/cacheflush.h b/include/asm-parisc/cacheflush.h index 06732719d927..aa592d8c0e39 100644 --- a/include/asm-parisc/cacheflush.h +++ b/include/asm-parisc/cacheflush.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <linux/config.h> | 4 | #include <linux/config.h> |
5 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
6 | #include <asm/cache.h> /* for flush_user_dcache_range_asm() proto */ | ||
6 | 7 | ||
7 | /* The usual comment is "Caches aren't brain-dead on the <architecture>". | 8 | /* The usual comment is "Caches aren't brain-dead on the <architecture>". |
8 | * Unfortunately, that doesn't apply to PA-RISC. */ | 9 | * Unfortunately, that doesn't apply to PA-RISC. */ |
diff --git a/include/asm-parisc/processor.h b/include/asm-parisc/processor.h index 0b61f51d8467..a9dfadd05658 100644 --- a/include/asm-parisc/processor.h +++ b/include/asm-parisc/processor.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #ifndef __ASSEMBLY__ | 11 | #ifndef __ASSEMBLY__ |
12 | #include <linux/config.h> | 12 | #include <linux/config.h> |
13 | #include <linux/threads.h> | 13 | #include <linux/threads.h> |
14 | #include <linux/spinlock_types.h> | ||
14 | 15 | ||
15 | #include <asm/hardware.h> | 16 | #include <asm/hardware.h> |
16 | #include <asm/page.h> | 17 | #include <asm/page.h> |
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h index 679ea1c651ef..43eaa6e742e0 100644 --- a/include/asm-parisc/spinlock.h +++ b/include/asm-parisc/spinlock.h | |||
@@ -2,30 +2,25 @@ | |||
2 | #define __ASM_SPINLOCK_H | 2 | #define __ASM_SPINLOCK_H |
3 | 3 | ||
4 | #include <asm/system.h> | 4 | #include <asm/system.h> |
5 | #include <asm/processor.h> | ||
6 | #include <asm/spinlock_types.h> | ||
5 | 7 | ||
6 | /* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked | 8 | /* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked |
7 | * since it only has load-and-zero. Moreover, at least on some PA processors, | 9 | * since it only has load-and-zero. Moreover, at least on some PA processors, |
8 | * the semaphore address has to be 16-byte aligned. | 10 | * the semaphore address has to be 16-byte aligned. |
9 | */ | 11 | */ |
10 | 12 | ||
11 | #ifndef CONFIG_DEBUG_SPINLOCK | 13 | static inline int __raw_spin_is_locked(raw_spinlock_t *x) |
12 | |||
13 | #define __SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } | ||
14 | #undef SPIN_LOCK_UNLOCKED | ||
15 | #define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED | ||
16 | |||
17 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
18 | |||
19 | static inline int spin_is_locked(spinlock_t *x) | ||
20 | { | 14 | { |
21 | volatile unsigned int *a = __ldcw_align(x); | 15 | volatile unsigned int *a = __ldcw_align(x); |
22 | return *a == 0; | 16 | return *a == 0; |
23 | } | 17 | } |
24 | 18 | ||
25 | #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) | 19 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
26 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | 20 | #define __raw_spin_unlock_wait(x) \ |
21 | do { cpu_relax(); } while (__raw_spin_is_locked(x)) | ||
27 | 22 | ||
28 | static inline void _raw_spin_lock(spinlock_t *x) | 23 | static inline void __raw_spin_lock(raw_spinlock_t *x) |
29 | { | 24 | { |
30 | volatile unsigned int *a; | 25 | volatile unsigned int *a; |
31 | 26 | ||
@@ -36,7 +31,7 @@ static inline void _raw_spin_lock(spinlock_t *x) | |||
36 | mb(); | 31 | mb(); |
37 | } | 32 | } |
38 | 33 | ||
39 | static inline void _raw_spin_unlock(spinlock_t *x) | 34 | static inline void __raw_spin_unlock(raw_spinlock_t *x) |
40 | { | 35 | { |
41 | volatile unsigned int *a; | 36 | volatile unsigned int *a; |
42 | mb(); | 37 | mb(); |
@@ -45,7 +40,7 @@ static inline void _raw_spin_unlock(spinlock_t *x) | |||
45 | mb(); | 40 | mb(); |
46 | } | 41 | } |
47 | 42 | ||
48 | static inline int _raw_spin_trylock(spinlock_t *x) | 43 | static inline int __raw_spin_trylock(raw_spinlock_t *x) |
49 | { | 44 | { |
50 | volatile unsigned int *a; | 45 | volatile unsigned int *a; |
51 | int ret; | 46 | int ret; |
@@ -57,131 +52,38 @@ static inline int _raw_spin_trylock(spinlock_t *x) | |||
57 | 52 | ||
58 | return ret; | 53 | return ret; |
59 | } | 54 | } |
60 | |||
61 | #define spin_lock_own(LOCK, LOCATION) ((void)0) | ||
62 | |||
63 | #else /* !(CONFIG_DEBUG_SPINLOCK) */ | ||
64 | |||
65 | #define SPINLOCK_MAGIC 0x1D244B3C | ||
66 | |||
67 | #define __SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 }, SPINLOCK_MAGIC, 10, __FILE__ , NULL, 0, -1, NULL, NULL } | ||
68 | #undef SPIN_LOCK_UNLOCKED | ||
69 | #define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED | ||
70 | |||
71 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
72 | |||
73 | #define CHECK_LOCK(x) \ | ||
74 | do { \ | ||
75 | if (unlikely((x)->magic != SPINLOCK_MAGIC)) { \ | ||
76 | printk(KERN_ERR "%s:%d: spin_is_locked" \ | ||
77 | " on uninitialized spinlock %p.\n", \ | ||
78 | __FILE__, __LINE__, (x)); \ | ||
79 | } \ | ||
80 | } while(0) | ||
81 | |||
82 | #define spin_is_locked(x) \ | ||
83 | ({ \ | ||
84 | CHECK_LOCK(x); \ | ||
85 | volatile unsigned int *a = __ldcw_align(x); \ | ||
86 | if (unlikely((*a == 0) && (x)->babble)) { \ | ||
87 | (x)->babble--; \ | ||
88 | printk("KERN_WARNING \ | ||
89 | %s:%d: spin_is_locked(%s/%p) already" \ | ||
90 | " locked by %s:%d in %s at %p(%d)\n", \ | ||
91 | __FILE__,__LINE__, (x)->module, (x), \ | ||
92 | (x)->bfile, (x)->bline, (x)->task->comm,\ | ||
93 | (x)->previous, (x)->oncpu); \ | ||
94 | } \ | ||
95 | *a == 0; \ | ||
96 | }) | ||
97 | |||
98 | #define spin_unlock_wait(x) \ | ||
99 | do { \ | ||
100 | CHECK_LOCK(x); \ | ||
101 | volatile unsigned int *a = __ldcw_align(x); \ | ||
102 | if (unlikely((*a == 0) && (x)->babble)) { \ | ||
103 | (x)->babble--; \ | ||
104 | printk("KERN_WARNING \ | ||
105 | %s:%d: spin_unlock_wait(%s/%p)" \ | ||
106 | " owned by %s:%d in %s at %p(%d)\n", \ | ||
107 | __FILE__,__LINE__, (x)->module, (x), \ | ||
108 | (x)->bfile, (x)->bline, (x)->task->comm,\ | ||
109 | (x)->previous, (x)->oncpu); \ | ||
110 | } \ | ||
111 | barrier(); \ | ||
112 | } while (*((volatile unsigned char *)(__ldcw_align(x))) == 0) | ||
113 | |||
114 | extern void _dbg_spin_lock(spinlock_t *lock, const char *base_file, int line_no); | ||
115 | extern void _dbg_spin_unlock(spinlock_t *lock, const char *, int); | ||
116 | extern int _dbg_spin_trylock(spinlock_t * lock, const char *, int); | ||
117 | |||
118 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
119 | |||
120 | #define _raw_spin_unlock(lock) _dbg_spin_unlock(lock, __FILE__, __LINE__) | ||
121 | #define _raw_spin_lock(lock) _dbg_spin_lock(lock, __FILE__, __LINE__) | ||
122 | #define _raw_spin_trylock(lock) _dbg_spin_trylock(lock, __FILE__, __LINE__) | ||
123 | |||
124 | /* just in case we need it */ | ||
125 | #define spin_lock_own(LOCK, LOCATION) \ | ||
126 | do { \ | ||
127 | volatile unsigned int *a = __ldcw_align(LOCK); \ | ||
128 | if (!((*a == 0) && ((LOCK)->oncpu == smp_processor_id()))) \ | ||
129 | printk("KERN_WARNING \ | ||
130 | %s: called on %d from %p but lock %s on %d\n", \ | ||
131 | LOCATION, smp_processor_id(), \ | ||
132 | __builtin_return_address(0), \ | ||
133 | (*a == 0) ? "taken" : "freed", (LOCK)->on_cpu); \ | ||
134 | } while (0) | ||
135 | |||
136 | #endif /* !(CONFIG_DEBUG_SPINLOCK) */ | ||
137 | 55 | ||
138 | /* | 56 | /* |
139 | * Read-write spinlocks, allowing multiple readers | 57 | * Read-write spinlocks, allowing multiple readers |
140 | * but only one writer. | 58 | * but only one writer. |
141 | */ | 59 | */ |
142 | typedef struct { | ||
143 | spinlock_t lock; | ||
144 | volatile int counter; | ||
145 | #ifdef CONFIG_PREEMPT | ||
146 | unsigned int break_lock; | ||
147 | #endif | ||
148 | } rwlock_t; | ||
149 | |||
150 | #define RW_LOCK_UNLOCKED (rwlock_t) { __SPIN_LOCK_UNLOCKED, 0 } | ||
151 | |||
152 | #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while (0) | ||
153 | 60 | ||
154 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 61 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
155 | 62 | ||
156 | /* read_lock, read_unlock are pretty straightforward. Of course it somehow | 63 | /* read_lock, read_unlock are pretty straightforward. Of course it somehow |
157 | * sucks we end up saving/restoring flags twice for read_lock_irqsave and so on. */ | 64 | * sucks we end up saving/restoring flags twice for read_lock_irqsave and so on. */ |
158 | 65 | ||
159 | #ifdef CONFIG_DEBUG_RWLOCK | 66 | static __inline__ void __raw_read_lock(raw_rwlock_t *rw) |
160 | extern void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline); | ||
161 | #define _raw_read_lock(rw) _dbg_read_lock(rw, __FILE__, __LINE__) | ||
162 | #else | ||
163 | static __inline__ void _raw_read_lock(rwlock_t *rw) | ||
164 | { | 67 | { |
165 | unsigned long flags; | 68 | unsigned long flags; |
166 | local_irq_save(flags); | 69 | local_irq_save(flags); |
167 | _raw_spin_lock(&rw->lock); | 70 | __raw_spin_lock(&rw->lock); |
168 | 71 | ||
169 | rw->counter++; | 72 | rw->counter++; |
170 | 73 | ||
171 | _raw_spin_unlock(&rw->lock); | 74 | __raw_spin_unlock(&rw->lock); |
172 | local_irq_restore(flags); | 75 | local_irq_restore(flags); |
173 | } | 76 | } |
174 | #endif /* CONFIG_DEBUG_RWLOCK */ | ||
175 | 77 | ||
176 | static __inline__ void _raw_read_unlock(rwlock_t *rw) | 78 | static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) |
177 | { | 79 | { |
178 | unsigned long flags; | 80 | unsigned long flags; |
179 | local_irq_save(flags); | 81 | local_irq_save(flags); |
180 | _raw_spin_lock(&rw->lock); | 82 | __raw_spin_lock(&rw->lock); |
181 | 83 | ||
182 | rw->counter--; | 84 | rw->counter--; |
183 | 85 | ||
184 | _raw_spin_unlock(&rw->lock); | 86 | __raw_spin_unlock(&rw->lock); |
185 | local_irq_restore(flags); | 87 | local_irq_restore(flags); |
186 | } | 88 | } |
187 | 89 | ||
@@ -194,20 +96,17 @@ static __inline__ void _raw_read_unlock(rwlock_t *rw) | |||
194 | * writers) in interrupt handlers someone fucked up and we'd dead-lock | 96 | * writers) in interrupt handlers someone fucked up and we'd dead-lock |
195 | * sooner or later anyway. prumpf */ | 97 | * sooner or later anyway. prumpf */ |
196 | 98 | ||
197 | #ifdef CONFIG_DEBUG_RWLOCK | 99 | static __inline__ void __raw_write_lock(raw_rwlock_t *rw) |
198 | extern void _dbg_write_lock(rwlock_t * rw, const char *bfile, int bline); | ||
199 | #define _raw_write_lock(rw) _dbg_write_lock(rw, __FILE__, __LINE__) | ||
200 | #else | ||
201 | static __inline__ void _raw_write_lock(rwlock_t *rw) | ||
202 | { | 100 | { |
203 | retry: | 101 | retry: |
204 | _raw_spin_lock(&rw->lock); | 102 | __raw_spin_lock(&rw->lock); |
205 | 103 | ||
206 | if(rw->counter != 0) { | 104 | if(rw->counter != 0) { |
207 | /* this basically never happens */ | 105 | /* this basically never happens */ |
208 | _raw_spin_unlock(&rw->lock); | 106 | __raw_spin_unlock(&rw->lock); |
209 | 107 | ||
210 | while(rw->counter != 0); | 108 | while (rw->counter != 0) |
109 | cpu_relax(); | ||
211 | 110 | ||
212 | goto retry; | 111 | goto retry; |
213 | } | 112 | } |
@@ -215,26 +114,21 @@ retry: | |||
215 | /* got it. now leave without unlocking */ | 114 | /* got it. now leave without unlocking */ |
216 | rw->counter = -1; /* remember we are locked */ | 115 | rw->counter = -1; /* remember we are locked */ |
217 | } | 116 | } |
218 | #endif /* CONFIG_DEBUG_RWLOCK */ | ||
219 | 117 | ||
220 | /* write_unlock is absolutely trivial - we don't have to wait for anything */ | 118 | /* write_unlock is absolutely trivial - we don't have to wait for anything */ |
221 | 119 | ||
222 | static __inline__ void _raw_write_unlock(rwlock_t *rw) | 120 | static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) |
223 | { | 121 | { |
224 | rw->counter = 0; | 122 | rw->counter = 0; |
225 | _raw_spin_unlock(&rw->lock); | 123 | __raw_spin_unlock(&rw->lock); |
226 | } | 124 | } |
227 | 125 | ||
228 | #ifdef CONFIG_DEBUG_RWLOCK | 126 | static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) |
229 | extern int _dbg_write_trylock(rwlock_t * rw, const char *bfile, int bline); | ||
230 | #define _raw_write_trylock(rw) _dbg_write_trylock(rw, __FILE__, __LINE__) | ||
231 | #else | ||
232 | static __inline__ int _raw_write_trylock(rwlock_t *rw) | ||
233 | { | 127 | { |
234 | _raw_spin_lock(&rw->lock); | 128 | __raw_spin_lock(&rw->lock); |
235 | if (rw->counter != 0) { | 129 | if (rw->counter != 0) { |
236 | /* this basically never happens */ | 130 | /* this basically never happens */ |
237 | _raw_spin_unlock(&rw->lock); | 131 | __raw_spin_unlock(&rw->lock); |
238 | 132 | ||
239 | return 0; | 133 | return 0; |
240 | } | 134 | } |
@@ -243,14 +137,13 @@ static __inline__ int _raw_write_trylock(rwlock_t *rw) | |||
243 | rw->counter = -1; /* remember we are locked */ | 137 | rw->counter = -1; /* remember we are locked */ |
244 | return 1; | 138 | return 1; |
245 | } | 139 | } |
246 | #endif /* CONFIG_DEBUG_RWLOCK */ | ||
247 | 140 | ||
248 | static __inline__ int is_read_locked(rwlock_t *rw) | 141 | static __inline__ int __raw_is_read_locked(raw_rwlock_t *rw) |
249 | { | 142 | { |
250 | return rw->counter > 0; | 143 | return rw->counter > 0; |
251 | } | 144 | } |
252 | 145 | ||
253 | static __inline__ int is_write_locked(rwlock_t *rw) | 146 | static __inline__ int __raw_is_write_locked(raw_rwlock_t *rw) |
254 | { | 147 | { |
255 | return rw->counter < 0; | 148 | return rw->counter < 0; |
256 | } | 149 | } |
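All of the PA-RISC lock paths above bottom out in the ldcw (load-and-clear-word) primitive, which lives in asm/system.h and is untouched by this patch; for reference it looks approximately like:

	/* Atomically load the word at a and clear it to zero, returning
	 * the old value; a non-zero return means we won the lock (PA-RISC
	 * uses 1 for unlocked, 0 for locked, as noted above). */
	#define __ldcw(a) ({					\
		unsigned __ret;					\
		__asm__ __volatile__("ldcw 0(%1),%0"		\
			: "=r" (__ret) : "r" (a));		\
		__ret;						\
	})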
diff --git a/include/asm-parisc/spinlock_types.h b/include/asm-parisc/spinlock_types.h new file mode 100644 index 000000000000..785bba822fbf --- /dev/null +++ b/include/asm-parisc/spinlock_types.h | |||
@@ -0,0 +1,21 @@ | |||
1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
2 | #define __ASM_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int lock[4]; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } | ||
13 | |||
14 | typedef struct { | ||
15 | raw_spinlock_t lock; | ||
16 | volatile int counter; | ||
17 | } raw_rwlock_t; | ||
18 | |||
19 | #define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 } | ||
20 | |||
21 | #endif | ||
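The lock[4] array is there because ldcw insists on a 16-byte-aligned word while the structure itself is only word-aligned: reserving four words guarantees that one of them lands on a 16-byte boundary, and __ldcw_align() in asm/system.h picks it out, along these lines (a sketch of the era's helper):

	#define __PA_LDCW_ALIGNMENT	16
	#define __ldcw_align(a) ({					\
		unsigned long __ret = (unsigned long) &(a)->lock[0];	\
		__ret = (__ret + __PA_LDCW_ALIGNMENT - 1)		\
			& ~(__PA_LDCW_ALIGNMENT - 1);			\
		(volatile unsigned int *) __ret;			\
	})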
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h index 81c543339036..26ff844a21c1 100644 --- a/include/asm-parisc/system.h +++ b/include/asm-parisc/system.h | |||
@@ -160,29 +160,7 @@ static inline void set_eiem(unsigned long val) | |||
160 | }) | 160 | }) |
161 | 161 | ||
162 | #ifdef CONFIG_SMP | 162 | #ifdef CONFIG_SMP |
163 | /* | 163 | # define __lock_aligned __attribute__((__section__(".data.lock_aligned"))) |
164 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | ||
165 | */ | ||
166 | |||
167 | typedef struct { | ||
168 | volatile unsigned int lock[4]; | ||
169 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
170 | unsigned long magic; | ||
171 | volatile unsigned int babble; | ||
172 | const char *module; | ||
173 | char *bfile; | ||
174 | int bline; | ||
175 | int oncpu; | ||
176 | void *previous; | ||
177 | struct task_struct * task; | ||
178 | #endif | ||
179 | #ifdef CONFIG_PREEMPT | ||
180 | unsigned int break_lock; | ||
181 | #endif | ||
182 | } spinlock_t; | ||
183 | |||
184 | #define __lock_aligned __attribute__((__section__(".data.lock_aligned"))) | ||
185 | |||
186 | #endif | 164 | #endif |
187 | 165 | ||
188 | #define KERNEL_START (0x10100000 - 0x1000) | 166 | #define KERNEL_START (0x10100000 - 0x1000) |
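The surviving __lock_aligned attribute drops lock data into the .data.lock_aligned section so the linker script can group it with the alignment ldcw requires; its main user, the hash array declared in asm/atomic.h above, is defined in arch/parisc/lib/bitops.c roughly as:

	raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
		[0 ... ATOMIC_HASH_SIZE-1] = __RAW_SPIN_LOCK_UNLOCKED
	};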
diff --git a/include/asm-ppc/spinlock.h b/include/asm-ppc/spinlock.h index 909199aae104..20edcf2a6e0c 100644 --- a/include/asm-ppc/spinlock.h +++ b/include/asm-ppc/spinlock.h | |||
@@ -5,41 +5,21 @@ | |||
5 | 5 | ||
6 | /* | 6 | /* |
7 | * Simple spin lock operations. | 7 | * Simple spin lock operations. |
8 | * | ||
9 | * (the type definitions are in asm/spinlock_types.h) | ||
8 | */ | 10 | */ |
9 | 11 | ||
10 | typedef struct { | 12 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
11 | volatile unsigned long lock; | 13 | #define __raw_spin_unlock_wait(lock) \ |
12 | #ifdef CONFIG_DEBUG_SPINLOCK | 14 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) |
13 | volatile unsigned long owner_pc; | 15 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
14 | volatile unsigned long owner_cpu; | 16 | |
15 | #endif | 17 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
16 | #ifdef CONFIG_PREEMPT | ||
17 | unsigned int break_lock; | ||
18 | #endif | ||
19 | } spinlock_t; | ||
20 | |||
21 | #ifdef __KERNEL__ | ||
22 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
23 | #define SPINLOCK_DEBUG_INIT , 0, 0 | ||
24 | #else | ||
25 | #define SPINLOCK_DEBUG_INIT /* */ | ||
26 | #endif | ||
27 | |||
28 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 SPINLOCK_DEBUG_INIT } | ||
29 | |||
30 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
31 | #define spin_is_locked(x) ((x)->lock != 0) | ||
32 | #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) | ||
33 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
34 | |||
35 | #ifndef CONFIG_DEBUG_SPINLOCK | ||
36 | |||
37 | static inline void _raw_spin_lock(spinlock_t *lock) | ||
38 | { | 18 | { |
39 | unsigned long tmp; | 19 | unsigned long tmp; |
40 | 20 | ||
41 | __asm__ __volatile__( | 21 | __asm__ __volatile__( |
42 | "b 1f # spin_lock\n\ | 22 | "b 1f # __raw_spin_lock\n\ |
43 | 2: lwzx %0,0,%1\n\ | 23 | 2: lwzx %0,0,%1\n\ |
44 | cmpwi 0,%0,0\n\ | 24 | cmpwi 0,%0,0\n\ |
45 | bne+ 2b\n\ | 25 | bne+ 2b\n\ |
@@ -55,21 +35,13 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
55 | : "cr0", "memory"); | 35 | : "cr0", "memory"); |
56 | } | 36 | } |
57 | 37 | ||
58 | static inline void _raw_spin_unlock(spinlock_t *lock) | 38 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
59 | { | 39 | { |
60 | __asm__ __volatile__("eieio # spin_unlock": : :"memory"); | 40 | __asm__ __volatile__("eieio # __raw_spin_unlock": : :"memory"); |
61 | lock->lock = 0; | 41 | lock->lock = 0; |
62 | } | 42 | } |
63 | 43 | ||
64 | #define _raw_spin_trylock(l) (!test_and_set_bit(0,&(l)->lock)) | 44 | #define __raw_spin_trylock(l) (!test_and_set_bit(0,&(l)->lock)) |
65 | |||
66 | #else | ||
67 | |||
68 | extern void _raw_spin_lock(spinlock_t *lock); | ||
69 | extern void _raw_spin_unlock(spinlock_t *lock); | ||
70 | extern int _raw_spin_trylock(spinlock_t *lock); | ||
71 | |||
72 | #endif | ||
73 | 45 | ||
74 | /* | 46 | /* |
75 | * Read-write spinlocks, allowing multiple readers | 47 | * Read-write spinlocks, allowing multiple readers |
@@ -81,22 +53,11 @@ extern int _raw_spin_trylock(spinlock_t *lock); | |||
81 | * irq-safe write-lock, but readers can get non-irqsafe | 53 | * irq-safe write-lock, but readers can get non-irqsafe |
82 | * read-locks. | 54 | * read-locks. |
83 | */ | 55 | */ |
84 | typedef struct { | ||
85 | volatile signed int lock; | ||
86 | #ifdef CONFIG_PREEMPT | ||
87 | unsigned int break_lock; | ||
88 | #endif | ||
89 | } rwlock_t; | ||
90 | 56 | ||
91 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | 57 | #define __raw_read_can_lock(rw) ((rw)->lock >= 0) |
92 | #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0) | 58 | #define __raw_write_can_lock(rw) (!(rw)->lock) |
93 | 59 | ||
94 | #define read_can_lock(rw) ((rw)->lock >= 0) | 60 | static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) |
95 | #define write_can_lock(rw) (!(rw)->lock) | ||
96 | |||
97 | #ifndef CONFIG_DEBUG_SPINLOCK | ||
98 | |||
99 | static __inline__ int _raw_read_trylock(rwlock_t *rw) | ||
100 | { | 61 | { |
101 | signed int tmp; | 62 | signed int tmp; |
102 | 63 | ||
@@ -116,7 +77,7 @@ static __inline__ int _raw_read_trylock(rwlock_t *rw) | |||
116 | return tmp > 0; | 77 | return tmp > 0; |
117 | } | 78 | } |
118 | 79 | ||
119 | static __inline__ void _raw_read_lock(rwlock_t *rw) | 80 | static __inline__ void __raw_read_lock(raw_rwlock_t *rw) |
120 | { | 81 | { |
121 | signed int tmp; | 82 | signed int tmp; |
122 | 83 | ||
@@ -137,7 +98,7 @@ static __inline__ void _raw_read_lock(rwlock_t *rw) | |||
137 | : "cr0", "memory"); | 98 | : "cr0", "memory"); |
138 | } | 99 | } |
139 | 100 | ||
140 | static __inline__ void _raw_read_unlock(rwlock_t *rw) | 101 | static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) |
141 | { | 102 | { |
142 | signed int tmp; | 103 | signed int tmp; |
143 | 104 | ||
@@ -153,7 +114,7 @@ static __inline__ void _raw_read_unlock(rwlock_t *rw) | |||
153 | : "cr0", "memory"); | 114 | : "cr0", "memory"); |
154 | } | 115 | } |
155 | 116 | ||
156 | static __inline__ int _raw_write_trylock(rwlock_t *rw) | 117 | static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) |
157 | { | 118 | { |
158 | signed int tmp; | 119 | signed int tmp; |
159 | 120 | ||
@@ -173,7 +134,7 @@ static __inline__ int _raw_write_trylock(rwlock_t *rw) | |||
173 | return tmp == 0; | 134 | return tmp == 0; |
174 | } | 135 | } |
175 | 136 | ||
176 | static __inline__ void _raw_write_lock(rwlock_t *rw) | 137 | static __inline__ void __raw_write_lock(raw_rwlock_t *rw) |
177 | { | 138 | { |
178 | signed int tmp; | 139 | signed int tmp; |
179 | 140 | ||
@@ -194,22 +155,10 @@ static __inline__ void _raw_write_lock(rwlock_t *rw) | |||
194 | : "cr0", "memory"); | 155 | : "cr0", "memory"); |
195 | } | 156 | } |
196 | 157 | ||
197 | static __inline__ void _raw_write_unlock(rwlock_t *rw) | 158 | static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) |
198 | { | 159 | { |
199 | __asm__ __volatile__("eieio # write_unlock": : :"memory"); | 160 | __asm__ __volatile__("eieio # write_unlock": : :"memory"); |
200 | rw->lock = 0; | 161 | rw->lock = 0; |
201 | } | 162 | } |
202 | 163 | ||
203 | #else | ||
204 | |||
205 | extern void _raw_read_lock(rwlock_t *rw); | ||
206 | extern void _raw_read_unlock(rwlock_t *rw); | ||
207 | extern void _raw_write_lock(rwlock_t *rw); | ||
208 | extern void _raw_write_unlock(rwlock_t *rw); | ||
209 | extern int _raw_read_trylock(rwlock_t *rw); | ||
210 | extern int _raw_write_trylock(rwlock_t *rw); | ||
211 | |||
212 | #endif | ||
213 | |||
214 | #endif /* __ASM_SPINLOCK_H */ | 164 | #endif /* __ASM_SPINLOCK_H */ |
215 | #endif /* __KERNEL__ */ | ||
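The shape of the lwarx/stwcx. loop above -- spin on plain loads while the lock is held, and only then retry the store-conditional -- avoids hammering the lock cacheline with failed reservations. A C-level sketch of the same acquire loop, using a GCC builtin in place of the hand-written sequence (illustrative only):

	static inline void spin_lock_sketch(volatile unsigned long *lock)
	{
		for (;;) {
			while (*lock)		/* read-only spin while held */
				;
			/* returns the previous value, so 0 means we got it;
			 * the builtin implies acquire ordering, the job the
			 * isync does in the real asm */
			if (__sync_lock_test_and_set(lock, 1) == 0)
				break;
		}
	}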
diff --git a/include/asm-ppc/spinlock_types.h b/include/asm-ppc/spinlock_types.h new file mode 100644 index 000000000000..7919ccc75b8a --- /dev/null +++ b/include/asm-ppc/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
2 | #define __ASM_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned long lock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile signed int lock; | ||
16 | } raw_rwlock_t; | ||
17 | |||
18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
19 | |||
20 | #endif | ||
diff --git a/include/asm-ppc64/spinlock.h b/include/asm-ppc64/spinlock.h index acd11564dd75..14cb895bb607 100644 --- a/include/asm-ppc64/spinlock.h +++ b/include/asm-ppc64/spinlock.h | |||
@@ -15,36 +15,42 @@ | |||
15 | * modify it under the terms of the GNU General Public License | 15 | * modify it under the terms of the GNU General Public License |
16 | * as published by the Free Software Foundation; either version | 16 | * as published by the Free Software Foundation; either version |
17 | * 2 of the License, or (at your option) any later version. | 17 | * 2 of the License, or (at your option) any later version. |
18 | * | ||
19 | * (the type definitions are in asm/spinlock_types.h) | ||
18 | */ | 20 | */ |
19 | #include <linux/config.h> | 21 | #include <linux/config.h> |
20 | #include <asm/paca.h> | 22 | #include <asm/paca.h> |
21 | #include <asm/hvcall.h> | 23 | #include <asm/hvcall.h> |
22 | #include <asm/iSeries/HvCall.h> | 24 | #include <asm/iSeries/HvCall.h> |
23 | 25 | ||
24 | typedef struct { | 26 | #define __raw_spin_is_locked(x) ((x)->slock != 0) |
25 | volatile unsigned int lock; | ||
26 | #ifdef CONFIG_PREEMPT | ||
27 | unsigned int break_lock; | ||
28 | #endif | ||
29 | } spinlock_t; | ||
30 | 27 | ||
31 | typedef struct { | 28 | /* |
32 | volatile signed int lock; | 29 | * This returns the old value in the lock, so we succeeded |
33 | #ifdef CONFIG_PREEMPT | 30 | * in getting the lock if the return value is 0. |
34 | unsigned int break_lock; | 31 | */ |
35 | #endif | 32 | static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock) |
36 | } rwlock_t; | 33 | { |
34 | unsigned long tmp, tmp2; | ||
37 | 35 | ||
38 | #ifdef __KERNEL__ | 36 | __asm__ __volatile__( |
39 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | 37 | " lwz %1,%3(13) # __spin_trylock\n\ |
38 | 1: lwarx %0,0,%2\n\ | ||
39 | cmpwi 0,%0,0\n\ | ||
40 | bne- 2f\n\ | ||
41 | stwcx. %1,0,%2\n\ | ||
42 | bne- 1b\n\ | ||
43 | isync\n\ | ||
44 | 2:" : "=&r" (tmp), "=&r" (tmp2) | ||
45 | : "r" (&lock->slock), "i" (offsetof(struct paca_struct, lock_token)) | ||
46 | : "cr0", "memory"); | ||
40 | 47 | ||
41 | #define spin_is_locked(x) ((x)->lock != 0) | 48 | return tmp; |
42 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | 49 | } |
43 | 50 | ||
44 | static __inline__ void _raw_spin_unlock(spinlock_t *lock) | 51 | static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock) |
45 | { | 52 | { |
46 | __asm__ __volatile__("lwsync # spin_unlock": : :"memory"); | 53 | return __spin_trylock(lock) == 0; |
47 | lock->lock = 0; | ||
48 | } | 54 | } |
49 | 55 | ||
50 | /* | 56 | /* |
@@ -64,44 +70,15 @@ static __inline__ void _raw_spin_unlock(spinlock_t *lock) | |||
64 | #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) | 70 | #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) |
65 | /* We only yield to the hypervisor if we are in shared processor mode */ | 71 | /* We only yield to the hypervisor if we are in shared processor mode */ |
66 | #define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc) | 72 | #define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc) |
67 | extern void __spin_yield(spinlock_t *lock); | 73 | extern void __spin_yield(raw_spinlock_t *lock); |
68 | extern void __rw_yield(rwlock_t *lock); | 74 | extern void __rw_yield(raw_rwlock_t *lock); |
69 | #else /* SPLPAR || ISERIES */ | 75 | #else /* SPLPAR || ISERIES */ |
70 | #define __spin_yield(x) barrier() | 76 | #define __spin_yield(x) barrier() |
71 | #define __rw_yield(x) barrier() | 77 | #define __rw_yield(x) barrier() |
72 | #define SHARED_PROCESSOR 0 | 78 | #define SHARED_PROCESSOR 0 |
73 | #endif | 79 | #endif |
74 | extern void spin_unlock_wait(spinlock_t *lock); | ||
75 | |||
76 | /* | ||
77 | * This returns the old value in the lock, so we succeeded | ||
78 | * in getting the lock if the return value is 0. | ||
79 | */ | ||
80 | static __inline__ unsigned long __spin_trylock(spinlock_t *lock) | ||
81 | { | ||
82 | unsigned long tmp, tmp2; | ||
83 | |||
84 | __asm__ __volatile__( | ||
85 | " lwz %1,%3(13) # __spin_trylock\n\ | ||
86 | 1: lwarx %0,0,%2\n\ | ||
87 | cmpwi 0,%0,0\n\ | ||
88 | bne- 2f\n\ | ||
89 | stwcx. %1,0,%2\n\ | ||
90 | bne- 1b\n\ | ||
91 | isync\n\ | ||
92 | 2:" : "=&r" (tmp), "=&r" (tmp2) | ||
93 | : "r" (&lock->lock), "i" (offsetof(struct paca_struct, lock_token)) | ||
94 | : "cr0", "memory"); | ||
95 | |||
96 | return tmp; | ||
97 | } | ||
98 | |||
99 | static int __inline__ _raw_spin_trylock(spinlock_t *lock) | ||
100 | { | ||
101 | return __spin_trylock(lock) == 0; | ||
102 | } | ||
103 | 80 | ||
104 | static void __inline__ _raw_spin_lock(spinlock_t *lock) | 81 | static void __inline__ __raw_spin_lock(raw_spinlock_t *lock) |
105 | { | 82 | { |
106 | while (1) { | 83 | while (1) { |
107 | if (likely(__spin_trylock(lock) == 0)) | 84 | if (likely(__spin_trylock(lock) == 0)) |
@@ -110,12 +87,12 @@ static void __inline__ _raw_spin_lock(spinlock_t *lock) | |||
110 | HMT_low(); | 87 | HMT_low(); |
111 | if (SHARED_PROCESSOR) | 88 | if (SHARED_PROCESSOR) |
112 | __spin_yield(lock); | 89 | __spin_yield(lock); |
113 | } while (unlikely(lock->lock != 0)); | 90 | } while (unlikely(lock->slock != 0)); |
114 | HMT_medium(); | 91 | HMT_medium(); |
115 | } | 92 | } |
116 | } | 93 | } |
117 | 94 | ||
118 | static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags) | 95 | static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) |
119 | { | 96 | { |
120 | unsigned long flags_dis; | 97 | unsigned long flags_dis; |
121 | 98 | ||
@@ -128,12 +105,20 @@ static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flag | |||
128 | HMT_low(); | 105 | HMT_low(); |
129 | if (SHARED_PROCESSOR) | 106 | if (SHARED_PROCESSOR) |
130 | __spin_yield(lock); | 107 | __spin_yield(lock); |
131 | } while (unlikely(lock->lock != 0)); | 108 | } while (unlikely(lock->slock != 0)); |
132 | HMT_medium(); | 109 | HMT_medium(); |
133 | local_irq_restore(flags_dis); | 110 | local_irq_restore(flags_dis); |
134 | } | 111 | } |
135 | } | 112 | } |
136 | 113 | ||
114 | static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock) | ||
115 | { | ||
116 | __asm__ __volatile__("lwsync # __raw_spin_unlock": : :"memory"); | ||
117 | lock->slock = 0; | ||
118 | } | ||
119 | |||
120 | extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); | ||
121 | |||
137 | /* | 122 | /* |
138 | * Read-write spinlocks, allowing multiple readers | 123 | * Read-write spinlocks, allowing multiple readers |
139 | * but only one writer. | 124 | * but only one writer. |
@@ -144,24 +129,15 @@ static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flag | |||
144 | * irq-safe write-lock, but readers can get non-irqsafe | 129 | * irq-safe write-lock, but readers can get non-irqsafe |
145 | * read-locks. | 130 | * read-locks. |
146 | */ | 131 | */ |
147 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | ||
148 | 132 | ||
149 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | 133 | #define __raw_read_can_lock(rw) ((rw)->lock >= 0) |
150 | 134 | #define __raw_write_can_lock(rw) (!(rw)->lock) | |
151 | #define read_can_lock(rw) ((rw)->lock >= 0) | ||
152 | #define write_can_lock(rw) (!(rw)->lock) | ||
153 | |||
154 | static __inline__ void _raw_write_unlock(rwlock_t *rw) | ||
155 | { | ||
156 | __asm__ __volatile__("lwsync # write_unlock": : :"memory"); | ||
157 | rw->lock = 0; | ||
158 | } | ||
159 | 135 | ||
160 | /* | 136 | /* |
161 | * This returns the old value in the lock + 1, | 137 | * This returns the old value in the lock + 1, |
162 | * so we got a read lock if the return value is > 0. | 138 | * so we got a read lock if the return value is > 0. |
163 | */ | 139 | */ |
164 | static long __inline__ __read_trylock(rwlock_t *rw) | 140 | static long __inline__ __read_trylock(raw_rwlock_t *rw) |
165 | { | 141 | { |
166 | long tmp; | 142 | long tmp; |
167 | 143 | ||
@@ -180,45 +156,11 @@ static long __inline__ __read_trylock(rwlock_t *rw) | |||
180 | return tmp; | 156 | return tmp; |
181 | } | 157 | } |
182 | 158 | ||
183 | static int __inline__ _raw_read_trylock(rwlock_t *rw) | ||
184 | { | ||
185 | return __read_trylock(rw) > 0; | ||
186 | } | ||
187 | |||
188 | static void __inline__ _raw_read_lock(rwlock_t *rw) | ||
189 | { | ||
190 | while (1) { | ||
191 | if (likely(__read_trylock(rw) > 0)) | ||
192 | break; | ||
193 | do { | ||
194 | HMT_low(); | ||
195 | if (SHARED_PROCESSOR) | ||
196 | __rw_yield(rw); | ||
197 | } while (unlikely(rw->lock < 0)); | ||
198 | HMT_medium(); | ||
199 | } | ||
200 | } | ||
201 | |||
202 | static void __inline__ _raw_read_unlock(rwlock_t *rw) | ||
203 | { | ||
204 | long tmp; | ||
205 | |||
206 | __asm__ __volatile__( | ||
207 | "eieio # read_unlock\n\ | ||
208 | 1: lwarx %0,0,%1\n\ | ||
209 | addic %0,%0,-1\n\ | ||
210 | stwcx. %0,0,%1\n\ | ||
211 | bne- 1b" | ||
212 | : "=&r"(tmp) | ||
213 | : "r"(&rw->lock) | ||
214 | : "cr0", "memory"); | ||
215 | } | ||
216 | |||
217 | /* | 159 | /* |
218 | * This returns the old value in the lock, | 160 | * This returns the old value in the lock, |
219 | * so we got the write lock if the return value is 0. | 161 | * so we got the write lock if the return value is 0. |
220 | */ | 162 | */ |
221 | static __inline__ long __write_trylock(rwlock_t *rw) | 163 | static __inline__ long __write_trylock(raw_rwlock_t *rw) |
222 | { | 164 | { |
223 | long tmp, tmp2; | 165 | long tmp, tmp2; |
224 | 166 | ||
@@ -237,12 +179,21 @@ static __inline__ long __write_trylock(rwlock_t *rw) | |||
237 | return tmp; | 179 | return tmp; |
238 | } | 180 | } |
239 | 181 | ||
240 | static int __inline__ _raw_write_trylock(rwlock_t *rw) | 182 | static void __inline__ __raw_read_lock(raw_rwlock_t *rw) |
241 | { | 183 | { |
242 | return __write_trylock(rw) == 0; | 184 | while (1) { |
185 | if (likely(__read_trylock(rw) > 0)) | ||
186 | break; | ||
187 | do { | ||
188 | HMT_low(); | ||
189 | if (SHARED_PROCESSOR) | ||
190 | __rw_yield(rw); | ||
191 | } while (unlikely(rw->lock < 0)); | ||
192 | HMT_medium(); | ||
193 | } | ||
243 | } | 194 | } |
244 | 195 | ||
245 | static void __inline__ _raw_write_lock(rwlock_t *rw) | 196 | static void __inline__ __raw_write_lock(raw_rwlock_t *rw) |
246 | { | 197 | { |
247 | while (1) { | 198 | while (1) { |
248 | if (likely(__write_trylock(rw) == 0)) | 199 | if (likely(__write_trylock(rw) == 0)) |
@@ -256,5 +207,35 @@ static void __inline__ _raw_write_lock(rwlock_t *rw) | |||
256 | } | 207 | } |
257 | } | 208 | } |
258 | 209 | ||
259 | #endif /* __KERNEL__ */ | 210 | static int __inline__ __raw_read_trylock(raw_rwlock_t *rw) |
211 | { | ||
212 | return __read_trylock(rw) > 0; | ||
213 | } | ||
214 | |||
215 | static int __inline__ __raw_write_trylock(raw_rwlock_t *rw) | ||
216 | { | ||
217 | return __write_trylock(rw) == 0; | ||
218 | } | ||
219 | |||
220 | static void __inline__ __raw_read_unlock(raw_rwlock_t *rw) | ||
221 | { | ||
222 | long tmp; | ||
223 | |||
224 | __asm__ __volatile__( | ||
225 | "eieio # read_unlock\n\ | ||
226 | 1: lwarx %0,0,%1\n\ | ||
227 | addic %0,%0,-1\n\ | ||
228 | stwcx. %0,0,%1\n\ | ||
229 | bne- 1b" | ||
230 | : "=&r"(tmp) | ||
231 | : "r"(&rw->lock) | ||
232 | : "cr0", "memory"); | ||
233 | } | ||
234 | |||
235 | static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) | ||
236 | { | ||
237 | __asm__ __volatile__("lwsync # write_unlock": : :"memory"); | ||
238 | rw->lock = 0; | ||
239 | } | ||
240 | |||
260 | #endif /* __ASM_SPINLOCK_H */ | 241 | #endif /* __ASM_SPINLOCK_H */ |
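What distinguishes the ppc64 variant is the lock token: __spin_trylock() stores a value loaded from the paca (the per-CPU area that r13 always points at) rather than a bare 1, so a contended lock's value identifies its holder and __spin_yield() can ask the hypervisor to run that virtual CPU. An illustrative C equivalent, with the hypothetical my_lock_token() standing in for the lwz %1,%3(13):

	static inline int spin_trylock_sketch(raw_spinlock_t *lock)
	{
		unsigned int token = my_lock_token();	/* non-zero, per-CPU */

		/* succeed only if ->slock was 0; the real asm follows up
		 * with isync for acquire ordering */
		return __sync_val_compare_and_swap(&lock->slock, 0, token) == 0;
	}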
diff --git a/include/asm-ppc64/spinlock_types.h b/include/asm-ppc64/spinlock_types.h new file mode 100644 index 000000000000..a37c8eabb9f2 --- /dev/null +++ b/include/asm-ppc64/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
2 | #define __ASM_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int slock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile signed int lock; | ||
16 | } raw_rwlock_t; | ||
17 | |||
18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
19 | |||
20 | #endif | ||
diff --git a/include/asm-s390/spinlock.h b/include/asm-s390/spinlock.h index 321b23bba1ec..273dbecf8ace 100644 --- a/include/asm-s390/spinlock.h +++ b/include/asm-s390/spinlock.h | |||
@@ -27,25 +27,19 @@ _raw_compare_and_swap(volatile unsigned int *lock, | |||
27 | * on the local processor, one does not. | 27 | * on the local processor, one does not. |
28 | * | 28 | * |
29 | * We make no fairness assumptions. They have a cost. | 29 | * We make no fairness assumptions. They have a cost. |
30 | * | ||
31 | * (the type definitions are in asm/spinlock_types.h) | ||
30 | */ | 32 | */ |
31 | 33 | ||
32 | typedef struct { | 34 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
33 | volatile unsigned int lock; | 35 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
34 | #ifdef CONFIG_PREEMPT | 36 | #define __raw_spin_unlock_wait(lock) \ |
35 | unsigned int break_lock; | 37 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) |
36 | #endif | ||
37 | } __attribute__ ((aligned (4))) spinlock_t; | ||
38 | |||
39 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | ||
40 | #define spin_lock_init(lp) do { (lp)->lock = 0; } while(0) | ||
41 | #define spin_unlock_wait(lp) do { barrier(); } while(((volatile spinlock_t *)(lp))->lock) | ||
42 | #define spin_is_locked(x) ((x)->lock != 0) | ||
43 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
44 | 38 | ||
45 | extern void _raw_spin_lock_wait(spinlock_t *lp, unsigned int pc); | 39 | extern void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc); |
46 | extern int _raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc); | 40 | extern int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc); |
47 | 41 | ||
48 | static inline void _raw_spin_lock(spinlock_t *lp) | 42 | static inline void __raw_spin_lock(raw_spinlock_t *lp) |
49 | { | 43 | { |
50 | unsigned long pc = 1 | (unsigned long) __builtin_return_address(0); | 44 | unsigned long pc = 1 | (unsigned long) __builtin_return_address(0); |
51 | 45 | ||
@@ -53,7 +47,7 @@ static inline void _raw_spin_lock(spinlock_t *lp) | |||
53 | _raw_spin_lock_wait(lp, pc); | 47 | _raw_spin_lock_wait(lp, pc); |
54 | } | 48 | } |
55 | 49 | ||
56 | static inline int _raw_spin_trylock(spinlock_t *lp) | 50 | static inline int __raw_spin_trylock(raw_spinlock_t *lp) |
57 | { | 51 | { |
58 | unsigned long pc = 1 | (unsigned long) __builtin_return_address(0); | 52 | unsigned long pc = 1 | (unsigned long) __builtin_return_address(0); |
59 | 53 | ||
@@ -62,7 +56,7 @@ static inline int _raw_spin_trylock(spinlock_t *lp) | |||
62 | return _raw_spin_trylock_retry(lp, pc); | 56 | return _raw_spin_trylock_retry(lp, pc); |
63 | } | 57 | } |
64 | 58 | ||
65 | static inline void _raw_spin_unlock(spinlock_t *lp) | 59 | static inline void __raw_spin_unlock(raw_spinlock_t *lp) |
66 | { | 60 | { |
67 | _raw_compare_and_swap(&lp->lock, lp->lock, 0); | 61 | _raw_compare_and_swap(&lp->lock, lp->lock, 0); |
68 | } | 62 | } |
@@ -77,36 +71,25 @@ static inline void _raw_spin_unlock(spinlock_t *lp) | |||
77 | * irq-safe write-lock, but readers can get non-irqsafe | 71 | * irq-safe write-lock, but readers can get non-irqsafe |
78 | * read-locks. | 72 | * read-locks. |
79 | */ | 73 | */ |
80 | typedef struct { | ||
81 | volatile unsigned int lock; | ||
82 | volatile unsigned long owner_pc; | ||
83 | #ifdef CONFIG_PREEMPT | ||
84 | unsigned int break_lock; | ||
85 | #endif | ||
86 | } rwlock_t; | ||
87 | |||
88 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 } | ||
89 | |||
90 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
91 | 74 | ||
92 | /** | 75 | /** |
93 | * read_can_lock - would read_trylock() succeed? | 76 | * read_can_lock - would read_trylock() succeed? |
94 | * @lock: the rwlock in question. | 77 | * @lock: the rwlock in question. |
95 | */ | 78 | */ |
96 | #define read_can_lock(x) ((int)(x)->lock >= 0) | 79 | #define __raw_read_can_lock(x) ((int)(x)->lock >= 0) |
97 | 80 | ||
98 | /** | 81 | /** |
99 | * write_can_lock - would write_trylock() succeed? | 82 | * write_can_lock - would write_trylock() succeed? |
100 | * @lock: the rwlock in question. | 83 | * @lock: the rwlock in question. |
101 | */ | 84 | */ |
102 | #define write_can_lock(x) ((x)->lock == 0) | 85 | #define __raw_write_can_lock(x) ((x)->lock == 0) |
103 | 86 | ||
104 | extern void _raw_read_lock_wait(rwlock_t *lp); | 87 | extern void _raw_read_lock_wait(raw_rwlock_t *lp); |
105 | extern int _raw_read_trylock_retry(rwlock_t *lp); | 88 | extern int _raw_read_trylock_retry(raw_rwlock_t *lp); |
106 | extern void _raw_write_lock_wait(rwlock_t *lp); | 89 | extern void _raw_write_lock_wait(raw_rwlock_t *lp); |
107 | extern int _raw_write_trylock_retry(rwlock_t *lp); | 90 | extern int _raw_write_trylock_retry(raw_rwlock_t *lp); |
108 | 91 | ||
109 | static inline void _raw_read_lock(rwlock_t *rw) | 92 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
110 | { | 93 | { |
111 | unsigned int old; | 94 | unsigned int old; |
112 | old = rw->lock & 0x7fffffffU; | 95 | old = rw->lock & 0x7fffffffU; |
@@ -114,7 +97,7 @@ static inline void _raw_read_lock(rwlock_t *rw) | |||
114 | _raw_read_lock_wait(rw); | 97 | _raw_read_lock_wait(rw); |
115 | } | 98 | } |
116 | 99 | ||
117 | static inline void _raw_read_unlock(rwlock_t *rw) | 100 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
118 | { | 101 | { |
119 | unsigned int old, cmp; | 102 | unsigned int old, cmp; |
120 | 103 | ||
@@ -125,18 +108,18 @@ static inline void _raw_read_unlock(rwlock_t *rw) | |||
125 | } while (cmp != old); | 108 | } while (cmp != old); |
126 | } | 109 | } |
127 | 110 | ||
128 | static inline void _raw_write_lock(rwlock_t *rw) | 111 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
129 | { | 112 | { |
130 | if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) | 113 | if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) |
131 | _raw_write_lock_wait(rw); | 114 | _raw_write_lock_wait(rw); |
132 | } | 115 | } |
133 | 116 | ||
134 | static inline void _raw_write_unlock(rwlock_t *rw) | 117 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
135 | { | 118 | { |
136 | _raw_compare_and_swap(&rw->lock, 0x80000000, 0); | 119 | _raw_compare_and_swap(&rw->lock, 0x80000000, 0); |
137 | } | 120 | } |
138 | 121 | ||
139 | static inline int _raw_read_trylock(rwlock_t *rw) | 122 | static inline int __raw_read_trylock(raw_rwlock_t *rw) |
140 | { | 123 | { |
141 | unsigned int old; | 124 | unsigned int old; |
142 | old = rw->lock & 0x7fffffffU; | 125 | old = rw->lock & 0x7fffffffU; |
@@ -145,7 +128,7 @@ static inline int _raw_read_trylock(rwlock_t *rw) | |||
145 | return _raw_read_trylock_retry(rw); | 128 | return _raw_read_trylock_retry(rw); |
146 | } | 129 | } |
147 | 130 | ||
148 | static inline int _raw_write_trylock(rwlock_t *rw) | 131 | static inline int __raw_write_trylock(raw_rwlock_t *rw) |
149 | { | 132 | { |
150 | if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) | 133 | if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) |
151 | return 1; | 134 | return 1; |
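The s390 rwlock packs everything into a single word: bit 31 is the writer bit and bits 0-30 count readers. A reader CAS-es n to n+1 with the writer bit masked out of the expected value, so the CAS fails (and the _retry slow path is taken) whenever a writer is in; a writer needs the whole word to be zero. The fast paths above, restated in plain C with a CAS that returns the old value:

	static inline int read_trylock_sketch(volatile unsigned int *lock)
	{
		unsigned int old = *lock & 0x7fffffffU;	/* expect no writer */
		return __sync_val_compare_and_swap(lock, old, old + 1) == old;
	}

	static inline int write_trylock_sketch(volatile unsigned int *lock)
	{
		return __sync_val_compare_and_swap(lock, 0, 0x80000000U) == 0;
	}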
diff --git a/include/asm-s390/spinlock_types.h b/include/asm-s390/spinlock_types.h new file mode 100644 index 000000000000..f79a2216204f --- /dev/null +++ b/include/asm-s390/spinlock_types.h | |||
@@ -0,0 +1,21 @@ | |||
1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
2 | #define __ASM_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int lock; | ||
10 | } __attribute__ ((aligned (4))) raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int lock; | ||
16 | volatile unsigned int owner_pc; | ||
17 | } raw_rwlock_t; | ||
18 | |||
19 | #define __RAW_RW_LOCK_UNLOCKED { 0, 0 } | ||
20 | |||
21 | #endif | ||
diff --git a/include/asm-sh/spinlock.h b/include/asm-sh/spinlock.h index e770b55649eb..846322d4c35d 100644 --- a/include/asm-sh/spinlock.h +++ b/include/asm-sh/spinlock.h | |||
@@ -15,20 +15,11 @@ | |||
15 | /* | 15 | /* |
16 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 16 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
17 | */ | 17 | */ |
18 | typedef struct { | ||
19 | volatile unsigned long lock; | ||
20 | #ifdef CONFIG_PREEMPT | ||
21 | unsigned int break_lock; | ||
22 | #endif | ||
23 | } spinlock_t; | ||
24 | 18 | ||
25 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | 19 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
26 | 20 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | |
27 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | 21 | #define __raw_spin_unlock_wait(x) \ |
28 | 22 | do { cpu_relax(); } while (__raw_spin_is_locked(x)) | |
29 | #define spin_is_locked(x) ((x)->lock != 0) | ||
30 | #define spin_unlock_wait(x) do { barrier(); } while (spin_is_locked(x)) | ||
31 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
32 | 23 | ||
33 | /* | 24 | /* |
34 | * Simple spin lock operations. There are two variants, one clears IRQ's | 25 | * Simple spin lock operations. There are two variants, one clears IRQ's |
@@ -36,7 +27,7 @@ typedef struct { | |||
36 | * | 27 | * |
37 | * We make no fairness assumptions. They have a cost. | 28 | * We make no fairness assumptions. They have a cost. |
38 | */ | 29 | */ |
39 | static inline void _raw_spin_lock(spinlock_t *lock) | 30 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
40 | { | 31 | { |
41 | __asm__ __volatile__ ( | 32 | __asm__ __volatile__ ( |
42 | "1:\n\t" | 33 | "1:\n\t" |
@@ -49,14 +40,14 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
49 | ); | 40 | ); |
50 | } | 41 | } |
51 | 42 | ||
52 | static inline void _raw_spin_unlock(spinlock_t *lock) | 43 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
53 | { | 44 | { |
54 | assert_spin_locked(lock); | 45 | assert_spin_locked(lock); |
55 | 46 | ||
56 | lock->lock = 0; | 47 | lock->lock = 0; |
57 | } | 48 | } |
58 | 49 | ||
59 | #define _raw_spin_trylock(x) (!test_and_set_bit(0, &(x)->lock)) | 50 | #define __raw_spin_trylock(x) (!test_and_set_bit(0, &(x)->lock)) |
60 | 51 | ||
61 | /* | 52 | /* |
62 | * Read-write spinlocks, allowing multiple readers but only one writer. | 53 | * Read-write spinlocks, allowing multiple readers but only one writer. |
@@ -66,51 +57,40 @@ static inline void _raw_spin_unlock(spinlock_t *lock) | |||
66 | * needs to get a irq-safe write-lock, but readers can get non-irqsafe | 57 | * needs to get a irq-safe write-lock, but readers can get non-irqsafe |
67 | * read-locks. | 58 | * read-locks. |
68 | */ | 59 | */ |
69 | typedef struct { | 60 | |
70 | spinlock_t lock; | 61 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
71 | atomic_t counter; | ||
72 | #ifdef CONFIG_PREEMPT | ||
73 | unsigned int break_lock; | ||
74 | #endif | ||
75 | } rwlock_t; | ||
76 | |||
77 | #define RW_LOCK_BIAS 0x01000000 | ||
78 | #define RW_LOCK_UNLOCKED (rwlock_t) { { 0 }, { RW_LOCK_BIAS } } | ||
79 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while (0) | ||
80 | |||
81 | static inline void _raw_read_lock(rwlock_t *rw) | ||
82 | { | 62 | { |
83 | _raw_spin_lock(&rw->lock); | 63 | __raw_spin_lock(&rw->lock); |
84 | 64 | ||
85 | atomic_inc(&rw->counter); | 65 | atomic_inc(&rw->counter); |
86 | 66 | ||
87 | _raw_spin_unlock(&rw->lock); | 67 | __raw_spin_unlock(&rw->lock); |
88 | } | 68 | } |
89 | 69 | ||
90 | static inline void _raw_read_unlock(rwlock_t *rw) | 70 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
91 | { | 71 | { |
92 | _raw_spin_lock(&rw->lock); | 72 | __raw_spin_lock(&rw->lock); |
93 | 73 | ||
94 | atomic_dec(&rw->counter); | 74 | atomic_dec(&rw->counter); |
95 | 75 | ||
96 | _raw_spin_unlock(&rw->lock); | 76 | __raw_spin_unlock(&rw->lock); |
97 | } | 77 | } |
98 | 78 | ||
99 | static inline void _raw_write_lock(rwlock_t *rw) | 79 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
100 | { | 80 | { |
101 | _raw_spin_lock(&rw->lock); | 81 | __raw_spin_lock(&rw->lock); |
102 | atomic_set(&rw->counter, -1); | 82 | atomic_set(&rw->counter, -1); |
103 | } | 83 | } |
104 | 84 | ||
105 | static inline void _raw_write_unlock(rwlock_t *rw) | 85 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
106 | { | 86 | { |
107 | atomic_set(&rw->counter, 0); | 87 | atomic_set(&rw->counter, 0); |
108 | _raw_spin_unlock(&rw->lock); | 88 | __raw_spin_unlock(&rw->lock); |
109 | } | 89 | } |
110 | 90 | ||
111 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 91 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
112 | 92 | ||
113 | static inline int _raw_write_trylock(rwlock_t *rw) | 93 | static inline int __raw_write_trylock(raw_rwlock_t *rw) |
114 | { | 94 | { |
115 | if (atomic_sub_and_test(RW_LOCK_BIAS, &rw->counter)) | 95 | if (atomic_sub_and_test(RW_LOCK_BIAS, &rw->counter)) |
116 | return 1; | 96 | return 1; |
@@ -121,4 +101,3 @@ static inline int _raw_write_trylock(rwlock_t *rw) | |||
121 | } | 101 | } |
122 | 102 | ||
123 | #endif /* __ASM_SH_SPINLOCK_H */ | 103 | #endif /* __ASM_SH_SPINLOCK_H */ |
124 | |||
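The RW_LOCK_BIAS trick in __raw_write_trylock() above: the counter idles at the bias (0x01000000), so subtracting the whole bias lands exactly on zero only when no reader has raised the count; any other result means contention, in which case the failure path (elided by the hunk context) must add the bias back before returning 0. Roughly:

	static inline int write_trylock_sketch(atomic_t *counter)
	{
		if (atomic_sub_and_test(RW_LOCK_BIAS, counter))
			return 1;	/* counter hit zero: write lock ours */
		atomic_add(RW_LOCK_BIAS, counter);	/* restore and fail */
		return 0;
	}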
diff --git a/include/asm-sh/spinlock_types.h b/include/asm-sh/spinlock_types.h new file mode 100644 index 000000000000..8c41b6c3aac8 --- /dev/null +++ b/include/asm-sh/spinlock_types.h | |||
@@ -0,0 +1,22 @@ | |||
1 | #ifndef __ASM_SH_SPINLOCK_TYPES_H | ||
2 | #define __ASM_SH_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned long lock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | raw_spinlock_t lock; | ||
16 | atomic_t counter; | ||
17 | } raw_rwlock_t; | ||
18 | |||
19 | #define RW_LOCK_BIAS 0x01000000 | ||
20 | #define __RAW_RW_LOCK_UNLOCKED { { 0 }, { RW_LOCK_BIAS } } | ||
21 | |||
22 | #endif | ||
diff --git a/include/asm-sparc/spinlock.h b/include/asm-sparc/spinlock.h index 0cbd87ad4912..111727a2bb4e 100644 --- a/include/asm-sparc/spinlock.h +++ b/include/asm-sparc/spinlock.h | |||
@@ -12,96 +12,12 @@ | |||
12 | 12 | ||
13 | #include <asm/psr.h> | 13 | #include <asm/psr.h> |
14 | 14 | ||
15 | #ifdef CONFIG_DEBUG_SPINLOCK | 15 | #define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) |
16 | struct _spinlock_debug { | ||
17 | unsigned char lock; | ||
18 | unsigned long owner_pc; | ||
19 | #ifdef CONFIG_PREEMPT | ||
20 | unsigned int break_lock; | ||
21 | #endif | ||
22 | }; | ||
23 | typedef struct _spinlock_debug spinlock_t; | ||
24 | |||
25 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0 } | ||
26 | #define spin_lock_init(lp) do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0) | ||
27 | #define spin_is_locked(lp) (*((volatile unsigned char *)(&((lp)->lock))) != 0) | ||
28 | #define spin_unlock_wait(lp) do { barrier(); } while(*(volatile unsigned char *)(&(lp)->lock)) | ||
29 | |||
30 | extern void _do_spin_lock(spinlock_t *lock, char *str); | ||
31 | extern int _spin_trylock(spinlock_t *lock); | ||
32 | extern void _do_spin_unlock(spinlock_t *lock); | ||
33 | |||
34 | #define _raw_spin_trylock(lp) _spin_trylock(lp) | ||
35 | #define _raw_spin_lock(lock) _do_spin_lock(lock, "spin_lock") | ||
36 | #define _raw_spin_unlock(lock) _do_spin_unlock(lock) | ||
37 | |||
38 | struct _rwlock_debug { | ||
39 | volatile unsigned int lock; | ||
40 | unsigned long owner_pc; | ||
41 | unsigned long reader_pc[NR_CPUS]; | ||
42 | #ifdef CONFIG_PREEMPT | ||
43 | unsigned int break_lock; | ||
44 | #endif | ||
45 | }; | ||
46 | typedef struct _rwlock_debug rwlock_t; | ||
47 | |||
48 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, {0} } | ||
49 | |||
50 | #define rwlock_init(lp) do { *(lp)= RW_LOCK_UNLOCKED; } while(0) | ||
51 | |||
52 | extern void _do_read_lock(rwlock_t *rw, char *str); | ||
53 | extern void _do_read_unlock(rwlock_t *rw, char *str); | ||
54 | extern void _do_write_lock(rwlock_t *rw, char *str); | ||
55 | extern void _do_write_unlock(rwlock_t *rw); | ||
56 | |||
57 | #define _raw_read_lock(lock) \ | ||
58 | do { unsigned long flags; \ | ||
59 | local_irq_save(flags); \ | ||
60 | _do_read_lock(lock, "read_lock"); \ | ||
61 | local_irq_restore(flags); \ | ||
62 | } while(0) | ||
63 | |||
64 | #define _raw_read_unlock(lock) \ | ||
65 | do { unsigned long flags; \ | ||
66 | local_irq_save(flags); \ | ||
67 | _do_read_unlock(lock, "read_unlock"); \ | ||
68 | local_irq_restore(flags); \ | ||
69 | } while(0) | ||
70 | |||
71 | #define _raw_write_lock(lock) \ | ||
72 | do { unsigned long flags; \ | ||
73 | local_irq_save(flags); \ | ||
74 | _do_write_lock(lock, "write_lock"); \ | ||
75 | local_irq_restore(flags); \ | ||
76 | } while(0) | ||
77 | |||
78 | #define _raw_write_unlock(lock) \ | ||
79 | do { unsigned long flags; \ | ||
80 | local_irq_save(flags); \ | ||
81 | _do_write_unlock(lock); \ | ||
82 | local_irq_restore(flags); \ | ||
83 | } while(0) | ||
84 | |||
85 | #else /* !CONFIG_DEBUG_SPINLOCK */ | ||
86 | |||
87 | typedef struct { | ||
88 | unsigned char lock; | ||
89 | #ifdef CONFIG_PREEMPT | ||
90 | unsigned int break_lock; | ||
91 | #endif | ||
92 | } spinlock_t; | ||
93 | |||
94 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | ||
95 | |||
96 | #define spin_lock_init(lock) (*((unsigned char *)(lock)) = 0) | ||
97 | #define spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) | ||
98 | 16 | ||
99 | #define spin_unlock_wait(lock) \ | 17 | #define __raw_spin_unlock_wait(lock) \ |
100 | do { \ | 18 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) |
101 | barrier(); \ | ||
102 | } while(*((volatile unsigned char *)lock)) | ||
103 | 19 | ||
104 | extern __inline__ void _raw_spin_lock(spinlock_t *lock) | 20 | extern __inline__ void __raw_spin_lock(raw_spinlock_t *lock) |
105 | { | 21 | { |
106 | __asm__ __volatile__( | 22 | __asm__ __volatile__( |
107 | "\n1:\n\t" | 23 | "\n1:\n\t" |
@@ -121,7 +37,7 @@ extern __inline__ void _raw_spin_lock(spinlock_t *lock) | |||
121 | : "g2", "memory", "cc"); | 37 | : "g2", "memory", "cc"); |
122 | } | 38 | } |
123 | 39 | ||
124 | extern __inline__ int _raw_spin_trylock(spinlock_t *lock) | 40 | extern __inline__ int __raw_spin_trylock(raw_spinlock_t *lock) |
125 | { | 41 | { |
126 | unsigned int result; | 42 | unsigned int result; |
127 | __asm__ __volatile__("ldstub [%1], %0" | 43 | __asm__ __volatile__("ldstub [%1], %0" |
@@ -131,7 +47,7 @@ extern __inline__ int _raw_spin_trylock(spinlock_t *lock) | |||
131 | return (result == 0); | 47 | return (result == 0); |
132 | } | 48 | } |
133 | 49 | ||
134 | extern __inline__ void _raw_spin_unlock(spinlock_t *lock) | 50 | extern __inline__ void __raw_spin_unlock(raw_spinlock_t *lock) |
135 | { | 51 | { |
136 | __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); | 52 | __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); |
137 | } | 53 | } |
@@ -147,23 +63,11 @@ extern __inline__ void _raw_spin_unlock(spinlock_t *lock) | |||
147 | * | 63 | * |
148 | * XXX This might create some problems with my dual spinlock | 64 | * XXX This might create some problems with my dual spinlock |
149 | * XXX scheme, deadlocks etc. -DaveM | 65 | * XXX scheme, deadlocks etc. -DaveM |
150 | */ | 66 | * |
151 | typedef struct { | 67 | * Sort of like atomic_t's on Sparc, but even more clever. |
152 | volatile unsigned int lock; | ||
153 | #ifdef CONFIG_PREEMPT | ||
154 | unsigned int break_lock; | ||
155 | #endif | ||
156 | } rwlock_t; | ||
157 | |||
158 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | ||
159 | |||
160 | #define rwlock_init(lp) do { *(lp)= RW_LOCK_UNLOCKED; } while(0) | ||
161 | |||
162 | |||
163 | /* Sort of like atomic_t's on Sparc, but even more clever. | ||
164 | * | 68 | * |
165 | * ------------------------------------ | 69 | * ------------------------------------ |
166 | * | 24-bit counter | wlock | rwlock_t | 70 | * | 24-bit counter | wlock | raw_rwlock_t |
167 | * ------------------------------------ | 71 | * ------------------------------------ |
168 | * 31 8 7 0 | 72 | * 31 8 7 0 |
169 | * | 73 | * |
@@ -174,9 +78,9 @@ typedef struct { | |||
174 | * | 78 | * |
175 | * Unfortunately this scheme limits us to ~16,000,000 cpus. | 79 | * Unfortunately this scheme limits us to ~16,000,000 cpus. |
176 | */ | 80 | */ |
177 | extern __inline__ void _read_lock(rwlock_t *rw) | 81 | extern __inline__ void __read_lock(raw_rwlock_t *rw) |
178 | { | 82 | { |
179 | register rwlock_t *lp asm("g1"); | 83 | register raw_rwlock_t *lp asm("g1"); |
180 | lp = rw; | 84 | lp = rw; |
181 | __asm__ __volatile__( | 85 | __asm__ __volatile__( |
182 | "mov %%o7, %%g4\n\t" | 86 | "mov %%o7, %%g4\n\t" |
@@ -187,16 +91,16 @@ extern __inline__ void _read_lock(rwlock_t *rw) | |||
187 | : "g2", "g4", "memory", "cc"); | 91 | : "g2", "g4", "memory", "cc"); |
188 | } | 92 | } |
189 | 93 | ||
190 | #define _raw_read_lock(lock) \ | 94 | #define __raw_read_lock(lock) \ |
191 | do { unsigned long flags; \ | 95 | do { unsigned long flags; \ |
192 | local_irq_save(flags); \ | 96 | local_irq_save(flags); \ |
193 | _read_lock(lock); \ | 97 | __raw_read_lock(lock); \ |
194 | local_irq_restore(flags); \ | 98 | local_irq_restore(flags); \ |
195 | } while(0) | 99 | } while(0) |
196 | 100 | ||
197 | extern __inline__ void _read_unlock(rwlock_t *rw) | 101 | extern __inline__ void __read_unlock(raw_rwlock_t *rw) |
198 | { | 102 | { |
199 | register rwlock_t *lp asm("g1"); | 103 | register raw_rwlock_t *lp asm("g1"); |
200 | lp = rw; | 104 | lp = rw; |
201 | __asm__ __volatile__( | 105 | __asm__ __volatile__( |
202 | "mov %%o7, %%g4\n\t" | 106 | "mov %%o7, %%g4\n\t" |
@@ -207,16 +111,16 @@ extern __inline__ void _read_unlock(rwlock_t *rw) | |||
207 | : "g2", "g4", "memory", "cc"); | 111 | : "g2", "g4", "memory", "cc"); |
208 | } | 112 | } |
209 | 113 | ||
210 | #define _raw_read_unlock(lock) \ | 114 | #define __raw_read_unlock(lock) \ |
211 | do { unsigned long flags; \ | 115 | do { unsigned long flags; \ |
212 | local_irq_save(flags); \ | 116 | local_irq_save(flags); \ |
213 | _read_unlock(lock); \ | 117 | __raw_read_unlock(lock); \ |
214 | local_irq_restore(flags); \ | 118 | local_irq_restore(flags); \ |
215 | } while(0) | 119 | } while(0) |
216 | 120 | ||
217 | extern __inline__ void _raw_write_lock(rwlock_t *rw) | 121 | extern __inline__ void __raw_write_lock(raw_rwlock_t *rw) |
218 | { | 122 | { |
219 | register rwlock_t *lp asm("g1"); | 123 | register raw_rwlock_t *lp asm("g1"); |
220 | lp = rw; | 124 | lp = rw; |
221 | __asm__ __volatile__( | 125 | __asm__ __volatile__( |
222 | "mov %%o7, %%g4\n\t" | 126 | "mov %%o7, %%g4\n\t" |
@@ -227,11 +131,9 @@ extern __inline__ void _raw_write_lock(rwlock_t *rw) | |||
227 | : "g2", "g4", "memory", "cc"); | 131 | : "g2", "g4", "memory", "cc"); |
228 | } | 132 | } |
229 | 133 | ||
230 | #define _raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) | 134 | #define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) |
231 | |||
232 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
233 | 135 | ||
234 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | 136 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
235 | 137 | ||
236 | #endif /* !(__ASSEMBLY__) */ | 138 | #endif /* !(__ASSEMBLY__) */ |
237 | 139 | ||
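The sparc byte lock above is a classic test-and-set spinlock: "ldstub" atomically loads the lock byte and stores 0xff, so the caller owns the lock exactly when the old byte was zero. A minimal userspace sketch of the same scheme, assuming C11 atomics as a stand-in for the ldstub/ldub instruction pair (all names here are made up for illustration):

    /* Hypothetical C11 analogue of the sparc ldstub byte lock. */
    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct { atomic_uchar lock; } byte_spinlock_t;

    static void byte_spin_lock(byte_spinlock_t *lp)
    {
            /* ldstub: atomically read the byte and store 0xff;
             * the lock is ours when the old value was 0. */
            while (atomic_exchange_explicit(&lp->lock, 0xff,
                                            memory_order_acquire) != 0) {
                    /* spin on plain loads, like the "ldub" wait loop */
                    while (atomic_load_explicit(&lp->lock,
                                                memory_order_relaxed) != 0)
                            ;
            }
    }

    static bool byte_spin_trylock(byte_spinlock_t *lp)
    {
            return atomic_exchange_explicit(&lp->lock, 0xff,
                                            memory_order_acquire) == 0;
    }

    static void byte_spin_unlock(byte_spinlock_t *lp)
    {
            /* "stb %g0, [lock]": a zero store with release ordering */
            atomic_store_explicit(&lp->lock, 0, memory_order_release);
    }

The inner relaxed-load loop mirrors the busywait in the asm: the expensive atomic exchange is retried only after a plain load has seen the byte go back to zero, which keeps bus traffic down under contention.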
diff --git a/include/asm-sparc/spinlock_types.h b/include/asm-sparc/spinlock_types.h new file mode 100644 index 000000000000..0a0fb116c4ec --- /dev/null +++ b/include/asm-sparc/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef __SPARC_SPINLOCK_TYPES_H | ||
2 | #define __SPARC_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | unsigned char lock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int lock; | ||
16 | } raw_rwlock_t; | ||
17 | |||
18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
19 | |||
20 | #endif | ||
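The "#error" guard in these new spinlock_types.h headers enforces the include hierarchy: the arch type header is only reachable through linux/spinlock_types.h, which defines __LINUX_SPINLOCK_TYPES_H before pulling it in. A minimal reproduction of the pattern, with hypothetical header names:

    /* mylib/lock_types.h -- the only sanctioned entry point */
    #ifndef MYLIB_LOCK_TYPES_H
    #define MYLIB_LOCK_TYPES_H
    #include "mylib/arch_lock_types.h"
    #endif

    /* mylib/arch_lock_types.h -- refuses direct inclusion */
    #ifndef MYLIB_ARCH_LOCK_TYPES_H
    #define MYLIB_ARCH_LOCK_TYPES_H
    #ifndef MYLIB_LOCK_TYPES_H
    # error "please don't include this file directly"
    #endif
    typedef struct { unsigned char lock; } arch_lock_t;
    #endif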
diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h index a02c4370eb42..ec85d12d73b9 100644 --- a/include/asm-sparc64/spinlock.h +++ b/include/asm-sparc64/spinlock.h | |||
@@ -29,24 +29,13 @@ | |||
29 | * must be pre-V9 branches. | 29 | * must be pre-V9 branches. |
30 | */ | 30 | */ |
31 | 31 | ||
32 | #ifndef CONFIG_DEBUG_SPINLOCK | 32 | #define __raw_spin_is_locked(lp) ((lp)->lock != 0) |
33 | 33 | ||
34 | typedef struct { | 34 | #define __raw_spin_unlock_wait(lp) \ |
35 | volatile unsigned char lock; | 35 | do { rmb(); \ |
36 | #ifdef CONFIG_PREEMPT | 36 | } while((lp)->lock) |
37 | unsigned int break_lock; | ||
38 | #endif | ||
39 | } spinlock_t; | ||
40 | #define SPIN_LOCK_UNLOCKED (spinlock_t) {0,} | ||
41 | 37 | ||
42 | #define spin_lock_init(lp) do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0) | 38 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
43 | #define spin_is_locked(lp) ((lp)->lock != 0) | ||
44 | |||
45 | #define spin_unlock_wait(lp) \ | ||
46 | do { rmb(); \ | ||
47 | } while((lp)->lock) | ||
48 | |||
49 | static inline void _raw_spin_lock(spinlock_t *lock) | ||
50 | { | 39 | { |
51 | unsigned long tmp; | 40 | unsigned long tmp; |
52 | 41 | ||
@@ -67,7 +56,7 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
67 | : "memory"); | 56 | : "memory"); |
68 | } | 57 | } |
69 | 58 | ||
70 | static inline int _raw_spin_trylock(spinlock_t *lock) | 59 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
71 | { | 60 | { |
72 | unsigned long result; | 61 | unsigned long result; |
73 | 62 | ||
@@ -81,7 +70,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock) | |||
81 | return (result == 0UL); | 70 | return (result == 0UL); |
82 | } | 71 | } |
83 | 72 | ||
84 | static inline void _raw_spin_unlock(spinlock_t *lock) | 73 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
85 | { | 74 | { |
86 | __asm__ __volatile__( | 75 | __asm__ __volatile__( |
87 | " membar #StoreStore | #LoadStore\n" | 76 | " membar #StoreStore | #LoadStore\n" |
@@ -91,7 +80,7 @@ static inline void _raw_spin_unlock(spinlock_t *lock) | |||
91 | : "memory"); | 80 | : "memory"); |
92 | } | 81 | } |
93 | 82 | ||
94 | static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags) | 83 | static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) |
95 | { | 84 | { |
96 | unsigned long tmp1, tmp2; | 85 | unsigned long tmp1, tmp2; |
97 | 86 | ||
@@ -115,51 +104,9 @@ static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags) | |||
115 | : "memory"); | 104 | : "memory"); |
116 | } | 105 | } |
117 | 106 | ||
118 | #else /* !(CONFIG_DEBUG_SPINLOCK) */ | ||
119 | |||
120 | typedef struct { | ||
121 | volatile unsigned char lock; | ||
122 | unsigned int owner_pc, owner_cpu; | ||
123 | #ifdef CONFIG_PREEMPT | ||
124 | unsigned int break_lock; | ||
125 | #endif | ||
126 | } spinlock_t; | ||
127 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0, 0xff } | ||
128 | #define spin_lock_init(lp) do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0) | ||
129 | #define spin_is_locked(__lock) ((__lock)->lock != 0) | ||
130 | #define spin_unlock_wait(__lock) \ | ||
131 | do { \ | ||
132 | rmb(); \ | ||
133 | } while((__lock)->lock) | ||
134 | |||
135 | extern void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller); | ||
136 | extern void _do_spin_unlock(spinlock_t *lock); | ||
137 | extern int _do_spin_trylock(spinlock_t *lock, unsigned long caller); | ||
138 | |||
139 | #define _raw_spin_trylock(lp) \ | ||
140 | _do_spin_trylock(lp, (unsigned long) __builtin_return_address(0)) | ||
141 | #define _raw_spin_lock(lock) \ | ||
142 | _do_spin_lock(lock, "spin_lock", \ | ||
143 | (unsigned long) __builtin_return_address(0)) | ||
144 | #define _raw_spin_unlock(lock) _do_spin_unlock(lock) | ||
145 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
146 | |||
147 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
148 | |||
149 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ | 107 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ |
150 | 108 | ||
151 | #ifndef CONFIG_DEBUG_SPINLOCK | 109 | static void inline __read_lock(raw_rwlock_t *lock) |
152 | |||
153 | typedef struct { | ||
154 | volatile unsigned int lock; | ||
155 | #ifdef CONFIG_PREEMPT | ||
156 | unsigned int break_lock; | ||
157 | #endif | ||
158 | } rwlock_t; | ||
159 | #define RW_LOCK_UNLOCKED (rwlock_t) {0,} | ||
160 | #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0) | ||
161 | |||
162 | static void inline __read_lock(rwlock_t *lock) | ||
163 | { | 110 | { |
164 | unsigned long tmp1, tmp2; | 111 | unsigned long tmp1, tmp2; |
165 | 112 | ||
@@ -184,7 +131,7 @@ static void inline __read_lock(rwlock_t *lock) | |||
184 | : "memory"); | 131 | : "memory"); |
185 | } | 132 | } |
186 | 133 | ||
187 | static void inline __read_unlock(rwlock_t *lock) | 134 | static void inline __read_unlock(raw_rwlock_t *lock) |
188 | { | 135 | { |
189 | unsigned long tmp1, tmp2; | 136 | unsigned long tmp1, tmp2; |
190 | 137 | ||
@@ -201,7 +148,7 @@ static void inline __read_unlock(rwlock_t *lock) | |||
201 | : "memory"); | 148 | : "memory"); |
202 | } | 149 | } |
203 | 150 | ||
204 | static void inline __write_lock(rwlock_t *lock) | 151 | static void inline __write_lock(raw_rwlock_t *lock) |
205 | { | 152 | { |
206 | unsigned long mask, tmp1, tmp2; | 153 | unsigned long mask, tmp1, tmp2; |
207 | 154 | ||
@@ -228,7 +175,7 @@ static void inline __write_lock(rwlock_t *lock) | |||
228 | : "memory"); | 175 | : "memory"); |
229 | } | 176 | } |
230 | 177 | ||
231 | static void inline __write_unlock(rwlock_t *lock) | 178 | static void inline __write_unlock(raw_rwlock_t *lock) |
232 | { | 179 | { |
233 | __asm__ __volatile__( | 180 | __asm__ __volatile__( |
234 | " membar #LoadStore | #StoreStore\n" | 181 | " membar #LoadStore | #StoreStore\n" |
@@ -238,7 +185,7 @@ static void inline __write_unlock(rwlock_t *lock) | |||
238 | : "memory"); | 185 | : "memory"); |
239 | } | 186 | } |
240 | 187 | ||
241 | static int inline __write_trylock(rwlock_t *lock) | 188 | static int inline __write_trylock(raw_rwlock_t *lock) |
242 | { | 189 | { |
243 | unsigned long mask, tmp1, tmp2, result; | 190 | unsigned long mask, tmp1, tmp2, result; |
244 | 191 | ||
@@ -263,78 +210,15 @@ static int inline __write_trylock(rwlock_t *lock) | |||
263 | return result; | 210 | return result; |
264 | } | 211 | } |
265 | 212 | ||
266 | #define _raw_read_lock(p) __read_lock(p) | 213 | #define __raw_read_lock(p) __read_lock(p) |
267 | #define _raw_read_unlock(p) __read_unlock(p) | 214 | #define __raw_read_unlock(p) __read_unlock(p) |
268 | #define _raw_write_lock(p) __write_lock(p) | 215 | #define __raw_write_lock(p) __write_lock(p) |
269 | #define _raw_write_unlock(p) __write_unlock(p) | 216 | #define __raw_write_unlock(p) __write_unlock(p) |
270 | #define _raw_write_trylock(p) __write_trylock(p) | 217 | #define __raw_write_trylock(p) __write_trylock(p) |
271 | 218 | ||
272 | #else /* !(CONFIG_DEBUG_SPINLOCK) */ | 219 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
273 | 220 | #define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) | |
274 | typedef struct { | 221 | #define __raw_write_can_lock(rw) (!(rw)->lock) |
275 | volatile unsigned long lock; | ||
276 | unsigned int writer_pc, writer_cpu; | ||
277 | unsigned int reader_pc[NR_CPUS]; | ||
278 | #ifdef CONFIG_PREEMPT | ||
279 | unsigned int break_lock; | ||
280 | #endif | ||
281 | } rwlock_t; | ||
282 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, 0xff, { } } | ||
283 | #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0) | ||
284 | |||
285 | extern void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller); | ||
286 | extern void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller); | ||
287 | extern void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller); | ||
288 | extern void _do_write_unlock(rwlock_t *rw, unsigned long caller); | ||
289 | extern int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller); | ||
290 | |||
291 | #define _raw_read_lock(lock) \ | ||
292 | do { unsigned long flags; \ | ||
293 | local_irq_save(flags); \ | ||
294 | _do_read_lock(lock, "read_lock", \ | ||
295 | (unsigned long) __builtin_return_address(0)); \ | ||
296 | local_irq_restore(flags); \ | ||
297 | } while(0) | ||
298 | |||
299 | #define _raw_read_unlock(lock) \ | ||
300 | do { unsigned long flags; \ | ||
301 | local_irq_save(flags); \ | ||
302 | _do_read_unlock(lock, "read_unlock", \ | ||
303 | (unsigned long) __builtin_return_address(0)); \ | ||
304 | local_irq_restore(flags); \ | ||
305 | } while(0) | ||
306 | |||
307 | #define _raw_write_lock(lock) \ | ||
308 | do { unsigned long flags; \ | ||
309 | local_irq_save(flags); \ | ||
310 | _do_write_lock(lock, "write_lock", \ | ||
311 | (unsigned long) __builtin_return_address(0)); \ | ||
312 | local_irq_restore(flags); \ | ||
313 | } while(0) | ||
314 | |||
315 | #define _raw_write_unlock(lock) \ | ||
316 | do { unsigned long flags; \ | ||
317 | local_irq_save(flags); \ | ||
318 | _do_write_unlock(lock, \ | ||
319 | (unsigned long) __builtin_return_address(0)); \ | ||
320 | local_irq_restore(flags); \ | ||
321 | } while(0) | ||
322 | |||
323 | #define _raw_write_trylock(lock) \ | ||
324 | ({ unsigned long flags; \ | ||
325 | int val; \ | ||
326 | local_irq_save(flags); \ | ||
327 | val = _do_write_trylock(lock, "write_trylock", \ | ||
328 | (unsigned long) __builtin_return_address(0)); \ | ||
329 | local_irq_restore(flags); \ | ||
330 | val; \ | ||
331 | }) | ||
332 | |||
333 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
334 | |||
335 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | ||
336 | #define read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) | ||
337 | #define write_can_lock(rw) (!(rw)->lock) | ||
338 | 222 | ||
339 | #endif /* !(__ASSEMBLY__) */ | 223 | #endif /* !(__ASSEMBLY__) */ |
340 | 224 | ||
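The new __raw_read_can_lock()/__raw_write_can_lock() macros above expose the sparc64 rwlock encoding directly: the top bit (0x80000000) of the word is the writer flag and the low 31 bits count readers. A userspace sketch of that protocol, with C11 atomics standing in for the cas-based assembly (names invented for illustration):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define WRITER_BIT 0x80000000u

    typedef struct { _Atomic uint32_t lock; } rw_t;

    static void rw_read_lock(rw_t *rw)
    {
            uint32_t old;
            for (;;) {
                    old = atomic_load_explicit(&rw->lock,
                                               memory_order_relaxed);
                    if (old & WRITER_BIT)
                            continue;       /* writer active: spin */
                    /* cas in old+1: one more reader, writer bit clear */
                    if (atomic_compare_exchange_weak_explicit(&rw->lock,
                                    &old, old + 1,
                                    memory_order_acquire,
                                    memory_order_relaxed))
                            return;
            }
    }

    static void rw_read_unlock(rw_t *rw)
    {
            atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_release);
    }

    static bool rw_write_trylock(rw_t *rw)
    {
            uint32_t expected = 0;          /* no readers, no writer */
            return atomic_compare_exchange_strong_explicit(&rw->lock,
                            &expected, WRITER_BIT,
                            memory_order_acquire, memory_order_relaxed);
    }

    static void rw_write_unlock(rw_t *rw)
    {
            atomic_store_explicit(&rw->lock, 0, memory_order_release);
    }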
diff --git a/include/asm-sparc64/spinlock_types.h b/include/asm-sparc64/spinlock_types.h new file mode 100644 index 000000000000..e128112a0d7c --- /dev/null +++ b/include/asm-sparc64/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef __SPARC64_SPINLOCK_TYPES_H | ||
2 | #define __SPARC64_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned char lock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int lock; | ||
16 | } raw_rwlock_t; | ||
17 | |||
18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
19 | |||
20 | #endif | ||
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h index 5aeb57a3baad..69636831ad2f 100644 --- a/include/asm-x86_64/spinlock.h +++ b/include/asm-x86_64/spinlock.h | |||
@@ -6,47 +6,21 @@ | |||
6 | #include <asm/page.h> | 6 | #include <asm/page.h> |
7 | #include <linux/config.h> | 7 | #include <linux/config.h> |
8 | 8 | ||
9 | extern int printk(const char * fmt, ...) | ||
10 | __attribute__ ((format (printf, 1, 2))); | ||
11 | |||
12 | /* | 9 | /* |
13 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 10 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
14 | */ | 11 | * |
15 | |||
16 | typedef struct { | ||
17 | volatile unsigned int lock; | ||
18 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
19 | unsigned magic; | ||
20 | #endif | ||
21 | #ifdef CONFIG_PREEMPT | ||
22 | unsigned int break_lock; | ||
23 | #endif | ||
24 | } spinlock_t; | ||
25 | |||
26 | #define SPINLOCK_MAGIC 0xdead4ead | ||
27 | |||
28 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
29 | #define SPINLOCK_MAGIC_INIT , SPINLOCK_MAGIC | ||
30 | #else | ||
31 | #define SPINLOCK_MAGIC_INIT /* */ | ||
32 | #endif | ||
33 | |||
34 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT } | ||
35 | |||
36 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
37 | |||
38 | /* | ||
39 | * Simple spin lock operations. There are two variants, one clears IRQ's | 12 | * Simple spin lock operations. There are two variants, one clears IRQ's |
40 | * on the local processor, one does not. | 13 | * on the local processor, one does not. |
41 | * | 14 | * |
42 | * We make no fairness assumptions. They have a cost. | 15 | * We make no fairness assumptions. They have a cost. |
16 | * | ||
17 | * (the type definitions are in asm/spinlock_types.h) | ||
43 | */ | 18 | */ |
44 | 19 | ||
45 | #define spin_is_locked(x) (*(volatile signed char *)(&(x)->lock) <= 0) | 20 | #define __raw_spin_is_locked(x) \ |
46 | #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) | 21 | (*(volatile signed char *)(&(x)->slock) <= 0) |
47 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
48 | 22 | ||
49 | #define spin_lock_string \ | 23 | #define __raw_spin_lock_string \ |
50 | "\n1:\t" \ | 24 | "\n1:\t" \ |
51 | "lock ; decb %0\n\t" \ | 25 | "lock ; decb %0\n\t" \ |
52 | "js 2f\n" \ | 26 | "js 2f\n" \ |
@@ -58,74 +32,40 @@ typedef struct { | |||
58 | "jmp 1b\n" \ | 32 | "jmp 1b\n" \ |
59 | LOCK_SECTION_END | 33 | LOCK_SECTION_END |
60 | 34 | ||
61 | /* | 35 | #define __raw_spin_unlock_string \ |
62 | * This works. Despite all the confusion. | ||
63 | * (except on PPro SMP or if we are using OOSTORE) | ||
64 | * (PPro errata 66, 92) | ||
65 | */ | ||
66 | |||
67 | #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) | ||
68 | |||
69 | #define spin_unlock_string \ | ||
70 | "movb $1,%0" \ | 36 | "movb $1,%0" \ |
71 | :"=m" (lock->lock) : : "memory" | 37 | :"=m" (lock->slock) : : "memory" |
72 | |||
73 | |||
74 | static inline void _raw_spin_unlock(spinlock_t *lock) | ||
75 | { | ||
76 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
77 | BUG_ON(lock->magic != SPINLOCK_MAGIC); | ||
78 | assert_spin_locked(lock); | ||
79 | #endif | ||
80 | __asm__ __volatile__( | ||
81 | spin_unlock_string | ||
82 | ); | ||
83 | } | ||
84 | |||
85 | #else | ||
86 | |||
87 | #define spin_unlock_string \ | ||
88 | "xchgb %b0, %1" \ | ||
89 | :"=q" (oldval), "=m" (lock->lock) \ | ||
90 | :"0" (oldval) : "memory" | ||
91 | 38 | ||
92 | static inline void _raw_spin_unlock(spinlock_t *lock) | 39 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
93 | { | 40 | { |
94 | char oldval = 1; | ||
95 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
96 | BUG_ON(lock->magic != SPINLOCK_MAGIC); | ||
97 | assert_spin_locked(lock); | ||
98 | #endif | ||
99 | __asm__ __volatile__( | 41 | __asm__ __volatile__( |
100 | spin_unlock_string | 42 | __raw_spin_lock_string |
101 | ); | 43 | :"=m" (lock->slock) : : "memory"); |
102 | } | 44 | } |
103 | 45 | ||
104 | #endif | 46 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
105 | 47 | ||
106 | static inline int _raw_spin_trylock(spinlock_t *lock) | 48 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
107 | { | 49 | { |
108 | char oldval; | 50 | char oldval; |
51 | |||
109 | __asm__ __volatile__( | 52 | __asm__ __volatile__( |
110 | "xchgb %b0,%1" | 53 | "xchgb %b0,%1" |
111 | :"=q" (oldval), "=m" (lock->lock) | 54 | :"=q" (oldval), "=m" (lock->slock) |
112 | :"0" (0) : "memory"); | 55 | :"0" (0) : "memory"); |
56 | |||
113 | return oldval > 0; | 57 | return oldval > 0; |
114 | } | 58 | } |
115 | 59 | ||
116 | static inline void _raw_spin_lock(spinlock_t *lock) | 60 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
117 | { | 61 | { |
118 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
119 | if (lock->magic != SPINLOCK_MAGIC) { | ||
120 | printk("eip: %p\n", __builtin_return_address(0)); | ||
121 | BUG(); | ||
122 | } | ||
123 | #endif | ||
124 | __asm__ __volatile__( | 62 | __asm__ __volatile__( |
125 | spin_lock_string | 63 | __raw_spin_unlock_string |
126 | :"=m" (lock->lock) : : "memory"); | 64 | ); |
127 | } | 65 | } |
128 | 66 | ||
67 | #define __raw_spin_unlock_wait(lock) \ | ||
68 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) | ||
129 | 69 | ||
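The lock word rests at 1 (see __RAW_SPIN_LOCK_UNLOCKED { 1 } in the new spinlock_types.h below): "lock ; decb" drops it to 0 for the winner and negative for everyone else, and losers spin on plain reads until the byte goes positive again before retrying. A hypothetical C11 rendering of the same protocol:

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct { _Atomic signed char slock; } decb_lock_t;

    #define DECB_LOCK_UNLOCKED { 1 }

    static void decb_lock(decb_lock_t *lock)
    {
            /* "lock ; decb %0 ; js 2f": old value 1 -> we own it */
            while (atomic_fetch_sub_explicit(&lock->slock, 1,
                                             memory_order_acquire) != 1) {
                    /* "rep;nop ; cmpb $0,%0 ; jle 3b": wait for > 0 */
                    while (atomic_load_explicit(&lock->slock,
                                                memory_order_relaxed) <= 0)
                            ;
            }
    }

    static bool decb_trylock(decb_lock_t *lock)
    {
            /* "xchgb": swap in 0, success if the old byte was positive */
            return atomic_exchange_explicit(&lock->slock, 0,
                                            memory_order_acquire) > 0;
    }

    static void decb_unlock(decb_lock_t *lock)
    {
            /* "movb $1,%0" */
            atomic_store_explicit(&lock->slock, 1, memory_order_release);
    }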
130 | /* | 70 | /* |
131 | * Read-write spinlocks, allowing multiple readers | 71 | * Read-write spinlocks, allowing multiple readers |
@@ -136,33 +76,7 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
136 | * can "mix" irq-safe locks - any writer needs to get a | 76 | * can "mix" irq-safe locks - any writer needs to get a |
137 | * irq-safe write-lock, but readers can get non-irqsafe | 77 | * irq-safe write-lock, but readers can get non-irqsafe |
138 | * read-locks. | 78 | * read-locks. |
139 | */ | 79 | * |
140 | typedef struct { | ||
141 | volatile unsigned int lock; | ||
142 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
143 | unsigned magic; | ||
144 | #endif | ||
145 | #ifdef CONFIG_PREEMPT | ||
146 | unsigned int break_lock; | ||
147 | #endif | ||
148 | } rwlock_t; | ||
149 | |||
150 | #define RWLOCK_MAGIC 0xdeaf1eed | ||
151 | |||
152 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
153 | #define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC | ||
154 | #else | ||
155 | #define RWLOCK_MAGIC_INIT /* */ | ||
156 | #endif | ||
157 | |||
158 | #define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT } | ||
159 | |||
160 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
161 | |||
162 | #define read_can_lock(x) ((int)(x)->lock > 0) | ||
163 | #define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | ||
164 | |||
165 | /* | ||
166 | * On x86, we implement read-write locks as a 32-bit counter | 80 | * On x86, we implement read-write locks as a 32-bit counter |
167 | * with the high bit (sign) being the "contended" bit. | 81 | * with the high bit (sign) being the "contended" bit. |
168 | * | 82 | * |
@@ -170,29 +84,24 @@ typedef struct { | |||
170 | * | 84 | * |
171 | * Changed to use the same technique as rw semaphores. See | 85 | * Changed to use the same technique as rw semaphores. See |
172 | * semaphore.h for details. -ben | 86 | * semaphore.h for details. -ben |
87 | * | ||
88 | * the helpers are in arch/i386/kernel/semaphore.c | ||
173 | */ | 89 | */ |
174 | /* the spinlock helpers are in arch/i386/kernel/semaphore.c */ | ||
175 | 90 | ||
176 | static inline void _raw_read_lock(rwlock_t *rw) | 91 | #define __raw_read_can_lock(x) ((int)(x)->lock > 0) |
92 | #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | ||
93 | |||
94 | static inline void __raw_read_lock(raw_rwlock_t *rw) | ||
177 | { | 95 | { |
178 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
179 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
180 | #endif | ||
181 | __build_read_lock(rw, "__read_lock_failed"); | 96 | __build_read_lock(rw, "__read_lock_failed"); |
182 | } | 97 | } |
183 | 98 | ||
184 | static inline void _raw_write_lock(rwlock_t *rw) | 99 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
185 | { | 100 | { |
186 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
187 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
188 | #endif | ||
189 | __build_write_lock(rw, "__write_lock_failed"); | 101 | __build_write_lock(rw, "__write_lock_failed"); |
190 | } | 102 | } |
191 | 103 | ||
192 | #define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory") | 104 | static inline int __raw_read_trylock(raw_rwlock_t *lock) |
193 | #define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory") | ||
194 | |||
195 | static inline int _raw_read_trylock(rwlock_t *lock) | ||
196 | { | 105 | { |
197 | atomic_t *count = (atomic_t *)lock; | 106 | atomic_t *count = (atomic_t *)lock; |
198 | atomic_dec(count); | 107 | atomic_dec(count); |
@@ -202,7 +111,7 @@ static inline int _raw_read_trylock(rwlock_t *lock) | |||
202 | return 0; | 111 | return 0; |
203 | } | 112 | } |
204 | 113 | ||
205 | static inline int _raw_write_trylock(rwlock_t *lock) | 114 | static inline int __raw_write_trylock(raw_rwlock_t *lock) |
206 | { | 115 | { |
207 | atomic_t *count = (atomic_t *)lock; | 116 | atomic_t *count = (atomic_t *)lock; |
208 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) | 117 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) |
@@ -211,4 +120,15 @@ static inline int _raw_write_trylock(rwlock_t *lock) | |||
211 | return 0; | 120 | return 0; |
212 | } | 121 | } |
213 | 122 | ||
123 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | ||
124 | { | ||
125 | asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory"); | ||
126 | } | ||
127 | |||
128 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | ||
129 | { | ||
130 | asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0" | ||
131 | : "=m" (rw->lock) : : "memory"); | ||
132 | } | ||
133 | |||
214 | #endif /* __ASM_SPINLOCK_H */ | 134 | #endif /* __ASM_SPINLOCK_H */ |
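The trylock paths above show the whole biased-counter protocol in miniature: the word rests at RW_LOCK_BIAS, each reader subtracts 1 (so the word stays positive while only readers hold it), and a writer subtracts the entire bias (so the word reaches 0 only if it had no other holders). A userspace sketch with C11 atomics standing in for the lock-prefixed instructions:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define RW_LOCK_BIAS 0x01000000

    typedef struct { _Atomic int lock; } biased_rwlock_t;

    static bool read_trylock(biased_rwlock_t *rw)
    {
            /* mirrors: atomic_dec(count); if (atomic_read(count) >= 0) */
            if (atomic_fetch_sub_explicit(&rw->lock, 1,
                                          memory_order_acquire) > 0)
                    return true;
            /* failed: back the decrement out */
            atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
            return false;
    }

    static void read_unlock(biased_rwlock_t *rw)
    {
            atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
    }

    static bool write_trylock(biased_rwlock_t *rw)
    {
            /* mirrors: atomic_sub_and_test(RW_LOCK_BIAS, count) */
            if (atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS,
                            memory_order_acquire) == RW_LOCK_BIAS)
                    return true;
            atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS,
                                      memory_order_relaxed);
            return false;
    }

    static void write_unlock(biased_rwlock_t *rw)
    {
            atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS,
                                      memory_order_release);
    }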
diff --git a/include/asm-x86_64/spinlock_types.h b/include/asm-x86_64/spinlock_types.h new file mode 100644 index 000000000000..59efe849f351 --- /dev/null +++ b/include/asm-x86_64/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
2 | #define __ASM_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int slock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int lock; | ||
16 | } raw_rwlock_t; | ||
17 | |||
18 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | ||
19 | |||
20 | #endif | ||
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h new file mode 100644 index 000000000000..6b20af0bbb79 --- /dev/null +++ b/include/linux/bit_spinlock.h | |||
@@ -0,0 +1,77 @@ | |||
1 | #ifndef __LINUX_BIT_SPINLOCK_H | ||
2 | #define __LINUX_BIT_SPINLOCK_H | ||
3 | |||
4 | /* | ||
5 | * bit-based spin_lock() | ||
6 | * | ||
7 | * Don't use this unless you really need to: spin_lock() and spin_unlock() | ||
8 | * are significantly faster. | ||
9 | */ | ||
10 | static inline void bit_spin_lock(int bitnum, unsigned long *addr) | ||
11 | { | ||
12 | /* | ||
13 | * Assuming the lock is uncontended, this never enters | ||
14 | * the body of the outer loop. If it is contended, then | ||
15 | * within the inner loop a non-atomic test is used to | ||
16 | * busywait with less bus contention for a good time to | ||
17 | * attempt to acquire the lock bit. | ||
18 | */ | ||
19 | preempt_disable(); | ||
20 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
21 | while (test_and_set_bit(bitnum, addr)) { | ||
22 | while (test_bit(bitnum, addr)) { | ||
23 | preempt_enable(); | ||
24 | cpu_relax(); | ||
25 | preempt_disable(); | ||
26 | } | ||
27 | } | ||
28 | #endif | ||
29 | __acquire(bitlock); | ||
30 | } | ||
31 | |||
32 | /* | ||
33 | * Return true if it was acquired | ||
34 | */ | ||
35 | static inline int bit_spin_trylock(int bitnum, unsigned long *addr) | ||
36 | { | ||
37 | preempt_disable(); | ||
38 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
39 | if (test_and_set_bit(bitnum, addr)) { | ||
40 | preempt_enable(); | ||
41 | return 0; | ||
42 | } | ||
43 | #endif | ||
44 | __acquire(bitlock); | ||
45 | return 1; | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | * bit-based spin_unlock() | ||
50 | */ | ||
51 | static inline void bit_spin_unlock(int bitnum, unsigned long *addr) | ||
52 | { | ||
53 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
54 | BUG_ON(!test_bit(bitnum, addr)); | ||
55 | smp_mb__before_clear_bit(); | ||
56 | clear_bit(bitnum, addr); | ||
57 | #endif | ||
58 | preempt_enable(); | ||
59 | __release(bitlock); | ||
60 | } | ||
61 | |||
62 | /* | ||
63 | * Return true if the lock is held. | ||
64 | */ | ||
65 | static inline int bit_spin_is_locked(int bitnum, unsigned long *addr) | ||
66 | { | ||
67 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
68 | return test_bit(bitnum, addr); | ||
69 | #elif defined CONFIG_PREEMPT | ||
70 | return preempt_count(); | ||
71 | #else | ||
72 | return 1; | ||
73 | #endif | ||
74 | } | ||
75 | |||
76 | #endif /* __LINUX_BIT_SPINLOCK_H */ | ||
77 | |||
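bit_spin_lock() turns a single bit of an existing word into a lock, so no separate spinlock_t needs to be embedded; the jbd header below pulls this file in for exactly that kind of use. A hypothetical userspace analogue of the test_and_set_bit() loop, using C11 fetch-or:

    #include <stdatomic.h>

    static void bitlock_lock(_Atomic unsigned long *word, int bitnum)
    {
            unsigned long mask = 1UL << bitnum;

            /* test_and_set_bit(): fetch_or returns the old word */
            while (atomic_fetch_or_explicit(word, mask,
                                            memory_order_acquire) & mask) {
                    /* cheap busywait while the bit stays set */
                    while (atomic_load_explicit(word,
                                    memory_order_relaxed) & mask)
                            ;
            }
    }

    static void bitlock_unlock(_Atomic unsigned long *word, int bitnum)
    {
            /* clear_bit() with release ordering */
            atomic_fetch_and_explicit(word, ~(1UL << bitnum),
                                      memory_order_release);
    }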
diff --git a/include/linux/jbd.h b/include/linux/jbd.h index 84321a4cac93..de097269bd7f 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/buffer_head.h> | 28 | #include <linux/buffer_head.h> |
29 | #include <linux/journal-head.h> | 29 | #include <linux/journal-head.h> |
30 | #include <linux/stddef.h> | 30 | #include <linux/stddef.h> |
31 | #include <linux/bit_spinlock.h> | ||
31 | #include <asm/semaphore.h> | 32 | #include <asm/semaphore.h> |
32 | #endif | 33 | #endif |
33 | 34 | ||

diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index d6ba068719b6..cdc99a27840d 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
@@ -2,7 +2,48 @@ | |||
2 | #define __LINUX_SPINLOCK_H | 2 | #define __LINUX_SPINLOCK_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * include/linux/spinlock.h - generic locking declarations | 5 | * include/linux/spinlock.h - generic spinlock/rwlock declarations |
6 | * | ||
7 | * here's the role of the various spinlock/rwlock related include files: | ||
8 | * | ||
9 | * on SMP builds: | ||
10 | * | ||
11 | * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the | ||
12 | * initializers | ||
13 | * | ||
14 | * linux/spinlock_types.h: | ||
15 | * defines the generic type and initializers | ||
16 | * | ||
17 | * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel | ||
18 | * implementations, mostly inline assembly code | ||
19 | * | ||
20 | * (also included on UP-debug builds:) | ||
21 | * | ||
22 | * linux/spinlock_api_smp.h: | ||
23 | * contains the prototypes for the _spin_*() APIs. | ||
24 | * | ||
25 | * linux/spinlock.h: builds the final spin_*() APIs. | ||
26 | * | ||
27 | * on UP builds: | ||
28 | * | ||
29 | * linux/spinlock_types_up.h: | ||
30 | * contains the generic, simplified UP spinlock type. | ||
31 | * (which is an empty structure on non-debug builds) | ||
32 | * | ||
33 | * linux/spinlock_types.h: | ||
34 | * defines the generic type and initializers | ||
35 | * | ||
36 | * linux/spinlock_up.h: | ||
37 | * contains the __raw_spin_*()/etc. version of UP | ||
38 | * builds. (which are NOPs on non-debug, non-preempt | ||
39 | * builds) | ||
40 | * | ||
41 | * (included on UP-non-debug builds:) | ||
42 | * | ||
43 | * linux/spinlock_api_up.h: | ||
44 | * builds the _spin_*() APIs. | ||
45 | * | ||
46 | * linux/spinlock.h: builds the final spin_*() APIs. | ||
6 | */ | 47 | */ |
7 | 48 | ||
8 | #include <linux/config.h> | 49 | #include <linux/config.h> |
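The block comment above is the map for the whole patch; concretely, on an SMP build each layer adds exactly one thing on top of the layer below it. A compilable toy of that stack, assuming made-up stand-ins (no-op preempt_disable()/preempt_enable(), plain C11 in place of the arch inline assembly):

    #include <stdatomic.h>

    /* layer 1: asm/spinlock_types.h -- the raw type */
    typedef struct { atomic_flag slock; } raw_spinlock_t;

    /* layer 2: linux/spinlock_types.h -- the generic wrapper type */
    typedef struct { raw_spinlock_t raw_lock; } spinlock_t;

    /* layer 3: asm/spinlock.h -- the __raw_*() lowlevel ops */
    static void __raw_spin_lock(raw_spinlock_t *lock)
    {
            while (atomic_flag_test_and_set_explicit(&lock->slock,
                                                     memory_order_acquire))
                    ;
    }

    static void __raw_spin_unlock(raw_spinlock_t *lock)
    {
            atomic_flag_clear_explicit(&lock->slock, memory_order_release);
    }

    /* layer 4: linux/spinlock_api_smp.h -- the _spin_*() APIs */
    #define preempt_disable()  do { } while (0)   /* stand-in */
    #define preempt_enable()   do { } while (0)   /* stand-in */

    static void _spin_lock(spinlock_t *lock)
    {
            preempt_disable();
            __raw_spin_lock(&lock->raw_lock);
    }

    static void _spin_unlock(spinlock_t *lock)
    {
            __raw_spin_unlock(&lock->raw_lock);
            preempt_enable();
    }

    /* layer 5: linux/spinlock.h -- the final spin_*() names */
    #define spin_lock(lock)   _spin_lock(lock)
    #define spin_unlock(lock) _spin_unlock(lock)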
@@ -13,7 +54,6 @@ | |||
13 | #include <linux/kernel.h> | 54 | #include <linux/kernel.h> |
14 | #include <linux/stringify.h> | 55 | #include <linux/stringify.h> |
15 | 56 | ||
16 | #include <asm/processor.h> /* for cpu relax */ | ||
17 | #include <asm/system.h> | 57 | #include <asm/system.h> |
18 | 58 | ||
19 | /* | 59 | /* |
@@ -35,423 +75,84 @@ | |||
35 | #define __lockfunc fastcall __attribute__((section(".spinlock.text"))) | 75 | #define __lockfunc fastcall __attribute__((section(".spinlock.text"))) |
36 | 76 | ||
37 | /* | 77 | /* |
38 | * If CONFIG_SMP is set, pull in the _raw_* definitions | 78 | * Pull the raw_spinlock_t and raw_rwlock_t definitions: |
39 | */ | 79 | */ |
40 | #ifdef CONFIG_SMP | 80 | #include <linux/spinlock_types.h> |
41 | |||
42 | #define assert_spin_locked(x) BUG_ON(!spin_is_locked(x)) | ||
43 | #include <asm/spinlock.h> | ||
44 | |||
45 | int __lockfunc _spin_trylock(spinlock_t *lock); | ||
46 | int __lockfunc _read_trylock(rwlock_t *lock); | ||
47 | int __lockfunc _write_trylock(rwlock_t *lock); | ||
48 | |||
49 | void __lockfunc _spin_lock(spinlock_t *lock) __acquires(spinlock_t); | ||
50 | void __lockfunc _read_lock(rwlock_t *lock) __acquires(rwlock_t); | ||
51 | void __lockfunc _write_lock(rwlock_t *lock) __acquires(rwlock_t); | ||
52 | |||
53 | void __lockfunc _spin_unlock(spinlock_t *lock) __releases(spinlock_t); | ||
54 | void __lockfunc _read_unlock(rwlock_t *lock) __releases(rwlock_t); | ||
55 | void __lockfunc _write_unlock(rwlock_t *lock) __releases(rwlock_t); | ||
56 | |||
57 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) __acquires(spinlock_t); | ||
58 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) __acquires(rwlock_t); | ||
59 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) __acquires(rwlock_t); | ||
60 | |||
61 | void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(spinlock_t); | ||
62 | void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(spinlock_t); | ||
63 | void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(rwlock_t); | ||
64 | void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(rwlock_t); | ||
65 | void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(rwlock_t); | ||
66 | void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(rwlock_t); | ||
67 | |||
68 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) __releases(spinlock_t); | ||
69 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(spinlock_t); | ||
70 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(spinlock_t); | ||
71 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) __releases(rwlock_t); | ||
72 | void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(rwlock_t); | ||
73 | void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(rwlock_t); | ||
74 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) __releases(rwlock_t); | ||
75 | void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(rwlock_t); | ||
76 | void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(rwlock_t); | ||
77 | |||
78 | int __lockfunc _spin_trylock_bh(spinlock_t *lock); | ||
79 | int __lockfunc generic_raw_read_trylock(rwlock_t *lock); | ||
80 | int in_lock_functions(unsigned long addr); | ||
81 | |||
82 | #else | ||
83 | 81 | ||
84 | #define in_lock_functions(ADDR) 0 | 82 | extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); |
85 | 83 | ||
86 | #if !defined(CONFIG_PREEMPT) && !defined(CONFIG_DEBUG_SPINLOCK) | ||
87 | # define _atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic) | ||
88 | # define ATOMIC_DEC_AND_LOCK | ||
89 | #endif | ||
90 | |||
91 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
92 | |||
93 | #define SPINLOCK_MAGIC 0x1D244B3C | ||
94 | typedef struct { | ||
95 | unsigned long magic; | ||
96 | volatile unsigned long lock; | ||
97 | volatile unsigned int babble; | ||
98 | const char *module; | ||
99 | char *owner; | ||
100 | int oline; | ||
101 | } spinlock_t; | ||
102 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { SPINLOCK_MAGIC, 0, 10, __FILE__ , NULL, 0} | ||
103 | |||
104 | #define spin_lock_init(x) \ | ||
105 | do { \ | ||
106 | (x)->magic = SPINLOCK_MAGIC; \ | ||
107 | (x)->lock = 0; \ | ||
108 | (x)->babble = 5; \ | ||
109 | (x)->module = __FILE__; \ | ||
110 | (x)->owner = NULL; \ | ||
111 | (x)->oline = 0; \ | ||
112 | } while (0) | ||
113 | |||
114 | #define CHECK_LOCK(x) \ | ||
115 | do { \ | ||
116 | if ((x)->magic != SPINLOCK_MAGIC) { \ | ||
117 | printk(KERN_ERR "%s:%d: spin_is_locked on uninitialized spinlock %p.\n", \ | ||
118 | __FILE__, __LINE__, (x)); \ | ||
119 | } \ | ||
120 | } while(0) | ||
121 | |||
122 | #define _raw_spin_lock(x) \ | ||
123 | do { \ | ||
124 | CHECK_LOCK(x); \ | ||
125 | if ((x)->lock&&(x)->babble) { \ | ||
126 | (x)->babble--; \ | ||
127 | printk("%s:%d: spin_lock(%s:%p) already locked by %s/%d\n", \ | ||
128 | __FILE__,__LINE__, (x)->module, \ | ||
129 | (x), (x)->owner, (x)->oline); \ | ||
130 | } \ | ||
131 | (x)->lock = 1; \ | ||
132 | (x)->owner = __FILE__; \ | ||
133 | (x)->oline = __LINE__; \ | ||
134 | } while (0) | ||
135 | |||
136 | /* without debugging, spin_is_locked on UP always says | ||
137 | * FALSE. --> printk if already locked. */ | ||
138 | #define spin_is_locked(x) \ | ||
139 | ({ \ | ||
140 | CHECK_LOCK(x); \ | ||
141 | if ((x)->lock&&(x)->babble) { \ | ||
142 | (x)->babble--; \ | ||
143 | printk("%s:%d: spin_is_locked(%s:%p) already locked by %s/%d\n", \ | ||
144 | __FILE__,__LINE__, (x)->module, \ | ||
145 | (x), (x)->owner, (x)->oline); \ | ||
146 | } \ | ||
147 | 0; \ | ||
148 | }) | ||
149 | |||
150 | /* with debugging, assert_spin_locked() on UP does check | ||
151 | * the lock value properly */ | ||
152 | #define assert_spin_locked(x) \ | ||
153 | ({ \ | ||
154 | CHECK_LOCK(x); \ | ||
155 | BUG_ON(!(x)->lock); \ | ||
156 | }) | ||
157 | |||
158 | /* without debugging, spin_trylock on UP always says | ||
159 | * TRUE. --> printk if already locked. */ | ||
160 | #define _raw_spin_trylock(x) \ | ||
161 | ({ \ | ||
162 | CHECK_LOCK(x); \ | ||
163 | if ((x)->lock&&(x)->babble) { \ | ||
164 | (x)->babble--; \ | ||
165 | printk("%s:%d: spin_trylock(%s:%p) already locked by %s/%d\n", \ | ||
166 | __FILE__,__LINE__, (x)->module, \ | ||
167 | (x), (x)->owner, (x)->oline); \ | ||
168 | } \ | ||
169 | (x)->lock = 1; \ | ||
170 | (x)->owner = __FILE__; \ | ||
171 | (x)->oline = __LINE__; \ | ||
172 | 1; \ | ||
173 | }) | ||
174 | |||
175 | #define spin_unlock_wait(x) \ | ||
176 | do { \ | ||
177 | CHECK_LOCK(x); \ | ||
178 | if ((x)->lock&&(x)->babble) { \ | ||
179 | (x)->babble--; \ | ||
180 | printk("%s:%d: spin_unlock_wait(%s:%p) owned by %s/%d\n", \ | ||
181 | __FILE__,__LINE__, (x)->module, (x), \ | ||
182 | (x)->owner, (x)->oline); \ | ||
183 | }\ | ||
184 | } while (0) | ||
185 | |||
186 | #define _raw_spin_unlock(x) \ | ||
187 | do { \ | ||
188 | CHECK_LOCK(x); \ | ||
189 | if (!(x)->lock&&(x)->babble) { \ | ||
190 | (x)->babble--; \ | ||
191 | printk("%s:%d: spin_unlock(%s:%p) not locked\n", \ | ||
192 | __FILE__,__LINE__, (x)->module, (x));\ | ||
193 | } \ | ||
194 | (x)->lock = 0; \ | ||
195 | } while (0) | ||
196 | #else | ||
197 | /* | 84 | /* |
198 | * gcc versions before ~2.95 have a nasty bug with empty initializers. | 85 | * Pull the __raw*() functions/declarations (UP-nondebug doesn't need them): |
199 | */ | 86 | */ |
200 | #if (__GNUC__ > 2) | 87 | #if defined(CONFIG_SMP) |
201 | typedef struct { } spinlock_t; | 88 | # include <asm/spinlock.h> |
202 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { } | ||
203 | #else | 89 | #else |
204 | typedef struct { int gcc_is_buggy; } spinlock_t; | 90 | # include <linux/spinlock_up.h> |
205 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | ||
206 | #endif | 91 | #endif |
207 | 92 | ||
93 | #define spin_lock_init(lock) do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0) | ||
94 | #define rwlock_init(lock) do { *(lock) = RW_LOCK_UNLOCKED; } while (0) | ||
95 | |||
96 | #define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) | ||
97 | |||
98 | /** | ||
99 | * spin_unlock_wait - wait until the spinlock gets unlocked | ||
100 | * @lock: the spinlock in question. | ||
101 | */ | ||
102 | #define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) | ||
103 | |||
208 | /* | 104 | /* |
209 | * If CONFIG_SMP is unset, declare the _raw_* definitions as nops | 105 | * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: |
210 | */ | 106 | */ |
211 | #define spin_lock_init(lock) do { (void)(lock); } while(0) | 107 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) |
212 | #define _raw_spin_lock(lock) do { (void)(lock); } while(0) | 108 | # include <linux/spinlock_api_smp.h> |
213 | #define spin_is_locked(lock) ((void)(lock), 0) | ||
214 | #define assert_spin_locked(lock) do { (void)(lock); } while(0) | ||
215 | #define _raw_spin_trylock(lock) (((void)(lock), 1)) | ||
216 | #define spin_unlock_wait(lock) (void)(lock) | ||
217 | #define _raw_spin_unlock(lock) do { (void)(lock); } while(0) | ||
218 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
219 | |||
220 | /* RW spinlocks: No debug version */ | ||
221 | |||
222 | #if (__GNUC__ > 2) | ||
223 | typedef struct { } rwlock_t; | ||
224 | #define RW_LOCK_UNLOCKED (rwlock_t) { } | ||
225 | #else | 109 | #else |
226 | typedef struct { int gcc_is_buggy; } rwlock_t; | 110 | # include <linux/spinlock_api_up.h> |
227 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | ||
228 | #endif | 111 | #endif |
229 | 112 | ||
230 | #define rwlock_init(lock) do { (void)(lock); } while(0) | 113 | #ifdef CONFIG_DEBUG_SPINLOCK |
231 | #define _raw_read_lock(lock) do { (void)(lock); } while(0) | 114 | extern void _raw_spin_lock(spinlock_t *lock); |
232 | #define _raw_read_unlock(lock) do { (void)(lock); } while(0) | 115 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) |
233 | #define _raw_write_lock(lock) do { (void)(lock); } while(0) | 116 | extern int _raw_spin_trylock(spinlock_t *lock); |
234 | #define _raw_write_unlock(lock) do { (void)(lock); } while(0) | 117 | extern void _raw_spin_unlock(spinlock_t *lock); |
235 | #define read_can_lock(lock) (((void)(lock), 1)) | 118 | |
236 | #define write_can_lock(lock) (((void)(lock), 1)) | 119 | extern void _raw_read_lock(rwlock_t *lock); |
237 | #define _raw_read_trylock(lock) ({ (void)(lock); (1); }) | 120 | extern int _raw_read_trylock(rwlock_t *lock); |
238 | #define _raw_write_trylock(lock) ({ (void)(lock); (1); }) | 121 | extern void _raw_read_unlock(rwlock_t *lock); |
239 | 122 | extern void _raw_write_lock(rwlock_t *lock); | |
240 | #define _spin_trylock(lock) ({preempt_disable(); _raw_spin_trylock(lock) ? \ | 123 | extern int _raw_write_trylock(rwlock_t *lock); |
241 | 1 : ({preempt_enable(); 0;});}) | 124 | extern void _raw_write_unlock(rwlock_t *lock); |
242 | 125 | #else | |
243 | #define _read_trylock(lock) ({preempt_disable();_raw_read_trylock(lock) ? \ | 126 | # define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) |
244 | 1 : ({preempt_enable(); 0;});}) | 127 | # define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) |
245 | 128 | # define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) | |
246 | #define _write_trylock(lock) ({preempt_disable(); _raw_write_trylock(lock) ? \ | 129 | # define _raw_spin_lock_flags(lock, flags) \ |
247 | 1 : ({preempt_enable(); 0;});}) | 130 | __raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) |
248 | 131 | # define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) | |
249 | #define _spin_trylock_bh(lock) ({preempt_disable(); local_bh_disable(); \ | 132 | # define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) |
250 | _raw_spin_trylock(lock) ? \ | 133 | # define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) |
251 | 1 : ({preempt_enable_no_resched(); local_bh_enable(); 0;});}) | 134 | # define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) |
252 | 135 | # define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) | |
253 | #define _spin_lock(lock) \ | 136 | # define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) |
254 | do { \ | 137 | #endif |
255 | preempt_disable(); \ | ||
256 | _raw_spin_lock(lock); \ | ||
257 | __acquire(lock); \ | ||
258 | } while(0) | ||
259 | |||
260 | #define _write_lock(lock) \ | ||
261 | do { \ | ||
262 | preempt_disable(); \ | ||
263 | _raw_write_lock(lock); \ | ||
264 | __acquire(lock); \ | ||
265 | } while(0) | ||
266 | |||
267 | #define _read_lock(lock) \ | ||
268 | do { \ | ||
269 | preempt_disable(); \ | ||
270 | _raw_read_lock(lock); \ | ||
271 | __acquire(lock); \ | ||
272 | } while(0) | ||
273 | |||
274 | #define _spin_unlock(lock) \ | ||
275 | do { \ | ||
276 | _raw_spin_unlock(lock); \ | ||
277 | preempt_enable(); \ | ||
278 | __release(lock); \ | ||
279 | } while (0) | ||
280 | |||
281 | #define _write_unlock(lock) \ | ||
282 | do { \ | ||
283 | _raw_write_unlock(lock); \ | ||
284 | preempt_enable(); \ | ||
285 | __release(lock); \ | ||
286 | } while(0) | ||
287 | |||
288 | #define _read_unlock(lock) \ | ||
289 | do { \ | ||
290 | _raw_read_unlock(lock); \ | ||
291 | preempt_enable(); \ | ||
292 | __release(lock); \ | ||
293 | } while(0) | ||
294 | |||
295 | #define _spin_lock_irqsave(lock, flags) \ | ||
296 | do { \ | ||
297 | local_irq_save(flags); \ | ||
298 | preempt_disable(); \ | ||
299 | _raw_spin_lock(lock); \ | ||
300 | __acquire(lock); \ | ||
301 | } while (0) | ||
302 | |||
303 | #define _spin_lock_irq(lock) \ | ||
304 | do { \ | ||
305 | local_irq_disable(); \ | ||
306 | preempt_disable(); \ | ||
307 | _raw_spin_lock(lock); \ | ||
308 | __acquire(lock); \ | ||
309 | } while (0) | ||
310 | |||
311 | #define _spin_lock_bh(lock) \ | ||
312 | do { \ | ||
313 | local_bh_disable(); \ | ||
314 | preempt_disable(); \ | ||
315 | _raw_spin_lock(lock); \ | ||
316 | __acquire(lock); \ | ||
317 | } while (0) | ||
318 | |||
319 | #define _read_lock_irqsave(lock, flags) \ | ||
320 | do { \ | ||
321 | local_irq_save(flags); \ | ||
322 | preempt_disable(); \ | ||
323 | _raw_read_lock(lock); \ | ||
324 | __acquire(lock); \ | ||
325 | } while (0) | ||
326 | |||
327 | #define _read_lock_irq(lock) \ | ||
328 | do { \ | ||
329 | local_irq_disable(); \ | ||
330 | preempt_disable(); \ | ||
331 | _raw_read_lock(lock); \ | ||
332 | __acquire(lock); \ | ||
333 | } while (0) | ||
334 | |||
335 | #define _read_lock_bh(lock) \ | ||
336 | do { \ | ||
337 | local_bh_disable(); \ | ||
338 | preempt_disable(); \ | ||
339 | _raw_read_lock(lock); \ | ||
340 | __acquire(lock); \ | ||
341 | } while (0) | ||
342 | |||
343 | #define _write_lock_irqsave(lock, flags) \ | ||
344 | do { \ | ||
345 | local_irq_save(flags); \ | ||
346 | preempt_disable(); \ | ||
347 | _raw_write_lock(lock); \ | ||
348 | __acquire(lock); \ | ||
349 | } while (0) | ||
350 | 138 | ||
351 | #define _write_lock_irq(lock) \ | 139 | #define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock) |
352 | do { \ | 140 | #define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock) |
353 | local_irq_disable(); \ | ||
354 | preempt_disable(); \ | ||
355 | _raw_write_lock(lock); \ | ||
356 | __acquire(lock); \ | ||
357 | } while (0) | ||
358 | |||
359 | #define _write_lock_bh(lock) \ | ||
360 | do { \ | ||
361 | local_bh_disable(); \ | ||
362 | preempt_disable(); \ | ||
363 | _raw_write_lock(lock); \ | ||
364 | __acquire(lock); \ | ||
365 | } while (0) | ||
366 | |||
367 | #define _spin_unlock_irqrestore(lock, flags) \ | ||
368 | do { \ | ||
369 | _raw_spin_unlock(lock); \ | ||
370 | local_irq_restore(flags); \ | ||
371 | preempt_enable(); \ | ||
372 | __release(lock); \ | ||
373 | } while (0) | ||
374 | |||
375 | #define _spin_unlock_irq(lock) \ | ||
376 | do { \ | ||
377 | _raw_spin_unlock(lock); \ | ||
378 | local_irq_enable(); \ | ||
379 | preempt_enable(); \ | ||
380 | __release(lock); \ | ||
381 | } while (0) | ||
382 | |||
383 | #define _spin_unlock_bh(lock) \ | ||
384 | do { \ | ||
385 | _raw_spin_unlock(lock); \ | ||
386 | preempt_enable_no_resched(); \ | ||
387 | local_bh_enable(); \ | ||
388 | __release(lock); \ | ||
389 | } while (0) | ||
390 | |||
391 | #define _write_unlock_bh(lock) \ | ||
392 | do { \ | ||
393 | _raw_write_unlock(lock); \ | ||
394 | preempt_enable_no_resched(); \ | ||
395 | local_bh_enable(); \ | ||
396 | __release(lock); \ | ||
397 | } while (0) | ||
398 | |||
399 | #define _read_unlock_irqrestore(lock, flags) \ | ||
400 | do { \ | ||
401 | _raw_read_unlock(lock); \ | ||
402 | local_irq_restore(flags); \ | ||
403 | preempt_enable(); \ | ||
404 | __release(lock); \ | ||
405 | } while (0) | ||
406 | |||
407 | #define _write_unlock_irqrestore(lock, flags) \ | ||
408 | do { \ | ||
409 | _raw_write_unlock(lock); \ | ||
410 | local_irq_restore(flags); \ | ||
411 | preempt_enable(); \ | ||
412 | __release(lock); \ | ||
413 | } while (0) | ||
414 | |||
415 | #define _read_unlock_irq(lock) \ | ||
416 | do { \ | ||
417 | _raw_read_unlock(lock); \ | ||
418 | local_irq_enable(); \ | ||
419 | preempt_enable(); \ | ||
420 | __release(lock); \ | ||
421 | } while (0) | ||
422 | |||
423 | #define _read_unlock_bh(lock) \ | ||
424 | do { \ | ||
425 | _raw_read_unlock(lock); \ | ||
426 | preempt_enable_no_resched(); \ | ||
427 | local_bh_enable(); \ | ||
428 | __release(lock); \ | ||
429 | } while (0) | ||
430 | |||
431 | #define _write_unlock_irq(lock) \ | ||
432 | do { \ | ||
433 | _raw_write_unlock(lock); \ | ||
434 | local_irq_enable(); \ | ||
435 | preempt_enable(); \ | ||
436 | __release(lock); \ | ||
437 | } while (0) | ||
438 | |||
439 | #endif /* !SMP */ | ||
440 | 141 | ||
441 | /* | 142 | /* |
442 | * Define the various spin_lock and rw_lock methods. Note we define these | 143 | * Define the various spin_lock and rw_lock methods. Note we define these |
443 | * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various | 144 | * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various |
444 | * methods are defined as nops in the case they are not required. | 145 | * methods are defined as nops in the case they are not required. |
445 | */ | 146 | */ |
446 | #define spin_trylock(lock) __cond_lock(_spin_trylock(lock)) | 147 | #define spin_trylock(lock) __cond_lock(_spin_trylock(lock)) |
447 | #define read_trylock(lock) __cond_lock(_read_trylock(lock)) | 148 | #define read_trylock(lock) __cond_lock(_read_trylock(lock)) |
448 | #define write_trylock(lock) __cond_lock(_write_trylock(lock)) | 149 | #define write_trylock(lock) __cond_lock(_write_trylock(lock)) |
449 | 150 | ||
450 | #define spin_lock(lock) _spin_lock(lock) | 151 | #define spin_lock(lock) _spin_lock(lock) |
451 | #define write_lock(lock) _write_lock(lock) | 152 | #define write_lock(lock) _write_lock(lock) |
452 | #define read_lock(lock) _read_lock(lock) | 153 | #define read_lock(lock) _read_lock(lock) |
453 | 154 | ||
454 | #ifdef CONFIG_SMP | 155 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) |
455 | #define spin_lock_irqsave(lock, flags) flags = _spin_lock_irqsave(lock) | 156 | #define spin_lock_irqsave(lock, flags) flags = _spin_lock_irqsave(lock) |
456 | #define read_lock_irqsave(lock, flags) flags = _read_lock_irqsave(lock) | 157 | #define read_lock_irqsave(lock, flags) flags = _read_lock_irqsave(lock) |
457 | #define write_lock_irqsave(lock, flags) flags = _write_lock_irqsave(lock) | 158 | #define write_lock_irqsave(lock, flags) flags = _write_lock_irqsave(lock) |
@@ -470,137 +171,59 @@ do { \ | |||
470 | #define write_lock_irq(lock) _write_lock_irq(lock) | 171 | #define write_lock_irq(lock) _write_lock_irq(lock) |
471 | #define write_lock_bh(lock) _write_lock_bh(lock) | 172 | #define write_lock_bh(lock) _write_lock_bh(lock) |
472 | 173 | ||
473 | #define spin_unlock(lock) _spin_unlock(lock) | 174 | #define spin_unlock(lock) _spin_unlock(lock) |
474 | #define write_unlock(lock) _write_unlock(lock) | 175 | #define write_unlock(lock) _write_unlock(lock) |
475 | #define read_unlock(lock) _read_unlock(lock) | 176 | #define read_unlock(lock) _read_unlock(lock) |
476 | 177 | ||
477 | #define spin_unlock_irqrestore(lock, flags) _spin_unlock_irqrestore(lock, flags) | 178 | #define spin_unlock_irqrestore(lock, flags) \ |
179 | _spin_unlock_irqrestore(lock, flags) | ||
478 | #define spin_unlock_irq(lock) _spin_unlock_irq(lock) | 180 | #define spin_unlock_irq(lock) _spin_unlock_irq(lock) |
479 | #define spin_unlock_bh(lock) _spin_unlock_bh(lock) | 181 | #define spin_unlock_bh(lock) _spin_unlock_bh(lock) |
480 | 182 | ||
481 | #define read_unlock_irqrestore(lock, flags) _read_unlock_irqrestore(lock, flags) | 183 | #define read_unlock_irqrestore(lock, flags) \ |
482 | #define read_unlock_irq(lock) _read_unlock_irq(lock) | 184 | _read_unlock_irqrestore(lock, flags) |
483 | #define read_unlock_bh(lock) _read_unlock_bh(lock) | 185 | #define read_unlock_irq(lock) _read_unlock_irq(lock) |
186 | #define read_unlock_bh(lock) _read_unlock_bh(lock) | ||
484 | 187 | ||
485 | #define write_unlock_irqrestore(lock, flags) _write_unlock_irqrestore(lock, flags) | 188 | #define write_unlock_irqrestore(lock, flags) \ |
486 | #define write_unlock_irq(lock) _write_unlock_irq(lock) | 189 | _write_unlock_irqrestore(lock, flags) |
487 | #define write_unlock_bh(lock) _write_unlock_bh(lock) | 190 | #define write_unlock_irq(lock) _write_unlock_irq(lock) |
191 | #define write_unlock_bh(lock) _write_unlock_bh(lock) | ||
488 | 192 | ||
489 | #define spin_trylock_bh(lock) __cond_lock(_spin_trylock_bh(lock)) | 193 | #define spin_trylock_bh(lock) __cond_lock(_spin_trylock_bh(lock)) |
490 | 194 | ||
491 | #define spin_trylock_irq(lock) \ | 195 | #define spin_trylock_irq(lock) \ |
492 | ({ \ | 196 | ({ \ |
493 | local_irq_disable(); \ | 197 | local_irq_disable(); \ |
494 | _spin_trylock(lock) ? \ | 198 | _spin_trylock(lock) ? \ |
495 | 1 : ({local_irq_enable(); 0; }); \ | 199 | 1 : ({ local_irq_enable(); 0; }); \ |
496 | }) | 200 | }) |
497 | 201 | ||
498 | #define spin_trylock_irqsave(lock, flags) \ | 202 | #define spin_trylock_irqsave(lock, flags) \ |
499 | ({ \ | 203 | ({ \ |
500 | local_irq_save(flags); \ | 204 | local_irq_save(flags); \ |
501 | _spin_trylock(lock) ? \ | 205 | _spin_trylock(lock) ? \ |
502 | 1 : ({local_irq_restore(flags); 0;}); \ | 206 | 1 : ({ local_irq_restore(flags); 0; }); \ |
503 | }) | 207 | }) |
504 | 208 | ||
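spin_trylock_irq() and spin_trylock_irqsave() bundle the IRQ bookkeeping with the trylock, restoring the disabled/saved state themselves when the lock cannot be taken. A hypothetical caller ("struct mydev" and its fields are invented for illustration):

    struct mydev {
            spinlock_t lock;
            unsigned long counter;
    };

    static void mydev_try_update(struct mydev *dev)
    {
            unsigned long flags;

            if (!spin_trylock_irqsave(&dev->lock, flags)) {
                    /* lock busy; the macro already restored IRQ state,
                     * so just defer instead of spinning with IRQs off */
                    return;
            }
            dev->counter++;
            spin_unlock_irqrestore(&dev->lock, flags);
    }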
505 | #ifdef CONFIG_LOCKMETER | ||
506 | extern void _metered_spin_lock (spinlock_t *lock); | ||
507 | extern void _metered_spin_unlock (spinlock_t *lock); | ||
508 | extern int _metered_spin_trylock(spinlock_t *lock); | ||
509 | extern void _metered_read_lock (rwlock_t *lock); | ||
510 | extern void _metered_read_unlock (rwlock_t *lock); | ||
511 | extern void _metered_write_lock (rwlock_t *lock); | ||
512 | extern void _metered_write_unlock (rwlock_t *lock); | ||
513 | extern int _metered_read_trylock (rwlock_t *lock); | ||
514 | extern int _metered_write_trylock(rwlock_t *lock); | ||
515 | #endif | ||
516 | |||
517 | /* "lock on reference count zero" */ | ||
518 | #ifndef ATOMIC_DEC_AND_LOCK | ||
519 | #include <asm/atomic.h> | ||
520 | extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); | ||
521 | #endif | ||
522 | |||
523 | #define atomic_dec_and_lock(atomic,lock) __cond_lock(_atomic_dec_and_lock(atomic,lock)) | ||
524 | |||
525 | /* | ||
526 | * bit-based spin_lock() | ||
527 | * | ||
528 | * Don't use this unless you really need to: spin_lock() and spin_unlock() | ||
529 | * are significantly faster. | ||
530 | */ | ||
531 | static inline void bit_spin_lock(int bitnum, unsigned long *addr) | ||
532 | { | ||
533 | /* | ||
534 | * Assuming the lock is uncontended, this never enters | ||
535 | * the body of the outer loop. If it is contended, then | ||
536 | * within the inner loop a non-atomic test is used to | ||
537 | * busywait with less bus contention for a good time to | ||
538 | * attempt to acquire the lock bit. | ||
539 | */ | ||
540 | preempt_disable(); | ||
541 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
542 | while (test_and_set_bit(bitnum, addr)) { | ||
543 | while (test_bit(bitnum, addr)) { | ||
544 | preempt_enable(); | ||
545 | cpu_relax(); | ||
546 | preempt_disable(); | ||
547 | } | ||
548 | } | ||
549 | #endif | ||
550 | __acquire(bitlock); | ||
551 | } | ||
552 | |||
553 | /* | 209 | /* |
554 | * Return true if it was acquired | 210 | * Pull the atomic_t declaration: |
211 | * (asm-mips/atomic.h needs above definitions) | ||
555 | */ | 212 | */ |
556 | static inline int bit_spin_trylock(int bitnum, unsigned long *addr) | 213 | #include <asm/atomic.h> |
557 | { | 214 | /** |
558 | preempt_disable(); | 215 | * atomic_dec_and_lock - lock on reaching reference count zero |
559 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | 216 | * @atomic: the atomic counter |
560 | if (test_and_set_bit(bitnum, addr)) { | 217 | * @lock: the spinlock in question |
561 | preempt_enable(); | ||
562 | return 0; | ||
563 | } | ||
564 | #endif | ||
565 | __acquire(bitlock); | ||
566 | return 1; | ||
567 | } | ||
568 | |||
569 | /* | ||
570 | * bit-based spin_unlock() | ||
571 | */ | ||
572 | static inline void bit_spin_unlock(int bitnum, unsigned long *addr) | ||
573 | { | ||
574 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
575 | BUG_ON(!test_bit(bitnum, addr)); | ||
576 | smp_mb__before_clear_bit(); | ||
577 | clear_bit(bitnum, addr); | ||
578 | #endif | ||
579 | preempt_enable(); | ||
580 | __release(bitlock); | ||
581 | } | ||
582 | |||
583 | /* | ||
584 | * Return true if the lock is held. | ||
585 | */ | 218 | */ |
586 | static inline int bit_spin_is_locked(int bitnum, unsigned long *addr) | 219 | extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); |
587 | { | 220 | #define atomic_dec_and_lock(atomic, lock) \ |
588 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | 221 | __cond_lock(_atomic_dec_and_lock(atomic, lock)) |
589 | return test_bit(bitnum, addr); | ||
590 | #elif defined CONFIG_PREEMPT | ||
591 | return preempt_count(); | ||
592 | #else | ||
593 | return 1; | ||
594 | #endif | ||
595 | } | ||
596 | |||
597 | #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED | ||
598 | #define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED | ||
599 | 222 | ||
600 | /** | 223 | /** |
601 | * spin_can_lock - would spin_trylock() succeed? | 224 | * spin_can_lock - would spin_trylock() succeed? |
602 | * @lock: the spinlock in question. | 225 | * @lock: the spinlock in question. |
603 | */ | 226 | */ |
604 | #define spin_can_lock(lock) (!spin_is_locked(lock)) | 227 | #define spin_can_lock(lock) (!spin_is_locked(lock)) |
605 | 228 | ||
606 | #endif /* __LINUX_SPINLOCK_H */ | 229 | #endif /* __LINUX_SPINLOCK_H */ |
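The spin_trylock_irqsave() macro above evaluates to 1 only when the lock was acquired; on the failure path the statement-expression restores the saved flags before yielding 0, so the caller's irq state is always left consistent. A minimal usage sketch, assuming hypothetical names (stats_lock, stats_counter, try_bump_counter):

    static DEFINE_SPINLOCK(stats_lock);        /* hypothetical lock */
    static unsigned long stats_counter;

    /* Opportunistic update from a path that must not spin. */
    static int try_bump_counter(void)
    {
            unsigned long flags;

            if (!spin_trylock_irqsave(&stats_lock, flags))
                    return 0;       /* contended: irqs already restored */
            stats_counter++;
            spin_unlock_irqrestore(&stats_lock, flags);
            return 1;
    }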
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
new file mode 100644
index 000000000000..78e6989ffb54
--- /dev/null
+++ b/include/linux/spinlock_api_smp.h
@@ -0,0 +1,57 @@
+#ifndef __LINUX_SPINLOCK_API_SMP_H
+#define __LINUX_SPINLOCK_API_SMP_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_api_smp.h
+ *
+ * spinlock API declarations on SMP (and debug)
+ * (implemented in kernel/spinlock.c)
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+int in_lock_functions(unsigned long addr);
+
+#define assert_spin_locked(x)	BUG_ON(!spin_is_locked(x))
+
+void __lockfunc _spin_lock(spinlock_t *lock)		__acquires(spinlock_t);
+void __lockfunc _read_lock(rwlock_t *lock)		__acquires(rwlock_t);
+void __lockfunc _write_lock(rwlock_t *lock)		__acquires(rwlock_t);
+void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(spinlock_t);
+void __lockfunc _read_lock_bh(rwlock_t *lock)		__acquires(rwlock_t);
+void __lockfunc _write_lock_bh(rwlock_t *lock)		__acquires(rwlock_t);
+void __lockfunc _spin_lock_irq(spinlock_t *lock)	__acquires(spinlock_t);
+void __lockfunc _read_lock_irq(rwlock_t *lock)		__acquires(rwlock_t);
+void __lockfunc _write_lock_irq(rwlock_t *lock)		__acquires(rwlock_t);
+unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+							__acquires(spinlock_t);
+unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
+							__acquires(rwlock_t);
+unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
+							__acquires(rwlock_t);
+int __lockfunc _spin_trylock(spinlock_t *lock);
+int __lockfunc _read_trylock(rwlock_t *lock);
+int __lockfunc _write_trylock(rwlock_t *lock);
+int __lockfunc _spin_trylock_bh(spinlock_t *lock);
+void __lockfunc _spin_unlock(spinlock_t *lock)		__releases(spinlock_t);
+void __lockfunc _read_unlock(rwlock_t *lock)		__releases(rwlock_t);
+void __lockfunc _write_unlock(rwlock_t *lock)		__releases(rwlock_t);
+void __lockfunc _spin_unlock_bh(spinlock_t *lock)	__releases(spinlock_t);
+void __lockfunc _read_unlock_bh(rwlock_t *lock)		__releases(rwlock_t);
+void __lockfunc _write_unlock_bh(rwlock_t *lock)	__releases(rwlock_t);
+void __lockfunc _spin_unlock_irq(spinlock_t *lock)	__releases(spinlock_t);
+void __lockfunc _read_unlock_irq(rwlock_t *lock)	__releases(rwlock_t);
+void __lockfunc _write_unlock_irq(rwlock_t *lock)	__releases(rwlock_t);
+void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+							__releases(spinlock_t);
+void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+							__releases(rwlock_t);
+void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+							__releases(rwlock_t);
+
+#endif /* __LINUX_SPINLOCK_API_SMP_H */
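assert_spin_locked(), declared above for the SMP/debug case (and stubbed out in the UP header that follows), lets a helper document and enforce that its caller holds a lock. A brief sketch with hypothetical names (queue_lock, __queue_add):

    static DEFINE_SPINLOCK(queue_lock);         /* hypothetical */
    static LIST_HEAD(queue);

    /* Caller must hold queue_lock. */
    static void __queue_add(struct list_head *entry)
    {
            assert_spin_locked(&queue_lock);    /* BUG_ON() if violated */
            list_add_tail(entry, &queue);
    }

    static void queue_add(struct list_head *entry)
    {
            spin_lock(&queue_lock);
            __queue_add(entry);
            spin_unlock(&queue_lock);
    }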
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
new file mode 100644
index 000000000000..cd81cee566f4
--- /dev/null
+++ b/include/linux/spinlock_api_up.h
@@ -0,0 +1,80 @@
+#ifndef __LINUX_SPINLOCK_API_UP_H
+#define __LINUX_SPINLOCK_API_UP_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_api_up.h
+ *
+ * spinlock API implementation on UP-nondebug (inlined implementation)
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#define in_lock_functions(ADDR)		0
+
+#define assert_spin_locked(lock)	do { (void)(lock); } while (0)
+
+/*
+ * In the UP-nondebug case there's no real locking going on, so the
+ * only thing we have to do is to keep the preempt counts and irq
+ * flags straight, to suppress compiler warnings of unused lock
+ * variables, and to add the proper checker annotations:
+ */
+#define __LOCK(lock) \
+  do { preempt_disable(); __acquire(lock); (void)(lock); } while (0)
+
+#define __LOCK_BH(lock) \
+  do { local_bh_disable(); __LOCK(lock); } while (0)
+
+#define __LOCK_IRQ(lock) \
+  do { local_irq_disable(); __LOCK(lock); } while (0)
+
+#define __LOCK_IRQSAVE(lock, flags) \
+  do { local_irq_save(flags); __LOCK(lock); } while (0)
+
+#define __UNLOCK(lock) \
+  do { preempt_enable(); __release(lock); (void)(lock); } while (0)
+
+#define __UNLOCK_BH(lock) \
+  do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0)
+
+#define __UNLOCK_IRQ(lock) \
+  do { local_irq_enable(); __UNLOCK(lock); } while (0)
+
+#define __UNLOCK_IRQRESTORE(lock, flags) \
+  do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
+
+#define _spin_lock(lock)			__LOCK(lock)
+#define _read_lock(lock)			__LOCK(lock)
+#define _write_lock(lock)			__LOCK(lock)
+#define _spin_lock_bh(lock)			__LOCK_BH(lock)
+#define _read_lock_bh(lock)			__LOCK_BH(lock)
+#define _write_lock_bh(lock)			__LOCK_BH(lock)
+#define _spin_lock_irq(lock)			__LOCK_IRQ(lock)
+#define _read_lock_irq(lock)			__LOCK_IRQ(lock)
+#define _write_lock_irq(lock)			__LOCK_IRQ(lock)
+#define _spin_lock_irqsave(lock, flags)		__LOCK_IRQSAVE(lock, flags)
+#define _read_lock_irqsave(lock, flags)		__LOCK_IRQSAVE(lock, flags)
+#define _write_lock_irqsave(lock, flags)	__LOCK_IRQSAVE(lock, flags)
+#define _spin_trylock(lock)			({ __LOCK(lock); 1; })
+#define _read_trylock(lock)			({ __LOCK(lock); 1; })
+#define _write_trylock(lock)			({ __LOCK(lock); 1; })
+#define _spin_trylock_bh(lock)			({ __LOCK_BH(lock); 1; })
+#define _spin_unlock(lock)			__UNLOCK(lock)
+#define _read_unlock(lock)			__UNLOCK(lock)
+#define _write_unlock(lock)			__UNLOCK(lock)
+#define _spin_unlock_bh(lock)			__UNLOCK_BH(lock)
+#define _write_unlock_bh(lock)			__UNLOCK_BH(lock)
+#define _read_unlock_bh(lock)			__UNLOCK_BH(lock)
+#define _spin_unlock_irq(lock)			__UNLOCK_IRQ(lock)
+#define _read_unlock_irq(lock)			__UNLOCK_IRQ(lock)
+#define _write_unlock_irq(lock)			__UNLOCK_IRQ(lock)
+#define _spin_unlock_irqrestore(lock, flags)	__UNLOCK_IRQRESTORE(lock, flags)
+#define _read_unlock_irqrestore(lock, flags)	__UNLOCK_IRQRESTORE(lock, flags)
+#define _write_unlock_irqrestore(lock, flags)	__UNLOCK_IRQRESTORE(lock, flags)
+
+#endif /* __LINUX_SPINLOCK_API_UP_H */
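On UP-nondebug, then, a lock/unlock pair compiles down to nothing but preempt and irq bookkeeping. A sketch of what a typical sequence reduces to under the macros above (the expansion shown in the comment is approximate; the lock name is hypothetical):

    static DEFINE_SPINLOCK(lock);       /* hypothetical */

    static void up_critical_section(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&lock, flags);
            /* critical section: protected only by irqs-off + preempt-off */
            spin_unlock_irqrestore(&lock, flags);

            /*
             * Roughly equivalent, after macro expansion, to:
             *
             *      local_irq_save(flags);
             *      preempt_disable();
             *      ...
             *      local_irq_restore(flags);
             *      preempt_enable();
             *
             * plus the __acquire()/__release() checker annotations and
             * the (void)(lock) references that silence unused-variable
             * warnings.
             */
    }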
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
new file mode 100644
index 000000000000..9cb51e070390
--- /dev/null
+++ b/include/linux/spinlock_types.h
@@ -0,0 +1,67 @@
+#ifndef __LINUX_SPINLOCK_TYPES_H
+#define __LINUX_SPINLOCK_TYPES_H
+
+/*
+ * include/linux/spinlock_types.h - generic spinlock type definitions
+ *                                  and initializers
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#if defined(CONFIG_SMP)
+# include <asm/spinlock_types.h>
+#else
+# include <linux/spinlock_types_up.h>
+#endif
+
+typedef struct {
+	raw_spinlock_t raw_lock;
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
+	unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+	unsigned int magic, owner_cpu;
+	void *owner;
+#endif
+} spinlock_t;
+
+#define SPINLOCK_MAGIC		0xdead4ead
+
+typedef struct {
+	raw_rwlock_t raw_lock;
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
+	unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+	unsigned int magic, owner_cpu;
+	void *owner;
+#endif
+} rwlock_t;
+
+#define RWLOCK_MAGIC		0xdeaf1eed
+
+#define SPINLOCK_OWNER_INIT	((void *)-1L)
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_LOCK_UNLOCKED \
+	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED,	\
+				.magic = SPINLOCK_MAGIC,		\
+				.owner = SPINLOCK_OWNER_INIT,		\
+				.owner_cpu = -1 }
+#define RW_LOCK_UNLOCKED \
+	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED,	\
+				.magic = RWLOCK_MAGIC,			\
+				.owner = SPINLOCK_OWNER_INIT,		\
+				.owner_cpu = -1 }
+#else
+# define SPIN_LOCK_UNLOCKED \
+	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED }
+#define RW_LOCK_UNLOCKED \
+	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED }
+#endif
+
+#define DEFINE_SPINLOCK(x)	spinlock_t x = SPIN_LOCK_UNLOCKED
+#define DEFINE_RWLOCK(x)	rwlock_t x = RW_LOCK_UNLOCKED
+
+#endif /* __LINUX_SPINLOCK_TYPES_H */
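With the initializers now centralized in this header, locks can be defined statically with DEFINE_SPINLOCK()/DEFINE_RWLOCK() or set up at runtime by assigning the compound-literal initializers. A short sketch (struct my_table and its functions are hypothetical):

    static DEFINE_SPINLOCK(global_lock);        /* static definition */

    struct my_table {                           /* hypothetical container */
            spinlock_t lock;
            struct list_head chain;
    };

    static void my_table_init(struct my_table *t)
    {
            t->lock = SPIN_LOCK_UNLOCKED;       /* runtime initialization */
            INIT_LIST_HEAD(&t->chain);
    }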
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
new file mode 100644
index 000000000000..def2d173a8db
--- /dev/null
+++ b/include/linux/spinlock_types_up.h
@@ -0,0 +1,51 @@
+#ifndef __LINUX_SPINLOCK_TYPES_UP_H
+#define __LINUX_SPINLOCK_TYPES_UP_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_types_up.h - spinlock type definitions for UP
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+
+typedef struct {
+	volatile unsigned int slock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+
+#else
+
+/*
+ * All gcc 2.95 versions and early versions of 2.96 have a nasty bug
+ * with empty initializers.
+ */
+#if (__GNUC__ > 2)
+typedef struct { } raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED { }
+#else
+typedef struct { int gcc_is_buggy; } raw_spinlock_t;
+#define __RAW_SPIN_LOCK_UNLOCKED (raw_spinlock_t) { 0 }
+#endif
+
+#endif
+
+#if (__GNUC__ > 2)
+typedef struct {
+	/* no debug version on UP */
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED { }
+#else
+typedef struct { int gcc_is_buggy; } raw_rwlock_t;
+#define __RAW_RW_LOCK_UNLOCKED (raw_rwlock_t) { 0 }
+#endif
+
+#endif /* __LINUX_SPINLOCK_TYPES_UP_H */
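The gcc_is_buggy member exists only so that pre-2.96 compilers, which mishandle empty initializers, still see a conventional one-member struct with a { 0 } initializer; on newer gcc the raw lock types are genuinely empty and, as zero-sized GNU C structs, add no storage to their containers. A sketch of the size effect (struct inode_like is hypothetical, and the zero-sized-struct behavior is a GNU C extension):

    typedef struct { } raw_spinlock_t;  /* UP-nondebug, gcc > 2 */

    struct inode_like {                 /* hypothetical container */
            unsigned long ino;
            raw_spinlock_t lock;        /* zero-sized: costs no space */
    };

    /* Under GNU C: sizeof(struct inode_like) == sizeof(unsigned long). */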
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
new file mode 100644
index 000000000000..31accf2f0b13
--- /dev/null
+++ b/include/linux/spinlock_up.h
@@ -0,0 +1,74 @@
+#ifndef __LINUX_SPINLOCK_UP_H
+#define __LINUX_SPINLOCK_UP_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_up.h - UP-debug version of spinlocks.
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ *
+ * In the debug case, 1 means unlocked, 0 means locked. (the values
+ * are inverted, to catch initialization bugs)
+ *
+ * No atomicity anywhere, we are on UP.
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+
+#define __raw_spin_is_locked(x)		((x)->slock == 0)
+
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+	lock->slock = 0;
+}
+
+static inline void
+__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+{
+	local_irq_save(flags);
+	lock->slock = 0;
+}
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+	char oldval = lock->slock;
+
+	lock->slock = 0;
+
+	return oldval > 0;
+}
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+	lock->slock = 1;
+}
+
+/*
+ * Read-write spinlocks. No debug version.
+ */
+#define __raw_read_lock(lock)		do { (void)(lock); } while (0)
+#define __raw_write_lock(lock)		do { (void)(lock); } while (0)
+#define __raw_read_trylock(lock)	({ (void)(lock); 1; })
+#define __raw_write_trylock(lock)	({ (void)(lock); 1; })
+#define __raw_read_unlock(lock)		do { (void)(lock); } while (0)
+#define __raw_write_unlock(lock)	do { (void)(lock); } while (0)
+
+#else /* DEBUG_SPINLOCK */
+#define __raw_spin_is_locked(lock)	((void)(lock), 0)
+/* for sched.c and kernel_lock.c: */
+# define __raw_spin_lock(lock)		do { (void)(lock); } while (0)
+# define __raw_spin_unlock(lock)	do { (void)(lock); } while (0)
+# define __raw_spin_trylock(lock)	({ (void)(lock); 1; })
+#endif /* DEBUG_SPINLOCK */
+
+#define __raw_read_can_lock(lock)	(((void)(lock), 1))
+#define __raw_write_can_lock(lock)	(((void)(lock), 1))
+
+#define __raw_spin_unlock_wait(lock) \
+		do { cpu_relax(); } while (__raw_spin_is_locked(lock))
+
+#endif /* __LINUX_SPINLOCK_UP_H */
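Because the debug convention inverts the values (1 == unlocked), a lock that was never initialized, for instance one sitting in memset()-zeroed memory, already looks locked: __raw_spin_trylock() returns oldval > 0, which fails on a zeroed slock, so the debug machinery gets a chance to complain instead of silently succeeding. A sketch of the effect, assuming the UP-debug definitions above:

    raw_spinlock_t good = (raw_spinlock_t) { 1 };  /* __RAW_SPIN_LOCK_UNLOCKED */
    raw_spinlock_t bad;

    memset(&bad, 0, sizeof(bad));       /* initializer forgotten */

    __raw_spin_trylock(&good);          /* 1: oldval was > 0, lock taken */
    __raw_spin_trylock(&bad);           /* 0: the zeroed lock looks held */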
diff --git a/kernel/Makefile b/kernel/Makefile
index 8d57a2f1226b..ff4dc02ce170 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -12,6 +12,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
 obj-$(CONFIG_FUTEX) += futex.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 obj-$(CONFIG_SMP) += cpu.o spinlock.o
+obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_UID16) += uid16.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_KALLSYMS) += kallsyms.o
diff --git a/kernel/sched.c b/kernel/sched.c
index 2632b812cf24..15db82116e19 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1511,6 +1511,10 @@ static inline void finish_task_switch(runqueue_t *rq, task_t *prev)
 	 * Manfred Spraul <manfred@colorfullife.com>
 	 */
 	prev_task_flags = prev->flags;
+#ifdef CONFIG_DEBUG_SPINLOCK
+	/* this is a valid case when another task releases the spinlock */
+	rq->lock.owner = current;
+#endif
 	finish_arch_switch(prev);
 	finish_lock_switch(rq, prev);
 	if (mm)
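The owner fixup is needed because, with CONFIG_DEBUG_SPINLOCK, every unlock verifies lock->owner == current, yet the runqueue lock is acquired by the outgoing task and released by the incoming one across a context switch. Without reassigning owner, the first spin_unlock(&rq->lock) after a switch would trip the "wrong owner" check in lib/spinlock_debug.c. Schematically (a sketch of the sequence, not the real scheduler code):

    spin_lock(&rq->lock);               /* task A: debug sets owner = A */
    context_switch(rq, prev, next);     /* CPU now runs task B */
    rq->lock.owner = current;           /* fixup: owner is B, not A */
    spin_unlock(&rq->lock);             /* owner == current check passes */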
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 0c3f9d8bbe17..0375fcd5921d 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -3,7 +3,10 @@
  *
  * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
  *
- * Copyright (2004) Ingo Molnar
+ * Copyright (2004, 2005) Ingo Molnar
+ *
+ * This file contains the spinlock/rwlock implementations for the
+ * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
  */
 
 #include <linux/config.h>
@@ -17,12 +20,12 @@
  * Generic declaration of the raw read_trylock() function,
  * architectures are supposed to optimize this:
  */
-int __lockfunc generic_raw_read_trylock(rwlock_t *lock)
+int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock)
 {
-	_raw_read_lock(lock);
+	__raw_read_lock(lock);
 	return 1;
 }
-EXPORT_SYMBOL(generic_raw_read_trylock);
+EXPORT_SYMBOL(generic__raw_read_trylock);
 
 int __lockfunc _spin_trylock(spinlock_t *lock)
 {
@@ -57,7 +60,7 @@ int __lockfunc _write_trylock(rwlock_t *lock)
 }
 EXPORT_SYMBOL(_write_trylock);
 
-#ifndef CONFIG_PREEMPT
+#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
 
 void __lockfunc _read_lock(rwlock_t *lock)
 {
@@ -72,7 +75,7 @@ unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
 
 	local_irq_save(flags);
 	preempt_disable();
-	_raw_spin_lock_flags(lock, flags);
+	_raw_spin_lock_flags(lock, &flags);
 	return flags;
 }
 EXPORT_SYMBOL(_spin_lock_irqsave);
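The switch from passing flags by value to passing &flags gives _raw_spin_lock_flags() a way to re-enable interrupts from the caller's saved state while it spins, then disable them again before retrying, trimming irq-off latency on contended locks. A sketch of the shape an architecture's implementation can take (the __lock_try()/__lock_wait() helpers are hypothetical placeholders, not real kernel APIs):

    void _raw_spin_lock_flags(spinlock_t *lock, unsigned long *flags)
    {
            while (!__lock_try(lock)) {
                    local_irq_restore(*flags);  /* let pending irqs in */
                    __lock_wait(lock);          /* e.g. cpu_relax() until free */
                    local_irq_disable();        /* irqs off for the retry */
            }
    }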
diff --git a/lib/Makefile b/lib/Makefile
index d9c38ba05e7b..44a46750690a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -16,6 +16,7 @@ CFLAGS_kobject.o += -DDEBUG
 CFLAGS_kobject_uevent.o += -DDEBUG
 endif
 
+obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 6658d81e1836..2377af057d09 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -25,8 +25,6 @@
  * this is trivially done efficiently using a load-locked
  * store-conditional approach, for example.
  */
-
-#ifndef ATOMIC_DEC_AND_LOCK
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
 	spin_lock(lock);
@@ -37,4 +35,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
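The canonical caller of _atomic_dec_and_lock(), via the atomic_dec_and_lock() wrapper, is a release path that must take a list lock only when the final reference drops. A sketch with a hypothetical refcounted object (struct obj, obj_put):

    struct obj {                        /* hypothetical object */
            atomic_t refcount;
            struct list_head node;
    };

    static DEFINE_SPINLOCK(obj_list_lock);

    static void obj_put(struct obj *o)
    {
            /* Returns 1 with the lock held only when the count hit zero. */
            if (atomic_dec_and_lock(&o->refcount, &obj_list_lock)) {
                    list_del(&o->node);
                    spin_unlock(&obj_list_lock);
                    kfree(o);
            }
    }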
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index bd2bc5d887b8..cb5490ec00f2 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -177,8 +177,7 @@ static inline void __lock_kernel(void)
 
 static inline void __unlock_kernel(void)
 {
-	_raw_spin_unlock(&kernel_flag);
-	preempt_enable();
+	spin_unlock(&kernel_flag);
 }
 
 /*
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
new file mode 100644
index 000000000000..906ad101eab3
--- /dev/null
+++ b/lib/spinlock_debug.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ *
+ * This file contains the spinlock/rwlock implementations for
+ * DEBUG_SPINLOCK.
+ */
+
+#include <linux/config.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+static void spin_bug(spinlock_t *lock, const char *msg)
+{
+	static long print_once = 1;
+	struct task_struct *owner = NULL;
+
+	if (xchg(&print_once, 0)) {
+		if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
+			owner = lock->owner;
+		printk("BUG: spinlock %s on CPU#%d, %s/%d\n",
+			msg, smp_processor_id(), current->comm, current->pid);
+		printk(" lock: %p, .magic: %08x, .owner: %s/%d, .owner_cpu: %d\n",
+			lock, lock->magic,
+			owner ? owner->comm : "<none>",
+			owner ? owner->pid : -1,
+			lock->owner_cpu);
+		dump_stack();
+#ifdef CONFIG_SMP
+		/*
+		 * We cannot continue on SMP:
+		 */
+//		panic("bad locking");
+#endif
+	}
+}
+
+#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
+
+static inline void debug_spin_lock_before(spinlock_t *lock)
+{
+	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
+	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
+	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
+							lock, "cpu recursion");
+}
+
+static inline void debug_spin_lock_after(spinlock_t *lock)
+{
+	lock->owner_cpu = raw_smp_processor_id();
+	lock->owner = current;
+}
+
+static inline void debug_spin_unlock(spinlock_t *lock)
+{
+	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
+	SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
+	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
+	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
+							lock, "wrong CPU");
+	lock->owner = SPINLOCK_OWNER_INIT;
+	lock->owner_cpu = -1;
+}
+
+static void __spin_lock_debug(spinlock_t *lock)
+{
+	int print_once = 1;
+	u64 i;
+
+	for (;;) {
+		for (i = 0; i < loops_per_jiffy * HZ; i++) {
+			cpu_relax();
+			if (__raw_spin_trylock(&lock->raw_lock))
+				return;
+		}
+		/* lockup suspected: */
+		if (print_once) {
+			print_once = 0;
+			printk("BUG: spinlock lockup on CPU#%d, %s/%d, %p\n",
+				smp_processor_id(), current->comm, current->pid,
+					lock);
+			dump_stack();
+		}
+	}
+}
+
+void _raw_spin_lock(spinlock_t *lock)
+{
+	debug_spin_lock_before(lock);
+	if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
+		__spin_lock_debug(lock);
+	debug_spin_lock_after(lock);
+}
+
+int _raw_spin_trylock(spinlock_t *lock)
+{
+	int ret = __raw_spin_trylock(&lock->raw_lock);
+
+	if (ret)
+		debug_spin_lock_after(lock);
+#ifndef CONFIG_SMP
+	/*
+	 * Must not happen on UP:
+	 */
+	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
+#endif
+	return ret;
+}
+
+void _raw_spin_unlock(spinlock_t *lock)
+{
+	debug_spin_unlock(lock);
+	__raw_spin_unlock(&lock->raw_lock);
+}
+
+static void rwlock_bug(rwlock_t *lock, const char *msg)
+{
+	static long print_once = 1;
+
+	if (xchg(&print_once, 0)) {
+		printk("BUG: rwlock %s on CPU#%d, %s/%d, %p\n", msg,
+			smp_processor_id(), current->comm, current->pid, lock);
+		dump_stack();
+#ifdef CONFIG_SMP
+		/*
+		 * We cannot continue on SMP:
+		 */
+		panic("bad locking");
+#endif
+	}
+}
+
+#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
+
+static void __read_lock_debug(rwlock_t *lock)
+{
+	int print_once = 1;
+	u64 i;
+
+	for (;;) {
+		for (i = 0; i < loops_per_jiffy * HZ; i++) {
+			cpu_relax();
+			if (__raw_read_trylock(&lock->raw_lock))
+				return;
+		}
+		/* lockup suspected: */
+		if (print_once) {
+			print_once = 0;
+			printk("BUG: read-lock lockup on CPU#%d, %s/%d, %p\n",
+				smp_processor_id(), current->comm, current->pid,
+					lock);
+			dump_stack();
+		}
+	}
+}
+
+void _raw_read_lock(rwlock_t *lock)
+{
+	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
+	if (unlikely(!__raw_read_trylock(&lock->raw_lock)))
+		__read_lock_debug(lock);
+}
+
+int _raw_read_trylock(rwlock_t *lock)
+{
+	int ret = __raw_read_trylock(&lock->raw_lock);
+
+#ifndef CONFIG_SMP
+	/*
+	 * Must not happen on UP:
+	 */
+	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
+#endif
+	return ret;
+}
+
+void _raw_read_unlock(rwlock_t *lock)
+{
+	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
+	__raw_read_unlock(&lock->raw_lock);
+}
+
+static inline void debug_write_lock_before(rwlock_t *lock)
+{
+	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
+	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
+	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
+							lock, "cpu recursion");
+}
+
+static inline void debug_write_lock_after(rwlock_t *lock)
+{
+	lock->owner_cpu = raw_smp_processor_id();
+	lock->owner = current;
+}
+
+static inline void debug_write_unlock(rwlock_t *lock)
+{
+	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
+	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
+	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
+							lock, "wrong CPU");
+	lock->owner = SPINLOCK_OWNER_INIT;
+	lock->owner_cpu = -1;
+}
+
+static void __write_lock_debug(rwlock_t *lock)
+{
+	int print_once = 1;
+	u64 i;
+
+	for (;;) {
+		for (i = 0; i < loops_per_jiffy * HZ; i++) {
+			cpu_relax();
+			if (__raw_write_trylock(&lock->raw_lock))
+				return;
+		}
+		/* lockup suspected: */
+		if (print_once) {
+			print_once = 0;
+			printk("BUG: write-lock lockup on CPU#%d, %s/%d, %p\n",
+				smp_processor_id(), current->comm, current->pid,
+					lock);
+			dump_stack();
+		}
+	}
+}
+
+void _raw_write_lock(rwlock_t *lock)
+{
+	debug_write_lock_before(lock);
+	if (unlikely(!__raw_write_trylock(&lock->raw_lock)))
+		__write_lock_debug(lock);
+	debug_write_lock_after(lock);
+}
+
+int _raw_write_trylock(rwlock_t *lock)
+{
+	int ret = __raw_write_trylock(&lock->raw_lock);
+
+	if (ret)
+		debug_write_lock_after(lock);
+#ifndef CONFIG_SMP
+	/*
+	 * Must not happen on UP:
+	 */
+	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
+#endif
+	return ret;
+}
+
+void _raw_write_unlock(rwlock_t *lock)
+{
+	debug_write_unlock(lock);
+	__raw_write_unlock(&lock->raw_lock);
+}
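To see what these checks buy, consider the simplest bug they catch, recursive locking on one CPU: the second acquisition hits SPIN_BUG_ON(lock->owner == current, ...) in debug_spin_lock_before() and prints a "BUG: spinlock recursion" report with owner, CPU and a stack dump, instead of hanging silently. Likewise, if a lock is simply never released, the outer loop of __spin_lock_debug() reports a lockup after loops_per_jiffy * HZ trylock attempts, on the order of a second of cpu_relax() polling. A hypothetical trigger:

    static DEFINE_SPINLOCK(my_lock);    /* hypothetical */

    static void buggy(void)
    {
            spin_lock(&my_lock);
            spin_lock(&my_lock);        /* debug build: "BUG: spinlock
                                         * recursion" + stack dump;
                                         * non-debug SMP: deadlock */
            spin_unlock(&my_lock);
            spin_unlock(&my_lock);
    }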