 arch/alpha/include/asm/spinlock_types.h    |  2 +-
 arch/arm/include/asm/spinlock_types.h      |  2 +-
 arch/blackfin/include/asm/spinlock_types.h |  2 +-
 arch/ia64/include/asm/spinlock_types.h     |  2 +-
 arch/m32r/include/asm/spinlock_types.h     |  2 +-
 arch/mips/include/asm/spinlock_types.h     |  2 +-
 arch/parisc/include/asm/spinlock_types.h   |  6 +++---
 arch/parisc/lib/bitops.c                   |  2 +-
 arch/powerpc/include/asm/spinlock_types.h  |  2 +-
 arch/powerpc/kernel/rtas.c                 |  2 +-
 arch/s390/include/asm/spinlock_types.h     |  2 +-
 arch/sh/include/asm/spinlock_types.h       |  2 +-
 arch/sparc/include/asm/spinlock_types.h    |  2 +-
 arch/x86/include/asm/spinlock_types.h      |  2 +-
 arch/x86/kernel/dumpstack.c                |  2 +-
 arch/x86/kernel/tsc_sync.c                 |  2 +-
 include/linux/spinlock_types.h             |  4 ++--
 include/linux/spinlock_types_up.h          |  4 ++--
 kernel/lockdep.c                           |  2 +-
 kernel/trace/ring_buffer.c                 |  2 +-
 kernel/trace/trace.c                       | 10 +++++-----
 kernel/trace/trace_clock.c                 |  2 +-
 kernel/trace/trace_sched_wakeup.c          |  2 +-
 kernel/trace/trace_stack.c                 |  2 +-
 lib/spinlock_debug.c                       |  2 +-
 25 files changed, 33 insertions(+), 33 deletions(-)
diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h
index bb94a51e53d2..08975ee0a100 100644
--- a/arch/alpha/include/asm/spinlock_types.h
+++ b/arch/alpha/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
 	volatile unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int lock;
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index 5e9d3eadd167..9622e126a8de 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
 	volatile unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int lock;
diff --git a/arch/blackfin/include/asm/spinlock_types.h b/arch/blackfin/include/asm/spinlock_types.h
index 03b377abf5c0..c8a3928a58c5 100644
--- a/arch/blackfin/include/asm/spinlock_types.h
+++ b/arch/blackfin/include/asm/spinlock_types.h
@@ -17,7 +17,7 @@ typedef struct {
 	volatile unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int lock;
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index 447ccc6ca7a8..6a11b65fa66d 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
 	volatile unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int read_counter : 31;
diff --git a/arch/m32r/include/asm/spinlock_types.h b/arch/m32r/include/asm/spinlock_types.h
index 17d15bd6322d..5873a8701107 100644
--- a/arch/m32r/include/asm/spinlock_types.h
+++ b/arch/m32r/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
 	volatile int slock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 1 }
 
 typedef struct {
 	volatile int lock;
diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h
index 2e1060892d3b..b4c5efaadb9c 100644
--- a/arch/mips/include/asm/spinlock_types.h
+++ b/arch/mips/include/asm/spinlock_types.h
@@ -14,7 +14,7 @@ typedef struct {
 	unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int lock;
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
index 735caafb81f5..396d2746ca57 100644
--- a/arch/parisc/include/asm/spinlock_types.h
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -4,10 +4,10 @@
 typedef struct {
 #ifdef CONFIG_PA20
 	volatile unsigned int slock;
-# define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
 #else
 	volatile unsigned int lock[4];
-# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
+# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
 #endif
 } arch_spinlock_t;
 
@@ -16,6 +16,6 @@ typedef struct {
 	volatile int counter;
 } raw_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED	{ __RAW_SPIN_LOCK_UNLOCKED, 0 }
+#define __RAW_RW_LOCK_UNLOCKED	{ __ARCH_SPIN_LOCK_UNLOCKED, 0 }
 
 #endif
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index fdd7f583de54..353963d42059 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -13,7 +13,7 @@
 
 #ifdef CONFIG_SMP
 arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
-	[0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED
+	[0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED
 };
 #endif
 
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h
index 4312e5baaf88..f5f39d82711f 100644
--- a/arch/powerpc/include/asm/spinlock_types.h
+++ b/arch/powerpc/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
 	volatile unsigned int slock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile signed int lock;
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 579069c12152..57dfa414cfb8 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -42,7 +42,7 @@
 #include <asm/mmu.h>
 
 struct rtas_t rtas = {
-	.lock = __RAW_SPIN_LOCK_UNLOCKED
+	.lock = __ARCH_SPIN_LOCK_UNLOCKED
 };
 EXPORT_SYMBOL(rtas);
 
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index a93638eee3f7..e25c0370f6cd 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
 	volatile unsigned int owner_cpu;
 } __attribute__ ((aligned (4))) arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int lock;
diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h
index 37712c32ba99..a3be2db960ed 100644
--- a/arch/sh/include/asm/spinlock_types.h
+++ b/arch/sh/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
 	volatile unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 1 }
 
 typedef struct {
 	volatile unsigned int lock;
diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h
index 41d9a8fec13d..c145e63a5d66 100644
--- a/arch/sparc/include/asm/spinlock_types.h
+++ b/arch/sparc/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct {
 	volatile unsigned char lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int lock;
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 2ae7637ed524..696f8364a4f3 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -9,7 +9,7 @@ typedef struct arch_spinlock {
 	unsigned int slock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	unsigned int lock;
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 0862d9d89c92..5b75afac8a38 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -188,7 +188,7 @@ void dump_stack(void)
 }
 EXPORT_SYMBOL(dump_stack);
 
-static arch_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 static int die_owner = -1;
 static unsigned int die_nest_count;
 
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 9f908b9d1abe..f1714697a09a 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count;
  * we want to have the fastest, inlined, non-debug version
  * of a critical section, to be able to prove TSC time-warps:
  */
-static __cpuinitdata arch_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
 static __cpuinitdata cycles_t last_tsc;
 static __cpuinitdata cycles_t max_warp;
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index d4af2d7a86ea..7dadce303ebf 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -43,14 +43,14 @@ typedef struct {
 
 #ifdef CONFIG_DEBUG_SPINLOCK
 # define __SPIN_LOCK_UNLOCKED(lockname)				\
-	(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED,	\
+	(spinlock_t) { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,	\
 			.magic = SPINLOCK_MAGIC,		\
 			.owner = SPINLOCK_OWNER_INIT,		\
 			.owner_cpu = -1,			\
 			SPIN_DEP_MAP_INIT(lockname) }
 #else
 # define __SPIN_LOCK_UNLOCKED(lockname)				\
-	(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED,	\
+	(spinlock_t) { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,	\
 			SPIN_DEP_MAP_INIT(lockname) }
 #endif
 
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 34d36691c4ec..10db021f4875 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -18,13 +18,13 @@ typedef struct {
 	volatile unsigned int slock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
 
 #else
 
 typedef struct { } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { }
+#define __ARCH_SPIN_LOCK_UNLOCKED { }
 
 #endif
 
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 7cc50c62af59..2389e3f85cf6 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -73,7 +73,7 @@ module_param(lock_stat, int, 0644);
  * to use a raw spinlock - we really dont want the spinlock
  * code to recurse back into the lockdep code...
  */
-static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static int graph_lock(void)
 {
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 5ac8ee0a9e35..fb7a0fa508b9 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	cpu_buffer->buffer = buffer;
 	spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
-	cpu_buffer->lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 7d56cecc2c6e..63bc1cc38219 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -501,7 +501,7 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
  * CONFIG_TRACER_MAX_TRACE.
  */
 static arch_spinlock_t ftrace_max_lock =
-	(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly tracing_max_latency;
@@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static arch_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -1252,7 +1252,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
 	static arch_spinlock_t trace_buf_lock =
-		(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	static u32 trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_bprint;
@@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
 			unsigned long ip, const char *fmt, va_list args)
 {
-	static arch_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_print;
@@ -4308,7 +4308,7 @@ trace_printk_seq(struct trace_seq *s)
 static void __ftrace_dump(bool disable_tracing)
 {
 	static arch_spinlock_t ftrace_dump_lock =
-		(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
 	unsigned int old_userobj;
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 206ec3d4b3c2..433e2eda2d01 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -74,7 +74,7 @@ static struct {
 	arch_spinlock_t lock;
 } trace_clock_struct ____cacheline_aligned_in_smp =
 	{
-		.lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
+		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
 	};
 
 u64 notrace trace_clock_global(void)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 4cf7e83ec235..e347853564e9 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -29,7 +29,7 @@ static unsigned wakeup_prio = -1;
 static int wakeup_rt;
 
 static arch_spinlock_t wakeup_lock =
-	(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static void __wakeup_reset(struct trace_array *tr);
 
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 9a82d568fdec..728c35221483 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -28,7 +28,7 @@ static struct stack_trace max_stack_trace = {
 
 static unsigned long max_stack_size;
 static arch_spinlock_t max_stack_lock =
-	(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static int stack_trace_disabled __read_mostly;
 static DEFINE_PER_CPU(int, trace_active);
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 2acd501b3826..f73004137141 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -23,7 +23,7 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
-	lock->raw_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	lock->magic = SPINLOCK_MAGIC;
 	lock->owner = SPINLOCK_OWNER_INIT;
 	lock->owner_cpu = -1;