author    Linus Torvalds <torvalds@linux-foundation.org>  2009-12-15 12:02:01 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-12-15 12:02:01 -0500
commit    8f0ddf91f2aeb09602373e400cf8b403e9017210
tree      b907c35c79caadafff6ad46a91614e30afd2f967  /arch/sparc
parent    050cbb09dac0402672edeaeac06094ef8ff1749a
parent    b5f91da0a6973bb6f9ff3b91b0e92c0773a458f3
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (26 commits)
clockevents: Convert to raw_spinlock
clockevents: Make tick_device_lock static
debugobjects: Convert to raw_spinlocks
perf_event: Convert to raw_spinlock
hrtimers: Convert to raw_spinlocks
genirq: Convert irq_desc.lock to raw_spinlock
smp: Convert smplocks to raw_spinlocks
rtmutes: Convert rtmutex.lock to raw_spinlock
sched: Convert pi_lock to raw_spinlock
sched: Convert cpupri lock to raw_spinlock
sched: Convert rt_runtime_lock to raw_spinlock
sched: Convert rq->lock to raw_spinlock
plist: Make plist debugging raw_spinlock aware
bkl: Fixup core_lock fallout
locking: Cleanup the name space completely
locking: Further name space cleanups
alpha: Fix fallout from locking changes
locking: Implement new raw_spinlock
locking: Convert raw_rwlock functions to arch_rwlock
locking: Convert raw_rwlock to arch_rwlock
...
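
Taken together, the series is a mechanical namespace split: the architecture-level raw_spinlock_t/raw_rwlock_t types and their __raw_* operations become arch_spinlock_t/arch_rwlock_t and arch_*, freeing the raw_* names for a new generic layer that remains a true spinning lock under preempt-rt. A rough sketch of the resulting layering, simplified from the generic spinlock headers of this period (lockdep and debug fields omitted, so not verbatim kernel code):

/* Each architecture (sparc below) supplies arch_spinlock_t plus the
 * arch_spin_*() operations; the core kernel wraps them. Simplified sketch. */
typedef struct raw_spinlock {
        arch_spinlock_t raw_lock;       /* e.g. sparc32's single lock byte */
} raw_spinlock_t;

static inline void raw_spin_lock(raw_spinlock_t *lock)
{
        preempt_disable();                /* generic policy lives here ...  */
        arch_spin_lock(&lock->raw_lock);  /* ... the spin itself is per-arch */
}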
Diffstat (limited to 'arch/sparc')

 arch/sparc/include/asm/spinlock_32.h    | 62
 arch/sparc/include/asm/spinlock_64.h    | 54
 arch/sparc/include/asm/spinlock_types.h |  8
 arch/sparc/kernel/irq_64.c              |  8

 4 files changed, 66 insertions(+), 66 deletions(-)
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 857630cff636..7f9b9dba38a6 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -10,12 +10,12 @@
 
 #include <asm/psr.h>
 
-#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
+#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
 
-#define __raw_spin_unlock_wait(lock) \
-do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_unlock_wait(lock) \
+do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
         __asm__ __volatile__(
         "\n1:\n\t"
@@ -35,7 +35,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
         : "g2", "memory", "cc");
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
         unsigned int result;
         __asm__ __volatile__("ldstub [%1], %0"
@@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
         return (result == 0);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
         __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
 }
@@ -65,7 +65,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  * Sort of like atomic_t's on Sparc, but even more clever.
  *
  *      ------------------------------------
- *      | 24-bit counter           | wlock |  raw_rwlock_t
+ *      | 24-bit counter           | wlock |  arch_rwlock_t
  *      ------------------------------------
  *       31                       8 7      0
  *
@@ -76,9 +76,9 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  *
  * Unfortunately this scheme limits us to ~16,000,000 cpus.
  */
-static inline void arch_read_lock(raw_rwlock_t *rw)
+static inline void __arch_read_lock(arch_rwlock_t *rw)
 {
-        register raw_rwlock_t *lp asm("g1");
+        register arch_rwlock_t *lp asm("g1");
         lp = rw;
         __asm__ __volatile__(
         "mov    %%o7, %%g4\n\t"
@@ -89,16 +89,16 @@ static inline void arch_read_lock(raw_rwlock_t *rw)
         : "g2", "g4", "memory", "cc");
 }
 
-#define __raw_read_lock(lock) \
+#define arch_read_lock(lock) \
 do {    unsigned long flags; \
         local_irq_save(flags); \
-        arch_read_lock(lock); \
+        __arch_read_lock(lock); \
         local_irq_restore(flags); \
 } while(0)
 
-static inline void arch_read_unlock(raw_rwlock_t *rw)
+static inline void __arch_read_unlock(arch_rwlock_t *rw)
 {
-        register raw_rwlock_t *lp asm("g1");
+        register arch_rwlock_t *lp asm("g1");
         lp = rw;
         __asm__ __volatile__(
         "mov    %%o7, %%g4\n\t"
@@ -109,16 +109,16 @@ static inline void arch_read_unlock(raw_rwlock_t *rw)
         : "g2", "g4", "memory", "cc");
 }
 
-#define __raw_read_unlock(lock) \
+#define arch_read_unlock(lock) \
 do {    unsigned long flags; \
         local_irq_save(flags); \
-        arch_read_unlock(lock); \
+        __arch_read_unlock(lock); \
         local_irq_restore(flags); \
 } while(0)
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-        register raw_rwlock_t *lp asm("g1");
+        register arch_rwlock_t *lp asm("g1");
         lp = rw;
         __asm__ __volatile__(
         "mov    %%o7, %%g4\n\t"
@@ -130,7 +130,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
         *(volatile __u32 *)&lp->lock = ~0U;
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
         unsigned int val;
 
@@ -150,9 +150,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
         return (val == 0);
 }
 
-static inline int arch_read_trylock(raw_rwlock_t *rw)
+static inline int __arch_read_trylock(arch_rwlock_t *rw)
 {
-        register raw_rwlock_t *lp asm("g1");
+        register arch_rwlock_t *lp asm("g1");
         register int res asm("o0");
         lp = rw;
         __asm__ __volatile__(
@@ -165,27 +165,27 @@ static inline int arch_read_trylock(raw_rwlock_t *rw)
         return res;
 }
 
-#define __raw_read_trylock(lock) \
+#define arch_read_trylock(lock) \
 ({      unsigned long flags; \
         int res; \
         local_irq_save(flags); \
-        res = arch_read_trylock(lock); \
+        res = __arch_read_trylock(lock); \
         local_irq_restore(flags); \
         res; \
 })
 
-#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
+#define arch_write_unlock(rw)  do { (rw)->lock = 0; } while(0)
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_read_lock_flags(rw, flags)   __raw_read_lock(rw)
-#define __raw_write_lock_flags(rw, flags)  __raw_write_lock(rw)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_read_lock_flags(rw, flags)   arch_read_lock(rw)
+#define arch_write_lock_flags(rw, flags)  arch_write_lock(rw)
 
-#define _raw_spin_relax(lock)   cpu_relax()
-#define _raw_read_relax(lock)   cpu_relax()
-#define _raw_write_relax(lock)  cpu_relax()
+#define arch_spin_relax(lock)   cpu_relax()
+#define arch_read_relax(lock)   cpu_relax()
+#define arch_write_relax(lock)  cpu_relax()
 
-#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
+#define arch_write_can_lock(rw) (!(rw)->lock)
 
 #endif /* !(__ASSEMBLY__) */
 
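The sparc32 lock word is a single byte manipulated with ldstub, the atomic load-store-unsigned-byte instruction: it fetches the old byte and stores 0xff in one step, so a zero result means the lock was free. For readers without sparc assembly, here is a hypothetical userspace analogue of the same protocol built on the GCC/Clang __atomic builtins (the demo_* names are illustrative, not kernel API):

#include <stdbool.h>

typedef struct { volatile unsigned char lock; } demo_spinlock_t;

static inline bool demo_spin_trylock(demo_spinlock_t *lp)
{
        /* Like ldstub: atomically set the byte, return its old state. */
        return !__atomic_test_and_set(&lp->lock, __ATOMIC_ACQUIRE);
}

static inline void demo_spin_lock(demo_spinlock_t *lp)
{
        while (!demo_spin_trylock(lp))
                ;       /* spin; the kernel loop also calls cpu_relax() */
}

static inline void demo_spin_unlock(demo_spinlock_t *lp)
{
        /* The kernel's "stb %%g0, [lock]": store zero, release ordering. */
        __atomic_clear(&lp->lock, __ATOMIC_RELEASE);
}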
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 43e514783582..073936a8b275 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -21,13 +21,13 @@
  * the spinner sections must be pre-V9 branches.
  */
 
-#define __raw_spin_is_locked(lp)       ((lp)->lock != 0)
+#define arch_spin_is_locked(lp)        ((lp)->lock != 0)
 
-#define __raw_spin_unlock_wait(lp)     \
+#define arch_spin_unlock_wait(lp)      \
         do {    rmb();                 \
         } while((lp)->lock)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
         unsigned long tmp;
 
@@ -46,7 +46,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
         : "memory");
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
         unsigned long result;
 
@@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
         return (result == 0UL);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
         __asm__ __volatile__(
 "       stb             %%g0, [%0]"
@@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
         : "memory");
 }
 
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
         unsigned long tmp1, tmp2;
 
@@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
 
 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
 
-static void inline arch_read_lock(raw_rwlock_t *lock)
+static void inline arch_read_lock(arch_rwlock_t *lock)
 {
         unsigned long tmp1, tmp2;
 
@@ -115,7 +115,7 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
         : "memory");
 }
 
-static int inline arch_read_trylock(raw_rwlock_t *lock)
+static int inline arch_read_trylock(arch_rwlock_t *lock)
 {
         int tmp1, tmp2;
 
@@ -136,7 +136,7 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
         return tmp1;
 }
 
-static void inline arch_read_unlock(raw_rwlock_t *lock)
+static void inline arch_read_unlock(arch_rwlock_t *lock)
 {
         unsigned long tmp1, tmp2;
 
@@ -152,7 +152,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
         : "memory");
 }
 
-static void inline arch_write_lock(raw_rwlock_t *lock)
+static void inline arch_write_lock(arch_rwlock_t *lock)
 {
         unsigned long mask, tmp1, tmp2;
 
@@ -177,7 +177,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
         : "memory");
 }
 
-static void inline arch_write_unlock(raw_rwlock_t *lock)
+static void inline arch_write_unlock(arch_rwlock_t *lock)
 {
         __asm__ __volatile__(
 "       stw             %%g0, [%0]"
@@ -186,7 +186,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
         : "memory");
 }
 
-static int inline arch_write_trylock(raw_rwlock_t *lock)
+static int inline arch_write_trylock(arch_rwlock_t *lock)
 {
         unsigned long mask, tmp1, tmp2, result;
 
@@ -210,21 +210,21 @@ static int inline arch_write_trylock(raw_rwlock_t *lock)
         return result;
 }
 
-#define __raw_read_lock(p)      arch_read_lock(p)
-#define __raw_read_lock_flags(p, f) arch_read_lock(p)
-#define __raw_read_trylock(p)   arch_read_trylock(p)
-#define __raw_read_unlock(p)    arch_read_unlock(p)
-#define __raw_write_lock(p)     arch_write_lock(p)
-#define __raw_write_lock_flags(p, f) arch_write_lock(p)
-#define __raw_write_unlock(p)   arch_write_unlock(p)
-#define __raw_write_trylock(p)  arch_write_trylock(p)
+#define arch_read_lock(p)       arch_read_lock(p)
+#define arch_read_lock_flags(p, f) arch_read_lock(p)
+#define arch_read_trylock(p)    arch_read_trylock(p)
+#define arch_read_unlock(p)     arch_read_unlock(p)
+#define arch_write_lock(p)      arch_write_lock(p)
+#define arch_write_lock_flags(p, f) arch_write_lock(p)
+#define arch_write_unlock(p)    arch_write_unlock(p)
+#define arch_write_trylock(p)   arch_write_trylock(p)
 
-#define __raw_read_can_lock(rw)        (!((rw)->lock & 0x80000000UL))
-#define __raw_write_can_lock(rw)       (!(rw)->lock)
+#define arch_read_can_lock(rw)         (!((rw)->lock & 0x80000000UL))
+#define arch_write_can_lock(rw)        (!(rw)->lock)
 
-#define _raw_spin_relax(lock)   cpu_relax()
-#define _raw_read_relax(lock)   cpu_relax()
-#define _raw_write_relax(lock)  cpu_relax()
+#define arch_spin_relax(lock)   cpu_relax()
+#define arch_read_relax(lock)   cpu_relax()
+#define arch_write_relax(lock)  cpu_relax()
 
 #endif /* !(__ASSEMBLY__) */
 
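The sparc64 rwlock packs a reader count into the low bits and gives a writer bit 31, which is what the arch_read_can_lock()/arch_write_can_lock() tests above decode (0x80000000UL is the writer bit). A portable compare-and-swap sketch of the reader fast path, using __atomic builtins as a stand-in for the load/cas assembly this header actually uses (an approximation, not the kernel code):

#include <stdbool.h>
#include <stdint.h>

#define DEMO_WRITER_BIT 0x80000000u

static inline bool demo_read_trylock(volatile uint32_t *lock)
{
        uint32_t old = __atomic_load_n(lock, __ATOMIC_RELAXED);

        if (old & DEMO_WRITER_BIT)      /* a writer holds it: fail */
                return false;
        /* Try to bump the reader count; fails if the word changed. */
        return __atomic_compare_exchange_n(lock, &old, old + 1, false,
                                           __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static inline void demo_read_unlock(volatile uint32_t *lock)
{
        __atomic_fetch_sub(lock, 1, __ATOMIC_RELEASE);  /* drop one reader */
}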
diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h
index 37cbe01c585b..9c454fdeaad8 100644
--- a/arch/sparc/include/asm/spinlock_types.h
+++ b/arch/sparc/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
 
 typedef struct {
         volatile unsigned char lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED        { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED       { 0 }
 
 typedef struct {
         volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED          { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED         { 0 }
 
 #endif
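Note that both renamed initializers are still { 0 }: a zeroed word means "unlocked" for both lock flavors, consistent with the can_lock tests in the headers above. Minimal usage with the new names (the demo_* identifiers are hypothetical):

static arch_spinlock_t demo_lock   = __ARCH_SPIN_LOCK_UNLOCKED;
static arch_rwlock_t   demo_rwlock = __ARCH_RW_LOCK_UNLOCKED;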
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index ce996f97855f..8d6882bb480a 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -176,7 +176,7 @@ int show_interrupts(struct seq_file *p, void *v)
         }
 
         if (i < NR_IRQS) {
-                spin_lock_irqsave(&irq_desc[i].lock, flags);
+                raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
                 action = irq_desc[i].action;
                 if (!action)
                         goto skip;
@@ -195,7 +195,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
                 seq_putc(p, '\n');
 skip:
-                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+                raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
         } else if (i == NR_IRQS) {
                 seq_printf(p, "NMI: ");
                 for_each_online_cpu(j)
@@ -785,14 +785,14 @@ void fixup_irqs(void)
         for (irq = 0; irq < NR_IRQS; irq++) {
                 unsigned long flags;
 
-                spin_lock_irqsave(&irq_desc[irq].lock, flags);
+                raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
                 if (irq_desc[irq].action &&
                     !(irq_desc[irq].status & IRQ_PER_CPU)) {
                         if (irq_desc[irq].chip->set_affinity)
                                 irq_desc[irq].chip->set_affinity(irq,
                                         irq_desc[irq].affinity);
                 }
-                spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+                raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
         }
 
         tick_ops->disable_irq();
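
These irq_64.c hunks show the caller side of the genirq conversion: once irq_desc.lock becomes a raw_spinlock_t, every acquisition must switch to the raw_ variant of the locking API. The pattern, sketched with a hypothetical desc pointer standing in for the irq_desc[] entries above:

        unsigned long flags;

        raw_spin_lock_irqsave(&desc->lock, flags);      /* raw_ API now */
        /* ... inspect desc->action and desc->status ... */
        raw_spin_unlock_irqrestore(&desc->lock, flags);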