Diffstat (limited to 'arch/mips')

-rw-r--r--  arch/mips/include/asm/spinlock.h        | 78
-rw-r--r--  arch/mips/include/asm/spinlock_types.h  |  8
-rw-r--r--  arch/mips/kernel/irq.c                  |  4
-rw-r--r--  arch/mips/vr41xx/common/icu.c           | 92

4 files changed, 91 insertions(+), 91 deletions(-)
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 5b60a09a0f08..21ef9efbde43 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -34,33 +34,33 @@
  * becomes equal to the the initial value of the tail.
  */
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	unsigned int counters = ACCESS_ONCE(lock->lock);
 
 	return ((counters >> 14) ^ counters) & 0x1fff;
 }
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_unlock_wait(x) \
-	while (__raw_spin_is_locked(x)) { cpu_relax(); }
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+	while (arch_spin_is_locked(x)) { cpu_relax(); }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	unsigned int counters = ACCESS_ONCE(lock->lock);
 
 	return (((counters >> 14) - counters) & 0x1fff) > 1;
 }
-#define __raw_spin_is_contended	__raw_spin_is_contended
+#define arch_spin_is_contended	arch_spin_is_contended
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	int my_ticket;
 	int tmp;
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__ (
-		"	.set push		# __raw_spin_lock	\n"
+		"	.set push		# arch_spin_lock	\n"
 		"	.set noreorder					\n"
 		"							\n"
 		"1:	ll	%[ticket], %[ticket_ptr]		\n"
@@ -94,7 +94,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 		[my_ticket] "=&r" (my_ticket));
 	} else {
 		__asm__ __volatile__ (
-		"	.set push		# __raw_spin_lock	\n"
+		"	.set push		# arch_spin_lock	\n"
 		"	.set noreorder					\n"
 		"							\n"
 		"	ll	%[ticket], %[ticket_ptr]		\n"
@@ -134,7 +134,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	smp_llsc_mb();
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	int tmp;
 
@@ -142,7 +142,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__ (
-		"				# __raw_spin_unlock	\n"
+		"				# arch_spin_unlock	\n"
 		"1:	ll	%[ticket], %[ticket_ptr]		\n"
 		"	addiu	%[ticket], %[ticket], 1		\n"
 		"	ori	%[ticket], %[ticket], 0x2000	\n"
@@ -153,7 +153,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 		[ticket] "=&r" (tmp));
 	} else {
 		__asm__ __volatile__ (
-		"	.set push		# __raw_spin_unlock	\n"
+		"	.set push		# arch_spin_unlock	\n"
 		"	.set noreorder					\n"
 		"							\n"
 		"	ll	%[ticket], %[ticket_ptr]		\n"
@@ -174,13 +174,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 	}
 }
 
-static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp, tmp2, tmp3;
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__ (
-		"	.set push		# __raw_spin_trylock	\n"
+		"	.set push		# arch_spin_trylock	\n"
 		"	.set noreorder					\n"
 		"							\n"
 		"1:	ll	%[ticket], %[ticket_ptr]		\n"
@@ -204,7 +204,7 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
 		[now_serving] "=&r" (tmp3));
 	} else {
 		__asm__ __volatile__ (
-		"	.set push		# __raw_spin_trylock	\n"
+		"	.set push		# arch_spin_trylock	\n"
 		"	.set noreorder					\n"
 		"							\n"
 		"	ll	%[ticket], %[ticket_ptr]		\n"
@@ -248,21 +248,21 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
 */
-#define __raw_read_can_lock(rw)	((rw)->lock >= 0)
+#define arch_read_can_lock(rw)	((rw)->lock >= 0)
 
 /*
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
 */
-#define __raw_write_can_lock(rw)	(!(rw)->lock)
+#define arch_write_can_lock(rw)	(!(rw)->lock)
 
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned int tmp;
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"	.set	noreorder	# __raw_read_lock	\n"
+		"	.set	noreorder	# arch_read_lock	\n"
 		"1:	ll	%1, %2					\n"
 		"	bltz	%1, 1b					\n"
 		"	 addu	%1, 1					\n"
@@ -275,7 +275,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 		: "memory");
 	} else {
 		__asm__ __volatile__(
-		"	.set	noreorder	# __raw_read_lock	\n"
+		"	.set	noreorder	# arch_read_lock	\n"
 		"1:	ll	%1, %2					\n"
 		"	bltz	%1, 2f					\n"
 		"	 addu	%1, 1					\n"
@@ -301,7 +301,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 /* Note the use of sub, not subu which will make the kernel die with an
    overflow exception if we ever try to unlock an rwlock that is already
    unlocked or is being held by a writer. */
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned int tmp;
 
@@ -309,7 +309,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"1:	ll	%1, %2		# __raw_read_unlock	\n"
+		"1:	ll	%1, %2		# arch_read_unlock	\n"
 		"	sub	%1, 1					\n"
 		"	sc	%1, %0					\n"
 		"	beqzl	%1, 1b					\n"
@@ -318,7 +318,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 		: "memory");
 	} else {
 		__asm__ __volatile__(
-		"	.set	noreorder	# __raw_read_unlock	\n"
+		"	.set	noreorder	# arch_read_unlock	\n"
 		"1:	ll	%1, %2					\n"
 		"	sub	%1, 1					\n"
 		"	sc	%1, %0					\n"
@@ -335,13 +335,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 	}
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned int tmp;
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"	.set	noreorder	# __raw_write_lock	\n"
+		"	.set	noreorder	# arch_write_lock	\n"
 		"1:	ll	%1, %2					\n"
 		"	bnez	%1, 1b					\n"
 		"	 lui	%1, 0x8000				\n"
@@ -354,7 +354,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 		: "memory");
 	} else {
 		__asm__ __volatile__(
-		"	.set	noreorder	# __raw_write_lock	\n"
+		"	.set	noreorder	# arch_write_lock	\n"
 		"1:	ll	%1, %2					\n"
 		"	bnez	%1, 2f					\n"
 		"	 lui	%1, 0x8000				\n"
@@ -377,26 +377,26 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 	smp_llsc_mb();
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	smp_mb();
 
 	__asm__ __volatile__(
-	"				# __raw_write_unlock	\n"
+	"				# arch_write_unlock	\n"
 	"	sw	$0, %0					\n"
 	: "=m" (rw->lock)
 	: "m" (rw->lock)
 	: "memory");
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned int tmp;
 	int ret;
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"	.set	noreorder	# __raw_read_trylock	\n"
+		"	.set	noreorder	# arch_read_trylock	\n"
 		"	li	%2, 0					\n"
 		"1:	ll	%1, %3					\n"
 		"	bltz	%1, 2f					\n"
@@ -413,7 +413,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 		: "memory");
 	} else {
 		__asm__ __volatile__(
-		"	.set	noreorder	# __raw_read_trylock	\n"
+		"	.set	noreorder	# arch_read_trylock	\n"
 		"	li	%2, 0					\n"
 		"1:	ll	%1, %3					\n"
 		"	bltz	%1, 2f					\n"
@@ -433,14 +433,14 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 	return ret;
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned int tmp;
 	int ret;
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"	.set	noreorder	# __raw_write_trylock	\n"
+		"	.set	noreorder	# arch_write_trylock	\n"
 		"	li	%2, 0					\n"
 		"1:	ll	%1, %3					\n"
 		"	bnez	%1, 2f					\n"
@@ -457,7 +457,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 		: "memory");
 	} else {
 		__asm__ __volatile__(
-		"	.set	noreorder	# __raw_write_trylock	\n"
+		"	.set	noreorder	# arch_write_trylock	\n"
 		"	li	%2, 0					\n"
 		"1:	ll	%1, %3					\n"
 		"	bnez	%1, 2f					\n"
@@ -480,11 +480,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return ret;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* _ASM_SPINLOCK_H */
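
The spinlock.h hunks above are a pure rename (__raw_spin_*/raw_spinlock_t
becomes arch_spin_*/arch_spinlock_t); the ticket-lock algorithm itself is
untouched. lock->lock packs a "next ticket" tail and a "now serving" head
into one word, and arch_spin_is_locked() simply tests whether the two
counters differ. A minimal userspace sketch of the same idea, with the two
counters kept in separate words instead of packed (illustrative only, not
the kernel code):

/* C11 ticket-lock sketch. */
#include <stdatomic.h>

struct ticket_lock {
	atomic_uint next;	/* tail: next ticket to hand out */
	atomic_uint serving;	/* head: ticket currently served */
};

static void ticket_lock_acquire(struct ticket_lock *l)
{
	unsigned int me = atomic_fetch_add(&l->next, 1);

	while (atomic_load(&l->serving) != me)
		;		/* the kernel calls cpu_relax() here */
}

static void ticket_lock_release(struct ticket_lock *l)
{
	atomic_fetch_add(&l->serving, 1);
}

/* Mirrors arch_spin_is_locked(): held while head != tail. */
static int ticket_lock_is_locked(struct ticket_lock *l)
{
	return atomic_load(&l->next) != atomic_load(&l->serving);
}

The rwlock half of the file keeps its sign-bit scheme: readers increment
rw->lock, a writer stores 0x80000000 (the "lui %1, 0x8000"), so "bltz"
detects a held writer and "(rw)->lock >= 0" means read_trylock() can
succeed. The comment above arch_read_unlock() explains the deliberate
"sub" rather than "subu": a bogus unlock overflows and traps instead of
silently corrupting the reader count.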
diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h
index adeedaa116c1..ee197c2f9c98 100644
--- a/arch/mips/include/asm/spinlock_types.h
+++ b/arch/mips/include/asm/spinlock_types.h
@@ -12,14 +12,14 @@ typedef struct {
 	 * bits 15..28: ticket
 	 */
 	unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
+#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
 
 #endif
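
The type rename is what frees the old names for the core: raw_spinlock_t
no longer refers to the arch type, so the core kernel can reuse it as a
wrapper around arch_spinlock_t, with plain spinlock_t wrapping that in
turn. A simplified sketch of the resulting layering (assumed to mirror
linux/spinlock_types.h; debug and lockdep fields elided):

typedef struct {
	unsigned int lock;		/* packed serving/ticket counters */
} arch_spinlock_t;

typedef struct raw_spinlock {
	arch_spinlock_t raw_lock;	/* the arch lock itself */
} raw_spinlock_t;

typedef struct spinlock {
	struct raw_spinlock rlock;	/* candidate for a sleeping lock on RT */
} spinlock_t;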
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 7b845ba9dff4..8b0b4181219f 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -99,7 +99,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
@@ -118,7 +118,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_putc(p, '\n');
 		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
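
The irq.c change follows from that split: irq_desc's lock is now a core
raw_spinlock_t, and the raw_spin_* accessors always busy-wait, which is
what low-level interrupt code needs even on kernels where spinlock_t may
be converted to a sleeping lock (as on preempt-rt). A stub sketch of how
the three layers forward to one another (names mirror the kernel's;
bodies are placeholders, not the real implementation):

typedef struct { unsigned int lock; } arch_spinlock_t;
typedef struct { arch_spinlock_t raw_lock; } raw_spinlock_t;
typedef struct { raw_spinlock_t rlock; } spinlock_t;

static void arch_spin_lock(arch_spinlock_t *l)
{
	(void)l;	/* arch ticket-lock loop, as in spinlock.h above */
}

static void raw_spin_lock(raw_spinlock_t *l)
{
	arch_spin_lock(&l->raw_lock);	/* always spins */
}

static void spin_lock(spinlock_t *l)
{
	raw_spin_lock(&l->rlock);	/* may become sleeping on RT */
}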
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
index 6d39e222b170..6153b6a05ccf 100644
--- a/arch/mips/vr41xx/common/icu.c
+++ b/arch/mips/vr41xx/common/icu.c
@@ -159,9 +159,9 @@ void vr41xx_enable_piuint(uint16_t mask)
 
 	if (current_cpu_type() == CPU_VR4111 ||
 	    current_cpu_type() == CPU_VR4121) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu1_set(MPIUINTREG, mask);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
 
@@ -174,9 +174,9 @@ void vr41xx_disable_piuint(uint16_t mask)
 
 	if (current_cpu_type() == CPU_VR4111 ||
 	    current_cpu_type() == CPU_VR4121) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu1_clear(MPIUINTREG, mask);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
 
@@ -189,9 +189,9 @@ void vr41xx_enable_aiuint(uint16_t mask)
 
 	if (current_cpu_type() == CPU_VR4111 ||
 	    current_cpu_type() == CPU_VR4121) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu1_set(MAIUINTREG, mask);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
 
@@ -204,9 +204,9 @@ void vr41xx_disable_aiuint(uint16_t mask)
 
 	if (current_cpu_type() == CPU_VR4111 ||
 	    current_cpu_type() == CPU_VR4121) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu1_clear(MAIUINTREG, mask);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
 
@@ -219,9 +219,9 @@ void vr41xx_enable_kiuint(uint16_t mask)
 
 	if (current_cpu_type() == CPU_VR4111 ||
 	    current_cpu_type() == CPU_VR4121) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu1_set(MKIUINTREG, mask);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
 
@@ -234,9 +234,9 @@ void vr41xx_disable_kiuint(uint16_t mask)
 
 	if (current_cpu_type() == CPU_VR4111 ||
 	    current_cpu_type() == CPU_VR4121) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu1_clear(MKIUINTREG, mask);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
 
@@ -247,9 +247,9 @@ void vr41xx_enable_macint(uint16_t mask)
 	struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
 	unsigned long flags;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	icu1_set(MMACINTREG, mask);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 EXPORT_SYMBOL(vr41xx_enable_macint);
@@ -259,9 +259,9 @@ void vr41xx_disable_macint(uint16_t mask)
 	struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
 	unsigned long flags;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	icu1_clear(MMACINTREG, mask);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 EXPORT_SYMBOL(vr41xx_disable_macint);
@@ -271,9 +271,9 @@ void vr41xx_enable_dsiuint(uint16_t mask)
 	struct irq_desc *desc = irq_desc + DSIU_IRQ;
 	unsigned long flags;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	icu1_set(MDSIUINTREG, mask);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 EXPORT_SYMBOL(vr41xx_enable_dsiuint);
@@ -283,9 +283,9 @@ void vr41xx_disable_dsiuint(uint16_t mask)
 	struct irq_desc *desc = irq_desc + DSIU_IRQ;
 	unsigned long flags;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	icu1_clear(MDSIUINTREG, mask);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 EXPORT_SYMBOL(vr41xx_disable_dsiuint);
@@ -295,9 +295,9 @@ void vr41xx_enable_firint(uint16_t mask)
 	struct irq_desc *desc = irq_desc + FIR_IRQ;
 	unsigned long flags;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	icu2_set(MFIRINTREG, mask);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 EXPORT_SYMBOL(vr41xx_enable_firint);
@@ -307,9 +307,9 @@ void vr41xx_disable_firint(uint16_t mask)
 	struct irq_desc *desc = irq_desc + FIR_IRQ;
 	unsigned long flags;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	icu2_clear(MFIRINTREG, mask);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 EXPORT_SYMBOL(vr41xx_disable_firint);
@@ -322,9 +322,9 @@ void vr41xx_enable_pciint(void)
 	if (current_cpu_type() == CPU_VR4122 ||
 	    current_cpu_type() == CPU_VR4131 ||
 	    current_cpu_type() == CPU_VR4133) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu2_write(MPCIINTREG, PCIINT0);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
 
@@ -338,9 +338,9 @@ void vr41xx_disable_pciint(void)
 	if (current_cpu_type() == CPU_VR4122 ||
 	    current_cpu_type() == CPU_VR4131 ||
 	    current_cpu_type() == CPU_VR4133) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu2_write(MPCIINTREG, 0);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
 
@@ -354,9 +354,9 @@ void vr41xx_enable_scuint(void)
 	if (current_cpu_type() == CPU_VR4122 ||
 	    current_cpu_type() == CPU_VR4131 ||
 	    current_cpu_type() == CPU_VR4133) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu2_write(MSCUINTREG, SCUINT0);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
 
@@ -370,9 +370,9 @@ void vr41xx_disable_scuint(void)
 	if (current_cpu_type() == CPU_VR4122 ||
 	    current_cpu_type() == CPU_VR4131 ||
 	    current_cpu_type() == CPU_VR4133) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu2_write(MSCUINTREG, 0);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
 
@@ -386,9 +386,9 @@ void vr41xx_enable_csiint(uint16_t mask)
 	if (current_cpu_type() == CPU_VR4122 ||
 	    current_cpu_type() == CPU_VR4131 ||
 	    current_cpu_type() == CPU_VR4133) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu2_set(MCSIINTREG, mask);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
 
@@ -402,9 +402,9 @@ void vr41xx_disable_csiint(uint16_t mask)
 	if (current_cpu_type() == CPU_VR4122 ||
 	    current_cpu_type() == CPU_VR4131 ||
 	    current_cpu_type() == CPU_VR4133) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu2_clear(MCSIINTREG, mask);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
 
@@ -418,9 +418,9 @@ void vr41xx_enable_bcuint(void)
 	if (current_cpu_type() == CPU_VR4122 ||
 	    current_cpu_type() == CPU_VR4131 ||
 	    current_cpu_type() == CPU_VR4133) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu2_write(MBCUINTREG, BCUINTR);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
 
@@ -434,9 +434,9 @@ void vr41xx_disable_bcuint(void)
 	if (current_cpu_type() == CPU_VR4122 ||
 	    current_cpu_type() == CPU_VR4131 ||
 	    current_cpu_type() == CPU_VR4133) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu2_write(MBCUINTREG, 0);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
 
@@ -486,7 +486,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
 
 	pin = SYSINT1_IRQ_TO_PIN(irq);
 
-	spin_lock_irq(&desc->lock);
+	raw_spin_lock_irq(&desc->lock);
 
 	intassign0 = icu1_read(INTASSIGN0);
 	intassign1 = icu1_read(INTASSIGN1);
@@ -525,7 +525,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
 		intassign1 |= (uint16_t)assign << 9;
 		break;
 	default:
-		spin_unlock_irq(&desc->lock);
+		raw_spin_unlock_irq(&desc->lock);
 		return -EINVAL;
 	}
 
@@ -533,7 +533,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
 	icu1_write(INTASSIGN0, intassign0);
 	icu1_write(INTASSIGN1, intassign1);
 
-	spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irq(&desc->lock);
 
 	return 0;
 }
@@ -546,7 +546,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
 
 	pin = SYSINT2_IRQ_TO_PIN(irq);
 
-	spin_lock_irq(&desc->lock);
+	raw_spin_lock_irq(&desc->lock);
 
 	intassign2 = icu1_read(INTASSIGN2);
 	intassign3 = icu1_read(INTASSIGN3);
@@ -593,7 +593,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
 		intassign3 |= (uint16_t)assign << 12;
 		break;
 	default:
-		spin_unlock_irq(&desc->lock);
+		raw_spin_unlock_irq(&desc->lock);
 		return -EINVAL;
 	}
 
@@ -601,7 +601,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
 	icu1_write(INTASSIGN2, intassign2);
 	icu1_write(INTASSIGN3, intassign3);
 
-	spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irq(&desc->lock);
 
 	return 0;
 }
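
Every icu.c hunk is the same mechanical substitution: the descriptor lock
guarding each ICU mask register is now a raw_spinlock_t, so the
spin_{lock,unlock}_irq* calls become their raw_ equivalents. The guarded
pattern itself, sketched in portable C with a mutex standing in for the
raw spinlock (hypothetical names; the real icu1_set()/icu1_clear() do a
read-modify-write of memory-mapped registers, with local interrupts
disabled via the _irqsave/_irqrestore variants):

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t icu_lock = PTHREAD_MUTEX_INITIALIZER;
static uint16_t mask_reg;	/* stands in for e.g. MPIUINTREG */

static void icu_sketch_set(uint16_t mask)	/* cf. icu1_set() */
{
	pthread_mutex_lock(&icu_lock);
	mask_reg |= mask;	/* read-modify-write under the lock */
	pthread_mutex_unlock(&icu_lock);
}

static void icu_sketch_clear(uint16_t mask)	/* cf. icu1_clear() */
{
	pthread_mutex_lock(&icu_lock);
	mask_reg &= ~mask;
	pthread_mutex_unlock(&icu_lock);
}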