 arch/powerpc/mm/hash_native_64.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 9e1aa4f99fac..784a400e0781 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -37,7 +37,7 @@
 
 #define HPTE_LOCK_BIT 3
 
-static DEFINE_SPINLOCK(native_tlbie_lock);
+static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
 static inline void __tlbie(unsigned long va, int psize, int ssize)
 {
@@ -104,7 +104,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
 	if (use_local)
 		use_local = mmu_psize_defs[psize].tlbiel;
 	if (lock_tlbie && !use_local)
-		spin_lock(&native_tlbie_lock);
+		raw_spin_lock(&native_tlbie_lock);
 	asm volatile("ptesync": : :"memory");
 	if (use_local) {
 		__tlbiel(va, psize, ssize);
@@ -114,7 +114,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
 		asm volatile("eieio; tlbsync; ptesync": : :"memory");
 	}
 	if (lock_tlbie && !use_local)
-		spin_unlock(&native_tlbie_lock);
+		raw_spin_unlock(&native_tlbie_lock);
 }
 
 static inline void native_lock_hpte(struct hash_pte *hptep)
@@ -433,7 +433,7 @@ static void native_hpte_clear(void)
 	/* we take the tlbie lock and hold it. Some hardware will
 	 * deadlock if we try to tlbie from two processors at once.
 	 */
-	spin_lock(&native_tlbie_lock);
+	raw_spin_lock(&native_tlbie_lock);
 
 	slots = pteg_count * HPTES_PER_GROUP;
 
@@ -457,7 +457,7 @@ static void native_hpte_clear(void)
 	}
 
 	asm volatile("eieio; tlbsync; ptesync":::"memory");
-	spin_unlock(&native_tlbie_lock);
+	raw_spin_unlock(&native_tlbie_lock);
 	local_irq_restore(flags);
 }
 
@@ -520,7 +520,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 		int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
 
 		if (lock_tlbie)
-			spin_lock(&native_tlbie_lock);
+			raw_spin_lock(&native_tlbie_lock);
 
 		asm volatile("ptesync":::"memory");
 		for (i = 0; i < number; i++) {
@@ -535,7 +535,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 		asm volatile("eieio; tlbsync; ptesync":::"memory");
 
 		if (lock_tlbie)
-			spin_unlock(&native_tlbie_lock);
+			raw_spin_unlock(&native_tlbie_lock);
 	}
 
 	local_irq_restore(flags);
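
Background on the conversion (not part of the commit text): on PREEMPT_RT kernels a plain spinlock_t is substituted with a sleeping, rtmutex-based lock, while raw_spinlock_t always disables preemption and spins. The tlbie paths above run with interrupts hard-disabled (note the local_irq_save()/local_irq_restore() pairs), a context in which taking a sleeping lock is invalid, so the lock that serializes the broadcast invalidation must remain a true spinlock. A minimal sketch of the pattern follows; demo_tlb_lock and demo_serialized_flush are illustrative names, not symbols from this file:

#include <linux/irqflags.h>
#include <linux/spinlock.h>

/*
 * Illustrative sketch only; demo_tlb_lock is not a symbol from
 * hash_native_64.c. A raw_spinlock_t stays a spinning lock even on
 * PREEMPT_RT, so it is legal to take in contexts that must not
 * sleep, e.g. with interrupts hard-disabled.
 */
static DEFINE_RAW_SPINLOCK(demo_tlb_lock);

static void demo_serialized_flush(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* flush runs with IRQs off */
	raw_spin_lock(&demo_tlb_lock);	/* serialize against other CPUs */

	/* ... issue the hardware invalidation that must not race ... */

	raw_spin_unlock(&demo_tlb_lock);
	local_irq_restore(flags);
}

On a kernel without the RT patches, raw_spin_lock() and spin_lock() compile to essentially the same code, so this change should be behaviour-neutral there; the distinction only matters once spinlock_t becomes a sleeping lock under PREEMPT_RT.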