author    Linus Torvalds <torvalds@linux-foundation.org>  2015-09-03 18:46:07 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-09-03 18:46:07 -0400
commit    ca520cab25e0e8da717c596ccaa2c2b3650cfa09 (patch)
tree      883eb497642d98635817f9cf954ac98e043fb573 /arch/m32r
parent    4c12ab7e5e2e892fa94df500f96001837918a281 (diff)
parent    d420acd816c07c7be31bd19d09cbcb16e5572fa6 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking and atomic updates from Ingo Molnar:
 "Main changes in this cycle are:

   - Extend atomic primitives with coherent logic op primitives
     (atomic_{or,and,xor}()) and deprecate the old partial APIs
     (atomic_{set,clear}_mask()).

     The old ops were incoherent, with incompatible signatures across
     architectures and incomplete support.  Now every architecture
     supports the primitives consistently (by Peter Zijlstra)

   - Generic support for 'relaxed atomics':

       - _acquire/release/relaxed() flavours of xchg(), cmpxchg() and
         {add,sub}_return()
       - atomic_read_acquire()
       - atomic_set_release()

     This came out of porting qrwlock code to arm64 (by Will Deacon)

   - Clean up the fragile static_key APIs that were causing repeat bugs
     by introducing a new one:

       DEFINE_STATIC_KEY_TRUE(name);
       DEFINE_STATIC_KEY_FALSE(name);

     which define a key of different types with an initial true/false
     value.  Then allow:

       static_branch_likely()
       static_branch_unlikely()

     to take a key of either type and emit the right instruction for
     the case.  To be able to know the 'type' of the static key, it is
     encoded in the jump entry (by Peter Zijlstra)

   - Static key self-tests (by Jason Baron)

   - qrwlock optimizations (by Waiman Long)

   - small futex enhancements (by Davidlohr Bueso)

   - ... and misc other changes"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (63 commits)
  jump_label/x86: Work around asm build bug on older/backported GCCs
  locking, ARM, atomics: Define our SMP atomics in terms of _relaxed() operations
  locking, include/llist: Use linux/atomic.h instead of asm/cmpxchg.h
  locking/qrwlock: Make use of _{acquire|release|relaxed}() atomics
  locking/qrwlock: Implement queue_write_unlock() using smp_store_release()
  locking/lockref: Remove homebrew cmpxchg64_relaxed() macro definition
  locking, asm-generic: Add _{relaxed|acquire|release}() variants for 'atomic_long_t'
  locking, asm-generic: Rework atomic-long.h to avoid bulk code duplication
  locking/atomics: Add _{acquire|release|relaxed}() variants of some atomic operations
  locking, compiler.h: Cast away attributes in the WRITE_ONCE() magic
  locking/static_keys: Make verify_keys() static
  jump label, locking/static_keys: Update docs
  locking/static_keys: Provide a selftest
  jump_label: Provide a self-test
  s390/uaccess, locking/static_keys: employ static_branch_likely()
  x86, tsc, locking/static_keys: Employ static_branch_likely()
  locking/static_keys: Add selftest
  locking/static_keys: Add a new static_key interface
  locking/static_keys: Rework update logic
  locking/static_keys: Add static_key_{en,dis}able() helpers
  ...
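For readers unfamiliar with the new static_key interface described above, here is a minimal sketch of how the new macros fit together. The key name, functions and call sites are hypothetical illustrations, not code from this merge:

	#include <linux/jump_label.h>
	#include <linux/printk.h>

	/* Define a key whose initial value is false (branch disabled). */
	static DEFINE_STATIC_KEY_FALSE(my_feature_key);

	void my_hot_path(void)
	{
		/*
		 * static_branch_unlikely() accepts a key of either type and
		 * emits the right instruction for it; the out-of-line path
		 * is only taken once the key has been enabled at runtime.
		 */
		if (static_branch_unlikely(&my_feature_key))
			pr_info("slow feature path\n");
	}

	void my_feature_enable(void)
	{
		/* One of the static_key_{en,dis}able() helpers added by this series. */
		static_key_enable(&my_feature_key.key);
	}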
Diffstat (limited to 'arch/m32r')
-rw-r--r--  arch/m32r/include/asm/atomic.h | 45
-rw-r--r--  arch/m32r/kernel/smp.c         |  4
2 files changed, 6 insertions(+), 43 deletions(-)
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 31bb74adba08..025e2a170493 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -94,6 +94,10 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -239,45 +243,4 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
-
-static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr)
-{
-	unsigned long flags;
-	unsigned long tmp;
-
-	local_irq_save(flags);
-	__asm__ __volatile__ (
-		"# atomic_clear_mask		\n\t"
-		DCACHE_CLEAR("%0", "r5", "%1")
-		M32R_LOCK" %0, @%1;		\n\t"
-		"and	%0, %2;			\n\t"
-		M32R_UNLOCK" %0, @%1;		\n\t"
-		: "=&r" (tmp)
-		: "r" (addr), "r" (~mask)
-		: "memory"
-		__ATOMIC_CLOBBER
-	);
-	local_irq_restore(flags);
-}
-
-static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
-{
-	unsigned long flags;
-	unsigned long tmp;
-
-	local_irq_save(flags);
-	__asm__ __volatile__ (
-		"# atomic_set_mask		\n\t"
-		DCACHE_CLEAR("%0", "r5", "%1")
-		M32R_LOCK" %0, @%1;		\n\t"
-		"or	%0, %2;			\n\t"
-		M32R_UNLOCK" %0, @%1;		\n\t"
-		: "=&r" (tmp)
-		: "r" (addr), "r" (mask)
-		: "memory"
-		__ATOMIC_CLOBBER
-	);
-	local_irq_restore(flags);
-}
-
 #endif /* _ASM_M32R_ATOMIC_H */
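Each of the three ATOMIC_OP() lines added in the first hunk instantiates a logic-op primitive (atomic_and(), atomic_or(), atomic_xor()) from the ATOMIC_OP macro defined earlier in this header; together they replace the open-coded atomic_set_mask()/atomic_clear_mask() helpers removed in the second hunk. A rough C model of the semantics such an expansion provides, sketched under the assumption of an irq-off read-modify-write; the real header uses the M32R_LOCK/M32R_UNLOCK inline assembly visible in the removed functions, and the name below is made up:

	#include <linux/atomic.h>
	#include <linux/irqflags.h>

	/* Simplified model of the function generated by ATOMIC_OP(or). */
	static __inline__ void atomic_or_sketch(int i, atomic_t *v)
	{
		unsigned long flags;

		local_irq_save(flags);	/* UP-style fallback; SMP uses LOCK/UNLOCK insns */
		v->counter |= i;	/* the "or" substituted in by the macro */
		local_irq_restore(flags);
	}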
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index c18ddc74ef9a..62d6961e7f2b 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -156,7 +156,7 @@ void smp_flush_cache_all(void)
 	cpumask_clear_cpu(smp_processor_id(), &cpumask);
 	spin_lock(&flushcache_lock);
 	mask=cpumask_bits(&cpumask);
-	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
+	atomic_or(*mask, (atomic_t *)&flushcache_cpumask);
 	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
 	_flush_cache_copyback_all();
 	while (flushcache_cpumask)
@@ -407,7 +407,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	flush_vma = vma;
 	flush_va = va;
 	mask=cpumask_bits(&cpumask);
-	atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
+	atomic_or(*mask, (atomic_t *)&flush_cpumask);
 
 	/*
 	 * We have to send the IPI only to
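The two smp.c hunks above are the mechanical side of the deprecation described in the pull request: setting bits through the old mask helper becomes a plain atomic OR. A hedged summary of the old-to-new mapping, using hypothetical names rather than code from this diff (the clear-mask direction is shown only for completeness; it does not occur in this file):

	#include <linux/atomic.h>

	static void set_pending_bits(atomic_t *pending, unsigned int mask)
	{
		/* Previously: atomic_set_mask(mask, pending); */
		atomic_or(mask, pending);
	}

	static void clear_pending_bits(atomic_t *pending, unsigned int mask)
	{
		/* Previously: atomic_clear_mask(mask, pending); */
		atomic_and(~mask, pending);
	}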