author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2016-06-20 11:25:44 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2016-06-20 11:25:44 -0400
commit     af52739b922f656eb1f39016fabaabe4baeda2e2 (patch)
tree       79a7aa810d0493cd0cf4adebac26d37f12e8b545 /arch/arc/include
parent     25ed6a5e97809129a1bc852b6b5c7d03baa112c4 (diff)
parent     33688abb2802ff3a230bd2441f765477b94cc89e (diff)
Merge 4.7-rc4 into staging-next
We want the fixes in here, and we can resolve a merge issue in
drivers/iio/industrialio-trigger.c
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'arch/arc/include')
-rw-r--r--  arch/arc/include/asm/atomic.h         |  45
-rw-r--r--  arch/arc/include/asm/entry-compact.h  |   4
-rw-r--r--  arch/arc/include/asm/mmu_context.h    |   2
-rw-r--r--  arch/arc/include/asm/pgtable.h        |   2
-rw-r--r--  arch/arc/include/asm/processor.h      |   2
-rw-r--r--  arch/arc/include/asm/smp.h            |   2
-rw-r--r--  arch/arc/include/asm/spinlock.h       | 292
-rw-r--r--  arch/arc/include/asm/thread_info.h    |   2
-rw-r--r--  arch/arc/include/asm/uaccess.h        |   2
-rw-r--r--  arch/arc/include/uapi/asm/swab.h      |   2
10 files changed, 13 insertions, 342 deletions
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 5f3dcbbc0cc9..dd683995bc9d 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -25,50 +25,17 @@

 #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

-#ifdef CONFIG_ARC_STAR_9000923308
-
-#define SCOND_FAIL_RETRY_VAR_DEF \
-	unsigned int delay = 1, tmp; \
-
-#define SCOND_FAIL_RETRY_ASM \
-	" bz 4f \n" \
-	" ; --- scond fail delay --- \n" \
-	" mov %[tmp], %[delay] \n" /* tmp = delay */ \
-	"2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
-	" sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
-	" rol %[delay], %[delay] \n" /* delay *= 2 */ \
-	" b 1b \n" /* start over */ \
-	"4: ; --- success --- \n" \
-
-#define SCOND_FAIL_RETRY_VARS \
-	,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \
-
-#else /* !CONFIG_ARC_STAR_9000923308 */
-
-#define SCOND_FAIL_RETRY_VAR_DEF
-
-#define SCOND_FAIL_RETRY_ASM \
-	" bnz 1b \n" \
-
-#define SCOND_FAIL_RETRY_VARS
-
-#endif
-
 #define ATOMIC_OP(op, c_op, asm_op) \
 static inline void atomic_##op(int i, atomic_t *v) \
 { \
 	unsigned int val; \
-	SCOND_FAIL_RETRY_VAR_DEF \
 	\
 	__asm__ __volatile__( \
 	"1: llock %[val], [%[ctr]] \n" \
 	" " #asm_op " %[val], %[val], %[i] \n" \
 	" scond %[val], [%[ctr]] \n" \
-	" \n" \
-	SCOND_FAIL_RETRY_ASM \
-	\
+	" bnz 1b \n" \
 	: [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
-	SCOND_FAIL_RETRY_VARS \
 	: [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
 	  [i] "ir" (i) \
 	: "cc"); \
@@ -77,8 +44,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
 #define ATOMIC_OP_RETURN(op, c_op, asm_op) \
 static inline int atomic_##op##_return(int i, atomic_t *v) \
 { \
 	unsigned int val; \
-	SCOND_FAIL_RETRY_VAR_DEF \
 	\
 	/* \
 	 * Explicit full memory barrier needed before/after as \
@@ -90,11 +56,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 	"1: llock %[val], [%[ctr]] \n" \
 	" " #asm_op " %[val], %[val], %[i] \n" \
 	" scond %[val], [%[ctr]] \n" \
-	" \n" \
-	SCOND_FAIL_RETRY_ASM \
-	\
+	" bnz 1b \n" \
 	: [val] "=&r" (val) \
-	SCOND_FAIL_RETRY_VARS \
 	: [ctr] "r" (&v->counter), \
 	  [i] "ir" (i) \
 	: "cc"); \
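
The retry loop that ATOMIC_OP emits after this change maps to roughly the
following C-level sketch. It is illustrative only: load_locked() and
store_conditional() are hypothetical stand-ins for the llock/scond
instructions, not kernel helpers; the real implementation is the inline asm
shown above.

	/* Sketch of the llock/scond retry loop generated by ATOMIC_OP(add, ...) */
	static inline void atomic_add_sketch(int i, atomic_t *v)
	{
		unsigned int val;

		do {
			val = load_locked(&v->counter);            /* 1: llock           */
			val += i;                                  /*    add val, val, i */
		} while (!store_conditional(&v->counter, val));    /* scond; bnz 1b      */
	}
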
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index e0e1faf03c50..14c310f2e0b1 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -76,8 +76,8 @@
  * We need to be a bit more cautious here. What if a kernel bug in
  * L1 ISR, caused SP to go whaco (some small value which looks like
  * USER stk) and then we take L2 ISR.
- * Above brlo alone would treat it as a valid L1-L2 sceanrio
- * instead of shouting alound
+ * Above brlo alone would treat it as a valid L1-L2 scenario
+ * instead of shouting around
  * The only feasible way is to make sure this L2 happened in
  * L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in
  * L1 ISR before it switches stack
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index 1fd467ef658f..b0b87f2447f5 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -83,7 +83,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 	local_flush_tlb_all();

 	/*
-	 * Above checke for rollover of 8 bit ASID in 32 bit container.
+	 * Above check for rollover of 8 bit ASID in 32 bit container.
 	 * If the container itself wrapped around, set it to a non zero
 	 * "generation" to distinguish from no context
 	 */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 034bbdc0ff61..858f98ef7f1b 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -47,7 +47,7 @@
  * Page Tables are purely for Linux VM's consumption and the bits below are
  * suited to that (uniqueness). Hence some are not implemented in the TLB and
  * some have different value in TLB.
- * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible becoz they live in
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
  * seperate PD0 and PD1, which combined forms a translation entry)
  * while for PTE perspective, they are 8 and 9 respectively
  * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index f9048994b22f..16b630fbeb6a 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -78,7 +78,7 @@ struct task_struct;
 #define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)

 /*
- * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
+ * Where about of Task's sp, fp, blink when it was last seen in kernel mode.
  * Look in process.c for details of kernel stack layout
  */
 #define TSK_K_ESP(tsk) (tsk->thread.ksp)
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 991380438d6b..89fdd1b0a76e 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
  * (1) These insn were introduced only in 4.10 release. So for older released
  *     support needed.
  *
- * (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be
+ * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
  *     gaurantted by the platform (not something which core handles).
  *     Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
  *     disabling for atomicity.
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index 800e7c430ca5..cded4a9b5438 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -20,11 +20,6 @@

 #ifdef CONFIG_ARC_HAS_LLSC

-/*
- * A normal LLOCK/SCOND based system, w/o need for livelock workaround
- */
-#ifndef CONFIG_ARC_STAR_9000923308
-
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned int val;
@@ -238,293 +233,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 	smp_mb();
 }

-#else	/* CONFIG_ARC_STAR_9000923308 */
-
-/*
- * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
- * coherency transactions in the SCU. The exclusive line state keeps rotating
- * among contenting cores leading to a never ending cycle. So break the cycle
- * by deferring the retry of failed exclusive access (SCOND). The actual delay
- * needed is function of number of contending cores as well as the unrelated
- * coherency traffic from other cores. To keep the code simple, start off with
- * small delay of 1 which would suffice most cases and in case of contention
- * double the delay. Eventually the delay is sufficient such that the coherency
- * pipeline is drained, thus a subsequent exclusive access would succeed.
- */
-
-#define SCOND_FAIL_RETRY_VAR_DEF \
-	unsigned int delay, tmp; \
-
-#define SCOND_FAIL_RETRY_ASM \
-	" ; --- scond fail delay --- \n" \
-	" mov %[tmp], %[delay] \n" /* tmp = delay */ \
-	"2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
-	" sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
-	" rol %[delay], %[delay] \n" /* delay *= 2 */ \
-	" b 1b \n" /* start over */ \
-	" \n" \
-	"4: ; --- done --- \n" \
-
-#define SCOND_FAIL_RETRY_VARS \
-	,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0: mov %[delay], 1 \n"
-	"1: llock %[val], [%[slock]] \n"
-	" breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */
-	" scond %[LOCKED], [%[slock]] \n" /* acquire */
-	" bz 4f \n" /* done */
-	" \n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val] "=&r" (val)
-	  SCOND_FAIL_RETRY_VARS
-	: [slock] "r" (&(lock->slock)),
-	  [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0: mov %[delay], 1 \n"
-	"1: llock %[val], [%[slock]] \n"
-	" breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
-	" scond %[LOCKED], [%[slock]] \n" /* acquire */
-	" bz.d 4f \n"
-	" mov.z %[got_it], 1 \n" /* got it */
-	" \n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val] "=&r" (val),
-	  [got_it] "+&r" (got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [slock] "r" (&(lock->slock)),
-	  [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	smp_mb();
-
-	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
-	smp_mb();
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers but only one writer.
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
- */
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	/*
-	 * zero means writer holds the lock exclusively, deny Reader.
-	 * Otherwise grant lock to first/subseq reader
-	 *
-	 * if (rw->counter > 0) {
-	 *	rw->counter--;
-	 *	ret = 1;
-	 * }
-	 */
-
-	__asm__ __volatile__(
-	"0: mov %[delay], 1 \n"
-	"1: llock %[val], [%[rwlock]] \n"
-	" brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */
-	" sub %[val], %[val], 1 \n" /* reader lock */
-	" scond %[val], [%[rwlock]] \n"
-	" bz 4f \n" /* done */
-	" \n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val] "=&r" (val)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock] "r" (&(rw->counter)),
-	  [WR_LOCKED] "ir" (0)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0: mov %[delay], 1 \n"
-	"1: llock %[val], [%[rwlock]] \n"
-	" brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
-	" sub %[val], %[val], 1 \n" /* counter-- */
-	" scond %[val], [%[rwlock]] \n"
-	" bz.d 4f \n"
-	" mov.z %[got_it], 1 \n" /* got it */
-	" \n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val] "=&r" (val),
-	  [got_it] "+&r" (got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock] "r" (&(rw->counter)),
-	  [WR_LOCKED] "ir" (0)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	/*
-	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
-	 * deny writer. Otherwise if unlocked grant to writer
-	 * Hence the claim that Linux rwlocks are unfair to writers.
-	 * (can be starved for an indefinite time by readers).
-	 *
-	 * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
-	 *	rw->counter = 0;
-	 *	ret = 1;
-	 * }
-	 */
-
-	__asm__ __volatile__(
-	"0: mov %[delay], 1 \n"
-	"1: llock %[val], [%[rwlock]] \n"
-	" brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */
-	" mov %[val], %[WR_LOCKED] \n"
-	" scond %[val], [%[rwlock]] \n"
-	" bz 4f \n"
-	" \n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val] "=&r" (val)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock] "r" (&(rw->counter)),
-	  [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
-	  [WR_LOCKED] "ir" (0)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0: mov %[delay], 1 \n"
-	"1: llock %[val], [%[rwlock]] \n"
-	" brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
-	" mov %[val], %[WR_LOCKED] \n"
-	" scond %[val], [%[rwlock]] \n"
-	" bz.d 4f \n"
-	" mov.z %[got_it], 1 \n" /* got it */
-	" \n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val] "=&r" (val),
-	  [got_it] "+&r" (got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock] "r" (&(rw->counter)),
-	  [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
-	  [WR_LOCKED] "ir" (0)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-
-	smp_mb();
-
-	/*
-	 * rw->counter++;
-	 */
-	__asm__ __volatile__(
-	"1: llock %[val], [%[rwlock]] \n"
-	" add %[val], %[val], 1 \n"
-	" scond %[val], [%[rwlock]] \n"
-	" bnz 1b \n"
-	" \n"
-	: [val] "=&r" (val)
-	: [rwlock] "r" (&(rw->counter))
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-
-	smp_mb();
-
-	/*
-	 * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
-	 */
-	__asm__ __volatile__(
-	"1: llock %[val], [%[rwlock]] \n"
-	" scond %[UNLOCKED], [%[rwlock]]\n"
-	" bnz 1b \n"
-	" \n"
-	: [val] "=&r" (val)
-	: [rwlock] "r" (&(rw->counter)),
-	  [UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-#undef SCOND_FAIL_RETRY_VAR_DEF
-#undef SCOND_FAIL_RETRY_ASM
-#undef SCOND_FAIL_RETRY_VARS
-
-#endif	/* CONFIG_ARC_STAR_9000923308 */
-
 #else	/* !CONFIG_ARC_HAS_LLSC */

 static inline void arch_spin_lock(arch_spinlock_t *lock)
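
The code removed above implemented the workaround for ARC STAR 9000923308
described in its comment: when an SCOND fails, busy-wait for "delay"
iterations and double the delay before retrying the LLOCK, so the coherency
pipeline drains and one contender eventually wins. A rough, hypothetical C
rendering of that exponential backoff for arch_spin_lock() follows;
try_acquire_llsc() stands in for the llock/scond attempt and is not a real
kernel helper.

	static inline void arch_spin_lock_sketch(arch_spinlock_t *lock)
	{
		unsigned int delay = 1, tmp;

		smp_mb();
		for (;;) {
			if (READ_ONCE(lock->slock) == __ARCH_SPIN_LOCK_LOCKED__) {
				delay = 1;              /* label 0: lock held, spin and reset backoff */
				continue;
			}
			if (try_acquire_llsc(lock))     /* llock + scond succeeded (bz 4f) */
				break;
			for (tmp = delay; tmp != 0; tmp--)
				;                       /* scond failed: wait "delay" iterations */
			delay *= 2;                     /* rol %[delay]: double the delay */
		}
		smp_mb();
	}
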
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 3af67455659a..2d79e527fa50 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -103,7 +103,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)

 /*
  * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
- * SYSCALL_TRACE is anways seperately/unconditionally tested right after a
+ * SYSCALL_TRACE is anyway seperately/unconditionally tested right after a
  * syscall, so all that reamins to be tested is _TIF_WORK_MASK
  */

diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index d1da6032b715..a78d5670884f 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -32,7 +32,7 @@
 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))

 /*
- * Algorthmically, for __user_ok() we want do:
+ * Algorithmically, for __user_ok() we want do:
  * (start < TASK_SIZE) && (start+len < TASK_SIZE)
  * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
  * emitted directly in code.
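
In plain C, the range check that this comment describes amounts to the sketch
below; it only illustrates the stated condition and is not the actual
__user_ok() macro, which the header implements in an optimized form.

	/* Illustrative form of: (start < TASK_SIZE) && (start+len < TASK_SIZE) */
	static inline int user_range_ok_sketch(unsigned long start, unsigned long len)
	{
		return (start < TASK_SIZE) && (start + len < TASK_SIZE);
	}
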
diff --git a/arch/arc/include/uapi/asm/swab.h b/arch/arc/include/uapi/asm/swab.h
index 095599a73195..71f3918b0fc3 100644
--- a/arch/arc/include/uapi/asm/swab.h
+++ b/arch/arc/include/uapi/asm/swab.h
@@ -74,7 +74,7 @@
 	__tmp ^ __in; \
 })

-#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bwap instruction */
+#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bswap instruction */

 #define __arch_swab32(x) \
 ({ \
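
For reference, the byte reordering that every __arch_swab32() variant in this
header implements is the standard 32-bit byte swap; a generic C equivalent
(not the ARC-optimized versions above) is:

	/* Generic 32-bit byte swap, same result as __arch_swab32(). */
	static inline unsigned int swab32_generic(unsigned int x)
	{
		return (x << 24) |
		       ((x & 0x0000ff00) << 8) |
		       ((x & 0x00ff0000) >> 8) |
		       (x >> 24);
	}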