Diffstat (limited to 'arch/arc/include/asm/spinlock.h')
-rw-r--r--	arch/arc/include/asm/spinlock.h	| 292 -
1 file changed, 0 insertions, 292 deletions
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index 800e7c430ca5..cded4a9b5438 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -20,11 +20,6 @@
 
 #ifdef CONFIG_ARC_HAS_LLSC
 
-/*
- * A normal LLOCK/SCOND based system, w/o need for livelock workaround
- */
-#ifndef CONFIG_ARC_STAR_9000923308
-
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned int val;
@@ -238,293 +233,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 	smp_mb();
 }
 
-#else	/* CONFIG_ARC_STAR_9000923308 */
-
-/*
- * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
- * coherency transactions in the SCU. The exclusive line state keeps rotating
- * among contenting cores leading to a never ending cycle. So break the cycle
- * by deferring the retry of failed exclusive access (SCOND). The actual delay
- * needed is function of number of contending cores as well as the unrelated
- * coherency traffic from other cores. To keep the code simple, start off with
- * small delay of 1 which would suffice most cases and in case of contention
- * double the delay. Eventually the delay is sufficient such that the coherency
- * pipeline is drained, thus a subsequent exclusive access would succeed.
- */
-
-#define SCOND_FAIL_RETRY_VAR_DEF						\
-	unsigned int delay, tmp;						\
-
-#define SCOND_FAIL_RETRY_ASM							\
-	"	; --- scond fail delay ---	\n"				\
-	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
-	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
-	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
-	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
-	"	b	1b			\n"	/* start over */	\
-	"					\n"				\
-	"4: ; --- done ---			\n"				\
-
-#define SCOND_FAIL_RETRY_VARS							\
-	  ,[delay] "=&r" (delay), [tmp]	"=&r"	(tmp)				\
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[slock]]	\n"
-	"	breq	%[val], %[LOCKED], 0b	\n"	/* spin while LOCKED */
-	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
-	"	bz	4f			\n"	/* done */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [slock]	"r"	(&(lock->slock)),
-	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[slock]]	\n"
-	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
-	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [slock]	"r"	(&(lock->slock)),
-	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	smp_mb();
-
-	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
-	smp_mb();
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers but only one writer.
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
- */
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	/*
-	 * zero means writer holds the lock exclusively, deny Reader.
-	 * Otherwise grant lock to first/subseq reader
-	 *
-	 *	if (rw->counter > 0) {
-	 *		rw->counter--;
-	 *		ret = 1;
-	 *	}
-	 */
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brls	%[val], %[WR_LOCKED], 0b\n"	/* <= 0: spin while write locked */
-	"	sub	%[val], %[val], 1	\n"	/* reader lock */
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz	4f			\n"	/* done */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
-	"	sub	%[val], %[val], 1	\n"	/* counter-- */
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	/*
-	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
-	 * deny writer. Otherwise if unlocked grant to writer
-	 * Hence the claim that Linux rwlocks are unfair to writers.
-	 * (can be starved for an indefinite time by readers).
-	 *
-	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
-	 *		rw->counter = 0;
-	 *		ret = 1;
-	 *	}
-	 */
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brne	%[val], %[UNLOCKED], 0b	\n"	/* while !UNLOCKED spin */
-	"	mov	%[val], %[WR_LOCKED]	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz	4f			\n"
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
-	"	mov	%[val], %[WR_LOCKED]	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-
-	smp_mb();
-
-	/*
-	 * rw->counter++;
-	 */
-	__asm__ __volatile__(
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	add	%[val], %[val], 1	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bnz	1b			\n"
-	"					\n"
-	: [val]		"=&r"	(val)
-	: [rwlock]	"r"	(&(rw->counter))
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-
-	smp_mb();
-
-	/*
-	 * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
-	 */
-	__asm__ __volatile__(
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	scond	%[UNLOCKED], [%[rwlock]]\n"
-	"	bnz	1b			\n"
-	"					\n"
-	: [val]		"=&r"	(val)
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"r"	(__ARCH_RW_LOCK_UNLOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-#undef SCOND_FAIL_RETRY_VAR_DEF
-#undef SCOND_FAIL_RETRY_ASM
-#undef SCOND_FAIL_RETRY_VARS
-
-#endif	/* CONFIG_ARC_STAR_9000923308 */
-
 #else	/* !CONFIG_ARC_HAS_LLSC */
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
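
The comment block removed above specifies the livelock fix: when an SCOND fails, defer the retry and double the wait on each consecutive failure. A minimal C sketch of that policy, assuming a C11 weak compare-and-swap as a stand-in for the LLOCK/SCOND pair (the real primitive has to stay in asm) and leaving out the surrounding smp_mb() barriers; LOCKED and spin_lock_backoff are illustrative names, not kernel API:

#include <stdatomic.h>

#define LOCKED	1u	/* stand-in for __ARCH_SPIN_LOCK_LOCKED__ */

static void spin_lock_backoff(atomic_uint *slock)
{
	unsigned int delay = 1;			/* "0:	mov %[delay], 1" */

	for (;;) {
		unsigned int val = atomic_load(slock);		/* llock */

		if (val == LOCKED) {		/* "breq %[val], %[LOCKED], 0b" */
			delay = 1;		/* owner seen: plain spin, reset delay */
			continue;
		}
		/* scond: succeeds only if no other core wrote the line meanwhile */
		if (atomic_compare_exchange_weak(slock, &val, LOCKED))
			return;			/* "bz 4f": lock taken */

		/* scond failed: defer the retry so the SCU coherency
		 * pipeline can drain, doubling the wait each failure */
		for (unsigned int tmp = delay; tmp != 0; tmp--)
			__asm__ __volatile__("");	/* keep the busy loop alive */
		delay *= 2;			/* "rol %[delay], %[delay]" */
	}
}

The doubling is what breaks the livelock: once the delay grows past the window in which the contending cores' coherency transactions overlap, one core's SCOND finally succeeds and the cycle ends. Note that merely observing the lock held branches back to label 0 and resets the delay to 1; the backoff applies only to SCOND failures.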
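The rwlock halves removed above share one counter protocol, spelled out in their comments: rw->counter starts at __ARCH_RW_LOCK_UNLOCKED__ (the maximum number of concurrent readers), each reader decrements it, and a writer takes it all the way to zero, which is why writers can be starved indefinitely by a stream of readers. A rough C11 rendering of the trylock and unlock semantics, under the same caveats as the previous sketch (weak CAS in place of LLOCK/SCOND, barriers omitted, the unlocked value and all names chosen for illustration):

#include <stdatomic.h>

#define RW_UNLOCKED	0x01000000u	/* stand-in for __ARCH_RW_LOCK_UNLOCKED__ */

/* Reader: allowed whenever no writer holds the lock (counter > 0). */
static int read_trylock_sketch(atomic_uint *counter)
{
	unsigned int val = atomic_load(counter);	/* llock */

	if (val == 0)		/* "brls %[val], %[WR_LOCKED], 4f": writer in, bail */
		return 0;
	/* the removed kernel trylock retries a failed scond with the same
	 * backoff as above; this sketch just reports the failure */
	return atomic_compare_exchange_weak(counter, &val, val - 1);
}

/* Writer: allowed only when nobody is in (counter == RW_UNLOCKED). */
static int write_trylock_sketch(atomic_uint *counter)
{
	unsigned int val = RW_UNLOCKED;

	return atomic_compare_exchange_weak(counter, &val, 0);
}

/* Unlock paths undo the above: a reader returns its slot,
 * the writer restores the full reader quota. */
static void read_unlock_sketch(atomic_uint *counter)
{
	atomic_fetch_add(counter, 1);		/* rw->counter++ */
}

static void write_unlock_sketch(atomic_uint *counter)
{
	atomic_store(counter, RW_UNLOCKED);	/* rw->counter = RW_UNLOCKED */
}

The blocking arch_read_lock()/arch_write_lock() variants being deleted wrap these same counter updates in the exponential-backoff loop from the previous sketch instead of bailing out on contention.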