Diffstat (limited to 'kernel/locking/qspinlock_paravirt.h')
-rw-r--r--	kernel/locking/qspinlock_paravirt.h	47
1 file changed, 38 insertions(+), 9 deletions(-)
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 15b6a39366c6..6ee477765e6c 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -61,21 +61,50 @@ struct pv_node {
 #include "qspinlock_stat.h"
 
 /*
+ * Hybrid PV queued/unfair lock
+ *
  * By replacing the regular queued_spin_trylock() with the function below,
  * it will be called once when a lock waiter enter the PV slowpath before
- * being queued. By allowing one lock stealing attempt here when the pending
- * bit is off, it helps to reduce the performance impact of lock waiter
- * preemption without the drawback of lock starvation.
+ * being queued.
+ *
+ * The pending bit is set by the queue head vCPU of the MCS wait queue in
+ * pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
+ * When that bit becomes visible to the incoming waiters, no lock stealing
+ * is allowed. The function will return immediately to make the waiters
+ * enter the MCS wait queue. So lock starvation shouldn't happen as long
+ * as the queued mode vCPUs are actively running to set the pending bit
+ * and hence disabling lock stealing.
+ *
+ * When the pending bit isn't set, the lock waiters will stay in the unfair
+ * mode spinning on the lock unless the MCS wait queue is empty. In this
+ * case, the lock waiters will enter the queued mode slowpath trying to
+ * become the queue head and set the pending bit.
+ *
+ * This hybrid PV queued/unfair lock combines the best attributes of a
+ * queued lock (no lock starvation) and an unfair lock (good performance
+ * on not heavily contended locks).
  */
-#define queued_spin_trylock(l)	pv_queued_spin_steal_lock(l)
-static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
+#define queued_spin_trylock(l)	pv_hybrid_queued_unfair_trylock(l)
+static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
 {
 	struct __qspinlock *l = (void *)lock;
 
-	if (!(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
-	    (cmpxchg_acquire(&l->locked, 0, _Q_LOCKED_VAL) == 0)) {
-		qstat_inc(qstat_pv_lock_stealing, true);
-		return true;
+	/*
+	 * Stay in unfair lock mode as long as queued mode waiters are
+	 * present in the MCS wait queue but the pending bit isn't set.
+	 */
+	for (;;) {
+		int val = atomic_read(&lock->val);
+
+		if (!(val & _Q_LOCKED_PENDING_MASK) &&
+		    (cmpxchg_acquire(&l->locked, 0, _Q_LOCKED_VAL) == 0)) {
+			qstat_inc(qstat_pv_lock_stealing, true);
+			return true;
+		}
+		if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
+			break;
+
+		cpu_relax();
 	}
 
 	return false;
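As a companion to the diff above, here is a minimal userspace sketch (my own, not part of the patch) of the same decision logic, built on C11 <stdatomic.h>. The _Q_* constants mirror the kernel's 4-byte qspinlock layout (locked byte, pending bit, MCS tail in the upper bits), but struct qspinlock_model and hybrid_trylock() are illustrative stand-ins: the kernel's cmpxchg_acquire() operates on the locked byte alone, whereas this model compare-and-swaps the whole word, and cpu_relax() has no portable userspace equivalent.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit layout mirroring the kernel's 4-byte qspinlock word. */
#define _Q_LOCKED_VAL           1U              /* bits 0-7: locked byte */
#define _Q_PENDING_VAL          (1U << 8)       /* bit 8: pending */
#define _Q_LOCKED_PENDING_MASK  (_Q_LOCKED_VAL | _Q_PENDING_VAL)
#define _Q_TAIL_MASK            (~0U << 16)     /* bits 16-31: MCS tail */

struct qspinlock_model {
	_Atomic uint32_t val;
};

static bool hybrid_trylock(struct qspinlock_model *lock)
{
	/*
	 * Stay in unfair mode while queued waiters exist (tail != 0)
	 * but the queue head has not yet raised the pending bit.
	 */
	for (;;) {
		uint32_t val = atomic_load_explicit(&lock->val,
						    memory_order_relaxed);
		uint32_t expected = val;

		/*
		 * Steal the lock when neither locked nor pending is set.
		 * Simplification: this CAS covers the whole word; the
		 * kernel cmpxchg_acquire()s only the locked byte.
		 */
		if (!(val & _Q_LOCKED_PENDING_MASK) &&
		    atomic_compare_exchange_strong_explicit(&lock->val,
				&expected, val | _Q_LOCKED_VAL,
				memory_order_acquire, memory_order_relaxed))
			return true;

		/*
		 * Queue empty (nobody to jump ahead of) or pending bit
		 * set (queue head is spinning): fall back to queueing.
		 */
		if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
			break;

		/* The kernel calls cpu_relax() here before retrying. */
	}
	return false;
}

int main(void)
{
	struct qspinlock_model lock = { .val = 0 };

	printf("first try:  %s\n",
	       hybrid_trylock(&lock) ? "stolen" : "queue");
	printf("second try: %s\n",
	       hybrid_trylock(&lock) ? "stolen" : "queue");
	return 0;
}

The point the loop encodes: stealing is attempted only while both the locked and pending bits are clear, and a waiter keeps retrying only while the MCS queue is non-empty with pending still clear. Once the queue head sets the pending bit, every incoming waiter returns false and takes the fair queued slowpath, which is what bounds starvation.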
