#ifndef _UNC_QUEUELOCK_H_
#define _UNC_QUEUELOCK_H_
/**
* Queue lock
*
* This is an implementation of T. Anderson's queue lock.
* It strives to follow the normal Linux locking conventions
* as much as possible. The rules for acquiring a lock are:
*
 *  1) The caller must ensure that interrupts and preemption are disabled.
 *
 *  2) The caller _cannot_ recursively acquire the lock.
 *
 *  3) The caller may not sleep while holding the lock. This is currently
 *     not enforced, but sleeping while holding the lock will not work.
*/
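
/*
 * Usage sketch: a minimal, hypothetical example of how a caller might
 * satisfy rule 1 by disabling local interrupts (which also prevents
 * involuntary preemption). Only queue_lock_init(), queue_lock() and
 * queue_unlock() come from this header; "my_lock" and the surrounding
 * code are assumptions made for illustration.
 *
 *	static queuelock_t my_lock;	(initialized once via queue_lock_init())
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	queue_lock(&my_lock);
 *	... critical section; must not sleep (rule 3) ...
 *	queue_unlock(&my_lock);
 *	local_irq_restore(flags);
 */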

#include <linux/spinlock.h>
#include <linux/cache.h>
#include <asm/atomic.h>
#include <asm/processor.h>	/* cpu_relax() */
#include <linux/cpumask.h>
#include <linux/smp.h>

typedef struct {
	/* pad the values being spun on so that each CPU
	 * spins on a cache line of its own
	 */
	union {
		enum {
			MUST_WAIT,
			HAS_LOCK
		} val;
		char 	padding[SMP_CACHE_BYTES];
	} slots[NR_CPUS];

	/* spin_slot is only read and written by its owning CPU and is
	 * never spun on, so it can share cache lines. next_slot is
	 * bounced between caches on every lock acquisition anyway, so
	 * padding would not help it either.
	 */
	int		spin_slot[NR_CPUS];

	/* The next slot that will be available.
	 */
	atomic_t 	next_slot;
} queuelock_t;


static inline void queue_lock_init(queuelock_t *lock)
{
	int i;
	for_each_possible_cpu(i) {
		lock->slots[i].val 	= MUST_WAIT;
		lock->spin_slot[i]	= i;
	}
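	/* the first caller will be handed slot 0, so mark it as available */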
	lock->slots[0].val 	= HAS_LOCK;
	atomic_set(&lock->next_slot, 0);
}


static inline void queue_lock(queuelock_t *lock)
{
	int me = smp_processor_id();
	/* Get the slot to spin on. atomic_inc_return() returns the
	 * incremented value, so subtract one to get the slot we were
	 * assigned.
	 */
	lock->spin_slot[me] = atomic_inc_return(&lock->next_slot) - 1;
	/* check for wrap-around
	 * This could probably be optimized away if we ensure that NR_CPUS
	 * divides INT_MAX...
	 */
	if (unlikely(lock->spin_slot[me] == NR_CPUS - 1))
		atomic_add(-NR_CPUS, &lock->next_slot);
	/* limit the slot index to the valid range */
	lock->spin_slot[me] %= NR_CPUS;
	/* spin until we acquire the lock; cpu_relax() keeps the compiler
	 * from hoisting the load out of the loop
	 */
	while (lock->slots[lock->spin_slot[me]].val == MUST_WAIT)
		cpu_relax();
	/* reset our slot so it can be handed out again in a later round */
	lock->slots[lock->spin_slot[me]].val = MUST_WAIT;
}


static inline void queue_unlock(queuelock_t *lock)
{
	int me = smp_processor_id();
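	/* pass the lock on: whoever holds (or next takes) the following
	 * slot may enter
	 */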
	lock->slots[(lock->spin_slot[me] + 1) % NR_CPUS].val = HAS_LOCK;
}
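
/*
 * Hand-off example (assumption: three CPUs A, B and C contend, NR_CPUS >= 3,
 * and no earlier acquisitions): slot 0 starts out as HAS_LOCK, so A, the
 * first caller of queue_lock(), is assigned slot 0 and enters immediately.
 * B and C are assigned slots 1 and 2 and spin. A's queue_unlock() flips
 * slot 1 to HAS_LOCK, releasing B; B's unlock later releases C. Acquisition
 * is therefore FIFO in the order in which the atomic_inc_return() calls
 * completed.
 */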

#endif /*	_UNC_QUEUELOCK_H_	*/