/* Intel x86 (ia32 and x86-64) assembly helpers.
 * Don't include this file directly, use asm.h instead.
 *
 * Most of this code comes straight out of the Linux kernel.
 *
 * The terms of the GPL v2 apply.
 */

/**
 * barrier - full memory barrier
 *
 * MFENCE orders all earlier loads and stores before any later ones;
 * the "memory" clobber also keeps the compiler from reordering
 * memory accesses across the call.
 */
static inline void barrier(void)
{
	__asm__ __volatile__("mfence": : :"memory");
}

static __inline__ void cpu_relax(void)
{
	__asm__ __volatile__("pause");
}
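
/* Spin-wait sketch (illustrative only; "ready" is a hypothetical
 * flag set by another thread): cpu_relax() emits PAUSE, which saves
 * power in the loop and frees execution resources for a sibling
 * hyper-thread.
 *
 *	while (!*(volatile int *)&ready)
 *		cpu_relax();
 *	barrier();	// order later reads after seeing the flag
 */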

/* Please use these only if you _really_ know what you are doing,
 * and remember to call iopl(3) first (include <sys/io.h>); see the
 * usage sketch below.
 */
static inline void cli(void)
{
	asm volatile("cli": : :"memory");
}

static inline void sti(void)
{
	asm volatile("sti": : :"memory");
}
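
/* Usage sketch for cli()/sti() from user space (illustrative only):
 * the process must raise its I/O privilege level with iopl(3) before
 * the CPU will accept these instructions, which in turn requires
 * root (CAP_SYS_RAWIO).
 *
 *	#include <sys/io.h>
 *
 *	if (iopl(3) < 0) {
 *		perror("iopl");
 *		return 1;
 *	}
 *	cli();			// interrupts off on this CPU
 *	// ... short, timing-critical section ...
 *	sti();			// interrupts back on
 */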

typedef struct { int counter; } atomic_t;

#ifdef __i386__

#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)		(((v)->counter) = (i))

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
		"lock; addl %1,%0"
		:"+m" (v->counter)
		:"ir" (i));
}

/**
 * atomic_add_return - add and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int __i = i;

	__asm__ __volatile__(
		"lock; xaddl %0, %1"
		:"+r" (i), "+m" (v->counter)
		: : "memory");
	/* xaddl leaves the old value of v->counter in i */
	return i + __i;
}

#define atomic_inc_return(v)  (atomic_add_return(1, v))

#elif defined(__x86_64__)

/* almost the same as i386, but extra care must be taken when
 * specifying clobbered registers
 */

#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
static inline int atomic_read(const atomic_t *v)
{
	return v->counter;
}

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static inline void atomic_set(atomic_t *v, int i)
{
	v->counter = i;
}

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	asm volatile("lock; addl %1,%0"
		     : "=m" (v->counter)
		     : "ir" (i), "m" (v->counter));
}

/**
 * atomic_add_return - add and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int __i = i;
	asm volatile("lock; xaddl %0, %1"
		     : "+r" (i), "+m" (v->counter)
		     : : "memory");
	return i + __i;
}

#define atomic_inc_return(v)  (atomic_add_return(1, v))

#endif
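
/* Usage sketch for the atomic_t API (illustrative only; the names
 * below are hypothetical): a shared event counter updated from
 * several threads without a lock.
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	void record_event(void)		// may run concurrently
 *	{
 *		atomic_add(1, &nr_events);
 *	}
 *
 *	int record_and_count(void)	// returns the updated total
 *	{
 *		return atomic_inc_return(&nr_events);
 *	}
 *
 *	int events_so_far(void)
 *	{
 *		return atomic_read(&nr_events);
 *	}
 */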