aboutsummaryrefslogtreecommitdiffstats
path: root/include/nvgpu/linux/atomic.h
blob: 0734672e4306cd13ec627ce18a0e5bd9dbbdf401 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
/*
 * Copyright (c) 2017, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __NVGPU_ATOMIC_LINUX_H__
#define __NVGPU_ATOMIC_LINUX_H__

#ifdef __KERNEL__
#include <linux/atomic.h>
#endif

/*
 * 32-bit atomic counter. Wraps the kernel's atomic_t so common nvgpu
 * code can stay independent of Linux-specific types.
 */
typedef struct nvgpu_atomic {
  atomic_t atomic_var;
} nvgpu_atomic_t;

/*
 * 64-bit atomic counter. Wraps the kernel's atomic64_t so common nvgpu
 * code can stay independent of Linux-specific types.
 */
typedef struct nvgpu_atomic64 {
  atomic64_t atomic_var;
} nvgpu_atomic64_t;

/*
 * Static initializers for the wrapper types, usable at definition time,
 * e.g.: nvgpu_atomic_t a = __nvgpu_atomic_init(0);
 */
#define __nvgpu_atomic_init(i)	{ ATOMIC_INIT(i) }
#define __nvgpu_atomic64_init(i)	{ ATOMIC64_INIT(i) }

/* Atomically store @i into the counter. */
static inline void __nvgpu_atomic_set(nvgpu_atomic_t *v, int i)
{
	atomic_t *ctr = &v->atomic_var;

	atomic_set(ctr, i);
}

/* Atomically load and return the current counter value. */
static inline int __nvgpu_atomic_read(nvgpu_atomic_t *v)
{
	atomic_t *ctr = &v->atomic_var;

	return atomic_read(ctr);
}

/* Atomically increment the counter by one; the result is discarded. */
static inline void __nvgpu_atomic_inc(nvgpu_atomic_t *v)
{
	atomic_t *ctr = &v->atomic_var;

	atomic_inc(ctr);
}

/* Atomically increment the counter by one and return the new value. */
static inline int __nvgpu_atomic_inc_return(nvgpu_atomic_t *v)
{
	atomic_t *ctr = &v->atomic_var;

	return atomic_inc_return(ctr);
}

/* Atomically decrement the counter by one; the result is discarded. */
static inline void __nvgpu_atomic_dec(nvgpu_atomic_t *v)
{
	atomic_t *ctr = &v->atomic_var;

	atomic_dec(ctr);
}

/* Atomically decrement the counter by one and return the new value. */
static inline int __nvgpu_atomic_dec_return(nvgpu_atomic_t *v)
{
	atomic_t *ctr = &v->atomic_var;

	return atomic_dec_return(ctr);
}

/*
 * Atomically swap the counter to @new, but only if it currently holds
 * @old. Returns the value observed before the operation; the exchange
 * happened iff that value equals @old.
 */
static inline int __nvgpu_atomic_cmpxchg(nvgpu_atomic_t *v, int old, int new)
{
	atomic_t *ctr = &v->atomic_var;

	return atomic_cmpxchg(ctr, old, new);
}

/* Atomically replace the counter with @new and return the prior value. */
static inline int __nvgpu_atomic_xchg(nvgpu_atomic_t *v, int new)
{
	atomic_t *ctr = &v->atomic_var;

	return atomic_xchg(ctr, new);
}

/*
 * Atomically increment the counter; true when the incremented value
 * is exactly zero.
 */
static inline bool __nvgpu_atomic_inc_and_test(nvgpu_atomic_t *v)
{
	atomic_t *ctr = &v->atomic_var;

	return atomic_inc_and_test(ctr);
}

/*
 * Atomically decrement the counter; true when the decremented value
 * is exactly zero.
 */
static inline bool __nvgpu_atomic_dec_and_test(nvgpu_atomic_t *v)
{
	atomic_t *ctr = &v->atomic_var;

	return atomic_dec_and_test(ctr);
}

/*
 * Atomically subtract @i from the counter; true when the resulting
 * value is exactly zero.
 */
static inline bool __nvgpu_atomic_sub_and_test(int i, nvgpu_atomic_t *v)
{
	atomic_t *ctr = &v->atomic_var;

	return atomic_sub_and_test(i, ctr);
}

/* Atomically add @i to the counter and return the new value. */
static inline int __nvgpu_atomic_add_return(int i, nvgpu_atomic_t *v)
{
	atomic_t *ctr = &v->atomic_var;

	return atomic_add_return(i, ctr);
}

/*
 * Atomically add @a to the counter unless it currently holds @u.
 * Returns non-zero if the addition was performed.
 */
static inline int __nvgpu_atomic_add_unless(nvgpu_atomic_t *v, int a, int u)
{
	atomic_t *ctr = &v->atomic_var;

	return atomic_add_unless(ctr, a, u);
}

/* Atomically store @i into the 64-bit counter. */
static inline void __nvgpu_atomic64_set(nvgpu_atomic64_t *v, long i)
{
	atomic64_t *ctr = &v->atomic_var;

	atomic64_set(ctr, i);
}

/* Atomically load and return the current 64-bit counter value. */
static inline long __nvgpu_atomic64_read(nvgpu_atomic64_t *v)
{
	atomic64_t *ctr = &v->atomic_var;

	return atomic64_read(ctr);
}

/* Atomically add @x to the 64-bit counter; the result is discarded. */
static inline void __nvgpu_atomic64_add(long x, nvgpu_atomic64_t *v)
{
	atomic64_t *ctr = &v->atomic_var;

	atomic64_add(x, ctr);
}

/* Atomically increment the 64-bit counter; the result is discarded. */
static inline void __nvgpu_atomic64_inc(nvgpu_atomic64_t *v)
{
	atomic64_t *ctr = &v->atomic_var;

	atomic64_inc(ctr);
}

/* Atomically increment the 64-bit counter and return the new value. */
static inline long __nvgpu_atomic64_inc_return(nvgpu_atomic64_t *v)
{
	atomic64_t *ctr = &v->atomic_var;

	return atomic64_inc_return(ctr);
}

/* Atomically decrement the 64-bit counter; the result is discarded. */
static inline void __nvgpu_atomic64_dec(nvgpu_atomic64_t *v)
{
	atomic64_t *ctr = &v->atomic_var;

	atomic64_dec(ctr);
}

/*
 * Atomically decrement the 64-bit counter and return the new value.
 *
 * Fix: the previous version was declared void while wrapping
 * atomic64_dec_return(), silently discarding the decremented value.
 * Returning long matches the other *_return helpers in this file and
 * stays backward compatible with callers that ignore the result.
 */
static inline long __nvgpu_atomic64_dec_return(nvgpu_atomic64_t *v)
{
	return atomic64_dec_return(&v->atomic_var);
}

/*
 * Atomically swap the 64-bit counter to @new, but only if it currently
 * holds @old. Returns the value observed before the operation; the
 * exchange happened iff that value equals @old.
 */
static inline long __nvgpu_atomic64_cmpxchg(nvgpu_atomic64_t *v,
					long old, long new)
{
	atomic64_t *ctr = &v->atomic_var;

	return atomic64_cmpxchg(ctr, old, new);
}

/* Atomically subtract @x from the 64-bit counter; result discarded. */
static inline void __nvgpu_atomic64_sub(long x, nvgpu_atomic64_t *v)
{
	atomic64_t *ctr = &v->atomic_var;

	atomic64_sub(x, ctr);
}

/* Atomically subtract @x from the 64-bit counter and return the new value. */
static inline long __nvgpu_atomic64_sub_return(long x, nvgpu_atomic64_t *v)
{
	atomic64_t *ctr = &v->atomic_var;

	return atomic64_sub_return(x, ctr);
}
#endif /*__NVGPU_ATOMIC_LINUX_H__ */