Diffstat (limited to 'include/linux')
 include/linux/mutex-debug.h |   1 +
 include/linux/mutex.h       | 355 +++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 355 insertions(+), 1 deletion(-)
diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h
index 731d77d6e155..4ac8b1977b73 100644
--- a/include/linux/mutex-debug.h
+++ b/include/linux/mutex-debug.h
@@ -3,6 +3,7 @@
 
 #include <linux/linkage.h>
 #include <linux/lockdep.h>
+#include <linux/debug_locks.h>
 
 /*
  * Mutexes - debugging helpers:
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 433da8a1a426..a56b0ccc8a6c 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -10,6 +10,7 @@
 #ifndef __LINUX_MUTEX_H
 #define __LINUX_MUTEX_H
 
+#include <asm/current.h>
 #include <linux/list.h>
 #include <linux/spinlock_types.h>
 #include <linux/linkage.h>
@@ -77,6 +78,36 @@ struct mutex_waiter {
 #endif
 };
 
+struct ww_class {
+	atomic_long_t stamp;
+	struct lock_class_key acquire_key;
+	struct lock_class_key mutex_key;
+	const char *acquire_name;
+	const char *mutex_name;
+};
+
+struct ww_acquire_ctx {
+	struct task_struct *task;
+	unsigned long stamp;
+	unsigned acquired;
+#ifdef CONFIG_DEBUG_MUTEXES
+	unsigned done_acquire;
+	struct ww_class *ww_class;
+	struct ww_mutex *contending_lock;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
+};
+
+struct ww_mutex {
+	struct mutex base;
+	struct ww_acquire_ctx *ctx;
+#ifdef CONFIG_DEBUG_MUTEXES
+	struct ww_class *ww_class;
+#endif
+};
+
 #ifdef CONFIG_DEBUG_MUTEXES
 # include <linux/mutex-debug.h>
 #else
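The division of labour among the three new structures: one struct ww_class per locking domain (it provides the global stamp counter and the lockdep keys), one struct ww_mutex embedded in every lockable object, and one struct ww_acquire_ctx per multi-lock operation, usually kept on the stack. An illustrative consumer-side sketch, not part of the patch, with hypothetical obj/obj_entry names that the later examples reuse:

	/* Each lockable object embeds a w/w mutex. */
	struct obj {
		struct ww_mutex lock;
		/* ... data protected by @lock ... */
	};

	/* One list entry per object an operation wants to acquire. */
	struct obj_entry {
		struct list_head head;
		struct obj *obj;
	};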
@@ -101,8 +132,11 @@ static inline void mutex_destroy(struct mutex *lock) {}
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
 		, .dep_map = { .name = #lockname }
+# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \
+		, .ww_class = &ww_class
 #else
 # define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class)
 #endif
 
 #define __MUTEX_INITIALIZER(lockname) \
@@ -112,13 +146,49 @@ static inline void mutex_destroy(struct mutex *lock) {}
 		__DEBUG_MUTEX_INITIALIZER(lockname) \
 		__DEP_MAP_MUTEX_INITIALIZER(lockname) }
 
+#define __WW_CLASS_INITIALIZER(ww_class) \
+		{ .stamp = ATOMIC_LONG_INIT(0) \
+		, .acquire_name = #ww_class "_acquire" \
+		, .mutex_name = #ww_class "_mutex" }
+
+#define __WW_MUTEX_INITIALIZER(lockname, class) \
+		{ .base = __MUTEX_INITIALIZER(lockname.base) \
+		__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
+
 #define DEFINE_MUTEX(mutexname) \
 	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
 
+#define DEFINE_WW_CLASS(classname) \
+	struct ww_class classname = __WW_CLASS_INITIALIZER(classname)
+
+#define DEFINE_WW_MUTEX(mutexname, ww_class) \
+	struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
+
+
 extern void __mutex_init(struct mutex *lock, const char *name,
 			 struct lock_class_key *key);
 
 /**
+ * ww_mutex_init - initialize the w/w mutex
+ * @lock: the mutex to be initialized
+ * @ww_class: the w/w class the mutex should belong to
+ *
+ * Initialize the w/w mutex to the unlocked state and associate it with the
+ * given class.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
+static inline void ww_mutex_init(struct ww_mutex *lock,
+				 struct ww_class *ww_class)
+{
+	__mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
+	lock->ctx = NULL;
+#ifdef CONFIG_DEBUG_MUTEXES
+	lock->ww_class = ww_class;
+#endif
+}
+
+/**
  * mutex_is_locked - is the mutex locked
  * @lock: the mutex to be queried
  *
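For illustration, both initialization styles the macros and ww_mutex_init above provide might be used like this (hypothetical obj_ww_class, global_obj_lock and init_obj names; not part of the patch):

	/* One class per locking domain, defined once at file scope. */
	static DEFINE_WW_CLASS(obj_ww_class);

	/* A standalone, statically defined w/w mutex of that class. */
	static DEFINE_WW_MUTEX(global_obj_lock, obj_ww_class);

	/* Dynamic initialization, e.g. in an object's constructor. */
	static void init_obj(struct obj *o)
	{
		ww_mutex_init(&o->lock, &obj_ww_class);
	}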
@@ -136,6 +206,7 @@ static inline int mutex_is_locked(struct mutex *lock)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
 extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+
 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
 					unsigned int subclass);
 extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
@@ -147,7 +218,7 @@ extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
 
 #define mutex_lock_nest_lock(lock, nest_lock)				\
 do {									\
-	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
+	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
 	_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map);		\
 } while (0)
 
@@ -170,6 +241,288 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
  */
 extern int mutex_trylock(struct mutex *lock);
 extern void mutex_unlock(struct mutex *lock);
+
+/**
+ * ww_acquire_init - initialize a w/w acquire context
+ * @ctx: w/w acquire context to initialize
+ * @ww_class: w/w class of the context
+ *
+ * Initializes a context to acquire multiple mutexes of the given w/w class.
+ *
+ * Context-based w/w mutex acquiring can be done in any order whatsoever within
+ * a given lock class. Deadlocks will be detected and handled with the
+ * wait/wound logic.
+ *
+ * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
+ * result in undetected deadlocks and is hence forbidden. Mixing different
+ * contexts for the same w/w class when acquiring mutexes can also result in
+ * undetected deadlocks, and is hence also forbidden. Both types of abuse will
+ * be caught by enabling CONFIG_PROVE_LOCKING.
+ *
+ * Nesting of acquire contexts for _different_ w/w classes is possible, subject
+ * to the usual locking rules between different lock classes.
+ *
+ * An acquire context must be released with ww_acquire_fini by the same task
+ * before the memory is freed. It is recommended to allocate the context itself
+ * on the stack.
+ */
+static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
+				   struct ww_class *ww_class)
+{
+	ctx->task = current;
+	ctx->stamp = atomic_long_inc_return(&ww_class->stamp);
+	ctx->acquired = 0;
+#ifdef CONFIG_DEBUG_MUTEXES
+	ctx->ww_class = ww_class;
+	ctx->done_acquire = 0;
+	ctx->contending_lock = NULL;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
+	lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
+			 &ww_class->acquire_key, 0);
+	mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
+#endif
+}
+
+/**
+ * ww_acquire_done - marks the end of the acquire phase
+ * @ctx: the acquire context
+ *
+ * Marks the end of the acquire phase: any further w/w mutex lock calls using
+ * this context are forbidden.
+ *
+ * Calling this function is optional. It is just useful to document w/w mutex
+ * code and clearly separate the acquire phase from actually using the locked
+ * data structures.
+ */
+static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+	lockdep_assert_held(ctx);
+
+	DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
+	ctx->done_acquire = 1;
+#endif
+}
+
+/**
+ * ww_acquire_fini - releases a w/w acquire context
+ * @ctx: the acquire context to free
+ *
+ * Releases a w/w acquire context. This must be called _after_ all acquired w/w
+ * mutexes have been released with ww_mutex_unlock.
+ */
+static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+	mutex_release(&ctx->dep_map, 0, _THIS_IP_);
+
+	DEBUG_LOCKS_WARN_ON(ctx->acquired);
+	if (!config_enabled(CONFIG_PROVE_LOCKING))
+		/*
+		 * lockdep will normally catch this, but make context
+		 * reuse detectable even without CONFIG_PROVE_LOCKING
+		 */
+		ctx->done_acquire = 1;
+
+	if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC))
+		/* ensure ww_acquire_fini will still fail if called twice */
+		ctx->acquired = ~0U;
+#endif
+}
+
+extern int __must_check __ww_mutex_lock(struct ww_mutex *lock,
+					struct ww_acquire_ctx *ctx);
+extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
+						struct ww_acquire_ctx *ctx);
+
+/**
+ * ww_mutex_lock - acquire the w/w mutex
+ * @lock: the mutex to be acquired
+ * @ctx: w/w acquire context, or NULL to acquire only a single lock.
+ *
+ * Lock the w/w mutex exclusively for this task.
+ *
+ * Deadlocks within a given w/w class of locks are detected and handled with
+ * the wait/wound algorithm. If the lock isn't immediately available, this
+ * function either sleeps until it is (wait case) or selects the current
+ * context for backing off by returning -EDEADLK (wound case). Trying to
+ * acquire the same lock with the same context twice is detected and signalled
+ * by returning -EALREADY. Returns 0 if the mutex was successfully acquired.
+ *
+ * In the wound case the caller must release all currently held w/w mutexes
+ * for the given context and then wait for this contending lock to become
+ * available by calling ww_mutex_lock_slow. Alternatively callers can opt to
+ * not acquire this lock and proceed with trying to acquire further w/w
+ * mutexes (e.g. when scanning through lru lists trying to free resources).
+ *
+ * The mutex must later on be released by the same task that
+ * acquired it. The task may not exit without first unlocking the mutex. Also,
+ * kernel memory where the mutex resides must not be freed with the mutex still
+ * locked. The mutex must first be initialized (or statically defined) before
+ * it can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must
+ * be of the same w/w lock class as was used to initialize the acquire context.
+ *
+ * A mutex acquired with this function must be released with ww_mutex_unlock.
+ */
+static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	if (ctx)
+		return __ww_mutex_lock(lock, ctx);
+	else {
+		mutex_lock(&lock->base);
+		return 0;
+	}
+}
+
+/**
+ * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
+ * @lock: the mutex to be acquired
+ * @ctx: w/w acquire context
+ *
+ * Lock the w/w mutex exclusively for this task.
+ *
+ * Deadlocks within a given w/w class of locks are detected and handled with
+ * the wait/wound algorithm. If the lock isn't immediately available, this
+ * function either sleeps until it is (wait case) or selects the current
+ * context for backing off by returning -EDEADLK (wound case). Trying to
+ * acquire the same lock with the same context twice is detected and signalled
+ * by returning -EALREADY. Returns 0 if the mutex was successfully acquired.
+ * If a signal arrives while waiting for the lock, this function returns -EINTR.
+ *
+ * In the wound case the caller must release all currently held w/w mutexes
+ * for the given context and then wait for this contending lock to become
+ * available by calling ww_mutex_lock_slow_interruptible. Alternatively callers
+ * can opt to not acquire this lock and proceed with trying to acquire further
+ * w/w mutexes (e.g. when scanning through lru lists trying to free resources).
+ *
+ * The mutex must later on be released by the same task that
+ * acquired it. The task may not exit without first unlocking the mutex. Also,
+ * kernel memory where the mutex resides must not be freed with the mutex still
+ * locked. The mutex must first be initialized (or statically defined) before
+ * it can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must
+ * be of the same w/w lock class as was used to initialize the acquire context.
+ *
+ * A mutex acquired with this function must be released with ww_mutex_unlock.
+ */
+static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
+							   struct ww_acquire_ctx *ctx)
+{
+	if (ctx)
+		return __ww_mutex_lock_interruptible(lock, ctx);
+	else
+		return mutex_lock_interruptible(&lock->base);
+}
+
+/**
+ * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
+ * @lock: the mutex to be acquired
+ * @ctx: w/w acquire context
+ *
+ * Acquires a w/w mutex with the given context after a wound case. This
+ * function will sleep until the lock becomes available.
+ *
+ * The caller must have released all w/w mutexes already acquired with the
+ * context and then call this function on the contended lock.
+ *
+ * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
+ * needs with ww_mutex_lock. Note that the -EALREADY return code from
+ * ww_mutex_lock can be used to avoid locking this contended mutex twice.
+ *
+ * It is forbidden to call this function with any other w/w mutexes associated
+ * with the context held. It is forbidden to call this on anything other than
+ * the contending mutex.
+ *
+ * Note that the slowpath lock acquiring can also be done by calling
+ * ww_mutex_lock directly. This function here is simply to help w/w mutex
+ * locking code readability by clearly denoting the slowpath.
+ */
+static inline void
+ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	int ret;
+#ifdef CONFIG_DEBUG_MUTEXES
+	DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
+#endif
+	ret = ww_mutex_lock(lock, ctx);
+	(void)ret;
+}
+
+/**
+ * ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex,
+ *				      interruptible
+ * @lock: the mutex to be acquired
+ * @ctx: w/w acquire context
+ *
+ * Acquires a w/w mutex with the given context after a wound case. This
+ * function will sleep until the lock becomes available and returns 0 when the
+ * lock has been acquired. If a signal arrives while waiting for the lock then
+ * this function returns -EINTR.
+ *
+ * The caller must have released all w/w mutexes already acquired with the
+ * context and then call this function on the contended lock.
+ *
+ * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
+ * needs with ww_mutex_lock. Note that the -EALREADY return code from
+ * ww_mutex_lock can be used to avoid locking this contended mutex twice.
+ *
+ * It is forbidden to call this function with any other w/w mutexes associated
+ * with the given context held. It is forbidden to call this on anything other
+ * than the contending mutex.
+ *
+ * Note that the slowpath lock acquiring can also be done by calling
+ * ww_mutex_lock_interruptible directly. This function here is simply to help
+ * w/w mutex locking code readability by clearly denoting the slowpath.
+ */
+static inline int __must_check
+ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
+				 struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+	DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
+#endif
+	return ww_mutex_lock_interruptible(lock, ctx);
+}
+
+extern void ww_mutex_unlock(struct ww_mutex *lock);
+
+/**
+ * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context
+ * @lock: mutex to lock
+ *
+ * Trylocks a mutex without acquire context, so no deadlock detection is
+ * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
+ */
+static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
+{
+	return mutex_trylock(&lock->base);
+}
+
+/**
+ * ww_mutex_destroy - mark a w/w mutex unusable
+ * @lock: the mutex to be destroyed
+ *
+ * This function marks the mutex uninitialized, and any subsequent
+ * use of the mutex is forbidden. The mutex must not be locked when
+ * this function is called.
+ */
+static inline void ww_mutex_destroy(struct ww_mutex *lock)
+{
+	mutex_destroy(&lock->base);
+}
+
+/**
+ * ww_mutex_is_locked - is the w/w mutex locked
+ * @lock: the mutex to be queried
+ *
+ * Returns 1 if the mutex is locked, 0 if unlocked.
+ */
+static inline bool ww_mutex_is_locked(struct ww_mutex *lock)
+{
+	return mutex_is_locked(&lock->base);
+}
+
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
 #ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
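Tying the API above together: a typical consumer locks an arbitrary set of objects in whatever order it encounters them and retries on -EDEADLK, exactly the wound/backoff protocol the kernel-doc describes. A sketch under the hypothetical obj/obj_entry/obj_ww_class names from the earlier examples (not part of the patch):

	static int lock_objs(struct list_head *list, struct ww_acquire_ctx *ctx)
	{
		struct obj *res_obj = NULL;
		struct obj_entry *contended_entry = NULL;
		struct obj_entry *entry;
		int ret;

		ww_acquire_init(ctx, &obj_ww_class);
	retry:
		list_for_each_entry(entry, list, head) {
			if (entry == contended_entry) {
				/*
				 * Already locked via ww_mutex_lock_slow() below;
				 * from here on the reverse walk under err covers it.
				 */
				res_obj = NULL;
				continue;
			}

			ret = ww_mutex_lock(&entry->obj->lock, ctx);
			if (ret < 0) {
				contended_entry = entry;
				goto err;
			}
		}

		ww_acquire_done(ctx);	/* optional: no further locks follow */
		return 0;

	err:
		/* Back off: drop every lock taken in this pass ... */
		list_for_each_entry_continue_reverse(entry, list, head)
			ww_mutex_unlock(&entry->obj->lock);
		/* ... plus the slow-locked one if we hadn't reached it yet. */
		if (res_obj)
			ww_mutex_unlock(&res_obj->lock);

		if (ret == -EDEADLK) {
			/* Wound case: wait on the contended lock, then retry. */
			ww_mutex_lock_slow(&contended_entry->obj->lock, ctx);
			res_obj = contended_entry->obj;
			goto retry;
		}

		ww_acquire_fini(ctx);
		return ret;
	}

The matching unlock side simply walks the list calling ww_mutex_unlock on each object and then releases the context with ww_acquire_fini.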