path: root/lib/test_xarray.c
author		Matthew Wilcox <willy@infradead.org>	2019-02-08 14:02:45 -0500
committer	Matthew Wilcox <willy@infradead.org>	2019-02-09 00:00:49 -0500
commit		f818b82b80164014d7ee3df89bb110808778c796 (patch)
tree		36ee086ab1bd913f9a0519b1a4b8c08b7176b875 /lib/test_xarray.c
parent		2fa044e51a1f35d7b04cbde07ec513b0ba195e38 (diff)
XArray: Mark xa_insert and xa_reserve as must_check
If the user doesn't care about the return value from xa_insert(), then they should be using xa_store() instead. The point of xa_reserve() is to get the return value early, before taking another lock, so this should also be __must_check.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
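Not part of the patch, but as a reader aid: a minimal sketch of the two calling patterns the annotation is meant to encourage, written against the public xarray API. The foo_array/foo_lock names and both helper functions are hypothetical.

/* Illustrative only: foo_array, foo_lock and both helpers are made-up names. */
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(foo_array);
static DEFINE_SPINLOCK(foo_lock);

/*
 * xa_insert(): the return value is the whole point.  -EBUSY means the
 * index is already in use; a caller who would ignore that should be
 * calling xa_store() instead.
 */
static int foo_add_unique(unsigned long index, void *foo)
{
	return xa_insert(&foo_array, index, foo, GFP_KERNEL);
}

/*
 * xa_reserve(): take the allocation failure early, while sleeping is
 * still allowed, then fill the slot later under a spinlock without
 * needing to allocate.
 */
static int foo_publish(unsigned long index, void *foo)
{
	int err = xa_reserve(&foo_array, index, GFP_KERNEL);

	if (err)
		return err;

	spin_lock(&foo_lock);
	/* The slot was reserved above, so this store will not allocate. */
	xa_store(&foo_array, index, foo, GFP_ATOMIC);
	spin_unlock(&foo_lock);
	return 0;
}

If xa_reserve() fails, the caller learns about it before foo_lock is taken; silently dropping that return value would only defer the failure to a point where recovery is harder, which is exactly what __must_check now prevents.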
Diffstat (limited to 'lib/test_xarray.c')
-rw-r--r--	lib/test_xarray.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index eaf53f742c72..3eaa40ddc390 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -364,21 +364,21 @@ static noinline void check_reserve(struct xarray *xa)
 
 	/* An array with a reserved entry is not empty */
 	XA_BUG_ON(xa, !xa_empty(xa));
-	xa_reserve(xa, 12345678, GFP_KERNEL);
+	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
 	XA_BUG_ON(xa, xa_empty(xa));
 	XA_BUG_ON(xa, xa_load(xa, 12345678));
 	xa_release(xa, 12345678);
 	XA_BUG_ON(xa, !xa_empty(xa));
 
 	/* Releasing a used entry does nothing */
-	xa_reserve(xa, 12345678, GFP_KERNEL);
+	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
 	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
 	xa_release(xa, 12345678);
 	xa_erase_index(xa, 12345678);
 	XA_BUG_ON(xa, !xa_empty(xa));
 
 	/* cmpxchg sees a reserved entry as NULL */
-	xa_reserve(xa, 12345678, GFP_KERNEL);
+	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
 	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, NULL, xa_mk_value(12345678),
 			GFP_NOWAIT) != NULL);
 	xa_release(xa, 12345678);
@@ -386,7 +386,7 @@ static noinline void check_reserve(struct xarray *xa)
 	XA_BUG_ON(xa, !xa_empty(xa));
 
 	/* But xa_insert does not */
-	xa_reserve(xa, 12345678, GFP_KERNEL);
+	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
 	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
 			-EBUSY);
 	XA_BUG_ON(xa, xa_empty(xa));
@@ -395,7 +395,7 @@ static noinline void check_reserve(struct xarray *xa)
 
 	/* Can iterate through a reserved entry */
 	xa_store_index(xa, 5, GFP_KERNEL);
-	xa_reserve(xa, 6, GFP_KERNEL);
+	XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0);
 	xa_store_index(xa, 7, GFP_KERNEL);
 
 	xa_for_each(xa, index, entry) {