author     Matthew Wilcox <willy@linux.intel.com>          2016-03-17 17:21:45 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-03-17 18:09:34 -0400
commit     1366c37ed84b166a0fffe201154b0d3b78a3976b (patch)
tree       3a688edaf09069694db90421f200568b886a4295
parent     f67c07f07fca95a7f330b8bb928eabaf9fcce75d (diff)
radix tree test harness
This code is mostly from Andrew Morton and Nick Piggin; tarball downloaded
from http://ozlabs.org/~akpm/rtth.tar.gz with sha1sum
0ce679db9ec047296b5d1ff7a1dfaa03a7bef1bd

Some small modifications were necessary to the test harness to fix the build
with the current Linux source code.

I also made minor modifications to automatically test the radix-tree.c and
radix-tree.h files that are in the current source tree, as opposed to a
copied and slightly modified version.

I am sure more could be done to tidy up the harness, as well as adding more
tests.

[koct9i@gmail.com: fix compilation]
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Cc: Shuah Khan <shuahkh@osg.samsung.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox <willy@linux.intel.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Konstantin Khlebnikov <koct9i@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
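For orientation, here is a minimal sketch (not part of this commit) of the kind of check the harness performs, using the same core radix-tree calls the tests below exercise; example_check() and the index/value constants are made up for illustration. With the Makefile below, building and running the suite should amount to `make` followed by `./main`.

#include <assert.h>
#include <stddef.h>

#include <linux/radix-tree.h>
#include <linux/slab.h>

/* Hypothetical check in the style of add_and_check() in main.c below;
 * main() is assumed to have called rcu_register_thread() and
 * radix_tree_init() first, as it does in this harness. */
static void example_check(void)
{
	RADIX_TREE(tree, GFP_KERNEL);	/* statically initialised, empty tree */
	int item = 42;			/* any non-NULL pointer can be stored */

	assert(radix_tree_insert(&tree, 5, &item) == 0);
	assert(radix_tree_lookup(&tree, 5) == &item);

	radix_tree_tag_set(&tree, 5, 0);		/* set tag 0 on index 5 */
	assert(radix_tree_tag_get(&tree, 5, 0) != 0);

	assert(radix_tree_delete(&tree, 5) == &item);	/* returns the removed item */
	assert(radix_tree_lookup(&tree, 5) == NULL);
}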
-rw-r--r--  tools/testing/radix-tree/.gitignore | 2
-rw-r--r--  tools/testing/radix-tree/Makefile | 19
-rw-r--r--  tools/testing/radix-tree/find_next_bit.c | 57
-rw-r--r--  tools/testing/radix-tree/linux.c | 60
-rw-r--r--  tools/testing/radix-tree/linux/bitops.h | 150
-rw-r--r--  tools/testing/radix-tree/linux/bitops/__ffs.h | 43
-rw-r--r--  tools/testing/radix-tree/linux/bitops/ffs.h | 41
-rw-r--r--  tools/testing/radix-tree/linux/bitops/ffz.h | 12
-rw-r--r--  tools/testing/radix-tree/linux/bitops/find.h | 13
-rw-r--r--  tools/testing/radix-tree/linux/bitops/fls.h | 41
-rw-r--r--  tools/testing/radix-tree/linux/bitops/fls64.h | 14
-rw-r--r--  tools/testing/radix-tree/linux/bitops/hweight.h | 11
-rw-r--r--  tools/testing/radix-tree/linux/bitops/le.h | 53
-rw-r--r--  tools/testing/radix-tree/linux/bitops/non-atomic.h | 111
-rw-r--r--  tools/testing/radix-tree/linux/bug.h | 1
-rw-r--r--  tools/testing/radix-tree/linux/cpu.h | 34
-rw-r--r--  tools/testing/radix-tree/linux/export.h | 2
-rw-r--r--  tools/testing/radix-tree/linux/gfp.h | 10
-rw-r--r--  tools/testing/radix-tree/linux/kernel.h | 34
-rw-r--r--  tools/testing/radix-tree/linux/kmemleak.h | 1
-rw-r--r--  tools/testing/radix-tree/linux/mempool.h | 16
-rw-r--r--  tools/testing/radix-tree/linux/notifier.h | 8
-rw-r--r--  tools/testing/radix-tree/linux/percpu.h | 7
-rw-r--r--  tools/testing/radix-tree/linux/preempt.h | 4
-rw-r--r--  tools/testing/radix-tree/linux/radix-tree.h | 1
-rw-r--r--  tools/testing/radix-tree/linux/rcupdate.h | 9
-rw-r--r--  tools/testing/radix-tree/linux/slab.h | 28
-rw-r--r--  tools/testing/radix-tree/linux/types.h | 28
-rw-r--r--  tools/testing/radix-tree/main.c | 271
-rw-r--r--  tools/testing/radix-tree/rcupdate.c | 86
-rw-r--r--  tools/testing/radix-tree/regression.h | 7
-rw-r--r--  tools/testing/radix-tree/regression1.c | 220
-rw-r--r--  tools/testing/radix-tree/regression2.c | 126
-rw-r--r--  tools/testing/radix-tree/tag_check.c | 332
-rw-r--r--  tools/testing/radix-tree/test.c | 218
-rw-r--r--  tools/testing/radix-tree/test.h | 40
36 files changed, 2110 insertions(+), 0 deletions(-)
diff --git a/tools/testing/radix-tree/.gitignore b/tools/testing/radix-tree/.gitignore
new file mode 100644
index 000000000000..11d888ca6a92
--- /dev/null
+++ b/tools/testing/radix-tree/.gitignore
@@ -0,0 +1,2 @@
1main
2radix-tree.c
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile
new file mode 100644
index 000000000000..e33ac3159b49
--- /dev/null
+++ b/tools/testing/radix-tree/Makefile
@@ -0,0 +1,19 @@
1
2CFLAGS += -I. -g -Wall -D_LGPL_SOURCE
3LDFLAGS += -lpthread -lurcu
4TARGETS = main
5OFILES = main.o radix-tree.o linux.o test.o tag_check.o find_next_bit.o \
6 regression1.o regression2.o
7
8targets: $(TARGETS)
9
10main: $(OFILES)
11 $(CC) $(CFLAGS) $(LDFLAGS) $(OFILES) -o main
12
13clean:
14 $(RM) -f $(TARGETS) *.o radix-tree.c
15
16$(OFILES): *.h */*.h
17
18radix-tree.c: ../../../lib/radix-tree.c
19 sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
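As a rough illustration of the sed rule above (the helper name here is hypothetical, not taken from lib/radix-tree.c), a kernel-side definition such as

	static inline void example_helper(void)

is emitted into the generated radix-tree.c as

	void example_helper(void)

dropping static/inline so the definitions get external linkage, presumably so that the test and regression files can call helpers that are private in the kernel build, while still compiling the live lib/radix-tree.c rather than a copied version.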
diff --git a/tools/testing/radix-tree/find_next_bit.c b/tools/testing/radix-tree/find_next_bit.c
new file mode 100644
index 000000000000..d1c2178bb2d4
--- /dev/null
+++ b/tools/testing/radix-tree/find_next_bit.c
@@ -0,0 +1,57 @@
1/* find_next_bit.c: fallback find next bit implementation
2 *
3 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/types.h>
13#include <linux/bitops.h>
14
15#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
16
17/*
18 * Find the next set bit in a memory region.
19 */
20unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
21 unsigned long offset)
22{
23 const unsigned long *p = addr + BITOP_WORD(offset);
24 unsigned long result = offset & ~(BITS_PER_LONG-1);
25 unsigned long tmp;
26
27 if (offset >= size)
28 return size;
29 size -= result;
30 offset %= BITS_PER_LONG;
31 if (offset) {
32 tmp = *(p++);
33 tmp &= (~0UL << offset);
34 if (size < BITS_PER_LONG)
35 goto found_first;
36 if (tmp)
37 goto found_middle;
38 size -= BITS_PER_LONG;
39 result += BITS_PER_LONG;
40 }
41 while (size & ~(BITS_PER_LONG-1)) {
42 if ((tmp = *(p++)))
43 goto found_middle;
44 result += BITS_PER_LONG;
45 size -= BITS_PER_LONG;
46 }
47 if (!size)
48 return result;
49 tmp = *p;
50
51found_first:
52 tmp &= (~0UL >> (BITS_PER_LONG - size));
53 if (tmp == 0UL) /* Are any bits set? */
54 return result + size; /* Nope. */
55found_middle:
56 return result + __ffs(tmp);
57}
diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c
new file mode 100644
index 000000000000..154823737b20
--- /dev/null
+++ b/tools/testing/radix-tree/linux.c
@@ -0,0 +1,60 @@
1#include <stdlib.h>
2#include <string.h>
3#include <malloc.h>
4#include <unistd.h>
5#include <assert.h>
6
7#include <linux/mempool.h>
8#include <linux/slab.h>
9#include <urcu/uatomic.h>
10
11int nr_allocated;
12
13void *mempool_alloc(mempool_t *pool, int gfp_mask)
14{
15 return pool->alloc(gfp_mask, pool->data);
16}
17
18void mempool_free(void *element, mempool_t *pool)
19{
20 pool->free(element, pool->data);
21}
22
23mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
24 mempool_free_t *free_fn, void *pool_data)
25{
26 mempool_t *ret = malloc(sizeof(*ret));
27
28 ret->alloc = alloc_fn;
29 ret->free = free_fn;
30 ret->data = pool_data;
31 return ret;
32}
33
34void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
35{
36 void *ret = malloc(cachep->size);
37 if (cachep->ctor)
38 cachep->ctor(ret);
39 uatomic_inc(&nr_allocated);
40 return ret;
41}
42
43void kmem_cache_free(struct kmem_cache *cachep, void *objp)
44{
45 assert(objp);
46 uatomic_dec(&nr_allocated);
47 memset(objp, 0, cachep->size);
48 free(objp);
49}
50
51struct kmem_cache *
52kmem_cache_create(const char *name, size_t size, size_t offset,
53 unsigned long flags, void (*ctor)(void *))
54{
55 struct kmem_cache *ret = malloc(sizeof(*ret));
56
57 ret->size = size;
58 ret->ctor = ctor;
59 return ret;
60}
diff --git a/tools/testing/radix-tree/linux/bitops.h b/tools/testing/radix-tree/linux/bitops.h
new file mode 100644
index 000000000000..71d58427ab60
--- /dev/null
+++ b/tools/testing/radix-tree/linux/bitops.h
@@ -0,0 +1,150 @@
1#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
2#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
3
4#include <linux/types.h>
5
6#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
7#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
8
9/**
10 * __set_bit - Set a bit in memory
11 * @nr: the bit to set
12 * @addr: the address to start counting from
13 *
14 * Unlike set_bit(), this function is non-atomic and may be reordered.
15 * If it's called on the same region of memory simultaneously, the effect
16 * may be that only one operation succeeds.
17 */
18static inline void __set_bit(int nr, volatile unsigned long *addr)
19{
20 unsigned long mask = BITOP_MASK(nr);
21 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
22
23 *p |= mask;
24}
25
26static inline void __clear_bit(int nr, volatile unsigned long *addr)
27{
28 unsigned long mask = BITOP_MASK(nr);
29 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
30
31 *p &= ~mask;
32}
33
34/**
35 * __change_bit - Toggle a bit in memory
36 * @nr: the bit to change
37 * @addr: the address to start counting from
38 *
39 * Unlike change_bit(), this function is non-atomic and may be reordered.
40 * If it's called on the same region of memory simultaneously, the effect
41 * may be that only one operation succeeds.
42 */
43static inline void __change_bit(int nr, volatile unsigned long *addr)
44{
45 unsigned long mask = BITOP_MASK(nr);
46 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
47
48 *p ^= mask;
49}
50
51/**
52 * __test_and_set_bit - Set a bit and return its old value
53 * @nr: Bit to set
54 * @addr: Address to count from
55 *
56 * This operation is non-atomic and can be reordered.
57 * If two examples of this operation race, one can appear to succeed
58 * but actually fail. You must protect multiple accesses with a lock.
59 */
60static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
61{
62 unsigned long mask = BITOP_MASK(nr);
63 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
64 unsigned long old = *p;
65
66 *p = old | mask;
67 return (old & mask) != 0;
68}
69
70/**
71 * __test_and_clear_bit - Clear a bit and return its old value
72 * @nr: Bit to clear
73 * @addr: Address to count from
74 *
75 * This operation is non-atomic and can be reordered.
76 * If two examples of this operation race, one can appear to succeed
77 * but actually fail. You must protect multiple accesses with a lock.
78 */
79static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
80{
81 unsigned long mask = BITOP_MASK(nr);
82 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
83 unsigned long old = *p;
84
85 *p = old & ~mask;
86 return (old & mask) != 0;
87}
88
89/* WARNING: non atomic and it can be reordered! */
90static inline int __test_and_change_bit(int nr,
91 volatile unsigned long *addr)
92{
93 unsigned long mask = BITOP_MASK(nr);
94 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
95 unsigned long old = *p;
96
97 *p = old ^ mask;
98 return (old & mask) != 0;
99}
100
101/**
102 * test_bit - Determine whether a bit is set
103 * @nr: bit number to test
104 * @addr: Address to start counting from
105 */
106static inline int test_bit(int nr, const volatile unsigned long *addr)
107{
108 return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
109}
110
111/**
112 * __ffs - find first bit in word.
113 * @word: The word to search
114 *
115 * Undefined if no bit exists, so code should check against 0 first.
116 */
117static inline unsigned long __ffs(unsigned long word)
118{
119 int num = 0;
120
121 if ((word & 0xffffffff) == 0) {
122 num += 32;
123 word >>= 32;
124 }
125 if ((word & 0xffff) == 0) {
126 num += 16;
127 word >>= 16;
128 }
129 if ((word & 0xff) == 0) {
130 num += 8;
131 word >>= 8;
132 }
133 if ((word & 0xf) == 0) {
134 num += 4;
135 word >>= 4;
136 }
137 if ((word & 0x3) == 0) {
138 num += 2;
139 word >>= 2;
140 }
141 if ((word & 0x1) == 0)
142 num += 1;
143 return num;
144}
145
146unsigned long find_next_bit(const unsigned long *addr,
147 unsigned long size,
148 unsigned long offset);
149
150#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
diff --git a/tools/testing/radix-tree/linux/bitops/__ffs.h b/tools/testing/radix-tree/linux/bitops/__ffs.h
new file mode 100644
index 000000000000..9a3274aecf83
--- /dev/null
+++ b/tools/testing/radix-tree/linux/bitops/__ffs.h
@@ -0,0 +1,43 @@
1#ifndef _ASM_GENERIC_BITOPS___FFS_H_
2#define _ASM_GENERIC_BITOPS___FFS_H_
3
4#include <asm/types.h>
5
6/**
7 * __ffs - find first bit in word.
8 * @word: The word to search
9 *
10 * Undefined if no bit exists, so code should check against 0 first.
11 */
12static inline unsigned long __ffs(unsigned long word)
13{
14 int num = 0;
15
16#if BITS_PER_LONG == 64
17 if ((word & 0xffffffff) == 0) {
18 num += 32;
19 word >>= 32;
20 }
21#endif
22 if ((word & 0xffff) == 0) {
23 num += 16;
24 word >>= 16;
25 }
26 if ((word & 0xff) == 0) {
27 num += 8;
28 word >>= 8;
29 }
30 if ((word & 0xf) == 0) {
31 num += 4;
32 word >>= 4;
33 }
34 if ((word & 0x3) == 0) {
35 num += 2;
36 word >>= 2;
37 }
38 if ((word & 0x1) == 0)
39 num += 1;
40 return num;
41}
42
43#endif /* _ASM_GENERIC_BITOPS___FFS_H_ */
diff --git a/tools/testing/radix-tree/linux/bitops/ffs.h b/tools/testing/radix-tree/linux/bitops/ffs.h
new file mode 100644
index 000000000000..fbbb43af7dc0
--- /dev/null
+++ b/tools/testing/radix-tree/linux/bitops/ffs.h
@@ -0,0 +1,41 @@
1#ifndef _ASM_GENERIC_BITOPS_FFS_H_
2#define _ASM_GENERIC_BITOPS_FFS_H_
3
4/**
5 * ffs - find first bit set
6 * @x: the word to search
7 *
8 * This is defined the same way as
9 * the libc and compiler builtin ffs routines, therefore
10 * differs in spirit from the above ffz (man ffs).
11 */
12static inline int ffs(int x)
13{
14 int r = 1;
15
16 if (!x)
17 return 0;
18 if (!(x & 0xffff)) {
19 x >>= 16;
20 r += 16;
21 }
22 if (!(x & 0xff)) {
23 x >>= 8;
24 r += 8;
25 }
26 if (!(x & 0xf)) {
27 x >>= 4;
28 r += 4;
29 }
30 if (!(x & 3)) {
31 x >>= 2;
32 r += 2;
33 }
34 if (!(x & 1)) {
35 x >>= 1;
36 r += 1;
37 }
38 return r;
39}
40
41#endif /* _ASM_GENERIC_BITOPS_FFS_H_ */
diff --git a/tools/testing/radix-tree/linux/bitops/ffz.h b/tools/testing/radix-tree/linux/bitops/ffz.h
new file mode 100644
index 000000000000..6744bd4cdf46
--- /dev/null
+++ b/tools/testing/radix-tree/linux/bitops/ffz.h
@@ -0,0 +1,12 @@
1#ifndef _ASM_GENERIC_BITOPS_FFZ_H_
2#define _ASM_GENERIC_BITOPS_FFZ_H_
3
4/*
5 * ffz - find first zero in word.
6 * @word: The word to search
7 *
8 * Undefined if no zero exists, so code should check against ~0UL first.
9 */
10#define ffz(x) __ffs(~(x))
11
12#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */
diff --git a/tools/testing/radix-tree/linux/bitops/find.h b/tools/testing/radix-tree/linux/bitops/find.h
new file mode 100644
index 000000000000..72a51e5a12ef
--- /dev/null
+++ b/tools/testing/radix-tree/linux/bitops/find.h
@@ -0,0 +1,13 @@
1#ifndef _ASM_GENERIC_BITOPS_FIND_H_
2#define _ASM_GENERIC_BITOPS_FIND_H_
3
4extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
5 size, unsigned long offset);
6
7extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
8 long size, unsigned long offset);
9
10#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
11#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
12
13#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */
diff --git a/tools/testing/radix-tree/linux/bitops/fls.h b/tools/testing/radix-tree/linux/bitops/fls.h
new file mode 100644
index 000000000000..850859bc5069
--- /dev/null
+++ b/tools/testing/radix-tree/linux/bitops/fls.h
@@ -0,0 +1,41 @@
1#ifndef _ASM_GENERIC_BITOPS_FLS_H_
2#define _ASM_GENERIC_BITOPS_FLS_H_
3
4/**
5 * fls - find last (most-significant) bit set
6 * @x: the word to search
7 *
8 * This is defined the same way as ffs.
9 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
10 */
11
12static inline int fls(int x)
13{
14 int r = 32;
15
16 if (!x)
17 return 0;
18 if (!(x & 0xffff0000u)) {
19 x <<= 16;
20 r -= 16;
21 }
22 if (!(x & 0xff000000u)) {
23 x <<= 8;
24 r -= 8;
25 }
26 if (!(x & 0xf0000000u)) {
27 x <<= 4;
28 r -= 4;
29 }
30 if (!(x & 0xc0000000u)) {
31 x <<= 2;
32 r -= 2;
33 }
34 if (!(x & 0x80000000u)) {
35 x <<= 1;
36 r -= 1;
37 }
38 return r;
39}
40
41#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */
diff --git a/tools/testing/radix-tree/linux/bitops/fls64.h b/tools/testing/radix-tree/linux/bitops/fls64.h
new file mode 100644
index 000000000000..1b6b17ce2428
--- /dev/null
+++ b/tools/testing/radix-tree/linux/bitops/fls64.h
@@ -0,0 +1,14 @@
1#ifndef _ASM_GENERIC_BITOPS_FLS64_H_
2#define _ASM_GENERIC_BITOPS_FLS64_H_
3
4#include <asm/types.h>
5
6static inline int fls64(__u64 x)
7{
8 __u32 h = x >> 32;
9 if (h)
10 return fls(h) + 32;
11 return fls(x);
12}
13
14#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */
diff --git a/tools/testing/radix-tree/linux/bitops/hweight.h b/tools/testing/radix-tree/linux/bitops/hweight.h
new file mode 100644
index 000000000000..fbbc383771da
--- /dev/null
+++ b/tools/testing/radix-tree/linux/bitops/hweight.h
@@ -0,0 +1,11 @@
1#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_
2#define _ASM_GENERIC_BITOPS_HWEIGHT_H_
3
4#include <asm/types.h>
5
6extern unsigned int hweight32(unsigned int w);
7extern unsigned int hweight16(unsigned int w);
8extern unsigned int hweight8(unsigned int w);
9extern unsigned long hweight64(__u64 w);
10
11#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
diff --git a/tools/testing/radix-tree/linux/bitops/le.h b/tools/testing/radix-tree/linux/bitops/le.h
new file mode 100644
index 000000000000..b9c7e5d2d2ad
--- /dev/null
+++ b/tools/testing/radix-tree/linux/bitops/le.h
@@ -0,0 +1,53 @@
1#ifndef _ASM_GENERIC_BITOPS_LE_H_
2#define _ASM_GENERIC_BITOPS_LE_H_
3
4#include <asm/types.h>
5#include <asm/byteorder.h>
6
7#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
8#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
9
10#if defined(__LITTLE_ENDIAN)
11
12#define generic_test_le_bit(nr, addr) test_bit(nr, addr)
13#define generic___set_le_bit(nr, addr) __set_bit(nr, addr)
14#define generic___clear_le_bit(nr, addr) __clear_bit(nr, addr)
15
16#define generic_test_and_set_le_bit(nr, addr) test_and_set_bit(nr, addr)
17#define generic_test_and_clear_le_bit(nr, addr) test_and_clear_bit(nr, addr)
18
19#define generic___test_and_set_le_bit(nr, addr) __test_and_set_bit(nr, addr)
20#define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr)
21
22#define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset)
23
24#elif defined(__BIG_ENDIAN)
25
26#define generic_test_le_bit(nr, addr) \
27 test_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
28#define generic___set_le_bit(nr, addr) \
29 __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
30#define generic___clear_le_bit(nr, addr) \
31 __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
32
33#define generic_test_and_set_le_bit(nr, addr) \
34 test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
35#define generic_test_and_clear_le_bit(nr, addr) \
36 test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
37
38#define generic___test_and_set_le_bit(nr, addr) \
39 __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
40#define generic___test_and_clear_le_bit(nr, addr) \
41 __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
42
43extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
44 unsigned long size, unsigned long offset);
45
46#else
47#error "Please fix <asm/byteorder.h>"
48#endif
49
50#define generic_find_first_zero_le_bit(addr, size) \
51 generic_find_next_zero_le_bit((addr), (size), 0)
52
53#endif /* _ASM_GENERIC_BITOPS_LE_H_ */
diff --git a/tools/testing/radix-tree/linux/bitops/non-atomic.h b/tools/testing/radix-tree/linux/bitops/non-atomic.h
new file mode 100644
index 000000000000..46a825cf2ae1
--- /dev/null
+++ b/tools/testing/radix-tree/linux/bitops/non-atomic.h
@@ -0,0 +1,111 @@
1#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
2#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
3
4#include <asm/types.h>
5
6#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
7#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
8
9/**
10 * __set_bit - Set a bit in memory
11 * @nr: the bit to set
12 * @addr: the address to start counting from
13 *
14 * Unlike set_bit(), this function is non-atomic and may be reordered.
15 * If it's called on the same region of memory simultaneously, the effect
16 * may be that only one operation succeeds.
17 */
18static inline void __set_bit(int nr, volatile unsigned long *addr)
19{
20 unsigned long mask = BITOP_MASK(nr);
21 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
22
23 *p |= mask;
24}
25
26static inline void __clear_bit(int nr, volatile unsigned long *addr)
27{
28 unsigned long mask = BITOP_MASK(nr);
29 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
30
31 *p &= ~mask;
32}
33
34/**
35 * __change_bit - Toggle a bit in memory
36 * @nr: the bit to change
37 * @addr: the address to start counting from
38 *
39 * Unlike change_bit(), this function is non-atomic and may be reordered.
40 * If it's called on the same region of memory simultaneously, the effect
41 * may be that only one operation succeeds.
42 */
43static inline void __change_bit(int nr, volatile unsigned long *addr)
44{
45 unsigned long mask = BITOP_MASK(nr);
46 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
47
48 *p ^= mask;
49}
50
51/**
52 * __test_and_set_bit - Set a bit and return its old value
53 * @nr: Bit to set
54 * @addr: Address to count from
55 *
56 * This operation is non-atomic and can be reordered.
57 * If two examples of this operation race, one can appear to succeed
58 * but actually fail. You must protect multiple accesses with a lock.
59 */
60static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
61{
62 unsigned long mask = BITOP_MASK(nr);
63 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
64 unsigned long old = *p;
65
66 *p = old | mask;
67 return (old & mask) != 0;
68}
69
70/**
71 * __test_and_clear_bit - Clear a bit and return its old value
72 * @nr: Bit to clear
73 * @addr: Address to count from
74 *
75 * This operation is non-atomic and can be reordered.
76 * If two examples of this operation race, one can appear to succeed
77 * but actually fail. You must protect multiple accesses with a lock.
78 */
79static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
80{
81 unsigned long mask = BITOP_MASK(nr);
82 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
83 unsigned long old = *p;
84
85 *p = old & ~mask;
86 return (old & mask) != 0;
87}
88
89/* WARNING: non atomic and it can be reordered! */
90static inline int __test_and_change_bit(int nr,
91 volatile unsigned long *addr)
92{
93 unsigned long mask = BITOP_MASK(nr);
94 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
95 unsigned long old = *p;
96
97 *p = old ^ mask;
98 return (old & mask) != 0;
99}
100
101/**
102 * test_bit - Determine whether a bit is set
103 * @nr: bit number to test
104 * @addr: Address to start counting from
105 */
106static inline int test_bit(int nr, const volatile unsigned long *addr)
107{
108 return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
109}
110
111#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
diff --git a/tools/testing/radix-tree/linux/bug.h b/tools/testing/radix-tree/linux/bug.h
new file mode 100644
index 000000000000..ccbe444977df
--- /dev/null
+++ b/tools/testing/radix-tree/linux/bug.h
@@ -0,0 +1 @@
#define WARN_ON_ONCE(x) assert(x)
diff --git a/tools/testing/radix-tree/linux/cpu.h b/tools/testing/radix-tree/linux/cpu.h
new file mode 100644
index 000000000000..60a40459f269
--- /dev/null
+++ b/tools/testing/radix-tree/linux/cpu.h
@@ -0,0 +1,34 @@
1
2#define hotcpu_notifier(a, b)
3
4#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
5#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */
6#define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */
7#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
8#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
9#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
10#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task,
11 * not handling interrupts, soon dead.
12 * Called on the dying cpu, interrupts
13 * are already disabled. Must not
14 * sleep, must not fail */
15#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
16 * lock is dropped */
17#define CPU_STARTING 0x000A /* CPU (unsigned)v soon running.
18 * Called on the new cpu, just before
19 * enabling interrupts. Must not sleep,
20 * must not fail */
21#define CPU_DYING_IDLE 0x000B /* CPU (unsigned)v dying, reached
22 * idle loop. */
23#define CPU_BROKEN 0x000C /* CPU (unsigned)v did not die properly,
24 * perhaps due to preemption. */
25#define CPU_TASKS_FROZEN 0x0010
26
27#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN)
28#define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
29#define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
30#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
31#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
32#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
33#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
34#define CPU_STARTING_FROZEN (CPU_STARTING | CPU_TASKS_FROZEN)
diff --git a/tools/testing/radix-tree/linux/export.h b/tools/testing/radix-tree/linux/export.h
new file mode 100644
index 000000000000..b6afd131998d
--- /dev/null
+++ b/tools/testing/radix-tree/linux/export.h
@@ -0,0 +1,2 @@
1
2#define EXPORT_SYMBOL(sym)
diff --git a/tools/testing/radix-tree/linux/gfp.h b/tools/testing/radix-tree/linux/gfp.h
new file mode 100644
index 000000000000..0e37f7a760eb
--- /dev/null
+++ b/tools/testing/radix-tree/linux/gfp.h
@@ -0,0 +1,10 @@
1#ifndef _GFP_H
2#define _GFP_H
3
4#define __GFP_BITS_SHIFT 22
5#define __GFP_BITS_MASK ((gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
6#define __GFP_WAIT 1
7#define __GFP_ACCOUNT 0
8#define __GFP_NOWARN 0
9
10#endif
diff --git a/tools/testing/radix-tree/linux/kernel.h b/tools/testing/radix-tree/linux/kernel.h
new file mode 100644
index 000000000000..27d5fe41515a
--- /dev/null
+++ b/tools/testing/radix-tree/linux/kernel.h
@@ -0,0 +1,34 @@
1#ifndef _KERNEL_H
2#define _KERNEL_H
3
4#include <assert.h>
5#include <string.h>
6#include <stdio.h>
7#include <stddef.h>
8#include <limits.h>
9
10#ifndef NULL
11#define NULL 0
12#endif
13
14#define BUG_ON(expr) assert(!(expr))
15#define __init
16#define panic(expr)
17#define printk printf
18#define __force
19#define likely(c) (c)
20#define unlikely(c) (c)
21#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
22
23#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
24
25#define container_of(ptr, type, member) ({ \
26 const typeof( ((type *)0)->member ) *__mptr = (ptr); \
27 (type *)( (char *)__mptr - offsetof(type, member) );})
28#define min(a, b) ((a) < (b) ? (a) : (b))
29
30static inline int in_interrupt(void)
31{
32 return 0;
33}
34#endif /* _KERNEL_H */
diff --git a/tools/testing/radix-tree/linux/kmemleak.h b/tools/testing/radix-tree/linux/kmemleak.h
new file mode 100644
index 000000000000..155f112786c4
--- /dev/null
+++ b/tools/testing/radix-tree/linux/kmemleak.h
@@ -0,0 +1 @@
static inline void kmemleak_update_trace(const void *ptr) { }
diff --git a/tools/testing/radix-tree/linux/mempool.h b/tools/testing/radix-tree/linux/mempool.h
new file mode 100644
index 000000000000..6a2dc55b41d6
--- /dev/null
+++ b/tools/testing/radix-tree/linux/mempool.h
@@ -0,0 +1,16 @@
1
2#include <linux/slab.h>
3
4typedef void *(mempool_alloc_t)(int gfp_mask, void *pool_data);
5typedef void (mempool_free_t)(void *element, void *pool_data);
6
7typedef struct {
8 mempool_alloc_t *alloc;
9 mempool_free_t *free;
10 void *data;
11} mempool_t;
12
13void *mempool_alloc(mempool_t *pool, int gfp_mask);
14void mempool_free(void *element, mempool_t *pool);
15mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
16 mempool_free_t *free_fn, void *pool_data);
diff --git a/tools/testing/radix-tree/linux/notifier.h b/tools/testing/radix-tree/linux/notifier.h
new file mode 100644
index 000000000000..70e4797d5a46
--- /dev/null
+++ b/tools/testing/radix-tree/linux/notifier.h
@@ -0,0 +1,8 @@
1#ifndef _NOTIFIER_H
2#define _NOTIFIER_H
3
4struct notifier_block;
5
6#define NOTIFY_OK 0x0001 /* Suits me */
7
8#endif
diff --git a/tools/testing/radix-tree/linux/percpu.h b/tools/testing/radix-tree/linux/percpu.h
new file mode 100644
index 000000000000..5837f1d56f17
--- /dev/null
+++ b/tools/testing/radix-tree/linux/percpu.h
@@ -0,0 +1,7 @@
1
2#define DEFINE_PER_CPU(type, val) type val
3
4#define __get_cpu_var(var) var
5#define this_cpu_ptr(var) var
6#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
7#define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu))
diff --git a/tools/testing/radix-tree/linux/preempt.h b/tools/testing/radix-tree/linux/preempt.h
new file mode 100644
index 000000000000..6210672e3baa
--- /dev/null
+++ b/tools/testing/radix-tree/linux/preempt.h
@@ -0,0 +1,4 @@
1/* */
2
3#define preempt_disable() do { } while (0)
4#define preempt_enable() do { } while (0)
diff --git a/tools/testing/radix-tree/linux/radix-tree.h b/tools/testing/radix-tree/linux/radix-tree.h
new file mode 100644
index 000000000000..ce694ddd4aea
--- /dev/null
+++ b/tools/testing/radix-tree/linux/radix-tree.h
@@ -0,0 +1 @@
#include "../../../../include/linux/radix-tree.h"
diff --git a/tools/testing/radix-tree/linux/rcupdate.h b/tools/testing/radix-tree/linux/rcupdate.h
new file mode 100644
index 000000000000..f7129ea2a899
--- /dev/null
+++ b/tools/testing/radix-tree/linux/rcupdate.h
@@ -0,0 +1,9 @@
1#ifndef _RCUPDATE_H
2#define _RCUPDATE_H
3
4#include <urcu.h>
5
6#define rcu_dereference_raw(p) rcu_dereference(p)
7#define rcu_dereference_protected(p, cond) rcu_dereference(p)
8
9#endif
diff --git a/tools/testing/radix-tree/linux/slab.h b/tools/testing/radix-tree/linux/slab.h
new file mode 100644
index 000000000000..57282506c21d
--- /dev/null
+++ b/tools/testing/radix-tree/linux/slab.h
@@ -0,0 +1,28 @@
1#ifndef SLAB_H
2#define SLAB_H
3
4#include <linux/types.h>
5
6#define GFP_KERNEL 1
7#define SLAB_HWCACHE_ALIGN 1
8#define SLAB_PANIC 2
9#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
10
11static inline int gfpflags_allow_blocking(gfp_t mask)
12{
13 return 1;
14}
15
16struct kmem_cache {
17 int size;
18 void (*ctor)(void *);
19};
20
21void *kmem_cache_alloc(struct kmem_cache *cachep, int flags);
22void kmem_cache_free(struct kmem_cache *cachep, void *objp);
23
24struct kmem_cache *
25kmem_cache_create(const char *name, size_t size, size_t offset,
26 unsigned long flags, void (*ctor)(void *));
27
28#endif /* SLAB_H */
diff --git a/tools/testing/radix-tree/linux/types.h b/tools/testing/radix-tree/linux/types.h
new file mode 100644
index 000000000000..72a9d85f6c76
--- /dev/null
+++ b/tools/testing/radix-tree/linux/types.h
@@ -0,0 +1,28 @@
1#ifndef _TYPES_H
2#define _TYPES_H
3
4#define __rcu
5#define __read_mostly
6
7#define BITS_PER_LONG (sizeof(long) * 8)
8
9struct list_head {
10 struct list_head *next, *prev;
11};
12
13static inline void INIT_LIST_HEAD(struct list_head *list)
14{
15 list->next = list;
16 list->prev = list;
17}
18
19typedef struct {
20 unsigned int x;
21} spinlock_t;
22
23#define uninitialized_var(x) x = x
24
25typedef unsigned gfp_t;
26#include <linux/gfp.h>
27
28#endif
diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c
new file mode 100644
index 000000000000..6b8a412c6a11
--- /dev/null
+++ b/tools/testing/radix-tree/main.c
@@ -0,0 +1,271 @@
1#include <stdio.h>
2#include <stdlib.h>
3#include <unistd.h>
4#include <time.h>
5#include <assert.h>
6
7#include <linux/slab.h>
8#include <linux/radix-tree.h>
9
10#include "test.h"
11#include "regression.h"
12
13void __gang_check(unsigned long middle, long down, long up, int chunk, int hop)
14{
15 long idx;
16 RADIX_TREE(tree, GFP_KERNEL);
17
18 middle = 1 << 30;
19
20 for (idx = -down; idx < up; idx++)
21 item_insert(&tree, middle + idx);
22
23 item_check_absent(&tree, middle - down - 1);
24 for (idx = -down; idx < up; idx++)
25 item_check_present(&tree, middle + idx);
26 item_check_absent(&tree, middle + up);
27
28 item_gang_check_present(&tree, middle - down,
29 up + down, chunk, hop);
30 item_full_scan(&tree, middle - down, down + up, chunk);
31 item_kill_tree(&tree);
32}
33
34void gang_check(void)
35{
36 __gang_check(1 << 30, 128, 128, 35, 2);
37 __gang_check(1 << 31, 128, 128, 32, 32);
38 __gang_check(1 << 31, 128, 128, 32, 100);
39 __gang_check(1 << 31, 128, 128, 17, 7);
40 __gang_check(0xffff0000, 0, 65536, 17, 7);
41 __gang_check(0xfffffffe, 1, 1, 17, 7);
42}
43
44void __big_gang_check(void)
45{
46 unsigned long start;
47 int wrapped = 0;
48
49 start = 0;
50 do {
51 unsigned long old_start;
52
53// printf("0x%08lx\n", start);
54 __gang_check(start, rand() % 113 + 1, rand() % 71,
55 rand() % 157, rand() % 91 + 1);
56 old_start = start;
57 start += rand() % 1000000;
58 start %= 1ULL << 33;
59 if (start < old_start)
60 wrapped = 1;
61 } while (!wrapped);
62}
63
64void big_gang_check(void)
65{
66 int i;
67
68 for (i = 0; i < 1000; i++) {
69 __big_gang_check();
70 srand(time(0));
71 printf("%d ", i);
72 fflush(stdout);
73 }
74}
75
76void add_and_check(void)
77{
78 RADIX_TREE(tree, GFP_KERNEL);
79
80 item_insert(&tree, 44);
81 item_check_present(&tree, 44);
82 item_check_absent(&tree, 43);
83 item_kill_tree(&tree);
84}
85
86void dynamic_height_check(void)
87{
88 int i;
89 RADIX_TREE(tree, GFP_KERNEL);
90 tree_verify_min_height(&tree, 0);
91
92 item_insert(&tree, 42);
93 tree_verify_min_height(&tree, 42);
94
95 item_insert(&tree, 1000000);
96 tree_verify_min_height(&tree, 1000000);
97
98 assert(item_delete(&tree, 1000000));
99 tree_verify_min_height(&tree, 42);
100
101 assert(item_delete(&tree, 42));
102 tree_verify_min_height(&tree, 0);
103
104 for (i = 0; i < 1000; i++) {
105 item_insert(&tree, i);
106 tree_verify_min_height(&tree, i);
107 }
108
109 i--;
110 for (;;) {
111 assert(item_delete(&tree, i));
112 if (i == 0) {
113 tree_verify_min_height(&tree, 0);
114 break;
115 }
116 i--;
117 tree_verify_min_height(&tree, i);
118 }
119
120 item_kill_tree(&tree);
121}
122
123void check_copied_tags(struct radix_tree_root *tree, unsigned long start, unsigned long end, unsigned long *idx, int count, int fromtag, int totag)
124{
125 int i;
126
127 for (i = 0; i < count; i++) {
128/* if (i % 1000 == 0)
129 putchar('.'); */
130 if (idx[i] < start || idx[i] > end) {
131 if (item_tag_get(tree, idx[i], totag)) {
132 printf("%lu-%lu: %lu, tags %d-%d\n", start, end, idx[i], item_tag_get(tree, idx[i], fromtag), item_tag_get(tree, idx[i], totag));
133 }
134 assert(!item_tag_get(tree, idx[i], totag));
135 continue;
136 }
137 if (item_tag_get(tree, idx[i], fromtag) ^
138 item_tag_get(tree, idx[i], totag)) {
139 printf("%lu-%lu: %lu, tags %d-%d\n", start, end, idx[i], item_tag_get(tree, idx[i], fromtag), item_tag_get(tree, idx[i], totag));
140 }
141 assert(!(item_tag_get(tree, idx[i], fromtag) ^
142 item_tag_get(tree, idx[i], totag)));
143 }
144}
145
146#define ITEMS 50000
147
148void copy_tag_check(void)
149{
150 RADIX_TREE(tree, GFP_KERNEL);
151 unsigned long idx[ITEMS];
152 unsigned long start, end, count = 0, tagged, cur, tmp;
153 int i;
154
155// printf("generating radix tree indices...\n");
156 start = rand();
157 end = rand();
158 if (start > end && (rand() % 10)) {
159 cur = start;
160 start = end;
161 end = cur;
162 }
163 /* Specifically create items around the start and the end of the range
164 * with high probability to check for off by one errors */
165 cur = rand();
166 if (cur & 1) {
167 item_insert(&tree, start);
168 if (cur & 2) {
169 if (start <= end)
170 count++;
171 item_tag_set(&tree, start, 0);
172 }
173 }
174 if (cur & 4) {
175 item_insert(&tree, start-1);
176 if (cur & 8)
177 item_tag_set(&tree, start-1, 0);
178 }
179 if (cur & 16) {
180 item_insert(&tree, end);
181 if (cur & 32) {
182 if (start <= end)
183 count++;
184 item_tag_set(&tree, end, 0);
185 }
186 }
187 if (cur & 64) {
188 item_insert(&tree, end+1);
189 if (cur & 128)
190 item_tag_set(&tree, end+1, 0);
191 }
192
193 for (i = 0; i < ITEMS; i++) {
194 do {
195 idx[i] = rand();
196 } while (item_lookup(&tree, idx[i]));
197
198 item_insert(&tree, idx[i]);
199 if (rand() & 1) {
200 item_tag_set(&tree, idx[i], 0);
201 if (idx[i] >= start && idx[i] <= end)
202 count++;
203 }
204/* if (i % 1000 == 0)
205 putchar('.'); */
206 }
207
208// printf("\ncopying tags...\n");
209 cur = start;
210 tagged = radix_tree_range_tag_if_tagged(&tree, &cur, end, ITEMS, 0, 1);
211
212// printf("checking copied tags\n");
213 assert(tagged == count);
214 check_copied_tags(&tree, start, end, idx, ITEMS, 0, 1);
215
216 /* Copy tags in several rounds */
217// printf("\ncopying tags...\n");
218 cur = start;
219 do {
220 tmp = rand() % (count/10+2);
221 tagged = radix_tree_range_tag_if_tagged(&tree, &cur, end, tmp, 0, 2);
222 } while (tmp == tagged);
223
224// printf("%lu %lu %lu\n", tagged, tmp, count);
225// printf("checking copied tags\n");
226 check_copied_tags(&tree, start, end, idx, ITEMS, 0, 2);
227 assert(tagged < tmp);
228 verify_tag_consistency(&tree, 0);
229 verify_tag_consistency(&tree, 1);
230 verify_tag_consistency(&tree, 2);
231// printf("\n");
232 item_kill_tree(&tree);
233}
234
235static void single_thread_tests(void)
236{
237 int i;
238
239 tag_check();
240 printf("after tag_check: %d allocated\n", nr_allocated);
241 gang_check();
242 printf("after gang_check: %d allocated\n", nr_allocated);
243 add_and_check();
244 printf("after add_and_check: %d allocated\n", nr_allocated);
245 dynamic_height_check();
246 printf("after dynamic_height_check: %d allocated\n", nr_allocated);
247 big_gang_check();
248 printf("after big_gang_check: %d allocated\n", nr_allocated);
249 for (i = 0; i < 2000; i++) {
250 copy_tag_check();
251 printf("%d ", i);
252 fflush(stdout);
253 }
254 printf("after copy_tag_check: %d allocated\n", nr_allocated);
255}
256
257int main(void)
258{
259 rcu_register_thread();
260 radix_tree_init();
261
262 regression1_test();
263 regression2_test();
264 single_thread_tests();
265
266 sleep(1);
267 printf("after sleep(1): %d allocated\n", nr_allocated);
268 rcu_unregister_thread();
269
270 exit(0);
271}
diff --git a/tools/testing/radix-tree/rcupdate.c b/tools/testing/radix-tree/rcupdate.c
new file mode 100644
index 000000000000..31a2d14225d6
--- /dev/null
+++ b/tools/testing/radix-tree/rcupdate.c
@@ -0,0 +1,86 @@
1#include <linux/rcupdate.h>
2#include <pthread.h>
3#include <stdio.h>
4#include <assert.h>
5
6static pthread_mutex_t rculock = PTHREAD_MUTEX_INITIALIZER;
7static struct rcu_head *rcuhead_global = NULL;
8static __thread int nr_rcuhead = 0;
9static __thread struct rcu_head *rcuhead = NULL;
10static __thread struct rcu_head *rcutail = NULL;
11
12static pthread_cond_t rcu_worker_cond = PTHREAD_COND_INITIALIZER;
13
14/* switch to urcu implementation when it is merged. */
15void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *head))
16{
17 head->func = func;
18 head->next = rcuhead;
19 rcuhead = head;
20 if (!rcutail)
21 rcutail = head;
22 nr_rcuhead++;
23 if (nr_rcuhead >= 1000) {
24 int signal = 0;
25
26 pthread_mutex_lock(&rculock);
27 if (!rcuhead_global)
28 signal = 1;
29 rcutail->next = rcuhead_global;
30 rcuhead_global = head;
31 pthread_mutex_unlock(&rculock);
32
33 nr_rcuhead = 0;
34 rcuhead = NULL;
35 rcutail = NULL;
36
37 if (signal) {
38 pthread_cond_signal(&rcu_worker_cond);
39 }
40 }
41}
42
43static void *rcu_worker(void *arg)
44{
45 struct rcu_head *r;
46
47 rcupdate_thread_init();
48
49 while (1) {
50 pthread_mutex_lock(&rculock);
51 while (!rcuhead_global) {
52 pthread_cond_wait(&rcu_worker_cond, &rculock);
53 }
54 r = rcuhead_global;
55 rcuhead_global = NULL;
56
57 pthread_mutex_unlock(&rculock);
58
59 synchronize_rcu();
60
61 while (r) {
62 struct rcu_head *tmp = r->next;
63 r->func(r);
64 r = tmp;
65 }
66 }
67
68 rcupdate_thread_exit();
69
70 return NULL;
71}
72
73static pthread_t worker_thread;
74void rcupdate_init(void)
75{
76 pthread_create(&worker_thread, NULL, rcu_worker, NULL);
77}
78
79void rcupdate_thread_init(void)
80{
81 rcu_register_thread();
82}
83void rcupdate_thread_exit(void)
84{
85 rcu_unregister_thread();
86}
diff --git a/tools/testing/radix-tree/regression.h b/tools/testing/radix-tree/regression.h
new file mode 100644
index 000000000000..bb1c2ab1ae80
--- /dev/null
+++ b/tools/testing/radix-tree/regression.h
@@ -0,0 +1,7 @@
1#ifndef __REGRESSION_H__
2#define __REGRESSION_H__
3
4void regression1_test(void);
5void regression2_test(void);
6
7#endif
diff --git a/tools/testing/radix-tree/regression1.c b/tools/testing/radix-tree/regression1.c
new file mode 100644
index 000000000000..2d03a63bb79c
--- /dev/null
+++ b/tools/testing/radix-tree/regression1.c
@@ -0,0 +1,220 @@
1/*
2 * Regression1
3 * Description:
4 * Salman Qazi describes the following radix-tree bug:
5 *
 6 * In the following case, we can get a deadlock:
7 *
8 * 0. The radix tree contains two items, one has the index 0.
9 * 1. The reader (in this case find_get_pages) takes the rcu_read_lock.
10 * 2. The reader acquires slot(s) for item(s) including the index 0 item.
11 * 3. The non-zero index item is deleted, and as a consequence the other item
12 * is moved to the root of the tree. The place where it used to be is queued
13 * for deletion after the readers finish.
14 * 3b. The zero item is deleted, removing it from the direct slot, it remains in
15 * the rcu-delayed indirect node.
16 * 4. The reader looks at the index 0 slot, and finds that the page has 0 ref
17 * count
18 * 5. The reader looks at it again, hoping that the item will either be freed
19 * or the ref count will increase. This never happens, as the slot it is
20 * looking at will never be updated. Also, this slot can never be reclaimed
21 * because the reader is holding rcu_read_lock and is in an infinite loop.
22 *
 23 * The fix is to re-use the "indirect" pointer case, which already requires a
 24 * slot lookup retry, as a general "retry the lookup" bit.
25 *
26 * Running:
27 * This test should run to completion in a few seconds. The above bug would
28 * cause it to hang indefinitely.
29 *
30 * Upstream commit:
31 * Not yet
32 */
33#include <linux/kernel.h>
34#include <linux/gfp.h>
35#include <linux/slab.h>
36#include <linux/radix-tree.h>
37#include <linux/rcupdate.h>
38#include <stdlib.h>
39#include <pthread.h>
40#include <stdio.h>
41#include <assert.h>
42
43#include "regression.h"
44
45static RADIX_TREE(mt_tree, GFP_KERNEL);
46static pthread_mutex_t mt_lock;
47
48struct page {
49 pthread_mutex_t lock;
50 struct rcu_head rcu;
51 int count;
52 unsigned long index;
53};
54
55static struct page *page_alloc(void)
56{
57 struct page *p;
58 p = malloc(sizeof(struct page));
59 p->count = 1;
60 p->index = 1;
61 pthread_mutex_init(&p->lock, NULL);
62
63 return p;
64}
65
66static void page_rcu_free(struct rcu_head *rcu)
67{
68 struct page *p = container_of(rcu, struct page, rcu);
69 assert(!p->count);
70 pthread_mutex_destroy(&p->lock);
71 free(p);
72}
73
74static void page_free(struct page *p)
75{
76 call_rcu(&p->rcu, page_rcu_free);
77}
78
79static unsigned find_get_pages(unsigned long start,
80 unsigned int nr_pages, struct page **pages)
81{
82 unsigned int i;
83 unsigned int ret;
84 unsigned int nr_found;
85
86 rcu_read_lock();
87restart:
88 nr_found = radix_tree_gang_lookup_slot(&mt_tree,
89 (void ***)pages, NULL, start, nr_pages);
90 ret = 0;
91 for (i = 0; i < nr_found; i++) {
92 struct page *page;
93repeat:
94 page = radix_tree_deref_slot((void **)pages[i]);
95 if (unlikely(!page))
96 continue;
97
98 if (radix_tree_exception(page)) {
99 if (radix_tree_deref_retry(page)) {
100 /*
101 * Transient condition which can only trigger
102 * when entry at index 0 moves out of or back
103 * to root: none yet gotten, safe to restart.
104 */
105 assert((start | i) == 0);
106 goto restart;
107 }
108 /*
109 * No exceptional entries are inserted in this test.
110 */
111 assert(0);
112 }
113
114 pthread_mutex_lock(&page->lock);
115 if (!page->count) {
116 pthread_mutex_unlock(&page->lock);
117 goto repeat;
118 }
119 /* don't actually update page refcount */
120 pthread_mutex_unlock(&page->lock);
121
122 /* Has the page moved? */
123 if (unlikely(page != *((void **)pages[i]))) {
124 goto repeat;
125 }
126
127 pages[ret] = page;
128 ret++;
129 }
130 rcu_read_unlock();
131 return ret;
132}
133
134static pthread_barrier_t worker_barrier;
135
136static void *regression1_fn(void *arg)
137{
138 rcu_register_thread();
139
140 if (pthread_barrier_wait(&worker_barrier) ==
141 PTHREAD_BARRIER_SERIAL_THREAD) {
142 int j;
143
144 for (j = 0; j < 1000000; j++) {
145 struct page *p;
146
147 p = page_alloc();
148 pthread_mutex_lock(&mt_lock);
149 radix_tree_insert(&mt_tree, 0, p);
150 pthread_mutex_unlock(&mt_lock);
151
152 p = page_alloc();
153 pthread_mutex_lock(&mt_lock);
154 radix_tree_insert(&mt_tree, 1, p);
155 pthread_mutex_unlock(&mt_lock);
156
157 pthread_mutex_lock(&mt_lock);
158 p = radix_tree_delete(&mt_tree, 1);
159 pthread_mutex_lock(&p->lock);
160 p->count--;
161 pthread_mutex_unlock(&p->lock);
162 pthread_mutex_unlock(&mt_lock);
163 page_free(p);
164
165 pthread_mutex_lock(&mt_lock);
166 p = radix_tree_delete(&mt_tree, 0);
167 pthread_mutex_lock(&p->lock);
168 p->count--;
169 pthread_mutex_unlock(&p->lock);
170 pthread_mutex_unlock(&mt_lock);
171 page_free(p);
172 }
173 } else {
174 int j;
175
176 for (j = 0; j < 100000000; j++) {
177 struct page *pages[10];
178
179 find_get_pages(0, 10, pages);
180 }
181 }
182
183 rcu_unregister_thread();
184
185 return NULL;
186}
187
188static pthread_t *threads;
189void regression1_test(void)
190{
191 int nr_threads;
192 int i;
193 long arg;
194
195 /* Regression #1 */
196 printf("running regression test 1, should finish in under a minute\n");
197 nr_threads = 2;
198 pthread_barrier_init(&worker_barrier, NULL, nr_threads);
199
200 threads = malloc(nr_threads * sizeof(pthread_t *));
201
202 for (i = 0; i < nr_threads; i++) {
203 arg = i;
204 if (pthread_create(&threads[i], NULL, regression1_fn, (void *)arg)) {
205 perror("pthread_create");
206 exit(1);
207 }
208 }
209
210 for (i = 0; i < nr_threads; i++) {
211 if (pthread_join(threads[i], NULL)) {
212 perror("pthread_join");
213 exit(1);
214 }
215 }
216
217 free(threads);
218
219 printf("regression test 1, done\n");
220}
diff --git a/tools/testing/radix-tree/regression2.c b/tools/testing/radix-tree/regression2.c
new file mode 100644
index 000000000000..5d2fa28cdca3
--- /dev/null
+++ b/tools/testing/radix-tree/regression2.c
@@ -0,0 +1,126 @@
1/*
2 * Regression2
3 * Description:
4 * Toshiyuki Okajima describes the following radix-tree bug:
5 *
 6 * In the following case, we can get a hang in
 7 * radix_tree_gang_lookup_tag_slot.
8 *
9 * 0. The radix tree contains RADIX_TREE_MAP_SIZE items. And the tag of
10 * a certain item has PAGECACHE_TAG_DIRTY.
11 * 1. radix_tree_range_tag_if_tagged(, start, end, , PAGECACHE_TAG_DIRTY,
12 * PAGECACHE_TAG_TOWRITE) is called to add PAGECACHE_TAG_TOWRITE tag
13 * for the tag which has PAGECACHE_TAG_DIRTY. However, there is no tag with
 14 * PAGECACHE_TAG_DIRTY within the range from start to end. As a result,
 15 * there is no tag with PAGECACHE_TAG_TOWRITE but the root tag has
16 * PAGECACHE_TAG_TOWRITE.
 17 * 2. An item is added into the radix tree and then its level is
 18 * extended from 1 to 2. At that time, the new radix tree node inherits
 19 * the tag status of the root tag. Therefore the tag of the new radix tree
 20 * node has PAGECACHE_TAG_TOWRITE but there is no slot with the
 21 * PAGECACHE_TAG_TOWRITE tag in the child node of the new radix tree node.
22 * 3. The tag of a certain item is cleared with PAGECACHE_TAG_DIRTY.
23 * 4. All items within the index range from 0 to RADIX_TREE_MAP_SIZE - 1 are
 24 * released. (Only the item whose index is RADIX_TREE_MAP_SIZE exists in the
 25 * radix tree.) As a result, the slot of the radix tree node is NULL but
26 * the tag which corresponds to the slot has PAGECACHE_TAG_TOWRITE.
27 * 5. radix_tree_gang_lookup_tag_slot(PAGECACHE_TAG_TOWRITE) calls
 28 * __lookup_tag. __lookup_tag returns 0 and doesn't change the index,
 29 * which is an input and output parameter, because the 1st slot of the
 30 * radix tree node is NULL but the tag which corresponds to the slot has
 31 * PAGECACHE_TAG_TOWRITE.
32 * Therefore radix_tree_gang_lookup_tag_slot tries to get some items by
 33 * calling __lookup_tag, but it never gets any items and loops forever.
34 *
 35 * The fix is to make radix_tree_range_tag_if_tagged not tag the root tag
36 * if it doesn't set any tags within the specified range.
37 *
38 * Running:
39 * This test should run to completion immediately. The above bug would cause it
40 * to hang indefinitely.
41 *
42 * Upstream commit:
43 * Not yet
44 */
45#include <linux/kernel.h>
46#include <linux/gfp.h>
47#include <linux/slab.h>
48#include <linux/radix-tree.h>
49#include <stdlib.h>
50#include <stdio.h>
51
52#include "regression.h"
53
54#ifdef __KERNEL__
55#define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
56#else
57#define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */
58#endif
59
60#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT)
61#define PAGECACHE_TAG_DIRTY 0
62#define PAGECACHE_TAG_WRITEBACK 1
63#define PAGECACHE_TAG_TOWRITE 2
64
65static RADIX_TREE(mt_tree, GFP_KERNEL);
66unsigned long page_count = 0;
67
68struct page {
69 unsigned long index;
70};
71
72static struct page *page_alloc(void)
73{
74 struct page *p;
75 p = malloc(sizeof(struct page));
76 p->index = page_count++;
77
78 return p;
79}
80
81void regression2_test(void)
82{
83 int i;
84 struct page *p;
85 int max_slots = RADIX_TREE_MAP_SIZE;
86 unsigned long int start, end;
87 struct page *pages[1];
88
89 printf("running regression test 2 (should take milliseconds)\n");
90 /* 0. */
91 for (i = 0; i <= max_slots - 1; i++) {
92 p = page_alloc();
93 radix_tree_insert(&mt_tree, i, p);
94 }
95 radix_tree_tag_set(&mt_tree, max_slots - 1, PAGECACHE_TAG_DIRTY);
96
97 /* 1. */
98 start = 0;
99 end = max_slots - 2;
100 radix_tree_range_tag_if_tagged(&mt_tree, &start, end, 1,
101 PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
102
103 /* 2. */
104 p = page_alloc();
105 radix_tree_insert(&mt_tree, max_slots, p);
106
107 /* 3. */
108 radix_tree_tag_clear(&mt_tree, max_slots - 1, PAGECACHE_TAG_DIRTY);
109
110 /* 4. */
111 for (i = max_slots - 1; i >= 0; i--)
112 radix_tree_delete(&mt_tree, i);
113
114 /* 5. */
115 // NOTE: start should not be 0 because radix_tree_gang_lookup_tag_slot
116 // can return.
117 start = 1;
118 end = max_slots - 2;
119 radix_tree_gang_lookup_tag_slot(&mt_tree, (void ***)pages, start, end,
120 PAGECACHE_TAG_TOWRITE);
121
 122 /* We remove all the remaining nodes */
123 radix_tree_delete(&mt_tree, max_slots);
124
125 printf("regression test 2, done\n");
126}
diff --git a/tools/testing/radix-tree/tag_check.c b/tools/testing/radix-tree/tag_check.c
new file mode 100644
index 000000000000..83136be552a0
--- /dev/null
+++ b/tools/testing/radix-tree/tag_check.c
@@ -0,0 +1,332 @@
1#include <stdlib.h>
2#include <assert.h>
3#include <stdio.h>
4#include <string.h>
5
6#include <linux/slab.h>
7#include <linux/radix-tree.h>
8
9#include "test.h"
10
11
12static void
13__simple_checks(struct radix_tree_root *tree, unsigned long index, int tag)
14{
15 int ret;
16
17 item_check_absent(tree, index);
18 assert(item_tag_get(tree, index, tag) == 0);
19
20 item_insert(tree, index);
21 assert(item_tag_get(tree, index, tag) == 0);
22 item_tag_set(tree, index, tag);
23 ret = item_tag_get(tree, index, tag);
24 assert(ret != 0);
25 ret = item_delete(tree, index);
26 assert(ret != 0);
27 item_insert(tree, index);
28 ret = item_tag_get(tree, index, tag);
29 assert(ret == 0);
30 ret = item_delete(tree, index);
31 assert(ret != 0);
32 ret = item_delete(tree, index);
33 assert(ret == 0);
34}
35
36void simple_checks(void)
37{
38 unsigned long index;
39 RADIX_TREE(tree, GFP_KERNEL);
40
41 for (index = 0; index < 10000; index++) {
42 __simple_checks(&tree, index, 0);
43 __simple_checks(&tree, index, 1);
44 }
45 verify_tag_consistency(&tree, 0);
46 verify_tag_consistency(&tree, 1);
47 printf("before item_kill_tree: %d allocated\n", nr_allocated);
48 item_kill_tree(&tree);
49 printf("after item_kill_tree: %d allocated\n", nr_allocated);
50}
51
52/*
53 * Check that tags propagate correctly when extending a tree.
54 */
55static void extend_checks(void)
56{
57 RADIX_TREE(tree, GFP_KERNEL);
58
59 item_insert(&tree, 43);
60 assert(item_tag_get(&tree, 43, 0) == 0);
61 item_tag_set(&tree, 43, 0);
62 assert(item_tag_get(&tree, 43, 0) == 1);
63 item_insert(&tree, 1000000);
64 assert(item_tag_get(&tree, 43, 0) == 1);
65
66 item_insert(&tree, 0);
67 item_tag_set(&tree, 0, 0);
68 item_delete(&tree, 1000000);
69 assert(item_tag_get(&tree, 43, 0) != 0);
70 item_delete(&tree, 43);
71 assert(item_tag_get(&tree, 43, 0) == 0); /* crash */
72 assert(item_tag_get(&tree, 0, 0) == 1);
73
74 verify_tag_consistency(&tree, 0);
75
76 item_kill_tree(&tree);
77}
78
79/*
80 * Check that tags propagate correctly when contracting a tree.
81 */
82static void contract_checks(void)
83{
84 struct item *item;
85 int tmp;
86 RADIX_TREE(tree, GFP_KERNEL);
87
88 tmp = 1<<RADIX_TREE_MAP_SHIFT;
89 item_insert(&tree, tmp);
90 item_insert(&tree, tmp+1);
91 item_tag_set(&tree, tmp, 0);
92 item_tag_set(&tree, tmp, 1);
93 item_tag_set(&tree, tmp+1, 0);
94 item_delete(&tree, tmp+1);
95 item_tag_clear(&tree, tmp, 1);
96
97 assert(radix_tree_gang_lookup_tag(&tree, (void **)&item, 0, 1, 0) == 1);
98 assert(radix_tree_gang_lookup_tag(&tree, (void **)&item, 0, 1, 1) == 0);
99
100 assert(item_tag_get(&tree, tmp, 0) == 1);
101 assert(item_tag_get(&tree, tmp, 1) == 0);
102
103 verify_tag_consistency(&tree, 0);
104 item_kill_tree(&tree);
105}
106
107/*
108 * Stupid tag thrasher
109 *
110 * Create a large linear array corresponding to the tree. Each element in
111 * the array is coherent with each node in the tree
112 */
113
114enum {
115 NODE_ABSENT = 0,
116 NODE_PRESENT = 1,
117 NODE_TAGGED = 2,
118};
119
120#define THRASH_SIZE 1000 * 1000
121#define N 127
122#define BATCH 33
123
124static void gang_check(struct radix_tree_root *tree,
125 char *thrash_state, int tag)
126{
127 struct item *items[BATCH];
128 int nr_found;
129 unsigned long index = 0;
130 unsigned long last_index = 0;
131
132 while ((nr_found = radix_tree_gang_lookup_tag(tree, (void **)items,
133 index, BATCH, tag))) {
134 int i;
135
136 for (i = 0; i < nr_found; i++) {
137 struct item *item = items[i];
138
139 while (last_index < item->index) {
140 assert(thrash_state[last_index] != NODE_TAGGED);
141 last_index++;
142 }
143 assert(thrash_state[last_index] == NODE_TAGGED);
144 last_index++;
145 }
146 index = items[nr_found - 1]->index + 1;
147 }
148}
149
150static void do_thrash(struct radix_tree_root *tree, char *thrash_state, int tag)
151{
152 int insert_chunk;
153 int delete_chunk;
154 int tag_chunk;
155 int untag_chunk;
156 int total_tagged = 0;
157 int total_present = 0;
158
159 for (insert_chunk = 1; insert_chunk < THRASH_SIZE; insert_chunk *= N)
160 for (delete_chunk = 1; delete_chunk < THRASH_SIZE; delete_chunk *= N)
161 for (tag_chunk = 1; tag_chunk < THRASH_SIZE; tag_chunk *= N)
162 for (untag_chunk = 1; untag_chunk < THRASH_SIZE; untag_chunk *= N) {
163 int i;
164 unsigned long index;
165 int nr_inserted = 0;
166 int nr_deleted = 0;
167 int nr_tagged = 0;
168 int nr_untagged = 0;
169 int actual_total_tagged;
170 int actual_total_present;
171
172 for (i = 0; i < insert_chunk; i++) {
173 index = rand() % THRASH_SIZE;
174 if (thrash_state[index] != NODE_ABSENT)
175 continue;
176 item_check_absent(tree, index);
177 item_insert(tree, index);
178 assert(thrash_state[index] != NODE_PRESENT);
179 thrash_state[index] = NODE_PRESENT;
180 nr_inserted++;
181 total_present++;
182 }
183
184 for (i = 0; i < delete_chunk; i++) {
185 index = rand() % THRASH_SIZE;
186 if (thrash_state[index] == NODE_ABSENT)
187 continue;
188 item_check_present(tree, index);
189 if (item_tag_get(tree, index, tag)) {
190 assert(thrash_state[index] == NODE_TAGGED);
191 total_tagged--;
192 } else {
193 assert(thrash_state[index] == NODE_PRESENT);
194 }
195 item_delete(tree, index);
196 assert(thrash_state[index] != NODE_ABSENT);
197 thrash_state[index] = NODE_ABSENT;
198 nr_deleted++;
199 total_present--;
200 }
201
202 for (i = 0; i < tag_chunk; i++) {
203 index = rand() % THRASH_SIZE;
204 if (thrash_state[index] != NODE_PRESENT) {
205 if (item_lookup(tree, index))
206 assert(item_tag_get(tree, index, tag));
207 continue;
208 }
209 item_tag_set(tree, index, tag);
210 item_tag_set(tree, index, tag);
211 assert(thrash_state[index] != NODE_TAGGED);
212 thrash_state[index] = NODE_TAGGED;
213 nr_tagged++;
214 total_tagged++;
215 }
216
217 for (i = 0; i < untag_chunk; i++) {
218 index = rand() % THRASH_SIZE;
219 if (thrash_state[index] != NODE_TAGGED)
220 continue;
221 item_check_present(tree, index);
222 assert(item_tag_get(tree, index, tag));
223 item_tag_clear(tree, index, tag);
224 item_tag_clear(tree, index, tag);
225 assert(thrash_state[index] != NODE_PRESENT);
226 thrash_state[index] = NODE_PRESENT;
227 nr_untagged++;
228 total_tagged--;
229 }
230
231 actual_total_tagged = 0;
232 actual_total_present = 0;
233 for (index = 0; index < THRASH_SIZE; index++) {
234 switch (thrash_state[index]) {
235 case NODE_ABSENT:
236 item_check_absent(tree, index);
237 break;
238 case NODE_PRESENT:
239 item_check_present(tree, index);
240 assert(!item_tag_get(tree, index, tag));
241 actual_total_present++;
242 break;
243 case NODE_TAGGED:
244 item_check_present(tree, index);
245 assert(item_tag_get(tree, index, tag));
246 actual_total_present++;
247 actual_total_tagged++;
248 break;
249 }
250 }
251
252 gang_check(tree, thrash_state, tag);
253
254 printf("%d(%d) %d(%d) %d(%d) %d(%d) / "
255 "%d(%d) present, %d(%d) tagged\n",
256 insert_chunk, nr_inserted,
257 delete_chunk, nr_deleted,
258 tag_chunk, nr_tagged,
259 untag_chunk, nr_untagged,
260 total_present, actual_total_present,
261 total_tagged, actual_total_tagged);
262 }
263}
264
265static void thrash_tags(void)
266{
267 RADIX_TREE(tree, GFP_KERNEL);
268 char *thrash_state;
269
270 thrash_state = malloc(THRASH_SIZE);
271 memset(thrash_state, 0, THRASH_SIZE);
272
273 do_thrash(&tree, thrash_state, 0);
274
275 verify_tag_consistency(&tree, 0);
276 item_kill_tree(&tree);
277 free(thrash_state);
278}
279
280static void leak_check(void)
281{
282 RADIX_TREE(tree, GFP_KERNEL);
283
284 item_insert(&tree, 1000000);
285 item_delete(&tree, 1000000);
286 item_kill_tree(&tree);
287}
288
289static void __leak_check(void)
290{
291 RADIX_TREE(tree, GFP_KERNEL);
292
293 printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated);
294 item_insert(&tree, 1000000);
295 printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated);
296 item_delete(&tree, 1000000);
297 printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated);
298 item_kill_tree(&tree);
299 printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated);
300}
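/*
 * __leak_check() is the verbose twin of leak_check(): it performs the same
 * single insert/delete at index 1000000 but prints nr_allocated around each
 * step so a leaked node can be spotted by eye; tag_check() below runs both
 * and prints the allocation count afterwards.
 */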
301
302static void single_check(void)
303{
304 struct item *items[BATCH];
305 RADIX_TREE(tree, GFP_KERNEL);
306 int ret;
307
308 item_insert(&tree, 0);
309 item_tag_set(&tree, 0, 0);
310 ret = radix_tree_gang_lookup_tag(&tree, (void **)items, 0, BATCH, 0);
311 assert(ret == 1);
312 ret = radix_tree_gang_lookup_tag(&tree, (void **)items, 1, BATCH, 0);
313 assert(ret == 0);
314 verify_tag_consistency(&tree, 0);
315 verify_tag_consistency(&tree, 1);
316 item_kill_tree(&tree);
317}
318
319void tag_check(void)
320{
321 single_check();
322 extend_checks();
323 contract_checks();
324 printf("after extend_checks: %d allocated\n", nr_allocated);
325 __leak_check();
326 leak_check();
327 printf("after leak_check: %d allocated\n", nr_allocated);
328 simple_checks();
329 printf("after simple_checks: %d allocated\n", nr_allocated);
330 thrash_tags();
331 printf("after thrash_tags: %d allocated\n", nr_allocated);
332}
diff --git a/tools/testing/radix-tree/test.c b/tools/testing/radix-tree/test.c
new file mode 100644
index 000000000000..c9b0bd75b6c6
--- /dev/null
+++ b/tools/testing/radix-tree/test.c
@@ -0,0 +1,218 @@
1#include <stdlib.h>
2#include <assert.h>
3#include <stdio.h>
4#include <linux/types.h>
5#include <linux/kernel.h>
6#include <linux/bitops.h>
7
8#include "test.h"
9
10struct item *
11item_tag_set(struct radix_tree_root *root, unsigned long index, int tag)
12{
13 return radix_tree_tag_set(root, index, tag);
14}
15
16struct item *
17item_tag_clear(struct radix_tree_root *root, unsigned long index, int tag)
18{
19 return radix_tree_tag_clear(root, index, tag);
20}
21
22int item_tag_get(struct radix_tree_root *root, unsigned long index, int tag)
23{
24 return radix_tree_tag_get(root, index, tag);
25}
26
27int __item_insert(struct radix_tree_root *root, struct item *item)
28{
29 return radix_tree_insert(root, item->index, item);
30}
31
32int item_insert(struct radix_tree_root *root, unsigned long index)
33{
34 return __item_insert(root, item_create(index));
35}
36
37int item_delete(struct radix_tree_root *root, unsigned long index)
38{
39 struct item *item = radix_tree_delete(root, index);
40
41 if (item) {
42 assert(item->index == index);
43 free(item);
44 return 1;
45 }
46 return 0;
47}
48
49struct item *item_create(unsigned long index)
50{
51 struct item *ret = malloc(sizeof(*ret));
52
53 ret->index = index;
54 return ret;
55}
56
57void item_check_present(struct radix_tree_root *root, unsigned long index)
58{
59 struct item *item;
60
61 item = radix_tree_lookup(root, index);
62 assert(item != NULL);
63 assert(item->index == index);
64}
65
66struct item *item_lookup(struct radix_tree_root *root, unsigned long index)
67{
68 return radix_tree_lookup(root, index);
69}
70
71void item_check_absent(struct radix_tree_root *root, unsigned long index)
72{
73 struct item *item;
74
75 item = radix_tree_lookup(root, index);
76 assert(item == NULL);
77}
78
79/*
80 * Scan only the passed range [start, start+nr) for present items
81 */
82void item_gang_check_present(struct radix_tree_root *root,
83 unsigned long start, unsigned long nr,
84 int chunk, int hop)
85{
86 struct item *items[chunk];
87 unsigned long into;
88
89 for (into = 0; into < nr; ) {
90 int nfound;
91 int nr_to_find = chunk;
92 int i;
93
94 if (nr_to_find > (nr - into))
95 nr_to_find = nr - into;
96
97 nfound = radix_tree_gang_lookup(root, (void **)items,
98 start + into, nr_to_find);
99 assert(nfound == nr_to_find);
100 for (i = 0; i < nfound; i++)
101 assert(items[i]->index == start + into + i);
102 into += hop;
103 }
104}
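/*
 * For example (hypothetical values): after inserting items at indices 0..99,
 * item_gang_check_present(&tree, 0, 100, 7, 7) walks the whole range in gang
 * lookups of at most 7 items and asserts that every expected index is found;
 * passing a hop larger than chunk checks only every hop-th window instead.
 */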
105
106/*
107 * Scan the entire tree, expecting present items only in [start, start+nr)
108 */
109void item_full_scan(struct radix_tree_root *root, unsigned long start,
110 unsigned long nr, int chunk)
111{
112 struct item *items[chunk];
113 unsigned long into = 0;
114 unsigned long this_index = start;
115 int nfound;
116 int i;
117
118// printf("%s(0x%08lx, 0x%08lx, %d)\n", __FUNCTION__, start, nr, chunk);
119
120 while ((nfound = radix_tree_gang_lookup(root, (void **)items, into,
121 chunk))) {
122// printf("At 0x%08lx, nfound=%d\n", into, nfound);
123 for (i = 0; i < nfound; i++) {
124 assert(items[i]->index == this_index);
125 this_index++;
126 }
127// printf("Found 0x%08lx->0x%08lx\n",
128// items[0]->index, items[nfound-1]->index);
129 into = this_index;
130 }
131 if (chunk)
132 assert(this_index == start + nr);
133 nfound = radix_tree_gang_lookup(root, (void **)items,
134 this_index, chunk);
135 assert(nfound == 0);
136}
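/*
 * For example (hypothetical values): item_full_scan(&tree, 100, 50, 8)
 * asserts that the tree contains exactly the items at indices 100..149 and
 * nothing else, visiting them through gang lookups of at most 8 items.
 */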
137
138static int verify_node(struct radix_tree_node *slot, unsigned int tag,
139 unsigned int height, int tagged)
140{
141 int anyset = 0;
142 int i;
143 int j;
144
145 /* Verify consistency at this level */
146 for (i = 0; i < RADIX_TREE_TAG_LONGS; i++) {
147 if (slot->tags[tag][i]) {
148 anyset = 1;
149 break;
150 }
151 }
152 if (tagged != anyset) {
153 printf("tag: %u, height %u, tagged: %d, anyset: %d\n", tag, height, tagged, anyset);
154 for (j = 0; j < RADIX_TREE_MAX_TAGS; j++) {
155 printf("tag %d: ", j);
156 for (i = 0; i < RADIX_TREE_TAG_LONGS; i++)
157 printf("%016lx ", slot->tags[j][i]);
158 printf("\n");
159 }
160 return 1;
161 }
162 assert(tagged == anyset);
163
164 /* Go for next level */
165 if (height > 1) {
166 for (i = 0; i < RADIX_TREE_MAP_SIZE; i++)
167 if (slot->slots[i])
168 if (verify_node(slot->slots[i], tag, height - 1,
169 !!test_bit(i, slot->tags[tag]))) {
170 printf("Failure at off %d\n", i);
171 for (j = 0; j < RADIX_TREE_MAX_TAGS; j++) {
172 printf("tag %d: ", j);
173 for (i = 0; i < RADIX_TREE_TAG_LONGS; i++)
174 printf("%016lx ", slot->tags[j][i]);
175 printf("\n");
176 }
177 return 1;
178 }
179 }
180 return 0;
181}
182
183void verify_tag_consistency(struct radix_tree_root *root, unsigned int tag)
184{
185 if (!root->height)
186 return;
187 verify_node(indirect_to_ptr(root->rnode),
188 tag, root->height, !!root_tag_get(root, tag));
189}
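/*
 * Together, verify_tag_consistency() and verify_node() check the radix tree
 * tag invariant: a slot's tag bit in the parent node (or in the root, via
 * root_tag_get()) is set if and only if at least one bit of the same tag is
 * set in the child node below that slot.
 */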
190
191void item_kill_tree(struct radix_tree_root *root)
192{
193 struct item *items[32];
194 int nfound;
195
196 while ((nfound = radix_tree_gang_lookup(root, (void **)items, 0, 32))) {
197 int i;
198
199 for (i = 0; i < nfound; i++) {
200 void *ret;
201
202 ret = radix_tree_delete(root, items[i]->index);
203 assert(ret == items[i]);
204 free(items[i]);
205 }
206 }
207 assert(radix_tree_gang_lookup(root, (void **)items, 0, 32) == 0);
208 assert(root->rnode == NULL);
209}
210
211void tree_verify_min_height(struct radix_tree_root *root, int maxindex)
212{
213 assert(radix_tree_maxindex(root->height) >= maxindex);
214 if (root->height > 1)
215 assert(radix_tree_maxindex(root->height-1) < maxindex);
216 else if (root->height == 1)
217 assert(radix_tree_maxindex(root->height-1) <= maxindex);
218}
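/*
 * tree_verify_min_height() asserts that the tree's current height is
 * sufficient to cover maxindex and, for heights above one, that a height of
 * one less would not be -- i.e. the tree is no taller than it needs to be.
 */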
diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h
new file mode 100644
index 000000000000..4e1d95faaa94
--- /dev/null
+++ b/tools/testing/radix-tree/test.h
@@ -0,0 +1,40 @@
1#include <linux/gfp.h>
2#include <linux/types.h>
3#include <linux/radix-tree.h>
4#include <linux/rcupdate.h>
5
6struct item {
7 unsigned long index;
8};
9
10struct item *item_create(unsigned long index);
11int __item_insert(struct radix_tree_root *root, struct item *item);
12int item_insert(struct radix_tree_root *root, unsigned long index);
13int item_delete(struct radix_tree_root *root, unsigned long index);
14struct item *item_lookup(struct radix_tree_root *root, unsigned long index);
15
16void item_check_present(struct radix_tree_root *root, unsigned long index);
17void item_check_absent(struct radix_tree_root *root, unsigned long index);
18void item_gang_check_present(struct radix_tree_root *root,
19 unsigned long start, unsigned long nr,
20 int chunk, int hop);
21void item_full_scan(struct radix_tree_root *root, unsigned long start,
22 unsigned long nr, int chunk);
23void item_kill_tree(struct radix_tree_root *root);
24
25void tag_check(void);
26
27struct item *
28item_tag_set(struct radix_tree_root *root, unsigned long index, int tag);
29struct item *
30item_tag_clear(struct radix_tree_root *root, unsigned long index, int tag);
31int item_tag_get(struct radix_tree_root *root, unsigned long index, int tag);
32void tree_verify_min_height(struct radix_tree_root *root, int maxindex);
33void verify_tag_consistency(struct radix_tree_root *root, unsigned int tag);
34
35extern int nr_allocated;
36
37/* Normally private parts of lib/radix-tree.c */
38void *indirect_to_ptr(void *ptr);
39int root_tag_get(struct radix_tree_root *root, unsigned int tag);
40unsigned long radix_tree_maxindex(unsigned int height);
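/*
 * A minimal sketch of a new test written against this interface; my_test()
 * is hypothetical, would need to be registered with the harness's main(),
 * and assumes the test file also includes <assert.h>:
 *
 *	static void my_test(void)
 *	{
 *		RADIX_TREE(tree, GFP_KERNEL);
 *
 *		item_insert(&tree, 5);
 *		item_insert(&tree, 1000);
 *		item_tag_set(&tree, 1000, 1);
 *
 *		item_check_present(&tree, 5);
 *		item_check_absent(&tree, 6);
 *		assert(item_tag_get(&tree, 1000, 1));
 *
 *		verify_tag_consistency(&tree, 1);
 *		item_kill_tree(&tree);
 *	}
 */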