author		Matthew Wilcox <willy@infradead.org>	2018-05-17 00:13:27 -0400
committer	Matthew Wilcox <willy@infradead.org>	2018-10-21 10:46:35 -0400
commit		a332125fc3223e1092f765db442b7afb9fd4ecde
tree		f4b896bcbe096e3522506869f01c2fe4bd9e40a6 /tools/testing/radix-tree/regression1.c
parent		070e807c690bf9a648d4a878f3c68ea9f5f5ce14
radix tree test suite: Convert regression1 to XArray
Now that the page cache lookup is using the XArray, let's convert this
regression test from the radix tree API to the XArray so it's testing
roughly the same thing it was testing before.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
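At the core of the conversion, the restart/repeat loop around
radix_tree_gang_lookup_slot() becomes a single XArray cursor walk. As a
rough sketch of that pattern, distilled from the diff below (the names
come from this test file; the page locking and refcount checks are
omitted):

	XA_STATE(xas, &mt_tree, start);		/* cursor positioned at 'start' */
	struct page *page;

	rcu_read_lock();
	xas_for_each(&xas, page, ULONG_MAX) {	/* visit each present entry */
		if (xas_retry(&xas, page))	/* transient entry: skip and retry */
			continue;
		/* ... lock and validate the page here ... */
		if (page != xas_reload(&xas))	/* did the entry change under us? */
			xas_reset(&xas);	/* forget cached state, re-walk */
		/* ... */
	}
	rcu_read_unlock();

One consequence visible in the diff: a race no longer restarts the whole
gang lookup. The old assert((start | i) == 0) and goto restart disappear
because xas_retry() and xas_reset() recover on a per-entry basis.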
Diffstat (limited to 'tools/testing/radix-tree/regression1.c')
-rw-r--r--	tools/testing/radix-tree/regression1.c	58
1 file changed, 19 insertions, 39 deletions
diff --git a/tools/testing/radix-tree/regression1.c b/tools/testing/radix-tree/regression1.c
index 0aece092f40e..b4a4a7168986 100644
--- a/tools/testing/radix-tree/regression1.c
+++ b/tools/testing/radix-tree/regression1.c
@@ -53,12 +53,12 @@ struct page {
 	unsigned long index;
 };
 
-static struct page *page_alloc(void)
+static struct page *page_alloc(int index)
 {
 	struct page *p;
 	p = malloc(sizeof(struct page));
 	p->count = 1;
-	p->index = 1;
+	p->index = index;
 	pthread_mutex_init(&p->lock, NULL);
 
 	return p;
@@ -80,53 +80,33 @@ static void page_free(struct page *p)
 static unsigned find_get_pages(unsigned long start,
 			    unsigned int nr_pages, struct page **pages)
 {
-	unsigned int i;
-	unsigned int ret;
-	unsigned int nr_found;
+	XA_STATE(xas, &mt_tree, start);
+	struct page *page;
+	unsigned int ret = 0;
 
 	rcu_read_lock();
-restart:
-	nr_found = radix_tree_gang_lookup_slot(&mt_tree,
-				(void ***)pages, NULL, start, nr_pages);
-	ret = 0;
-	for (i = 0; i < nr_found; i++) {
-		struct page *page;
-repeat:
-		page = radix_tree_deref_slot((void **)pages[i]);
-		if (unlikely(!page))
+	xas_for_each(&xas, page, ULONG_MAX) {
+		if (xas_retry(&xas, page))
 			continue;
 
-		if (radix_tree_exception(page)) {
-			if (radix_tree_deref_retry(page)) {
-				/*
-				 * Transient condition which can only trigger
-				 * when entry at index 0 moves out of or back
-				 * to root: none yet gotten, safe to restart.
-				 */
-				assert((start | i) == 0);
-				goto restart;
-			}
-			/*
-			 * No exceptional entries are inserted in this test.
-			 */
-			assert(0);
-		}
-
 		pthread_mutex_lock(&page->lock);
-		if (!page->count) {
-			pthread_mutex_unlock(&page->lock);
-			goto repeat;
-		}
+		if (!page->count)
+			goto unlock;
+
 		/* don't actually update page refcount */
 		pthread_mutex_unlock(&page->lock);
 
 		/* Has the page moved? */
-		if (unlikely(page != *((void **)pages[i]))) {
-			goto repeat;
-		}
+		if (unlikely(page != xas_reload(&xas)))
+			goto put_page;
 
 		pages[ret] = page;
 		ret++;
+		continue;
+unlock:
+		pthread_mutex_unlock(&page->lock);
+put_page:
+		xas_reset(&xas);
 	}
 	rcu_read_unlock();
 	return ret;
@@ -145,12 +125,12 @@ static void *regression1_fn(void *arg)
 	for (j = 0; j < 1000000; j++) {
 		struct page *p;
 
-		p = page_alloc();
+		p = page_alloc(0);
 		pthread_mutex_lock(&mt_lock);
 		radix_tree_insert(&mt_tree, 0, p);
 		pthread_mutex_unlock(&mt_lock);
 
-		p = page_alloc();
+		p = page_alloc(1);
 		pthread_mutex_lock(&mt_lock);
 		radix_tree_insert(&mt_tree, 1, p);
 		pthread_mutex_unlock(&mt_lock);