author     Wang Shilong <wangsl.fnst@cn.fujitsu.com>    2014-01-28 11:25:34 -0500
committer  Chris Mason <clm@fb.com>                     2014-01-29 10:06:27 -0500
commit     4c7a6f74ceeafd738b55d1c57349327f7ea8e895
tree       eb3d4c6e6406490e6c31ee4da3512506daddc4b8 /fs/btrfs/ulist.c
parent     f05c474688762f186b16a26366755b6ef0bfed0c
Btrfs: rework ulist with list+rb_tree
We are really suffering from the current ulist implementation; some
developers have given it a try, and these are my ideas for it:

1. use a list+rb_tree instead of an array+rb_tree
2. add cur_list to the iterator rather than to the ulist structure
3. add a seqnum to every node as it is added, used to self-check the
   iteration order when walking the nodes

I noticed Zach Brown's comments earlier: the long-term plan is to retire
the ulist implementation entirely, but for now we at least need to get
rid of the array inside ulist. (A sketch of the resulting structures
follows the diffstat below.)
Cc: Liu Bo <bo.li.liu@oracle.com>
Cc: Josef Bacik <jbacik@fb.com>
Cc: Zach Brown <zab@redhat.com>
Signed-off-by: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Chris Mason <clm@fb.com>
Diffstat (limited to 'fs/btrfs/ulist.c')
-rw-r--r--    fs/btrfs/ulist.c    105
1 file changed, 44 insertions(+), 61 deletions(-)
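For reference, the data layout this patch moves to looks roughly like the
sketch below. fs/btrfs/ulist.h is outside this diffstat, so treat this as
a reconstruction from the code in the diff: the field names (val, aux,
seqnum, list, nodes, root, nnodes, cur_list, i) all appear in the diff,
but the rb_node member name and the exact field types are assumptions.

struct ulist_node {
	u64 val;                /* value stored, unique within the ulist */
	u64 aux;                /* auxiliary value saved along with val */
#ifdef CONFIG_BTRFS_DEBUG
	int seqnum;             /* order in which this node was added */
#endif
	struct list_head list;  /* links the node into ulist->nodes */
	struct rb_node rb_node; /* keyed on val, to detect duplicates fast */
};

struct ulist {
	unsigned long nnodes;   /* number of nodes stored */
	struct list_head nodes; /* all nodes, in insertion order */
	struct rb_root root;    /* rb_tree over the nodes, for lookup */
};

struct ulist_iterator {
#ifdef CONFIG_BTRFS_DEBUG
	int i;                  /* seqnum expected from the next node */
#endif
	struct list_head *cur_list;  /* current list position */
};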
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
index 35f5de9dd498..8dd0e8dfdaf4 100644
--- a/fs/btrfs/ulist.c
+++ b/fs/btrfs/ulist.c
@@ -7,6 +7,7 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 #include "ulist.h"
+#include "ctree.h"
 
 /*
  * ulist is a generic data structure to hold a collection of unique u64
@@ -14,10 +15,6 @@
  * enumerating it.
  * It is possible to store an auxiliary value along with the key.
  *
- * The implementation is preliminary and can probably be sped up
- * significantly. A first step would be to store the values in an rbtree
- * as soon as ULIST_SIZE is exceeded.
- *
  * A sample usage for ulists is the enumeration of directed graphs without
  * visiting a node twice. The pseudo-code could look like this:
  *
@@ -50,10 +47,9 @@
  */
 void ulist_init(struct ulist *ulist)
 {
-	ulist->nnodes = 0;
-	ulist->nodes = ulist->int_nodes;
-	ulist->nodes_alloced = ULIST_SIZE;
+	INIT_LIST_HEAD(&ulist->nodes);
 	ulist->root = RB_ROOT;
+	ulist->nnodes = 0;
 }
 EXPORT_SYMBOL(ulist_init);
 
@@ -66,14 +62,14 @@ EXPORT_SYMBOL(ulist_init);
  */
 void ulist_fini(struct ulist *ulist)
 {
-	/*
-	 * The first ULIST_SIZE elements are stored inline in struct ulist.
-	 * Only if more elements are alocated they need to be freed.
-	 */
-	if (ulist->nodes_alloced > ULIST_SIZE)
-		kfree(ulist->nodes);
-	ulist->nodes_alloced = 0; /* in case ulist_fini is called twice */
+	struct ulist_node *node;
+	struct ulist_node *next;
+
+	list_for_each_entry_safe(node, next, &ulist->nodes, list) {
+		kfree(node);
+	}
 	ulist->root = RB_ROOT;
+	INIT_LIST_HEAD(&ulist->nodes);
 }
 EXPORT_SYMBOL(ulist_fini);
 
@@ -192,57 +188,29 @@ int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)
 int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
 		    u64 *old_aux, gfp_t gfp_mask)
 {
-	int ret = 0;
-	struct ulist_node *node = NULL;
+	int ret;
+	struct ulist_node *node;
+
 	node = ulist_rbtree_search(ulist, val);
 	if (node) {
 		if (old_aux)
 			*old_aux = node->aux;
 		return 0;
 	}
+	node = kmalloc(sizeof(*node), gfp_mask);
+	if (!node)
+		return -ENOMEM;
 
-	if (ulist->nnodes >= ulist->nodes_alloced) {
-		u64 new_alloced = ulist->nodes_alloced + 128;
-		struct ulist_node *new_nodes;
-		void *old = NULL;
-		int i;
-
-		/*
-		 * if nodes_alloced == ULIST_SIZE no memory has been allocated
-		 * yet, so pass NULL to krealloc
-		 */
-		if (ulist->nodes_alloced > ULIST_SIZE)
-			old = ulist->nodes;
+	node->val = val;
+	node->aux = aux;
+#ifdef CONFIG_BTRFS_DEBUG
+	node->seqnum = ulist->nnodes;
+#endif
 
-		new_nodes = krealloc(old, sizeof(*new_nodes) * new_alloced,
-				     gfp_mask);
-		if (!new_nodes)
-			return -ENOMEM;
-
-		if (!old)
-			memcpy(new_nodes, ulist->int_nodes,
-			       sizeof(ulist->int_nodes));
-
-		ulist->nodes = new_nodes;
-		ulist->nodes_alloced = new_alloced;
-
-		/*
-		 * krealloc actually uses memcpy, which does not copy rb_node
-		 * pointers, so we have to do it ourselves. Otherwise we may
-		 * be bitten by crashes.
-		 */
-		ulist->root = RB_ROOT;
-		for (i = 0; i < ulist->nnodes; i++) {
-			ret = ulist_rbtree_insert(ulist, &ulist->nodes[i]);
-			if (ret < 0)
-				return ret;
-		}
-	}
-	ulist->nodes[ulist->nnodes].val = val;
-	ulist->nodes[ulist->nnodes].aux = aux;
-	ret = ulist_rbtree_insert(ulist, &ulist->nodes[ulist->nnodes]);
-	BUG_ON(ret);
-	++ulist->nnodes;
+	ret = ulist_rbtree_insert(ulist, node);
+	ASSERT(!ret);
+	list_add_tail(&node->list, &ulist->nodes);
+	ulist->nnodes++;
 
 	return 1;
 }
@@ -266,11 +234,26 @@ EXPORT_SYMBOL(ulist_add);
  */
 struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_iterator *uiter)
 {
-	if (ulist->nnodes == 0)
+	struct ulist_node *node;
+
+	if (list_empty(&ulist->nodes))
 		return NULL;
-	if (uiter->i < 0 || uiter->i >= ulist->nnodes)
+	if (uiter->cur_list && uiter->cur_list->next == &ulist->nodes)
 		return NULL;
-
-	return &ulist->nodes[uiter->i++];
+	if (uiter->cur_list) {
+		uiter->cur_list = uiter->cur_list->next;
+	} else {
+		uiter->cur_list = ulist->nodes.next;
+#ifdef CONFIG_BTRFS_DEBUG
+		uiter->i = 0;
+#endif
+	}
+	node = list_entry(uiter->cur_list, struct ulist_node, list);
+#ifdef CONFIG_BTRFS_DEBUG
+	ASSERT(node->seqnum == uiter->i);
+	ASSERT(uiter->i >= 0 && uiter->i < ulist->nnodes);
+	uiter->i++;
+#endif
+	return node;
 }
 EXPORT_SYMBOL(ulist_next);
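Callers are unaffected by the rework: iteration still goes through
ULIST_ITER_INIT() and ulist_next(), only now ulist_next() advances
cur_list through the insertion-ordered list instead of indexing into an
array. A minimal caller sketch (the GFP flag and the val/aux values are
illustrative only, not taken from this patch):

	struct ulist *ulist;
	struct ulist_iterator uiter;
	struct ulist_node *node;

	ulist = ulist_alloc(GFP_NOFS);
	if (!ulist)
		return -ENOMEM;

	ulist_add(ulist, 256, 0, GFP_NOFS);	/* val = 256, aux = 0 */
	ulist_add(ulist, 257, 0, GFP_NOFS);
	ulist_add(ulist, 256, 0, GFP_NOFS);	/* duplicate val, not re-added */

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(ulist, &uiter))) {
		/* visits val 256, then 257 */
	}

	ulist_free(ulist);

Note the design choice: list_add_tail() preserves the iteration order the
array used to give for free, and the seqnum assigned at add time lets the
CONFIG_BTRFS_DEBUG assertions in ulist_next() catch any node visited out
of the order in which it was added.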